Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Mon, 31 Mar 2014 20:56:43 +0000 (16:56 -0400)
committer David S. Miller <davem@davemloft.net>
Mon, 31 Mar 2014 20:56:43 +0000 (16:56 -0400)
Conflicts:
drivers/net/xen-netback/netback.c

A bug fix overlapped with a change to how the netback SKB control
block is implemented.

Signed-off-by: David S. Miller <davem@davemloft.net>
1382 files changed:
Documentation/ABI/testing/sysfs-bus-mdio
Documentation/ABI/testing/sysfs-class-net [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-net-mesh
Documentation/ABI/testing/sysfs-ptp
Documentation/DocBook/80211.tmpl
Documentation/devices.txt
Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
Documentation/devicetree/bindings/net/altera_tse.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/arc_emac.txt
Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/can/sja1000.txt
Documentation/devicetree/bindings/net/cavium-mix.txt
Documentation/devicetree/bindings/net/cavium-pip.txt
Documentation/devicetree/bindings/net/cdns-emac.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/davicom-dm9000.txt
Documentation/devicetree/bindings/net/davinci_emac.txt
Documentation/devicetree/bindings/net/ethernet.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/fsl-fec.txt
Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
Documentation/devicetree/bindings/net/lpc-eth.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
Documentation/devicetree/bindings/net/marvell-orion-net.txt
Documentation/devicetree/bindings/net/micrel-ks8851.txt
Documentation/devicetree/bindings/net/micrel.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/nfc/trf7970a.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/samsung-sxgbe.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/sh_eth.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/smsc-lan91c111.txt
Documentation/devicetree/bindings/net/smsc911x.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt [new file with mode: 0644]
Documentation/networking/altera_tse.txt [new file with mode: 0644]
Documentation/networking/bonding.txt
Documentation/networking/filter.txt
Documentation/networking/gianfar.txt
Documentation/networking/igb.txt
Documentation/networking/phy.txt
Documentation/networking/pktgen.txt
Documentation/networking/rxrpc.txt
Documentation/networking/tcp.txt
Documentation/networking/timestamping.txt
Documentation/ptp/testptp.c
MAINTAINERS
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/net/bpf_jit_32.c
arch/powerpc/net/bpf_jit_comp.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp.c
arch/um/drivers/net_kern.c
arch/x86/net/bpf_jit_comp.c
block/blk-cgroup.h
drivers/atm/ambassador.c
drivers/atm/firestream.c
drivers/atm/idt77105.c
drivers/atm/nicstar.c
drivers/atm/solos-pci.c
drivers/bcma/driver_gpio.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/btusb.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_bcsp.c
drivers/bluetooth/hci_h5.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_vhci.c
drivers/connector/connector.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/cxgb4/user.h
drivers/infiniband/hw/mlx4/alias_GUID.c
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mcg.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/sysfs.c
drivers/isdn/act2000/module.c
drivers/isdn/divert/divert_procfs.c
drivers/isdn/hisax/elsa.c
drivers/isdn/hisax/elsa_ser.c
drivers/isdn/hysdn/hysdn_proclog.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/pcbit/drv.c
drivers/isdn/sc/init.c
drivers/net/Kconfig
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_3ad.h
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_options.h
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/cc770/cc770.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/grcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/mcp251x.c
drivers/net/can/mscan/mscan.c
drivers/net/can/pch_can.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/Makefile
drivers/net/can/sja1000/ems_pci.c
drivers/net/can/sja1000/ems_pcmcia.c
drivers/net/can/sja1000/kvaser_pci.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/peak_pcmcia.c
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sja1000/sja1000_of_platform.c [deleted file]
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/slcan.c
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/usb_8dev.c
drivers/net/dummy.c
drivers/net/ethernet/3com/3c509.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/8390/lib8390.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/altera/Kconfig [new file with mode: 0644]
drivers/net/ethernet/altera/Makefile [new file with mode: 0644]
drivers/net/ethernet/altera/altera_msgdma.c [new file with mode: 0644]
drivers/net/ethernet/altera/altera_msgdma.h [new file with mode: 0644]
drivers/net/ethernet/altera/altera_msgdmahw.h [new file with mode: 0644]
drivers/net/ethernet/altera/altera_sgdma.c [new file with mode: 0644]
drivers/net/ethernet/altera/altera_sgdma.h [new file with mode: 0644]
drivers/net/ethernet/altera/altera_sgdmahw.h [new file with mode: 0644]
drivers/net/ethernet/altera/altera_tse.h [new file with mode: 0644]
drivers/net/ethernet/altera/altera_tse_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/altera/altera_tse_main.c [new file with mode: 0644]
drivers/net/ethernet/altera/altera_utils.c [new file with mode: 0644]
drivers/net/ethernet/altera/altera_utils.h [new file with mode: 0644]
drivers/net/ethernet/amd/7990.c
drivers/net/ethernet/amd/am79c961a.c
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/Makefile
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/genet/Makefile [new file with mode: 0644]
drivers/net/ethernet/broadcom/genet/bcmgenet.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/genet/bcmgenet.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/genet/bcmmii.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/emulex/benet/be_roce.c
drivers/net/ethernet/emulex/benet/be_roce.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/freescale/gianfar_ptp.c
drivers/net/ethernet/freescale/gianfar_sysfs.c [deleted file]
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/80003es2lan.h
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/82571.h
drivers/net/ethernet/intel/e1000e/Makefile
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/mac.h
drivers/net/ethernet/intel/e1000e/manage.c
drivers/net/ethernet/intel/e1000e/manage.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/e1000e/nvm.h
drivers/net/ethernet/intel/e1000e/param.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/phy.h
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/e1000e/regs.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/Makefile
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/e1000_mbx.c
drivers/net/ethernet/intel/igb/e1000_mbx.h
drivers/net/ethernet/intel/igb/e1000_nvm.c
drivers/net/ethernet/intel/igb/e1000_nvm.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/regs.h
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/Kconfig
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_clock.c
drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/samsung/Kconfig [new file with mode: 0644]
drivers/net/ethernet/samsung/Makefile [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/Kconfig [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/Makefile [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c [new file with mode: 0644]
drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h [new file with mode: 0644]
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_regs.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/silan/sc92031.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/fakehard.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ifb.c
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/nlmon.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/at803x.c
drivers/net/phy/bcm7xxx.c [new file with mode: 0644]
drivers/net/phy/broadcom.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio-sun4i.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_generic.c
drivers/net/team/team.c
drivers/net/team/team_mode_loadbalance.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lg-vl600.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/netdev.c
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/hif.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath6kl/wmi.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/ani.h
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/common-beacon.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common-beacon.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common-init.c [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common-init.h [new file with mode: 0644]
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/dfs_debug.h
drivers/net/wireless/ath/ath9k/hif_usb.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/htc_hst.c
drivers/net/wireless/ath/ath9k/htc_hst.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c [deleted file]
drivers/net/wireless/ath/ath9k/rc.h [deleted file]
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/tx99.c
drivers/net/wireless/ath/ath9k/wow.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/carl9170/rx.c
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/wcn36xx/dxe.c
drivers/net/wireless/ath/wcn36xx/dxe.h
drivers/net/wireless/ath/wcn36xx/hal.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wcn36xx/txrx.c
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/rx_reorder.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/atmel.c
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/debugfs.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/main.h
drivers/net/wireless/b43/phy_common.c
drivers/net/wireless/b43/pio.c
drivers/net/wireless/b43/sysfs.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/b43legacy/sysfs.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/Makefile
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/chip.c [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/chip.h [new file with mode: 0644]
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/fwil.c
drivers/net/wireless/brcm80211/brcmfmac/fwil.h
drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c [deleted file]
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h [deleted file]
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/brcm80211/include/brcmu_wifi.h
drivers/net/wireless/cw1200/fwio.c
drivers/net/wireless/hostap/hostap_cs.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/3945-rs.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlegacy/commands.h
drivers/net/wireless/iwlegacy/common.c
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/devices.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/main.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rs.h
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-debug.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-drv.h
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-io.c
drivers/net/wireless/iwlwifi/iwl-io.h
drivers/net/wireless/iwlwifi/iwl-modparams.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-phy-db.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/bt-coex.c [deleted file]
drivers/net/wireless/iwlwifi/mvm/coex.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h [deleted file]
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/led.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/offloading.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/power_legacy.c [deleted file]
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mac80211_hwsim.h
drivers/net/wireless/mwifiex/11ac.c
drivers/net/wireless/mwifiex/11ac.h
drivers/net/wireless/mwifiex/11h.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n.h
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/11n_rxreorder.h
drivers/net/wireless/mwifiex/Makefile
drivers/net/wireless/mwifiex/README
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cfp.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/tdls.c [new file with mode: 0644]
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/uap_txrx.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/util.h
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/cfg.c
drivers/net/wireless/orinoco/hw.c
drivers/net/wireless/orinoco/scan.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rsi/Kconfig [new file with mode: 0644]
drivers/net/wireless/rsi/Makefile [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_core.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_debugfs.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_mac80211.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_main.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_mgmt.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_pkt.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_sdio.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_usb.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_91x_usb_ops.c [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_boot_params.h [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_common.h [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_debugfs.h [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_main.h [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_mgmt.h [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_sdio.h [new file with mode: 0644]
drivers/net/wireless/rsi/rsi_usb.h [new file with mode: 0644]
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00debug.c
drivers/net/wireless/rtl818x/Kconfig
drivers/net/wireless/rtl818x/rtl8180/Makefile
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c [new file with mode: 0644]
drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h [new file with mode: 0644]
drivers/net/wireless/rtl818x/rtl8187/dev.c
drivers/net/wireless/rtl818x/rtl818x.h
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/Makefile
drivers/net/wireless/rtlwifi/btcoexist/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/core.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/pci.h
drivers/net/wireless/rtlwifi/ps.c
drivers/net/wireless/rtlwifi/ps.h
drivers/net/wireless/rtlwifi/rc.c
drivers/net/wireless/rtlwifi/rtl8188ee/Makefile
drivers/net/wireless/rtlwifi/rtl8188ee/dm.c
drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
drivers/net/wireless/rtlwifi/rtl8188ee/reg.h
drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.h
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/reg.h
drivers/net/wireless/rtlwifi/rtl8192de/rf.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.h
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/phy.c
drivers/net/wireless/rtlwifi/rtl8192se/reg.h
drivers/net/wireless/rtlwifi/rtl8192se/rf.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8192se/trx.h
drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
drivers/net/wireless/rtlwifi/rtl8723ae/def.h
drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
drivers/net/wireless/rtlwifi/rtl8723be/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/def.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/dm.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/dm.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/fw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/fw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/hw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/hw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/led.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/led.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/phy.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/phy.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/reg.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/rf.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/rf.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/sw.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/sw.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/table.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/table.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/trx.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723be/trx.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/Makefile [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/main.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c [new file with mode: 0644]
drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h [new file with mode: 0644]
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wilink_platform_data.c
drivers/net/wireless/ti/wl1251/cmd.c
drivers/net/wireless/ti/wl1251/sdio.c
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wl1251/wl1251.h
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl12xx/wl12xx.h
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/tx.c
drivers/net/wireless/ti/wl18xx/wl18xx.h
drivers/net/wireless/ti/wlcore/acx.c
drivers/net/wireless/ti/wlcore/acx.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/event.c
drivers/net/wireless/ti/wlcore/hw_ops.h
drivers/net/wireless/ti/wlcore/init.c
drivers/net/wireless/ti/wlcore/io.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/rx.h
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/sysfs.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/wireless/wl3501_cs.c
drivers/net/wireless/zd1201.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/pn533.c
drivers/nfc/pn544/i2c.c
drivers/nfc/pn544/pn544.c
drivers/nfc/pn544/pn544.h
drivers/nfc/port100.c
drivers/nfc/trf7970a.c [new file with mode: 0644]
drivers/of/of_mdio.c
drivers/of/of_net.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_ixp46x.c
drivers/ptp/ptp_pch.c
drivers/ptp/ptp_private.h
drivers/ptp/ptp_sysfs.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/staging/octeon/ethernet-tx.c
drivers/staging/rtl8821ae/rc.c
drivers/staging/rtl8821ae/rtl8821ae/trx.c
drivers/staging/wlags49_h2/wl_netdev.c
drivers/staging/wlan-ng/cfg80211.c
include/linux/brcmphy.h
include/linux/can/dev.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/ieee80211.h
include/linux/if_vlan.h
include/linux/isdn_ppp.h
include/linux/miscdevice.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx4/driver.h
include/linux/mlx4/qp.h
include/linux/mmc/sdio_ids.h
include/linux/mpls.h [new file with mode: 0644]
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/nfnetlink.h
include/linux/netpoll.h
include/linux/nl802154.h
include/linux/phy.h
include/linux/ptp_classify.h
include/linux/ptp_clock_kernel.h
include/linux/seccomp.h
include/linux/skbuff.h
include/linux/sxgbe_platform.h [new file with mode: 0644]
include/linux/tcp.h
include/linux/tty.h
include/linux/u64_stats_sync.h
include/linux/usb/cdc_ncm.h
include/linux/wl12xx.h
include/net/6lowpan.h [new file with mode: 0644]
include/net/act_api.h
include/net/addrconf.h
include/net/af_ieee802154.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/bluetooth/rfcomm.h
include/net/cfg80211.h
include/net/checksum.h
include/net/dst.h
include/net/flow.h
include/net/flowcache.h [new file with mode: 0644]
include/net/ieee80211_radiotap.h
include/net/ieee802154.h
include/net/ieee802154_netdev.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/mac80211.h
include/net/mac802154.h
include/net/net_namespace.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_labels.h
include/net/netfilter/nf_tables.h
include/net/netns/conntrack.h
include/net/netns/ieee802154_6lowpan.h [new file with mode: 0644]
include/net/netns/xfrm.h
include/net/nfc/digital.h
include/net/nfc/nfc.h
include/net/nl802154.h
include/net/regulatory.h
include/net/route.h
include/net/rtnetlink.h
include/net/sock.h
include/net/tc_act/tc_csum.h
include/net/tc_act/tc_defact.h
include/net/tc_act/tc_gact.h
include/net/tc_act/tc_ipt.h
include/net/tc_act/tc_mirred.h
include/net/tc_act/tc_nat.h
include/net/tc_act/tc_pedit.h
include/net/tc_act/tc_skbedit.h
include/net/tcp.h
include/net/wpan-phy.h
include/net/xfrm.h
include/trace/events/net.h
include/uapi/linux/can.h
include/uapi/linux/can/netlink.h
include/uapi/linux/ethtool.h
include/uapi/linux/if.h
include/uapi/linux/if_ether.h
include/uapi/linux/if_link.h
include/uapi/linux/in.h
include/uapi/linux/in6.h
include/uapi/linux/mpls.h [new file with mode: 0644]
include/uapi/linux/netdevice.h
include/uapi/linux/netfilter/ipset/ip_set.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/nfc.h
include/uapi/linux/nl80211.h
include/uapi/linux/pfkeyv2.h
include/uapi/linux/ptp_clock.h
include/uapi/linux/snmp.h
include/uapi/linux/tcp.h
include/uapi/linux/tcp_metrics.h
include/uapi/linux/usb/cdc.h
include/uapi/linux/xfrm.h
kernel/seccomp.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/appletalk/aarp.c
net/appletalk/ddp.c
net/atm/mpc.c
net/batman-adv/Kconfig
net/batman-adv/Makefile
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/gateway_client.c
net/batman-adv/icmp_socket.c
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c [new file with mode: 0644]
net/batman-adv/multicast.h [new file with mode: 0644]
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/6lowpan.h
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/a2mp.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/rfcomm/tty.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bridge/br_device.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/bridge/netfilter/ebt_among.c
net/bridge/netfilter/ebt_dnat.c
net/bridge/netfilter/ebt_redirect.c
net/bridge/netfilter/ebt_snat.c
net/ceph/osd_client.c
net/core/dev.c
net/core/filter.c
net/core/flow.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock_diag.c
net/core/timestamping.c
net/hsr/hsr_device.c
net/hsr/hsr_framereg.c
net/hsr/hsr_main.c
net/ieee802154/6lowpan.c [deleted file]
net/ieee802154/6lowpan.h [deleted file]
net/ieee802154/6lowpan_iphc.c
net/ieee802154/6lowpan_rtnl.c [new file with mode: 0644]
net/ieee802154/Kconfig
net/ieee802154/Makefile
net/ieee802154/af802154.h
net/ieee802154/af_ieee802154.c
net/ieee802154/dgram.c
net/ieee802154/header_ops.c [new file with mode: 0644]
net/ieee802154/ieee802154.h
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl_policy.c
net/ieee802154/raw.c
net/ieee802154/reassembly.c [new file with mode: 0644]
net/ieee802154/reassembly.h [new file with mode: 0644]
net/ieee802154/wpan-class.c
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/ip_forward.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipcomp.c
net/ipv4/netfilter.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_lp.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_westwood.c
net/ipv4/tcp_yeah.c
net/ipv4/udp.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_protocol.c [new file with mode: 0644]
net/ipv6/Makefile
net/ipv6/addrlabel.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/icmp.c
net/ipv6/ip6_checksum.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ipcomp6.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/output_core.c
net/ipv6/ping.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_protocol.c [new file with mode: 0644]
net/ipx/af_ipx.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ppp.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/cfg.h
net/mac80211/chan.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/pm.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel.h
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_pid_algo.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac80211/wpa.c
net/mac802154/Makefile
net/mac802154/ieee802154_dev.c
net/mac802154/mac802154.h
net/mac802154/mac_cmd.c
net/mac802154/mib.c
net/mac802154/rx.c
net/mac802154/wpan.c
net/netfilter/ipset/Kconfig
net/netfilter/ipset/Makefile
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipset/pfxlen.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_compat.c
net/netfilter/nft_ct.c
net/netfilter/nft_hash.c
net/netfilter/nft_immediate.c
net/netfilter/nft_log.c
net/netfilter/nft_lookup.c
net/netfilter/nft_nat.c
net/netfilter/xt_AUDIT.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_ipcomp.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/nfc/core.c
net/nfc/digital.h
net/nfc/digital_core.c
net/nfc/digital_technology.c
net/nfc/hci/llc.c
net/nfc/llcp_core.c
net/nfc/nci/core.c
net/nfc/nci/spi.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/vport.c
net/packet/af_packet.c
net/rds/iw.c
net/rfkill/core.c
net/rxrpc/Makefile
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-ack.c
net/rxrpc/ar-call.c
net/rxrpc/ar-connection.c
net/rxrpc/ar-error.c
net/rxrpc/ar-input.c
net/rxrpc/ar-internal.h
net/rxrpc/ar-output.c
net/rxrpc/ar-recvmsg.c
net/rxrpc/ar-skbuff.c
net/rxrpc/ar-transport.c
net/rxrpc/sysctl.c [new file with mode: 0644]
net/sched/act_api.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/cls_fw.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_netem.c
net/sched/sch_tbf.c
net/sctp/associola.c
net/sctp/transport.c
net/socket.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/config.c
net/tipc/config.h
net/tipc/core.c
net/tipc/core.h
net/tipc/discover.c
net/tipc/discover.h
net/tipc/link.c
net/tipc/link.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/net.c
net/tipc/node.c
net/tipc/node.h
net/tipc/port.c
net/tipc/port.h
net/tipc/ref.c
net/tipc/ref.h
net/tipc/socket.c
net/tipc/socket.h [new file with mode: 0644]
net/wireless/ap.c
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/genregdb.awk
net/wireless/ibss.c
net/wireless/mesh.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/rdev-ops.h
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-sme.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
security/selinux/include/xfrm.h
tools/net/bpf_dbg.c

diff --git a/Documentation/ABI/testing/sysfs-bus-mdio b/Documentation/ABI/testing/sysfs-bus-mdio
index 6349749ebc2986ef0433263ee87487479f173549..491baaf4285f64d7a4e822359a43ad8c5f9d6c54 100644
@@ -7,3 +7,23 @@ Description:
                by the device during bus enumeration, encoded in hexadecimal.
                This ID is used to match the device with the appropriate
                driver.
+
+What:          /sys/bus/mdio_bus/devices/.../phy_interface
+Date:          February 2014
+KernelVersion: 3.15
+Contact:       netdev@vger.kernel.org
+Description:
+               This attribute contains the PHY interface as configured by the
+               Ethernet driver during bus enumeration, encoded as a string.
+               This interface mode is used to configure the Ethernet MAC with the
+               appropriate mode for its data lines to the PHY hardware.
+
+What:          /sys/bus/mdio_bus/devices/.../phy_has_fixups
+Date:          February 2014
+KernelVersion: 3.15
+Contact:       netdev@vger.kernel.org
+Description:
+               This attribute indicates whether a given PHY device has had any
+               "fixup" workaround run on it, encoded as a boolean. This
+               information is provided to help troubleshoot PHY
+               configurations.
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
new file mode 100644
index 0000000..d922060
--- /dev/null
@@ -0,0 +1,199 @@
+What:          /sys/class/net/<iface>/addr_assign_type
+Date:          July 2010
+KernelVersion: 3.2
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the address assignment type. Possible values are:
+               0: permanent address
+               1: randomly generated
+               2: stolen from another device
+               3: set using dev_set_mac_address
+
+What:          /sys/class/net/<iface>/addr_len
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the hardware address size in bytes.
+               Values vary based on the lower-level protocol used by the
+               interface (Ethernet, FDDI, ATM, IEEE 802.15.4...). See
+               include/uapi/linux/if_*.h for actual values.
+
+What:          /sys/class/net/<iface>/address
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Hardware address currently assigned to this interface.
+               Format is a string, e.g. 00:11:22:33:44:55 for an Ethernet MAC
+               address.
+
+What:          /sys/class/net/<iface>/broadcast
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Hardware broadcast address for this interface. Format is a
+               string, e.g. ff:ff:ff:ff:ff:ff for an Ethernet broadcast MAC
+               address.
+
+What:          /sys/class/net/<iface>/carrier
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the current physical link state of the interface.
+               Possible values are:
+               0: physical link is down
+               1: physical link is up
+
+               Note: some special devices, e.g. bonding and team drivers, allow
+               this attribute to be written in order to force a link state, so
+               that they operate correctly and can designate a fallback interface.
+
+What:          /sys/class/net/<iface>/dev_id
+Date:          April 2008
+KernelVersion: 2.6.26
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the device's unique identifier. Format is a hexadecimal
+               value. This is used to disambiguate interfaces which might be
+               stacked (e.g. VLAN interfaces) but still have the same MAC
+               address as their parent device.
+
+What:          /sys/class/net/<iface>/dormant
+Date:          March 2006
+KernelVersion: 2.6.17
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates whether the interface is in a dormant state. Possible
+               values are:
+               0: interface is not dormant
+               1: interface is dormant
+
+               This attribute can be used by supplicant software to signal that
+               the device is not usable unless some supplicant-based
+               authentication is performed (e.g: 802.1x). The 'link_mode'
+               attribute will also reflect the dormant state.
+
+What:          /sys/class/net/<iface>/duplex
+Date:          October 2009
+KernelVersion: 2.6.33
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface latest or current duplex value. Possible
+               values are:
+               half: half duplex
+               full: full duplex
+
+               Note: This attribute is only valid for interfaces that implement
+               the ethtool get_settings method (mostly Ethernet).
+
+What:          /sys/class/net/<iface>/flags
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface flags as a bitmask in hexadecimal. See
+               include/uapi/linux/if.h for a list of all possible values and
+               the flags semantics.
+
+What:          /sys/class/net/<iface>/ifalias
+Date:          September 2008
+KernelVersion: 2.6.28
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates/stores an interface alias name as a string. This can
+               be used for system management purposes.
+
+What:          /sys/class/net/<iface>/ifindex
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the system-wide interface unique index identifier as a
+               decimal number. This attribute is used for mapping an interface
+               identifier to an interface name. It is used throughout the
+               networking stack for specifying interface-specific
+               requests/events.
+
+What:          /sys/class/net/<iface>/iflink
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the system-wide interface unique index identifier
+               the interface is linked to. Format is decimal. This attribute
+               is used to resolve interface chaining, linking and stacking.
+               Physical interfaces have the same 'ifindex' and 'iflink' values.
+
+What:          /sys/class/net/<iface>/link_mode
+Date:          March 2006
+KernelVersion: 2.6.17
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface link mode, as a decimal number. This
+               attribute should be used in conjunction with 'dormant' attribute
+               to determine the interface usability. Possible values:
+               0: default link mode
+               1: dormant link mode
+
+What:          /sys/class/net/<iface>/mtu
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface currently configured MTU value, in
+               bytes, and in decimal format. The specific value depends on the
+               lower-level interface protocol used. Ethernet devices will show
+               a 'mtu' attribute value of 1500 unless changed.
+
+What:          /sys/class/net/<iface>/netdev_group
+Date:          January 2011
+KernelVersion: 2.6.39
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface network device group, as a decimal
+               integer. The default value is 0, which corresponds to the
+               initial network device group. The group can be changed to
+               affect routing decisions (see net/ipv4/fib_rules.c and
+               net/ipv6/fib6_rules.c).
+
+What:          /sys/class/net/<iface>/operstate
+Date:          March 2006
+KernelVersion: 2.6.17
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface RFC2863 operational state as a string.
+               Possible values are:
+               "unknown", "notpresent", "down", "lowerlayerdown", "testing",
+               "dormant", "up".
+
+What:          /sys/class/net/<iface>/speed
+Date:          October 2009
+KernelVersion: 2.6.33
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface latest or current speed value. Value is
+               an integer representing the link speed in Mbits/sec.
+
+               Note: this attribute is only valid for interfaces that implement
+               the ethtool get_settings method (mostly Ethernet).
+
+What:          /sys/class/net/<iface>/tx_queue_len
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface transmit queue length in number of
+               packets, as an integer value. The value depends on the type of
+               interface; Ethernet network adapters have a default value of
+               1000 unless configured otherwise.
+
+What:          /sys/class/net/<iface>/type
+Date:          April 2005
+KernelVersion: 2.6.12
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the interface protocol type as a decimal value. See
+               include/uapi/linux/if_arp.h for all possible values.
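+
+               Usage note (illustrative only, not part of the ABI): the
+               attributes above are plain sysfs files and can be read with
+               ordinary file I/O. A minimal C sketch, assuming an interface
+               named "eth0" (the name is a placeholder), that prints the
+               'operstate' and 'mtu' attributes:
+
+               #include <stdio.h>
+
+               int main(void)
+               {
+                       char line[64];
+                       const char *attrs[] = {
+                               "/sys/class/net/eth0/operstate",
+                               "/sys/class/net/eth0/mtu",
+                       };
+                       unsigned int i;
+
+                       for (i = 0; i < 2; i++) {
+                               FILE *f = fopen(attrs[i], "r");
+
+                               if (!f)
+                                       continue; /* interface or attribute absent */
+                               if (fgets(line, sizeof(line), f))
+                                       printf("%s: %s", attrs[i], line);
+                               fclose(f);
+                       }
+                       return 0;
+               }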
index 4793d3dff6af04e7ac4a736ccfdbd794d5a4d7f0..c464062966317b71838454a5de532095f4027851 100644 (file)
@@ -76,6 +76,15 @@ Description:
                is used to classify clients as "isolated" by the
                Extended Isolation feature.
 
+What:           /sys/class/net/<mesh_iface>/mesh/multicast_mode
+Date:           Feb 2014
+Contact:        Linus Lüssing <linus.luessing@web.de>
+Description:
+                Indicates whether multicast optimizations are enabled
+                or disabled. If set to zero, all nodes in the mesh
+                will use classic flooding for any multicast packet,
+                with no optimizations.
+
 What:           /sys/class/net/<mesh_iface>/mesh/network_coding
 Date:           Nov 2012
 Contact:        Martin Hundeboll <martin@hundeboll.net>
index 05aeedf177946a9f9eb99a9f6b4b26a488f2f74a..44806a678f12a21a3ff149bc1b7838a4c14da602 100644 (file)
@@ -54,6 +54,26 @@ Description:
                This file contains the number of programmable periodic
                output channels offered by the PTP hardware clock.
 
+What:          /sys/class/ptp/ptpN/n_pins
+Date:          March 2014
+Contact:       Richard Cochran <richardcochran@gmail.com>
+Description:
+               This file contains the number of programmable pins
+               offered by the PTP hardware clock.
+
+What:          /sys/class/ptp/ptpN/pins
+Date:          March 2014
+Contact:       Richard Cochran <richardcochran@gmail.com>
+Description:
+               This directory contains one file for each programmable
+               pin offered by the PTP hardware clock. The file name
+               is the hardware dependent pin name. Reading from this
+               file produces two numbers, the assigned function (see
+               the PTP_PF_ enumeration values in linux/ptp_clock.h)
+               and the channel number. The function and channel
+               assignment may be changed by writing two numbers into
+               the file.
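+
+               For example, assuming a clock "ptp0" that exposes a pin the
+               hardware names "SDP0" (both names are placeholders), the
+               following minimal C sketch assigns that pin the periodic
+               output function (PTP_PF_PEROUT, value 2) on channel 1:
+
+               #include <stdio.h>
+
+               int main(void)
+               {
+                       FILE *f = fopen("/sys/class/ptp/ptp0/pins/SDP0", "w");
+
+                       if (!f)
+                               return 1;
+                       /* "<function> <channel>": 2 = PTP_PF_PEROUT, channel 1 */
+                       fprintf(f, "2 1\n");
+                       return fclose(f) ? 1 : 0;
+               }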
+
 What:          /sys/class/ptp/ptpN/pps_available
 Date:          September 2010
 Contact:       Richard Cochran <richardcochran@gmail.com>
index 46ad6faee9ab73dfae2fa3e2b00bb6fb7f1c63a1..044b76436e8373ae601f9c60bd274754f3df6033 100644 (file)
@@ -98,6 +98,8 @@
 !Finclude/net/cfg80211.h priv_to_wiphy
 !Finclude/net/cfg80211.h set_wiphy_dev
 !Finclude/net/cfg80211.h wdev_priv
+!Finclude/net/cfg80211.h ieee80211_iface_limit
+!Finclude/net/cfg80211.h ieee80211_iface_combination
       </chapter>
       <chapter>
       <title>Actions and configuration</title>
index 10378cc48374cf8ffde068d4ce329ce5b5cbb205..04356f5bc3afbb8c91332aa1077a344526862af0 100644 (file)
@@ -353,6 +353,7 @@ Your cooperation is appreciated.
                133 = /dev/exttrp       External device trap
                134 = /dev/apm_bios     Advanced Power Management BIOS
                135 = /dev/rtc          Real Time Clock
+               137 = /dev/vhci         Bluetooth virtual HCI driver
                139 = /dev/openprom     SPARC OpenBoot PROM
                140 = /dev/relay8       Berkshire Products Octal relay card
                141 = /dev/relay16      Berkshire Products ISO-16 relay card
index 863d5b8155c70db91e1eb2a121467fa40284c701..10640b17c8668b5bc53a1e50813e6e6155b9458c 100644 (file)
@@ -5,13 +5,9 @@ Required properties:
               "allwinner,sun4i-emac")
 - reg: address and length of the register set for the device.
 - interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
-  property, a single integer).
+- phy: see ethernet.txt file in the same directory.
 - clocks: A phandle to the reference clock for this device
 
-Optional properties:
-- (local-)mac-address: mac address to be used by this driver
-
 Example:
 
 emac: ethernet@01c0b000 {
diff --git a/Documentation/devicetree/bindings/net/altera_tse.txt b/Documentation/devicetree/bindings/net/altera_tse.txt
new file mode 100644 (file)
index 0000000..a706297
--- /dev/null
@@ -0,0 +1,114 @@
+* Altera Triple-Speed Ethernet MAC driver (TSE)
+
+Required properties:
+- compatible: Should be "altr,tse-1.0" for legacy SGDMA based TSE, and should
+               be "altr,tse-msgdma-1.0" for the preferred MSGDMA based TSE.
+               The uppercase "ALTR" vendor prefix is supported for legacy
+               device trees, but is deprecated. "altr" should be used for all
+               new designs.
+- reg: Address and length of the register set for the device. It contains
+  the information of registers in the same order as described by reg-names
+- reg-names: Should contain the reg names
+  "control_port": MAC configuration space region
+  "tx_csr":       xDMA Tx dispatcher control and status space region
+  "tx_desc":      MSGDMA Tx dispatcher descriptor space region
+  "rx_csr" :      xDMA Rx dispatcher control and status space region
+  "rx_desc":      MSGDMA Rx dispatcher descriptor space region
+  "rx_resp":      MSGDMA Rx dispatcher response space region
+  "s1":                  SGDMA descriptor memory
+- interrupts: Should contain the TSE interrupts and its mode.
+- interrupt-names: Should contain the interrupt names
+  "rx_irq":       xDMA Rx dispatcher interrupt
+  "tx_irq":       xDMA Tx dispatcher interrupt
+- rx-fifo-depth: MAC receive FIFO buffer depth in bytes
+- tx-fifo-depth: MAC transmit FIFO buffer depth in bytes
+- phy-mode: See ethernet.txt in the same directory.
+- phy-handle: See ethernet.txt in the same directory.
+- phy-addr: See ethernet.txt in the same directory. A configuration should
+               include phy-handle or phy-addr.
+- altr,has-supplementary-unicast:
+               If present, TSE supports additional unicast addresses.
+               Otherwise additional unicast addresses are not supported.
+- altr,has-hash-multicast-filter:
+               If present, TSE supports a hash based multicast filter.
+               Otherwise, hash-based multicast filtering is not supported.
+
+- mdio device tree subnode: When the TSE has a phy connected to its local
+               mdio, there must be a device tree subnode with the following
+               required properties:
+
+       - compatible: Must be "altr,tse-mdio".
+       - #address-cells: Must be <1>.
+       - #size-cells: Must be <0>.
+
+       For each phy on the mdio bus, there must be a node with the following
+       fields:
+
+       - reg: phy id used to communicate with the phy.
+       - device_type: Must be "ethernet-phy".
+
+Optional properties:
+- local-mac-address: See ethernet.txt in the same directory.
+- max-frame-size: See ethernet.txt in the same directory.
+
+Example:
+
+       tse_sub_0_eth_tse_0: ethernet@0x1,00000000 {
+               compatible = "altr,tse-msgdma-1.0";
+               reg =   <0x00000001 0x00000000 0x00000400>,
+                       <0x00000001 0x00000460 0x00000020>,
+                       <0x00000001 0x00000480 0x00000020>,
+                       <0x00000001 0x000004A0 0x00000008>,
+                       <0x00000001 0x00000400 0x00000020>,
+                       <0x00000001 0x00000420 0x00000020>;
+               reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
+               interrupt-parent = <&hps_0_arm_gic_0>;
+               interrupts = <0 41 4>, <0 40 4>;
+               interrupt-names = "rx_irq", "tx_irq";
+               rx-fifo-depth = <2048>;
+               tx-fifo-depth = <2048>;
+               address-bits = <48>;
+               max-frame-size = <1500>;
+               local-mac-address = [ 00 00 00 00 00 00 ];
+               phy-mode = "gmii";
+               altr,has-supplementary-unicast;
+               altr,has-hash-multicast-filter;
+               phy-handle = <&phy0>;
+               mdio {
+                       compatible = "altr,tse-mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       phy0: ethernet-phy@0 {
+                               reg = <0x0>;
+                               device_type = "ethernet-phy";
+                       };
+
+                       phy1: ethernet-phy@1 {
+                               reg = <0x1>;
+                               device_type = "ethernet-phy";
+                       };
+
+               };
+       };
+
+       tse_sub_1_eth_tse_0: ethernet@0x1,00001000 {
+               compatible = "altr,tse-msgdma-1.0";
+               reg =   <0x00000001 0x00001000 0x00000400>,
+                       <0x00000001 0x00001460 0x00000020>,
+                       <0x00000001 0x00001480 0x00000020>,
+                       <0x00000001 0x000014A0 0x00000008>,
+                       <0x00000001 0x00001400 0x00000020>,
+                       <0x00000001 0x00001420 0x00000020>;
+               reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
+               interrupt-parent = <&hps_0_arm_gic_0>;
+               interrupts = <0 43 4>, <0 42 4>;
+               interrupt-names = "rx_irq", "tx_irq";
+               rx-fifo-depth = <2048>;
+               tx-fifo-depth = <2048>;
+               address-bits = <48>;
+               max-frame-size = <1500>;
+               local-mac-address = [ 00 00 00 00 00 00 ];
+               phy-mode = "gmii";
+               altr,has-supplementary-unicast;
+               altr,has-hash-multicast-filter;
+               phy-handle = <&phy1>;
+       };
index bcbc3f009158c0ad1b5c9f285fce5c207057d664..7fbb027218a126002312a829c6cd273ac715b030 100644 (file)
@@ -6,19 +6,12 @@ Required properties:
 - interrupts: Should contain the EMAC interrupts
 - clock-frequency: CPU frequency. It is needed to calculate and set polling
 period of EMAC.
-- max-speed: Maximum supported data-rate in Mbit/s. In some HW configurations
-bandwidth of external memory controller might be a limiting factor. That's why
-it's required to specify which data-rate is supported on current SoC or FPGA.
-For example if only 10 Mbit/s is supported (10BASE-T) set "10". If 100 Mbit/s is
-supported (100BASE-TX) set "100".
-- phy: PHY device attached to the EMAC via MDIO bus
+- max-speed: see ethernet.txt file in the same directory.
+- phy: see ethernet.txt file in the same directory.
 
 Child nodes of the driver are the individual PHY devices connected to the
 MDIO bus. They must have a "reg" property giving the PHY address on the MDIO bus.
 
-Optional properties:
-- mac-address: 6 bytes, mac address
-
 Examples:
 
        ethernet@c0fc2000 {
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
new file mode 100644 (file)
index 0000000..f2febb9
--- /dev/null
@@ -0,0 +1,121 @@
+* Broadcom BCM7xxx Ethernet Controller (GENET)
+
+Required properties:
+- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
+  "brcm,genet-v3", "brcm,genet-v4".
+- reg: address and length of the register set for the device
+- interrupts: must be two cells, the first cell is the general purpose
+  interrupt line, while the second cell is the interrupt for the ring
+  RX and TX queues operating in ring mode
+- phy-mode: see ethernet.txt file in the same directory
+- #address-cells: should be 1
+- #size-cells: should be 1
+
+Optional properties:
+- clocks: When provided, must be two phandles to the functional clocks nodes
+  of the GENET block. The first phandle is the main GENET clock used during
+  normal operation, while the second phandle is the Wake-on-LAN clock.
+- clock-names: When provided, names of the functional clock phandles, first
+  name should be "enet" and second should be "enet-wol".
+
+- phy-handle: See ethernet.txt file in the same directory; used to describe
+  configurations where a PHY (internal or external) is used.
+
+- fixed-link: When the GENET interface is connected to a MoCA hardware block or
+  when operating in a RGMII to RGMII type of connection, or when the MDIO bus is
+  voluntarily disabled, this property should be used to describe the "fixed link".
+  See Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for information on
+  the property specifics
+
+Required child nodes:
+
+- mdio bus node: this node should always be present regardless of the PHY
+  configuration of the GENET instance
+
+MDIO bus node required properties:
+
+- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
+  "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the
+  parent node compatible property (e.g: brcm,genet-v4 pairs with
+  brcm,genet-mdio-v4)
+- reg: address and length relative to the parent node base register address
+- #address-cells: address cell for MDIO bus addressing, should be 1
+- #size-cells: size of the cells for MDIO bus addressing, should be 0
+
+Ethernet PHY node properties:
+
+See Documentation/devicetree/bindings/net/phy.txt for the list of required and
+optional properties.
+
+Internal Gigabit PHY example:
+
+ethernet@f0b60000 {
+       phy-mode = "internal";
+       phy-handle = <&phy1>;
+       mac-address = [ 00 10 18 36 23 1a ];
+       compatible = "brcm,genet-v4";
+       #address-cells = <0x1>;
+       #size-cells = <0x1>;
+       reg = <0xf0b60000 0xfc4c>;
+       interrupts = <0x0 0x14 0x0>, <0x0 0x15 0x0>;
+
+       mdio@e14 {
+               compatible = "brcm,genet-mdio-v4";
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+               reg = <0xe14 0x8>;
+
+               phy1: ethernet-phy@1 {
+                       max-speed = <1000>;
+                       reg = <0x1>;
+                       compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22";
+               };
+       };
+};
+
+MoCA interface / MAC to MAC example:
+
+ethernet@f0b80000 {
+       phy-mode = "moca";
+       fixed-link = <1 0 1000 0 0>;
+       mac-address = [ 00 10 18 36 24 1a ];
+       compatible = "brcm,genet-v4";
+       #address-cells = <0x1>;
+       #size-cells = <0x1>;
+       reg = <0xf0b80000 0xfc4c>;
+       interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
+
+       mdio@e14 {
+               compatible = "brcm,genet-mdio-v4";
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+               reg = <0xe14 0x8>;
+       };
+};
+
+
+External MDIO-connected Gigabit PHY/switch:
+
+ethernet@f0ba0000 {
+       phy-mode = "rgmii";
+       phy-handle = <&phy0>;
+       mac-address = [ 00 10 18 36 26 1a ];
+       compatible = "brcm,genet-v4";
+       #address-cells = <0x1>;
+       #size-cells = <0x1>;
+       reg = <0xf0ba0000 0xfc4c>;
+       interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
+
+       mdio@0e14 {
+               compatible = "brcm,genet-mdio-v4";
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+               reg = <0xe14 0x8>;
+
+               phy0: ethernet-phy@0 {
+                       max-speed = <1000>;
+                       reg = <0x0>;
+                       compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22";
+               };
+       };
+};
index f2105a47ec87c547fcd88d383d136ec4d11eb65b..b4a6d53fb01ae8827ba69b33bbc60e6db21d90a7 100644 (file)
@@ -12,6 +12,10 @@ Required properties:
 
 Optional properties:
 
+- reg-io-width : Specify the size (in bytes) of the IO accesses that
+       should be performed on the device.  Valid values are 1, 2 or 4.
+       Defaults to 1 (8 bits).
+
 - nxp,external-clock-frequency : Frequency of the external oscillator
        clock in Hz. Note that the internal clock frequency used by the
        SJA1000 is half of that value. If not specified, a default value
index 5da628db68bf4eaf29fa0aa86d32500481da32fb..8d7c3096390f59506552340b94bdba83dc717d30 100644 (file)
@@ -18,12 +18,7 @@ Properties:
 - interrupts: Two interrupt specifiers.  The first is the MIX
   interrupt routing and the second the routing for the AGL interrupts.
 
-- mac-address: Optional, the MAC address to assign to the device.
-
-- local-mac-address: Optional, the MAC address to assign to the device
-  if mac-address is not specified.
-
-- phy-handle: Optional, a phandle for the PHY device connected to this device.
+- phy-handle: Optional, see ethernet.txt file in the same directory.
 
 Example:
        ethernet@1070000100800 {
index d4c53ba04b3bad857d5f30c051c5e867653e31e5..7dbd158810d293b597d76f909d5544b3411bb641 100644 (file)
@@ -35,12 +35,7 @@ Properties for PIP port which is a child the PIP interface:
 
 - reg: The port number within the interface group.
 
-- mac-address: Optional, the MAC address to assign to the device.
-
-- local-mac-address: Optional, the MAC address to assign to the device
-  if mac-address is not specified.
-
-- phy-handle: Optional, a phandle for the PHY device connected to this device.
+- phy-handle: Optional, see ethernet.txt file in the same directory.
 
 Example:
 
index 09055c2495f0299f29597aeb76bfe199e4197126..abd67c13d3442228834e7df36a53a71454132160 100644 (file)
@@ -6,11 +6,7 @@ Required properties:
   or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
-- phy-mode: String, operation mode of the PHY interface.
-  Supported values are: "mii", "rmii".
-
-Optional properties:
-- local-mac-address: 6 bytes, mac address
+- phy-mode: see ethernet.txt file in the same directory.
 
 Examples:
 
index 05d660e4ac6402f57be201105b94147c69776b53..ae2b8b7f9c38f0bb3d233588b1b2a4cc01f6c900 100644 (file)
@@ -28,9 +28,8 @@ Optional properties:
 Slave Properties:
 Required properties:
 - phy_id               : Specifies slave phy id
-- phy-mode             : The interface between the SoC and the PHY (a string
-                         that of_get_phy_mode() can understand)
-- mac-address          : Specifies slave MAC address
+- phy-mode             : See ethernet.txt file in the same directory
+- mac-address          : See ethernet.txt file in the same directory
 
 Optional properties:
 - dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
index 2d39c990e641170ac22e5ff1e9d0caf1740c2d50..28767ed7c1bdcf44b43be8fdd50182706fd2a5f1 100644 (file)
@@ -9,8 +9,6 @@ Required properties:
 - interrupts : interrupt specifier specific to interrupt controller
 
 Optional properties:
-- local-mac-address : A bytestring of 6 bytes specifying Ethernet MAC address
-    to use (from firmware or bootloader)
 - davicom,no-eeprom : Configuration EEPROM is not available
 - davicom,ext-phy : Use external PHY
 
index 6e356d15154a9603c8ce2b73f57edbd0c326c60c..032808843f90a3b1db0389b0a60dd075c9ea3ac6 100644 (file)
@@ -17,9 +17,8 @@ Required properties:
                          Miscellaneous Interrupt>
 
 Optional properties:
-- phy-handle: Contains a phandle to an Ethernet PHY.
+- phy-handle: See ethernet.txt file in the same directory.
               If absent, davinci_emac driver defaults to 100/FULL.
-- local-mac-address : 6 bytes, mac address
 - ti,davinci-rmii-en: 1 byte, 1 means use RMII
 - ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
 
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
new file mode 100644 (file)
index 0000000..9ecd43d
--- /dev/null
@@ -0,0 +1,25 @@
+The following properties are common to the Ethernet controllers:
+
+- local-mac-address: array of 6 bytes, specifies the MAC address that was
+  assigned to the network device;
+- mac-address: array of 6 bytes, specifies the MAC address that was last used by
+  the boot program; should be used in cases where the MAC address assigned to
+  the device by the boot program is different from the "local-mac-address"
+  property;
+- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
+- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
+  the maximum frame size (there is a contradiction in ePAPR).
+- phy-mode: string, operation mode of the PHY interface; supported values are
+  "mii", "gmii", "sgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
+  "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
+  standard property;
+- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
+- phy-handle: phandle, specifies a reference to a node representing a PHY
+  device; this property is described in ePAPR and so preferred;
+- phy: the same as "phy-handle" property, not recommended for new bindings.
+- phy-device: the same as "phy-handle" property, not recommended for new
+  bindings.
+
+Child nodes of the Ethernet controller are typically the individual PHY devices
+connected via the MDIO bus (sometimes the MDIO bus controller is separate).
+They are described in the phy.txt file in this same directory.
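+
+As an illustration only (this is not part of any binding), a MAC driver might
+pick up these common properties with the standard OF helpers roughly as shown
+below. The helper function name is hypothetical and error handling is kept
+minimal:
+
+       #include <linux/errno.h>
+       #include <linux/etherdevice.h>
+       #include <linux/of.h>
+       #include <linux/of_net.h>
+       #include <linux/phy.h>
+
+       static int example_parse_ethernet_props(struct device_node *np,
+                                               struct net_device *ndev,
+                                               struct device_node **phy_np,
+                                               int *phy_mode)
+       {
+               /* "local-mac-address" / "mac-address" */
+               const void *mac = of_get_mac_address(np);
+
+               if (mac)
+                       ether_addr_copy(ndev->dev_addr, mac);
+               else
+                       eth_hw_addr_random(ndev);
+
+               /* "phy-mode" / "phy-connection-type" */
+               *phy_mode = of_get_phy_mode(np);
+               if (*phy_mode < 0)
+                       *phy_mode = PHY_INTERFACE_MODE_MII;
+
+               /* "phy-handle" (or the legacy "phy" / "phy-device") */
+               *phy_np = of_parse_phandle(np, "phy-handle", 0);
+
+               return *phy_np ? 0 : -ENODEV;
+       }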
index 845ff848d8950b546e7243fbc9568847ed2f87ab..6bc84adb10c0ca6f278cc30d40e0b802c12adfd8 100644 (file)
@@ -4,12 +4,9 @@ Required properties:
 - compatible : Should be "fsl,<soc>-fec"
 - reg : Address and length of the register set for the device
 - interrupts : Should contain fec interrupt
-- phy-mode : String, operation mode of the PHY interface.
-  Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
-  "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+- phy-mode : See ethernet.txt file in the same directory
 
 Optional properties:
-- local-mac-address : 6 bytes, mac address
 - phy-reset-gpios : Should specify the gpio for phy reset
 - phy-reset-duration : Reset duration in milliseconds.  Should present
   only if property "phy-reset-gpios" is available.  Missing the property
index d2ea4605d0789dc8d11ff3e1fd8686a30431a43b..737cdef4f9036eb6069b9f536f137351dc42b137 100644 (file)
@@ -38,22 +38,17 @@ Properties:
   - model : Model of the device.  Can be "TSEC", "eTSEC", or "FEC"
   - compatible : Should be "gianfar"
   - reg : Offset and length of the register set for the device
-  - local-mac-address : List of bytes representing the ethernet address of
-    this controller
   - interrupts : For FEC devices, the first interrupt is the device's
     interrupt.  For TSEC and eTSEC devices, the first interrupt is
     transmit, the second is receive, and the third is error.
-  - phy-handle : The phandle for the PHY connected to this ethernet
-    controller.
+  - phy-handle : See ethernet.txt file in the same directory.
   - fixed-link : <a b c d e> where a is emulated phy id - choose any,
     but unique to the all specified fixed-links, b is duplex - 0 half,
     1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
     pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
-  - phy-connection-type : a string naming the controller/PHY interface type,
-    i.e., "mii" (default), "rmii", "gmii", "rgmii", "rgmii-id", "sgmii",
-    "tbi", or "rtbi".  This property is only really needed if the connection
-    is of type "rgmii-id", as all other connection types are detected by
-    hardware.
+  - phy-connection-type : See ethernet.txt file in the same directory.
+    This property is only really needed if the connection is of type
+    "rgmii-id", as all other connection types are detected by hardware.
   - fsl,magic-packet : If present, indicates that the hardware supports
     waking up via magic packet.
   - bd-stash : If present, indicates that the hardware supports stashing
index 585021acd1786d8b8413effddd3caa514c3aef5c..b92e927808b607e96c36d66f61fa6499c7ac0f1f 100644 (file)
@@ -6,10 +6,9 @@ Required properties:
 - interrupts: Should contain ethernet controller interrupt
 
 Optional properties:
-- phy-mode: String, operation mode of the PHY interface.
-  Supported values are: "mii", "rmii" (default)
+- phy-mode: See ethernet.txt file in the same directory. If the property is
+  absent, "rmii" is assumed.
 - use-iram: Use LPC32xx internal SRAM (IRAM) for DMA buffering
-- local-mac-address : 6 bytes, mac address
 
 Example:
 
index 70af2ec12b09306eebc6a809a9cf55feefc1635a..aaa696414f57a1012b5fc18586ce06da6a899309 100644 (file)
@@ -8,16 +8,12 @@ Required properties:
   the Cadence GEM, or the generic form: "cdns,gem".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
-- phy-mode: String, operation mode of the PHY interface.
-  Supported values are: "mii", "rmii", "gmii", "rgmii".
+- phy-mode: See ethernet.txt file in the same directory.
 - clock-names: Tuple listing input clock names.
        Required elements: 'pclk', 'hclk'
        Optional elements: 'tx_clk'
 - clocks: Phandles to input clocks.
 
-Optional properties:
-- local-mac-address: 6 bytes, mac address
-
 Examples:
 
        macb0: ethernet@fffc4000 {
index 859a6fa7569c07f93eac4a629969a7bf664e4a6e..750d577e8083ee3f96c8bf823c986c162d4ac5b3 100644 (file)
@@ -4,10 +4,8 @@ Required properties:
 - compatible: should be "marvell,armada-370-neta".
 - reg: address and length of the register set for the device.
 - interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
-  property, a single integer).
-- phy-mode: The interface between the SoC and the PHY (a string that
-  of_get_phy_mode() can understand)
+- phy: See ethernet.txt file in the same directory.
+- phy-mode: See ethernet.txt file in the same directory
 - clocks: a pointer to the reference clock for this device.
 
 Example:
index c233b61142427c0c2793b936798aad4eaf17403b..bce52b2ec55ece41a14b997772956e077a7259e6 100644 (file)
@@ -36,7 +36,7 @@ Required port properties:
       "marvell,kirkwood-eth-port".
  - reg: port number relative to ethernet controller, shall be 0, 1, or 2.
  - interrupts: port interrupt.
- - local-mac-address: 6 bytes MAC address.
+ - local-mac-address: See ethernet.txt file in the same directory.
 
 Optional port properties:
  - marvell,tx-queue-size: size of the transmit ring buffer.
@@ -48,7 +48,7 @@ Optional port properties:
 
 and
 
- - phy-handle: phandle reference to ethernet PHY.
+ - phy-handle: See ethernet.txt file in the same directory.
 
 or
 
index 4fc39276361132fe7450248e0ceb5216c151bf25..d54d0cc794871b29cbbbf9fbedd6242defa63677 100644 (file)
@@ -6,5 +6,4 @@ Required properties:
 - interrupts : interrupt connection
 
 Optional properties:
-- local-mac-address : Ethernet mac address to use
 - vdd-supply:  supply for Ethernet mac
diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt
new file mode 100644 (file)
index 0000000..98a3e61
--- /dev/null
@@ -0,0 +1,18 @@
+Micrel PHY properties.
+
+These properties cover the base properties of Micrel PHYs.
+
+Optional properties:
+
+ - micrel,led-mode : LED mode value to set for PHYs with configurable LEDs.
+
+             Configure the LED mode with a single value. The list of PHYs and
+             the bits that are currently supported:
+
+             KSZ8001: register 0x1e, bits 15..14
+             KSZ8041: register 0x1e, bits 15..14
+             KSZ8021: register 0x1f, bits 5..4
+             KSZ8031: register 0x1f, bits 5..4
+             KSZ8051: register 0x1f, bits 5..4
+
+              See the respective PHY datasheet for the mode values.
diff --git a/Documentation/devicetree/bindings/net/nfc/trf7970a.txt b/Documentation/devicetree/bindings/net/nfc/trf7970a.txt
new file mode 100644 (file)
index 0000000..8dd3ef7
--- /dev/null
@@ -0,0 +1,34 @@
+* Texas Instruments TRF7970A RFID/NFC/15693 Transceiver
+
+Required properties:
+- compatible: Should be "ti,trf7970a".
+- spi-max-frequency: Maximum SPI frequency (<= 2000000).
+- interrupt-parent: phandle of parent interrupt handler.
+- interrupts: A single interrupt specifier.
+- ti,enable-gpios: Two GPIO entries used for 'EN' and 'EN2' pins on the
+  TRF7970A.
+- vin-supply: Regulator for supply voltage to VIN pin
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBone with TRF7970A on SPI1):
+
+&spi1 {
+       status = "okay";
+
+       nfc@0 {
+               compatible = "ti,trf7970a";
+               reg = <0>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&trf7970a_default>;
+               spi-max-frequency = <2000000>;
+               interrupt-parent = <&gpio2>;
+               interrupts = <14 0>;
+               ti,enable-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>,
+                                 <&gpio2 5 GPIO_ACTIVE_LOW>;
+               vin-supply = <&ldo3_reg>;
+               status = "okay";
+       };
+};
index 58307d0931c84294ec0ebfd75d6a215db4b5bf3f..5b8c5890307773cf52b62eded7c1869a9b7aa2a3 100644 (file)
@@ -21,10 +21,18 @@ Optional Properties:
   elements.
 - max-speed: Maximum PHY supported speed (10, 100, 1000...)
 
+  If the phy's identifier is known then the list may contain an entry
+  of the form: "ethernet-phy-idAAAA.BBBB" where
+     AAAA - The value of the 16 bit Phy Identifier 1 register as
+            4 hex digits. This is the chip vendor OUI bits 3:18
+     BBBB - The value of the 16 bit Phy Identifier 2 register as
+            4 hex digits. This is the chip vendor OUI bits 19:24,
+            followed by 10 bits of a vendor specific ID.
+
 Example:
 
 ethernet-phy@0 {
-       compatible = "ethernet-phy-ieee802.3-c22";
+       compatible = "ethernet-phy-id0141.0e90", "ethernet-phy-ieee802.3-c22";
        interrupt-parent = <40000>;
        interrupts = <35 1>;
        reg = <0>;
diff --git a/Documentation/devicetree/bindings/net/samsung-sxgbe.txt b/Documentation/devicetree/bindings/net/samsung-sxgbe.txt
new file mode 100644 (file)
index 0000000..989f6c9
--- /dev/null
@@ -0,0 +1,52 @@
+* Samsung 10G Ethernet driver (SXGBE)
+
+Required properties:
+- compatible: Should be "samsung,sxgbe-v2.0a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the SXGBE interrupts
+  These interrupts are ordered with the fixed interrupt first, followed by
+  the variable transmit DMA interrupts, the receive DMA interrupts and the
+  LPI interrupt.
+  index 0 - the fixed common interrupt of the SXGBE; it is always available.
+  index 1 to 25 - 8 variable transmit interrupts, 16 variable receive
+  interrupts and 1 optional LPI interrupt.
+- phy-mode: String, operation mode of the PHY interface.
+  Supported values are: "sgmii", "xgmii".
+- samsung,pbl: Integer, Programmable Burst Length.
+  Supported values are 1, 2, 4, 8, 16, or 32.
+- samsung,burst-map: Integer, programs the bursts supported by the sxgbe.
+  This is an integer that represents the allowable DMA bursts in fixed burst
+  mode. The allowable range is 0x01-0x3F. When this field is set, fixed burst
+  is enabled. When a fixed length is needed for burst mode, it can be set
+  within the allowable range.
+
+Optional properties:
+- mac-address: 6 bytes, mac address
+- max-frame-size: Maximum Transfer Unit (IEEE defined MTU), rather
+                 than the maximum frame size.
+
+Example:
+
+       aliases {
+               ethernet0 = <&sxgbe0>;
+       };
+
+       sxgbe0: ethernet@1a040000 {
+               compatible = "samsung,sxgbe-v2.0a";
+               reg = <0 0x1a040000 0 0x10000>;
+               interrupt-parent = <&gic>;
+               interrupts = <0 209 4>, <0 185 4>, <0 186 4>, <0 187 4>,
+                            <0 188 4>, <0 189 4>, <0 190 4>, <0 191 4>,
+                            <0 192 4>, <0 193 4>, <0 194 4>, <0 195 4>,
+                            <0 196 4>, <0 197 4>, <0 198 4>, <0 199 4>,
+                            <0 200 4>, <0 201 4>, <0 202 4>, <0 203 4>,
+                            <0 204 4>, <0 205 4>, <0 206 4>, <0 207 4>,
+                            <0 208 4>, <0 210 4>;
+               samsung,pbl = <0x08>;
+               samsung,burst-map = <0x20>;
+               mac-address = [ 00 11 22 33 44 55 ]; /* Filled in by U-Boot */
+               max-frame-size = <9000>;
+               phy-mode = "xgmii";
+       };
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
new file mode 100644 (file)
index 0000000..e7106b5
--- /dev/null
@@ -0,0 +1,55 @@
+* Renesas Electronics SH EtherMAC
+
+This file provides information on what the device node for the SH EtherMAC
+interface contains.
+
+Required properties:
+- compatible: "renesas,gether-r8a7740" if the device is a part of R8A7740 SoC.
+             "renesas,ether-r8a7778"  if the device is a part of R8A7778 SoC.
+             "renesas,ether-r8a7779"  if the device is a part of R8A7779 SoC.
+             "renesas,ether-r8a7790"  if the device is a part of R8A7790 SoC.
+             "renesas,ether-r8a7791"  if the device is a part of R8A7791 SoC.
+             "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+- reg: offset and length of (1) the E-DMAC/feLic register block (required),
+       (2) the TSU register block (optional).
+- interrupts: interrupt specifier for the sole interrupt.
+- phy-mode: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory.
+- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
+- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
+- clocks: clock phandle and specifier pair.
+- pinctrl-0: phandle, referring to a default pin configuration node.
+
+Optional properties:
+- interrupt-parent: the phandle for the interrupt controller that services
+                   interrupts for this device.
+- pinctrl-names: pin configuration state name ("default").
+- renesas,no-ether-link: boolean, specify when a board does not provide a proper
+                        Ether LINK signal.
+- renesas,ether-link-active-low: boolean, specify when the Ether LINK signal is
+                                active-low instead of normal active-high.
+
+Example (Lager board):
+
+       ethernet@ee700000 {
+               compatible = "renesas,ether-r8a7790";
+               reg = <0 0xee700000 0 0x400>;
+               interrupt-parent = <&gic>;
+               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
+               phy-mode = "rmii";
+               phy-handle = <&phy1>;
+               pinctrl-0 = <&ether_pins>;
+               pinctrl-names = "default";
+               renesas,ether-link-active-low;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               phy1: ethernet-phy@1 {
+                       reg = <1>;
+                       interrupt-parent = <&irqc0>;
+                       interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+                       pinctrl-0 = <&phy1_pins>;
+                       pinctrl-names = "default";
+               };
+       };
index 5a41a8658daa12087678a7d81f5f60038cbeddf8..0f8487b888221e6344dec76afd6a8ffa44242306 100644 (file)
@@ -6,8 +6,7 @@ Required properties:
 - interrupts : interrupt connection
 
 Optional properties:
-- phy-device : phandle to Ethernet phy
-- local-mac-address : Ethernet mac address to use
+- phy-device : see ethernet.txt file in the same directory
 - reg-io-width : Mask of sizes (in bytes) of the IO accesses that
   are supported on the device.  Valid value for SMSC LAN91c111 are
   1, 2 or 4.  If it's omitted or invalid, the size would be 2 meaning
index adb5b5744ecd6a7809457061721a16479bdc59b2..3fed3c12441161926fb78590016b59bb90c78448 100644 (file)
@@ -6,9 +6,7 @@ Required properties:
 - interrupts : Should contain SMSC LAN interrupt line
 - interrupt-parent : Should be the phandle for the interrupt controller
   that services interrupts for this device
-- phy-mode : String, operation mode of the PHY interface.
-  Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
-  "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+- phy-mode : See ethernet.txt file in the same directory
 
 Optional properties:
 - reg-shift : Specify the quantity to shift the register offsets by
@@ -23,7 +21,6 @@ Optional properties:
   external PHY
 - smsc,save-mac-address : Indicates that mac address needs to be saved
   before resetting the controller
-- local-mac-address : 6 bytes, mac address
 
 Examples:
 
index 9d92d42140f22109143f9d975a9fd2c96fec9837..5748351fb9dfce76f148772e4546e97684fb6b67 100644 (file)
@@ -10,8 +10,7 @@ Required properties:
 - interrupt-names: Should contain the interrupt names "macirq"
   "eth_wake_irq" if this interrupt is supported in the "interrupts"
   property
-- phy-mode: String, operation mode of the PHY interface.
-  Supported values are: "mii", "rmii", "gmii", "rgmii".
+- phy-mode: See ethernet.txt file in the same directory.
 - snps,reset-gpio      gpio number for phy reset.
 - snps,reset-active-low boolean flag to indicate if phy reset is active low.
 - snps,reset-delays-us  is triplet of delays
@@ -28,12 +27,10 @@ Required properties:
                                ignored if force_thresh_dma_mode is set.
 
 Optional properties:
-- mac-address: 6 bytes, mac address
 - resets: Should contain a phandle to the STMMAC reset signal, if any
 - reset-names: Should contain the reset signal name "stmmaceth", if a
        reset phandle is given
-- max-frame-size:      Maximum Transfer Unit (IEEE defined MTU), rather
-                       than the maximum frame size.
+- max-frame-size: See ethernet.txt file in the same directory
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt b/Documentation/devicetree/bindings/net/wireless/ti,wl1251.txt
new file mode 100644 (file)
index 0000000..189ae5c
--- /dev/null
@@ -0,0 +1,39 @@
+* Texas Instruments wl1251 wireless lan controller
+
+The wl1251 chip can be connected via SPI or via SDIO. This
+document describes the binding for the SPI connected chip.
+
+Required properties:
+- compatible :        Should be "ti,wl1251"
+- reg :               Chip select address of device
+- spi-max-frequency : Maximum SPI clocking speed of device in Hz
+- interrupts :        Should contain interrupt line
+- interrupt-parent :  Should be the phandle for the interrupt controller
+                      that services interrupts for this device
+- vio-supply :        phandle to regulator providing VIO
+- ti,power-gpio :     GPIO connected to chip's PMEN pin
+
+Optional properties:
+- ti,wl1251-has-eeprom : boolean, the wl1251 has an eeprom connected, which
+                         provides configuration data (calibration, MAC, ...)
+- Please consult Documentation/devicetree/bindings/spi/spi-bus.txt
+  for optional SPI connection related properties.
+
+Examples:
+
+&spi1 {
+       wl1251@0 {
+               compatible = "ti,wl1251";
+
+               reg = <0>;
+               spi-max-frequency = <48000000>;
+               spi-cpol;
+               spi-cpha;
+
+               interrupt-parent = <&gpio2>;
+               interrupts = <10 IRQ_TYPE_NONE>; /* gpio line 42 */
+
+               vio-supply = <&vio>;
+               ti,power-gpio = <&gpio3 23 GPIO_ACTIVE_HIGH>; /* 87 */
+       };
+};
diff --git a/Documentation/networking/altera_tse.txt b/Documentation/networking/altera_tse.txt
new file mode 100644 (file)
index 0000000..3f24df8
--- /dev/null
@@ -0,0 +1,263 @@
+       Altera Triple-Speed Ethernet MAC driver
+
+Copyright (C) 2008-2014 Altera Corporation
+
+This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
+using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
+platform bus to obtain component resources. The designs used to test this
+driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
+and tested with ARM and NIOS processor hosts separately. The anticipated use
+cases are simple communications between an embedded system and an external peer
+for status and simple configuration of the embedded system.
+
+For more information visit www.altera.com and www.rocketboards.org. Support
+forums for the driver may be found on www.rocketboards.org, and a design used
+to test this driver may be found there as well. Support is also available from
+the maintainer of this driver, found in MAINTAINERS.
+
+The Triple-Speed Ethernet, SGDMA, and MSGDMA components are all soft IP
+components that can be assembled and built into an FPGA using the Altera
+Quartus toolchain. Quartus 13.1 and 14.0 were used to build the design that
+this driver was tested against. The sopc2dts tool is used to create the
+device tree for the driver, and may be found at rocketboards.org.
+
+The driver probe function examines the device tree and determines if the
+Triple-Speed Ethernet instance is using an SGDMA or MSGDMA component. The
+probe function then installs the appropriate set of DMA routines for
+initialization, transmit and receive setup, and interrupt handling for
+the respective configuration.
+
+The SGDMA component is to be deprecated in the near future (over the next 1-2
+years as of this writing in early 2014) in favor of the MSGDMA component.
+SGDMA support is included for existing designs and reference in case a
+developer wishes to support their own soft DMA logic and driver support. Any
+new designs should not use the SGDMA.
+
+The SGDMA supports only a single transmit or receive operation at a time, and
+therefore will not perform as well compared to the MSGDMA soft IP. Please
+visit www.altera.com for known, documented SGDMA errata.
+
+Scatter-gather DMA is not supported by the SGDMA or MSGDMA at this time.
+Scatter-gather DMA will be added to a future maintenance update to this
+driver.
+
+Jumbo frames are not supported at this time.
+
+The driver limits PHY operations to 10/100Mbps, and has not yet been fully
+tested for 1Gbps. This support will be added in a future maintenance update.
+
+1) Kernel Configuration
+The kernel configuration option is ALTERA_TSE:
+ Device Drivers ---> Network device support ---> Ethernet driver support --->
+ Altera Triple-Speed Ethernet MAC support (ALTERA_TSE)
+
+2) Driver parameters list:
+       debug: message level (0: no output, 16: all);
+       dma_rx_num: Number of descriptors in the RX list (default is 64);
+       dma_tx_num: Number of descriptors in the TX list (default is 64).
+
+3) Command line options
+Driver parameters can be also passed in command line by using:
+       altera_tse=dma_rx_num:128,dma_tx_num:512
+
+4) Driver information and notes
+
+4.1) Transmit process
+When the driver's transmit routine is called by the kernel, it sets up a
+transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
+interrupt is driven by the transmit DMA logic. The driver handles the transmit
+completion in the context of the interrupt handling chain by recycling the
+resources required to send and track the requested transmit operation.
+
+4.2) Receive process
+The driver will post receive buffers to the receive DMA logic during driver
+initialization. Receive buffers may or may not be queued depending upon the
+underlying DMA logic (the MSGDMA is able to queue receive buffers, while the
+SGDMA is not able to queue receive buffers to its receive logic). When a packet is
+received, the DMA logic generates an interrupt. The driver handles a receive
+interrupt by obtaining the DMA receive logic status, reaping receive
+completions until no more receive completions are available.
+
+4.3) Interrupt Mitigation
+The driver is able to mitigate the number of its DMA interrupts
+using NAPI for receive operations. Interrupt mitigation is not yet supported
+for transmit operations, but will be added in a future maintenance release.
+
+4.4) Ethtool support
+Ethtool is supported. Driver statistics and internal errors can be obtained
+using the ethtool -S ethX command. It is also possible to dump registers etc.
+
+4.5) PHY Support
+The driver is compatible with the kernel's PHY Abstraction Layer (PAL) and
+works with PHY and GPHY devices.
+
+4.6) List of source files:
+ o Kconfig
+ o Makefile
+ o altera_tse_main.c: main network device driver
+ o altera_tse_ethtool.c: ethtool support
+ o altera_tse.h: private driver structure and common definitions
+ o altera_msgdma.h: MSGDMA implementation function definitions
+ o altera_sgdma.h: SGDMA implementation function definitions
+ o altera_msgdma.c: MSGDMA implementation
+ o altera_sgdma.c: SGDMA implementation
+ o altera_sgdmahw.h: SGDMA register and descriptor definitions
+ o altera_msgdmahw.h: MSGDMA register and descriptor definitions
+ o altera_utils.c: Driver utility functions
+ o altera_utils.h: Driver utility function definitions
+
+5) Debug Information
+
+The driver exports debug information such as internal statistics,
+MAC and DMA registers, etc.
+
+A user may use the ethtool support to get statistics:
+e.g. using: ethtool -S ethX (that shows the statistics counters),
+or see the MAC registers, e.g. using: ethtool -d ethX
+
+The developer can also use the "debug" module parameter to get
+further debug information.
+
+6) Statistics Support
+
+The controller and driver support a mix of IEEE standard defined statistics,
+RFC defined statistics, and driver or Altera defined statistics. The four
+specifications containing the standard definitions for these statistics are
+as follows:
+
+ o IEEE 802.3-2012 - IEEE Standard for Ethernet.
+ o RFC 2863 found at http://www.rfc-editor.org/rfc/rfc2863.txt.
+ o RFC 2819 found at http://www.rfc-editor.org/rfc/rfc2819.txt.
+ o Altera Triple Speed Ethernet User Guide, found at http://www.altera.com
+
+The statistics supported by the TSE and the device driver are as follows:
+
+"tx_packets" is equivalent to aFramesTransmittedOK defined in IEEE 802.3-2012,
+Section 5.2.2.1.2. This statistic is the count of frames that are successfully
+transmitted.
+
+"rx_packets" is equivalent to aFramesReceivedOK defined in IEEE 802.3-2012,
+Section 5.2.2.1.5. This statistic is the count of frames that are successfully
+received. This count does not include any error packets such as CRC errors,
+length errors, or alignment errors.
+
+"rx_crc_errors" is equivalent to aFrameCheckSequenceErrors defined in IEEE
+802.3-2012, Section 5.2.2.1.6. This statistic is the count of frames that are
+an integral number of bytes in length and do not pass the CRC test as the frame
+is received.
+
+"rx_align_errors" is equivalent to aAlignmentErrors defined in IEEE 802.3-2012,
+Section 5.2.2.1.7. This statistic is the count of frames that are not an
+integral number of bytes in length and do not pass the CRC test as the frame is
+received.
+
+"tx_bytes" is equivalent to aOctetsTransmittedOK defined in IEEE 802.3-2012,
+Section 5.2.2.1.8. This statistic is the count of data and pad bytes
+successfully transmitted from the interface.
+
+"rx_bytes" is equivalent to aOctetsReceivedOK defined in IEEE 802.3-2012,
+Section 5.2.2.1.14. This statistic is the count of data and pad bytes
+successfully received by the controller.
+
+"tx_pause" is equivalent to aPAUSEMACCtrlFramesTransmitted defined in IEEE
+802.3-2012, Section 30.3.4.2. This statistic is a count of PAUSE frames
+transmitted from the network controller.
+
+"rx_pause" is equivalent to aPAUSEMACCtrlFramesReceived defined in IEEE
+802.3-2012, Section 30.3.4.3. This statistic is a count of PAUSE frames
+received by the network controller.
+
+"rx_errors" is equivalent to ifInErrors defined in RFC 2863. This statistic is
+a count of the number of packets received containing errors that prevented the
+packet from being delivered to a higher level protocol.
+
+"tx_errors" is equivalent to ifOutErrors defined in RFC 2863. This statistic
+is a count of the number of packets that could not be transmitted due to errors.
+
+"rx_unicast" is equivalent to ifInUcastPkts defined in RFC 2863. This
+statistic is a count of the number of packets received that were not addressed
+to the broadcast address or a multicast group.
+
+"rx_multicast" is equivalent to ifInMulticastPkts defined in RFC 2863. This
+statistic is a count of the number of packets received that were addressed to
+a multicast address group.
+
+"rx_broadcast" is equivalent to ifInBroadcastPkts defined in RFC 2863. This
+statistic is a count of the number of packets received that were addressed to
+the broadcast address.
+
+"tx_discards" is equivalent to ifOutDiscards defined in RFC 2863. This
+statistic is the number of outbound packets not transmitted even though an
+error was not detected. An example of a reason this might occur is to free up
+internal buffer space.
+
+"tx_unicast" is equivalent to ifOutUcastPkts defined in RFC 2863. This
+statistic counts the number of packets transmitted that were not addressed to
+a multicast group or broadcast address.
+
+"tx_multicast" is equivalent to ifOutMulticastPkts defined in RFC 2863. This
+statistic counts the number of packets transmitted that were addressed to a
+multicast group.
+
+"tx_broadcast" is equivalent to ifOutBroadcastPkts defined in RFC 2863. This
+statistic counts the number of packets transmitted that were addressed to a
+broadcast address.
+
+"ether_drops" is equivalent to etherStatsDropEvents defined in RFC 2819.
+This statistic counts the number of packets dropped due to lack of internal
+controller resources.
+
+"rx_total_bytes" is equivalent to etherStatsOctets defined in RFC 2819.
+This statistic counts the total number of bytes received by the controller,
+including error and discarded packets.
+
+"rx_total_packets" is equivalent to etherStatsPkts defined in RFC 2819.
+This statistic counts the total number of packets received by the controller,
+including error, discarded, unicast, multicast, and broadcast packets.
+
+"rx_undersize" is equivalent to etherStatsUndersizePkts defined in RFC 2819.
+This statistic counts the number of correctly formed packets received less
+than 64 bytes long.
+
+"rx_oversize" is equivalent to etherStatsOversizePkts defined in RFC 2819.
+This statistic counts the number of correctly formed packets greater than 1518
+bytes long.
+
+"rx_64_bytes" is equivalent to etherStatsPkts64Octets defined in RFC 2819.
+This statistic counts the total number of packets received that were 64 octets
+in length.
+
+"rx_65_127_bytes" is equivalent to etherStatsPkts65to127Octets defined in RFC
+2819. This statistic counts the total number of packets received that were
+between 65 and 127 octets in length inclusive.
+
+"rx_128_255_bytes" is equivalent to etherStatsPkts128to255Octets defined in
+RFC 2819. This statistic is the total number of packets received that were
+between 128 and 255 octets in length inclusive.
+
+"rx_256_511_bytes" is equivalent to etherStatsPkts256to511Octets defined in
+RFC 2819. This statistic is the total number of packets received that were
+between 256 and 511 octets in length inclusive.
+
+"rx_512_1023_bytes" is equivalent to etherStatsPkts512to1023Octets defined in
+RFC 2819. This statistic is the total number of packets received that were
+between 512 and 1023 octets in length inclusive.
+
+"rx_1024_1518_bytes" is equivalent to etherStatsPkts1024to1518Octets defined
+in RFC 2819. This statistic is the total number of packets received that were
+between 1024 and 1518 octets in length inclusive.
+
+"rx_gte_1519_bytes" is a statistic defined specific to the behavior of the
+Altera TSE. This statistic counts the number of received good and errored
+frames between the length of 1519 and the maximum frame length configured
+in the frm_length register. See the Altera TSE User Guide for more details.
+
+"rx_jabbers" is equivalent to etherStatsJabbers defined in RFC 2819. This
+statistic is the total number of packets received that were longer than 1518
+octets, and had either a bad CRC with an integral number of octets (CRC Error)
+or a bad CRC with a non-integral number of octets (Alignment Error).
+
+"rx_runts" is equivalent to etherStatsFragments defined in RFC 2819. This
+statistic is the total number of packets received that were less than 64 octets
+in length and had either a bad CRC with an integral number of octets (CRC
+error) or a bad CRC with a non-integral number of octets (Alignment Error).
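Since these names are the driver's ethtool statistics strings, they can be
read with "ethtool -S <interface>" or fetched programmatically through the
ETHTOOL_GSTRINGS/ETHTOOL_GSTATS ioctls. A minimal user-space sketch follows
(interface name assumed, error handling omitted for brevity):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <linux/ethtool.h>
  #include <linux/sockios.h>

  int main(void)
  {
          struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
          struct ethtool_gstrings *strings;
          struct ethtool_stats *stats;
          struct ifreq ifr;
          unsigned int i;
          int fd = socket(AF_INET, SOCK_DGRAM, 0);

          memset(&ifr, 0, sizeof(ifr));
          strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed name */

          ifr.ifr_data = (void *)&drvinfo;
          ioctl(fd, SIOCETHTOOL, &ifr);                 /* learn n_stats */

          strings = calloc(1, sizeof(*strings) +
                              drvinfo.n_stats * ETH_GSTRING_LEN);
          strings->cmd = ETHTOOL_GSTRINGS;
          strings->string_set = ETH_SS_STATS;
          strings->len = drvinfo.n_stats;
          ifr.ifr_data = (void *)strings;
          ioctl(fd, SIOCETHTOOL, &ifr);                 /* stat names */

          stats = calloc(1, sizeof(*stats) +
                            drvinfo.n_stats * sizeof(__u64));
          stats->cmd = ETHTOOL_GSTATS;
          stats->n_stats = drvinfo.n_stats;
          ifr.ifr_data = (void *)stats;
          ioctl(fd, SIOCETHTOOL, &ifr);                 /* stat values */

          for (i = 0; i < drvinfo.n_stats; i++)
                  printf("%.*s: %llu\n", ETH_GSTRING_LEN,
                         (const char *)strings->data + i * ETH_GSTRING_LEN,
                         (unsigned long long)stats->data[i]);
          return 0;
  }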
index 5cdb22971d19753607739a66108bd85fdf4a492c..a383c00392d03f2e316f7fe7dd954c7d8b71f274 100644 (file)
@@ -270,16 +270,15 @@ arp_ip_target
 arp_validate
 
        Specifies whether or not ARP probes and replies should be
-       validated in the active-backup mode.  This causes the ARP
-       monitor to examine the incoming ARP requests and replies, and
-       only consider a slave to be up if it is receiving the
-       appropriate ARP traffic.
+       validated in any mode that supports arp monitoring, or whether
+       non-ARP traffic should be filtered (disregarded) for link
+       monitoring purposes.
 
        Possible values are:
 
        none or 0
 
-               No validation is performed.  This is the default.
+               No validation or filtering is performed.
 
        active or 1
 
@@ -293,31 +292,68 @@ arp_validate
 
                Validation is performed for all slaves.
 
-       For the active slave, the validation checks ARP replies to
-       confirm that they were generated by an arp_ip_target.  Since
-       backup slaves do not typically receive these replies, the
-       validation performed for backup slaves is on the ARP request
-       sent out via the active slave.  It is possible that some
-       switch or network configurations may result in situations
-       wherein the backup slaves do not receive the ARP requests; in
-       such a situation, validation of backup slaves must be
-       disabled.
-
-       The validation of ARP requests on backup slaves is mainly
-       helping bonding to decide which slaves are more likely to
-       work in case of the active slave failure, it doesn't really
-       guarantee that the backup slave will work if it's selected
-       as the next active slave.
-
-       This option is useful in network configurations in which
-       multiple bonding hosts are concurrently issuing ARPs to one or
-       more targets beyond a common switch.  Should the link between
-       the switch and target fail (but not the switch itself), the
-       probe traffic generated by the multiple bonding instances will
-       fool the standard ARP monitor into considering the links as
-       still up.  Use of the arp_validate option can resolve this, as
-       the ARP monitor will only consider ARP requests and replies
-       associated with its own instance of bonding.
+       filter or 4
+
+               Filtering is applied to all slaves. No validation is
+               performed.
+
+       filter_active or 5
+
+               Filtering is applied to all slaves, validation is performed
+               only for the active slave.
+
+       filter_backup or 6
+
+               Filtering is applied to all slaves, validation is performed
+               only for backup slaves.
+
+       Validation:
+
+       Enabling validation causes the ARP monitor to examine the incoming
+       ARP requests and replies, and only consider a slave to be up if it
+       is receiving the appropriate ARP traffic.
+
+       For an active slave, the validation checks ARP replies to confirm
+       that they were generated by an arp_ip_target.  Since backup slaves
+       do not typically receive these replies, the validation performed
+       for backup slaves is on the broadcast ARP request sent out via the
+       active slave.  It is possible that some switch or network
+       configurations may result in situations wherein the backup slaves
+       do not receive the ARP requests; in such a situation, validation
+       of backup slaves must be disabled.
+
+       The validation of ARP requests on backup slaves mainly helps
+       bonding to decide which slaves are more likely to work in case of
+       the active slave failure; it doesn't really guarantee that the
+       backup slave will work if it's selected as the next active slave.
+
+       Validation is useful in network configurations in which multiple
+       bonding hosts are concurrently issuing ARPs to one or more targets
+       beyond a common switch.  Should the link between the switch and
+       target fail (but not the switch itself), the probe traffic
+       generated by the multiple bonding instances will fool the standard
+       ARP monitor into considering the links as still up.  Use of
+       validation can resolve this, as the ARP monitor will only consider
+       ARP requests and replies associated with its own instance of
+       bonding.
+
+       Filtering:
+
+       Enabling filtering causes the ARP monitor to only use incoming ARP
+       packets for link availability purposes.  Arriving packets that are
+       not ARPs are delivered normally, but do not count when determining
+       if a slave is available.
+
+       Filtering operates by only considering the reception of ARP
+       packets (any ARP packet, regardless of source or destination) when
+       determining if a slave has received traffic for link availability
+       purposes.
+
+       Filtering is useful in network configurations in which significant
+       levels of third party broadcast traffic would fool the standard
+       ARP monitor into considering the links as still up.  Use of
+       filtering can resolve this, as only ARP traffic is considered for
+       link availability purposes.
 
        This option was added in bonding version 3.1.0.
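	The value can be changed at module load time via the arp_validate=
	parameter or, on a running bond, through the bonding sysfs
	interface. A rough sketch, assuming a bond named bond0 and the
	usual sysfs layout:

	  /* Sketch: switch bond0 to filter_active ARP validation via
	   * sysfs.  The bond name and path are assumptions; the same
	   * effect can be had with arp_validate= at module load time.
	   */
	  #include <stdio.h>

	  int main(void)
	  {
	          FILE *f = fopen("/sys/class/net/bond0/bonding/arp_validate", "w");

	          if (!f) {
	                  perror("arp_validate");
	                  return 1;
	          }
	          /* The option names listed above (or their numeric
	           * values) are written as plain text. */
	          fprintf(f, "filter_active\n");
	          fclose(f);
	          return 0;
	  }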
 
index a06b48d2f5cc68c9bd6c37ac0bdb9ce54a69f5b2..81f940f4e88480d48c35fd7707d679d646ef0af8 100644 (file)
@@ -546,6 +546,130 @@ ffffffffa0069c8f + <x>:
 For BPF JIT developers, bpf_jit_disasm, bpf_asm and bpf_dbg provides a useful
 toolchain for developing and testing the kernel's JIT compiler.
 
+BPF kernel internals
+--------------------
+Internally, the kernel interpreter uses a different BPF instruction set
+format that shares the underlying principles of the BPF format described in
+the previous paragraphs. However, the instruction set format is modelled
+closer to the underlying architecture to mimic native instruction sets, so
+that better performance can be achieved (more details later).
+
+It is designed to be JITed with a one-to-one mapping, which can also open up
+the possibility for GCC/LLVM compilers to generate optimized BPF code through
+a BPF backend that performs almost as fast as natively compiled code.
+
+The new instruction set was originally designed with the goal in mind of
+writing programs in "restricted C" and compiling them into BPF with an
+optional GCC/LLVM backend, so that the result can be just-in-time mapped to
+modern 64-bit CPUs with minimal performance overhead over two steps, that is,
+C -> BPF -> native code.
+
+Currently, the new format is being used for running user BPF programs, which
+includes seccomp BPF, classic socket filters, cls_bpf traffic classifier,
+team driver's classifier for its load-balancing mode, netfilter's xt_bpf
+extension, PTP dissector/classifier, and much more. They are all internally
+converted by the kernel into the new instruction set representation and run
+in the extended interpreter. For in-kernel handlers, this all works
+transparently by using sk_unattached_filter_create() for setting up the
+filter and sk_unattached_filter_destroy() for destroying it. The macro
+SK_RUN_FILTER(filter, ctx) transparently invokes the right BPF function to
+run the filter. 'filter' is a pointer to struct sk_filter that we got from
+sk_unattached_filter_create(), and 'ctx' the given context (e.g. skb pointer).
+All constraints and restrictions from sk_chk_filter() apply before the
+conversion to the new layout is done behind the scenes!
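As a rough illustration of this API (a sketch only; the exact prototypes
have varied across kernel versions, and the program used here is simply
the classic "accept everything" filter):

  #include <linux/filter.h>
  #include <linux/kernel.h>
  #include <linux/skbuff.h>

  static struct sk_filter *example_filter;

  static int example_filter_init(void)
  {
          /* Classic BPF program: unconditionally accept the packet. */
          static struct sock_filter insns[] = {
                  BPF_STMT(BPF_RET | BPF_K, 0xffff),
          };
          struct sock_fprog prog = {
                  .len    = ARRAY_SIZE(insns),
                  .filter = insns,
          };

          /* Checked by sk_chk_filter() and converted internally. */
          return sk_unattached_filter_create(&example_filter, &prog);
  }

  static unsigned int example_filter_run(struct sk_buff *skb)
  {
          /* Dispatches to the interpreter or a JIT image, whichever
           * was set up for this filter. */
          return SK_RUN_FILTER(example_filter, skb);
  }

  static void example_filter_exit(void)
  {
          sk_unattached_filter_destroy(example_filter);
  }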
+
+Currently, for JITing, the user BPF format is being used and the existing BPF
+JIT compilers are reused whenever possible. In other words, we do not (yet!)
+perform a JIT compilation in the new layout; however, future work will
+successively migrate traditional JIT compilers to the new instruction format
+as well, so that they will profit from the very same benefits. Thus, when
+speaking about JIT in the following, a JIT compiler (TBD) for the new
+instruction format is meant in this context.
+
+Some core changes of the new internal format:
+
+- Number of registers increases from 2 to 10:
+
+  The old format had two registers A and X, and a hidden frame pointer. The
+  new layout extends this to be 10 internal registers and a read-only frame
+  pointer. Since 64-bit CPUs pass arguments to functions via registers, the
+  number of args from a BPF program to an in-kernel function is restricted
+  to 5, and one register is used to accept the return value from an in-kernel
+  function. Natively, x86_64 passes the first 6 arguments in registers,
+  aarch64/sparcv9/mips64 have 7 - 8 registers for arguments; x86_64 has 6
+  callee saved registers, and aarch64/sparcv9/mips64 have 11 or more callee
+  saved registers.
+
+  Therefore, BPF calling convention is defined as:
+
+    * R0       - return value from in-kernel function
+    * R1 - R5  - arguments from BPF program to in-kernel function
+    * R6 - R9  - callee saved registers that in-kernel function will preserve
+    * R10      - read-only frame pointer to access stack
+
+  Thus, all BPF registers map one to one to HW registers on x86_64, aarch64,
+  etc, and BPF calling convention maps directly to ABIs used by the kernel on
+  64-bit architectures.
+
+  On 32-bit architectures, a JIT may map programs that use only 32-bit
+  arithmetic and may leave more complex programs to be interpreted.
+
+  R0 - R5 are scratch registers and a BPF program needs to spill/fill them
+  if necessary across calls. Note that there is only one BPF program (== one
+  BPF main routine); it cannot call other BPF functions, it can only call
+  predefined in-kernel functions.
+
+- Register width increases from 32-bit to 64-bit:
+
+  Still, the semantics of the original 32-bit ALU operations are preserved
+  via 32-bit subregisters. All BPF registers are 64-bit with 32-bit lower
+  subregisters that zero-extend into 64-bit when they are written to.
+  That behavior maps directly to the x86_64 and arm64 subregister definitions,
+  but makes other JITs more difficult.
+
+  32-bit architectures run 64-bit internal BPF programs via the interpreter.
+  Their JITs may convert BPF programs that only use 32-bit subregisters into
+  the native instruction set and leave the rest to be interpreted.
+
+  Operation is 64-bit because, on 64-bit architectures, pointers are also
+  64-bit wide and we want to pass 64-bit values in/out of kernel functions.
+  With 32-bit BPF registers a register-pair ABI would have to be defined
+  instead; there would be no direct BPF register to HW register mapping, and
+  the JIT would need to do combine/split/move operations for every register
+  in and out of the function, which is complex, bug prone and slow.  Another
+  reason is the use of atomic 64-bit counters.
+
+- Conditional jt/jf targets replaced with jt/fall-through:
+
+  While the original design has constructs such as "if (cond) jump_true;
+  else jump_false;", they are replaced with alternative constructs like
+  "if (cond) jump_true; /* else fall-through */".
+
+- Introduces bpf_call insn and register passing convention for zero overhead
+  calls from/to other kernel functions:
+
+  After a kernel function call, R1 - R5 are reset to unreadable and R0 holds
+  the return value of the function. Since R6 - R9 are callee saved, their
+  state is preserved across the call.
+
+Also in the new design, BPF is limited to 4096 insns, which means that any
+program will terminate quickly and will only call a fixed number of kernel
+functions. Both the original BPF and the new format use two-operand
+instructions, which helps to do a one-to-one mapping between a BPF insn and
+an x86 insn during JIT.
+
+The input context pointer for invoking the interpreter function is generic;
+its content is defined by the specific use case. For seccomp, register R1
+points to seccomp_data; for converted BPF filters, R1 points to an skb.
+
+A program that is translated internally consists of the following elements:
+
+  op:16, jt:8, jf:8, k:32    ==>    op:8, a_reg:4, x_reg:4, off:16, imm:32
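Expressed as a C structure, the right-hand encoding corresponds to roughly
the following layout (the member names here are illustrative, not
necessarily the in-tree ones):

  #include <linux/types.h>

  /* Sketch of the 64-bit internal instruction encoding shown above. */
  struct bpf_internal_insn {
          __u8    code;           /* opcode, 8 bits            */
          __u8    a_reg:4;        /* destination register, A   */
          __u8    x_reg:4;        /* source register, X        */
          __s16   off;            /* signed 16-bit offset      */
          __s32   imm;            /* signed 32-bit immediate   */
  };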
+
+Just like the original BPF, the new format runs within a controlled environment,
+is deterministic and the kernel can easily prove that. The safety of a program
+can be determined in two steps: the first step does a depth-first search to
+disallow loops and performs other CFG validation; the second step starts from
+the first insn and descends all possible paths. It simulates execution of every
+insn and observes the state change of registers and stack.
+
 Misc
 ----
 
@@ -561,3 +685,4 @@ the underlying architecture.
 
 Jay Schulist <jschlst@samba.org>
 Daniel Borkmann <dborkman@redhat.com>
+Alexei Starovoitov <ast@plumgrid.com>
index ad474ea07d07f1d03674c832cb7436ea1ec6059c..ba1daea7f2e4c67490478c0eae850bd35a08796c 100644 (file)
@@ -1,38 +1,8 @@
 The Gianfar Ethernet Driver
-Sysfs File description
 
 Author: Andy Fleming <afleming@freescale.com>
 Updated: 2005-07-28
 
-SYSFS
-
-Several of the features of the gianfar driver are controlled
-through sysfs files.  These are:
-
-bd_stash:
-To stash RX Buffer Descriptors in the L2, echo 'on' or '1' to
-bd_stash, echo 'off' or '0' to disable
-
-rx_stash_len:
-To stash the first n bytes of the packet in L2, echo the number
-of bytes to buf_stash_len.  echo 0 to disable.
-
-WARNING: You could really screw these up if you set them too low or high!
-fifo_threshold:
-To change the number of bytes the controller needs in the
-fifo before it starts transmission, echo the number of bytes to 
-fifo_thresh.  Range should be 0-511.
-
-fifo_starve:
-When the FIFO has less than this many bytes during a transmit, it
-enters starve mode, and increases the priority of TX memory
-transactions.  To change, echo the number of bytes to
-fifo_starve.  Range should be 0-511.
-
-fifo_starve_off:
-Once in starve mode, the FIFO remains there until it has this
-many bytes.  To change, echo the number of bytes to
-fifo_starve_off.  Range should be 0-511.
 
 CHECKSUM OFFLOADING
 
index 4ebbd659256fbfe3cdf450bfed814066ae2df575..43d3549366a09d6cfbb7e4b284bb01d04fd314a0 100644 (file)
@@ -36,54 +36,6 @@ Default Value: 0
 This parameter adds support for SR-IOV.  It causes the driver to spawn up to
 max_vfs worth of virtual function.
 
-QueuePairs
-----------
-Valid Range:  0-1
-Default Value:  1 (TX and RX will be paired onto one interrupt vector)
-
-If set to 0, when MSI-X is enabled, the TX and RX will attempt to occupy
-separate vectors.
-
-This option can be overridden to 1 if there are not sufficient interrupts
-available.  This can occur if any combination of RSS, VMDQ, and max_vfs
-results in more than 4 queues being used.
-
-Node
-----
-Valid Range:   0-n
-Default Value: -1 (off)
-
-  0 - n: where n is the number of the NUMA node that should be used to
-         allocate memory for this adapter port.
-  -1: uses the driver default of allocating memory on whichever processor is
-      running insmod/modprobe.
-
-  The Node parameter will allow you to pick which NUMA node you want to have
-  the adapter allocate memory from.  All driver structures, in-memory queues,
-  and receive buffers will be allocated on the node specified.  This parameter
-  is only useful when interrupt affinity is specified, otherwise some portion
-  of the time the interrupt could run on a different core than the memory is
-  allocated on, causing slower memory access and impacting throughput, CPU, or
-  both.
-
-EEE
----
-Valid Range:  0-1
-Default Value: 1 (enabled)
-
-  A link between two EEE-compliant devices will result in periodic bursts of
-  data followed by long periods where in the link is in an idle state. This Low
-  Power Idle (LPI) state is supported in both 1Gbps and 100Mbps link speeds.
-  NOTE: EEE support requires autonegotiation.
-
-DMAC
-----
-Valid Range: 0-1
-Default Value: 1 (enabled)
-  Enables or disables DMA Coalescing feature.
-
-
-
 Additional Configurations
 =========================
 
index ebf2707194029b5f7a44a5f5b4c1c8166e6ac816..3544c98401fd121a1f217160575176960a078760 100644 (file)
@@ -48,7 +48,7 @@ The MDIO bus
    time, so it is safe for them to block, waiting for an interrupt to signal
    the operation is complete
  
- 2) A reset function is necessary.  This is used to return the bus to an
+ 2) A reset function is optional.  This is used to return the bus to an
    initialized state.
 
  3) A probe function is needed.  This function should set up anything the bus
@@ -253,16 +253,25 @@ Writing a PHY driver
 
  Each driver consists of a number of function pointers:
 
+   soft_reset: perform a PHY software reset
    config_init: configures PHY into a sane state after a reset.
      For instance, a Davicom PHY requires descrambling disabled.
    probe: Allocate phy->priv, optionally refuse to bind.
    PHY may not have been reset or had fixups run yet.
    suspend/resume: power management
    config_aneg: Changes the speed/duplex/negotiation settings
+   aneg_done: Determines the auto-negotiation result
    read_status: Reads the current speed/duplex/negotiation settings
    ack_interrupt: Clear a pending interrupt
+   did_interrupt: Checks if the PHY generated an interrupt
    config_intr: Enable or disable interrupts
    remove: Does any driver take-down
+   ts_info: Queries about the HW timestamping status
+   hwtstamp: Set the PHY HW timestamping configuration
+   rxtstamp: Requests a receive timestamp at the PHY level for a 'skb'
+   txtstamp: Requests a transmit timestamp at the PHY level for a 'skb'
+   set_wol: Enable Wake-on-LAN at the PHY level
+   get_wol: Get the Wake-on-LAN status at the PHY level
 
  Of these, only config_aneg and read_status are required to be
  assigned by the driver code.  The rest are optional.  Also, it is
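 A bare-bones registration that provides the two required callbacks and
 falls back on the generic helpers for everything else might look roughly
 like this (a sketch; it assumes the struct phy_driver field names and
 genphy_* helpers of this kernel version, and the PHY ID values are
 placeholders):

   #include <linux/module.h>
   #include <linux/phy.h>

   static int example_phy_config_init(struct phy_device *phydev)
   {
           /* Bring the PHY into a sane state after reset; nothing to
            * do for this example. */
           return 0;
   }

   static struct phy_driver example_phy_driver = {
           .phy_id         = 0x01234560,           /* placeholder */
           .phy_id_mask    = 0xfffffff0,
           .name           = "Example PHY",
           .features       = PHY_GBIT_FEATURES,
           .config_init    = example_phy_config_init,
           .config_aneg    = genphy_config_aneg,   /* required */
           .read_status    = genphy_read_status,   /* required */
           .suspend        = genphy_suspend,
           .resume         = genphy_resume,
           .driver         = { .owner = THIS_MODULE },
   };

   static int __init example_phy_init(void)
   {
           return phy_driver_register(&example_phy_driver);
   }
   module_init(example_phy_init);

   static void __exit example_phy_exit(void)
   {
           phy_driver_unregister(&example_phy_driver);
   }
   module_exit(example_phy_exit);

   MODULE_LICENSE("GPL");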
index 5a61a240a6523d64641c0807808a78f8b2eb2e6f..0e30c7845b2b316cb4bb7cdf32807aaf18a55610 100644 (file)
@@ -102,13 +102,18 @@ Examples:
                          The 'minimum' MAC is what you set with dstmac.
 
  pgset "flag [name]"     Set a flag to determine behaviour.  Current flags
-                         are: IPSRC_RND #IP Source is random (between min/max),
-                              IPDST_RND, UDPSRC_RND,
-                              UDPDST_RND, MACSRC_RND, MACDST_RND 
+                         are: IPSRC_RND # IP source is random (between min/max)
+                              IPDST_RND # IP destination is random
+                              UDPSRC_RND, UDPDST_RND,
+                              MACSRC_RND, MACDST_RND
+                              TXSIZE_RND, IPV6,
                               MPLS_RND, VID_RND, SVID_RND
+                              FLOW_SEQ,
                               QUEUE_MAP_RND # queue map random
                               QUEUE_MAP_CPU # queue map mirrors smp_processor_id()
-                              IPSEC # Make IPsec encapsulation for packet
+                              UDPCSUM,
+                              IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
+                              NODE_ALLOC # node specific memory allocation
 
  pgset spi SPI_VALUE     Set specific SA used to transform packet.
 
@@ -233,13 +238,22 @@ udp_dst_max
 
 flag
   IPSRC_RND
-  TXSIZE_RND
   IPDST_RND
   UDPSRC_RND
   UDPDST_RND
   MACSRC_RND
   MACDST_RND
+  TXSIZE_RND
+  IPV6
+  MPLS_RND
+  VID_RND
+  SVID_RND
+  FLOW_SEQ
+  QUEUE_MAP_RND
+  QUEUE_MAP_CPU
+  UDPCSUM
   IPSEC
+  NODE_ALLOC
 
 dst_min
 dst_max
index b89bc82eed4656430ab2d4d4a6454d08ac44624f..16a924c486bf3adb856efc2a2bec72b90311f2ff 100644 (file)
@@ -27,6 +27,8 @@ Contents of this document:
 
  (*) AF_RXRPC kernel interface.
 
+ (*) Configurable parameters.
+
 
 ========
 OVERVIEW
@@ -864,3 +866,82 @@ The kernel interface functions are as follows:
 
      This is used to allocate a null RxRPC key that can be used to indicate
      anonymous security for a particular domain.
+
+
+=======================
+CONFIGURABLE PARAMETERS
+=======================
+
+The RxRPC protocol driver has a number of configurable parameters that can be
+adjusted through sysctls in /proc/sys/net/rxrpc/:
+
+ (*) req_ack_delay
+
+     The amount of time in milliseconds after receiving a packet with the
+     request-ack flag set before we honour the flag and actually send the
+     requested ack.
+
+     Usually the other side won't stop sending packets until the advertised
+     reception window is full (to a maximum of 255 packets), so delaying the
+     ACK permits several packets to be ACK'd in one go.
+
+ (*) soft_ack_delay
+
+     The amount of time in milliseconds after receiving a new packet before we
+     generate a soft-ACK to tell the sender that it doesn't need to resend.
+
+ (*) idle_ack_delay
+
+     The amount of time in milliseconds after all the packets currently in the
+     received queue have been consumed before we generate a hard-ACK to tell
+     the sender it can free its buffers, assuming no other reason occurs that
+     we would send an ACK.
+
+ (*) resend_timeout
+
+     The amount of time in milliseconds after transmitting a packet before we
+     transmit it again, assuming no ACK is received from the receiver telling
+     us they got it.
+
+ (*) max_call_lifetime
+
+     The maximum amount of time in seconds that a call may be in progress
+     before we preemptively kill it.
+
+ (*) dead_call_expiry
+
+     The amount of time in seconds before we remove a dead call from the call
+     list.  Dead calls are kept around for a little while for the purpose of
+     repeating ACK and ABORT packets.
+
+ (*) connection_expiry
+
+     The amount of time in seconds after a connection was last used before we
+     remove it from the connection list.  Whilst a connection is in existence,
+     it serves as a placeholder for negotiated security; when it is deleted,
+     the security must be renegotiated.
+
+ (*) transport_expiry
+
+     The amount of time in seconds after a transport was last used before we
+     remove it from the transport list.  Whilst a transport is in existence, it
+     serves to anchor the peer data and keeps the connection ID counter.
+
+ (*) rxrpc_rx_window_size
+
+     The size of the receive window in packets.  This is the maximum number of
+     unconsumed received packets we're willing to hold in memory for any
+     particular call.
+
+ (*) rxrpc_rx_mtu
+
+     The maximum packet MTU size that we're willing to receive in bytes.  This
+     indicates to the peer whether we're willing to accept jumbo packets.
+
+ (*) rxrpc_rx_jumbo_max
+
+     The maximum number of packets that we're willing to accept in a jumbo
+     packet.  Non-terminal packets in a jumbo packet must contain a four byte
+     header plus exactly 1412 bytes of data.  The terminal packet must contain
+     a four byte header plus any amount of data.  In any event, a jumbo packet
+     may not exceed rxrpc_rx_mtu in size.
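Assuming each parameter is exposed as a sysctl file of the same name under
that directory (verify against the running kernel; the exact file names are
an assumption here), a value can be changed from user space along these
lines:

  /* Sketch: raise req_ack_delay to 10 ms.  The sysctl file name is
   * assumed to match the parameter name listed above.
   */
  #include <stdio.h>

  int main(void)
  {
          FILE *f = fopen("/proc/sys/net/rxrpc/req_ack_delay", "w");

          if (!f) {
                  perror("req_ack_delay");
                  return 1;
          }
          fprintf(f, "%d\n", 10);
          fclose(f);
          return 0;
  }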
index 7d11bb5dc30a7fbb7f4f13317c588e6ae4f00304..bdc4c0db51e1078fb002907124fe7008ef4c0cd4 100644 (file)
@@ -30,7 +30,7 @@ A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
 tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid, min_cwnd must be valid.
+cong_avoid must be valid.
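As an illustration, a minimal registration that reuses the exported Reno
helpers could look roughly like this (a sketch; the helpers are used
precisely because the callback signatures have changed between kernel
versions):

  #include <linux/module.h>
  #include <net/tcp.h>

  static struct tcp_congestion_ops example_cong_ops = {
          .name           = "example",
          .owner          = THIS_MODULE,
          .ssthresh       = tcp_reno_ssthresh,     /* required */
          .cong_avoid     = tcp_reno_cong_avoid,   /* required */
  };

  static int __init example_cong_init(void)
  {
          return tcp_register_congestion_control(&example_cong_ops);
  }
  module_init(example_cong_init);

  static void __exit example_cong_exit(void)
  {
          tcp_unregister_congestion_control(&example_cong_ops);
  }
  module_exit(example_cong_exit);

  MODULE_LICENSE("GPL");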
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space.  This is preallocated space - it
index 048c92b487f6a50b552cf12d47abe4212ba66f36..bc35541249032c4d64c8888741eddc64f87e4f2e 100644 (file)
@@ -202,6 +202,9 @@ Time stamps for outgoing packets are to be generated as follows:
   and not free the skb. A driver not supporting hardware time stamping doesn't
   do that. A driver must never touch sk_buff::tstamp! It is used to store
   software generated time stamps by the network subsystem.
+- The driver should call skb_tx_timestamp() as close to passing the sk_buff to
+  the hardware as possible. skb_tx_timestamp() provides a software time stamp
+  if requested and hardware timestamping is not possible (SKBTX_IN_PROGRESS
+  not set); see the sketch after this list.
 - As soon as the driver has sent the packet and/or obtained a
   hardware time stamp for it, it passes the time stamp back by
   calling skb_hwtstamp_tx() with the original skb, the raw
@@ -212,6 +215,3 @@ Time stamps for outgoing packets are to be generated as follows:
   this would occur at a later time in the processing pipeline than other
   software time stamping and therefore could lead to unexpected deltas
   between time stamps.
-- If the driver did not set the SKBTX_IN_PROGRESS flag (see above), then
-  dev_hard_start_xmit() checks whether software time stamping
-  is wanted as fallback and potentially generates the time stamp.
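The ordering described above can be sketched for a driver's ndo_start_xmit
roughly as follows (descriptor handling elided; only the time stamping
hooks are shown):

  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
  {
          /* ... map the buffer and fill in the TX descriptor ... */

          if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                  /* Promise the stack a hardware time stamp, to be
                   * passed back once the hardware reports it. */
                  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                  /* ... arm time stamping in the TX descriptor ... */
          }

          /* As late as possible before the doorbell: software
           * fallback, a no-op if SKBTX_IN_PROGRESS was set above or
           * no time stamp was requested for this skb. */
          skb_tx_timestamp(skb);

          /* ... hand the descriptor to the hardware ... */

          return NETDEV_TX_OK;
  }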
index 4aba0436da65c309c167028cfa70d7058cbf7fd0..f1ac2dae999e008ca7175b2eee0c482f144b9f12 100644 (file)
@@ -19,6 +19,7 @@
  */
 #include <errno.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <math.h>
 #include <signal.h>
 #include <stdio.h>
@@ -120,11 +121,19 @@ static void usage(char *progname)
                " -i val     index for event/trigger\n"
                " -k val     measure the time offset between system and phc clock\n"
                "            for 'val' times (Maximum 25)\n"
+               " -l         list the current pin configuration\n"
+               " -L pin,val configure pin index 'pin' with function 'val'\n"
+               "            the channel index is taken from the '-i' option\n"
+               "            'val' specifies the auxiliary function:\n"
+               "            0 - none\n"
+               "            1 - external time stamp\n"
+               "            2 - periodic output\n"
                " -p val     enable output with a period of 'val' nanoseconds\n"
                " -P val     enable or disable (val=1|0) the system clock PPS\n"
                " -s         set the ptp clock time from the system time\n"
                " -S         set the system time from the ptp clock time\n"
-               " -t val     shift the ptp clock time by 'val' seconds\n",
+               " -t val     shift the ptp clock time by 'val' seconds\n"
+               " -T val     set the ptp clock time to 'val' seconds\n",
                progname);
 }
 
@@ -134,6 +143,7 @@ int main(int argc, char *argv[])
        struct ptp_extts_event event;
        struct ptp_extts_request extts_request;
        struct ptp_perout_request perout_request;
+       struct ptp_pin_desc desc;
        struct timespec ts;
        struct timex tx;
 
@@ -156,12 +166,15 @@ int main(int argc, char *argv[])
        int extts = 0;
        int gettime = 0;
        int index = 0;
+       int list_pins = 0;
        int oneshot = 0;
        int pct_offset = 0;
        int n_samples = 0;
        int periodic = 0;
        int perout = -1;
+       int pin_index = -1, pin_func;
        int pps = -1;
+       int seconds = 0;
        int settime = 0;
 
        int64_t t1, t2, tp;
@@ -169,7 +182,7 @@ int main(int argc, char *argv[])
 
        progname = strrchr(argv[0], '/');
        progname = progname ? 1+progname : argv[0];
-       while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:p:P:sSt:v"))) {
+       while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:lL:p:P:sSt:T:v"))) {
                switch (c) {
                case 'a':
                        oneshot = atoi(optarg);
@@ -199,6 +212,16 @@ int main(int argc, char *argv[])
                        pct_offset = 1;
                        n_samples = atoi(optarg);
                        break;
+               case 'l':
+                       list_pins = 1;
+                       break;
+               case 'L':
+                       cnt = sscanf(optarg, "%d,%d", &pin_index, &pin_func);
+                       if (cnt != 2) {
+                               usage(progname);
+                               return -1;
+                       }
+                       break;
                case 'p':
                        perout = atoi(optarg);
                        break;
@@ -214,6 +237,10 @@ int main(int argc, char *argv[])
                case 't':
                        adjtime = atoi(optarg);
                        break;
+               case 'T':
+                       settime = 3;
+                       seconds = atoi(optarg);
+                       break;
                case 'h':
                        usage(progname);
                        return 0;
@@ -245,12 +272,14 @@ int main(int argc, char *argv[])
                               "  %d programmable alarms\n"
                               "  %d external time stamp channels\n"
                               "  %d programmable periodic signals\n"
-                              "  %d pulse per second\n",
+                              "  %d pulse per second\n"
+                              "  %d programmable pins\n",
                               caps.max_adj,
                               caps.n_alarm,
                               caps.n_ext_ts,
                               caps.n_per_out,
-                              caps.pps);
+                              caps.pps,
+                              caps.n_pins);
                }
        }
 
@@ -304,6 +333,16 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (settime == 3) {
+               ts.tv_sec = seconds;
+               ts.tv_nsec = 0;
+               if (clock_settime(clkid, &ts)) {
+                       perror("clock_settime");
+               } else {
+                       puts("set time okay");
+               }
+       }
+
        if (extts) {
                memset(&extts_request, 0, sizeof(extts_request));
                extts_request.index = index;
@@ -331,6 +370,24 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (list_pins) {
+               int n_pins = 0;
+               if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
+                       perror("PTP_CLOCK_GETCAPS");
+               } else {
+                       n_pins = caps.n_pins;
+               }
+               for (i = 0; i < n_pins; i++) {
+                       desc.index = i;
+                       if (ioctl(fd, PTP_PIN_GETFUNC, &desc)) {
+                               perror("PTP_PIN_GETFUNC");
+                               break;
+                       }
+                       printf("name %s index %u func %u chan %u\n",
+                              desc.name, desc.index, desc.func, desc.chan);
+               }
+       }
+
        if (oneshot) {
                install_handler(SIGALRM, handle_alarm);
                /* Create a timer. */
@@ -392,6 +449,18 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (pin_index >= 0) {
+               memset(&desc, 0, sizeof(desc));
+               desc.index = pin_index;
+               desc.func = pin_func;
+               desc.chan = index;
+               if (ioctl(fd, PTP_PIN_SETFUNC, &desc)) {
+                       perror("PTP_PIN_SETFUNC");
+               } else {
+                       puts("set pin function okay");
+               }
+       }
+
        if (pps != -1) {
                int enable = pps ? 1 : 0;
                if (ioctl(fd, PTP_ENABLE_PPS, enable)) {
@@ -428,14 +497,14 @@ int main(int argc, char *argv[])
                        interval = t2 - t1;
                        offset = (t2 + t1) / 2 - tp;
 
-                       printf("system time: %ld.%ld\n",
+                       printf("system time: %" PRId64 ".%u\n",
                                (pct+2*i)->sec, (pct+2*i)->nsec);
-                       printf("phc    time: %ld.%ld\n",
+                       printf("phc    time: %" PRId64 ".%u\n",
                                (pct+2*i+1)->sec, (pct+2*i+1)->nsec);
-                       printf("system time: %ld.%ld\n",
+                       printf("system time: %" PRId64 ".%u\n",
                                (pct+2*i+2)->sec, (pct+2*i+2)->nsec);
-                       printf("system/phc clock time offset is %ld ns\n"
-                               "system     clock time delay  is %ld ns\n",
+                       printf("system/phc clock time offset is %" PRId64 " ns\n"
+                              "system     clock time delay  is %" PRId64 " ns\n",
                                offset, interval);
                }
 
index a80c84e4f488ac7ab89d7fc9414901c18838a89d..162b0fe3b0019aa4a3af487e1356f691f1e96c19 100644 (file)
@@ -536,6 +536,13 @@ S: Odd Fixes
 L:     linux-alpha@vger.kernel.org
 F:     arch/alpha/
 
+ALTERA TRIPLE SPEED ETHERNET DRIVER
+M:     Vince Bridgers <vbridgers2013@gmail.com>
+L:     netdev@vger.kernel.org
+L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
+S:     Maintained
+F:     drivers/net/ethernet/altera/
+
 ALTERA UART/JTAG UART SERIAL DRIVERS
 M:     Tobias Klauser <tklauser@distanz.ch>
 L:     linux-serial@vger.kernel.org
@@ -1847,6 +1854,12 @@ L:       netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/b44.*
 
+BROADCOM GENET ETHERNET DRIVER
+M:     Florian Fainelli <f.fainelli@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/broadcom/genet/
+
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:     Michael Chan <mchan@broadcom.com>
 L:     netdev@vger.kernel.org
@@ -6030,6 +6043,7 @@ L:        netdev@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
 S:     Maintained
+F:     net/core/flow.c
 F:     net/xfrm/
 F:     net/key/
 F:     net/ipv4/xfrm*
@@ -6105,6 +6119,7 @@ F:        include/net/nfc/
 F:     include/uapi/linux/nfc.h
 F:     drivers/nfc/
 F:     include/linux/platform_data/pn544.h
+F:     Documentation/devicetree/bindings/net/nfc/
 
 NFS, SUNRPC, AND LOCKD CLIENTS
 M:     Trond Myklebust <trond.myklebust@primarydata.com>
@@ -7064,13 +7079,8 @@ F:       Documentation/networking/LICENSE.qla3xxx
 F:     drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Himanshu Madhani <himanshu.madhani@qlogic.com>
-M:     Rajesh Borundia <rajesh.borundia@qlogic.com>
 M:     Shahed Shaikh <shahed.shaikh@qlogic.com>
-M:     Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M:     Sony Chacko <sony.chacko@qlogic.com>
-M:     Sucheta Chakraborty <sucheta.chakraborty@qlogic.com>
-M:     linux-driver@qlogic.com
+M:     Dept-HSGLinuxNICDev@qlogic.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
@@ -7541,6 +7551,15 @@ S:       Supported
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 F:     drivers/clk/samsung/
 
+SAMSUNG SXGBE DRIVERS
+M:     Byungho An <bh74.an@samsung.com>
+M:     Girish K S <ks.giri@samsung.com>
+M:     Siva Reddy Kallam <siva.kallam@samsung.com>
+M:     Vipul Pandya <vipul.pandya@samsung.com>
+S:     Supported
+L:     netdev@vger.kernel.org
+F:     drivers/net/ethernet/samsung/sxgbe/
+
 SERIAL DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
index de1bc6bbe5850befb37de62465c95ba52b5a2b10..cf18340eb3bbd5ecc4655cf8c12eb54de1279f2b 100644 (file)
@@ -536,11 +536,13 @@ static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
 
 static void __init pandora_wl1251_init(void)
 {
-       struct wl12xx_platform_data pandora_wl1251_pdata;
+       struct wl1251_platform_data pandora_wl1251_pdata;
        int ret;
 
        memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
 
+       pandora_wl1251_pdata.power_gpio = -1;
+
        ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
        if (ret < 0)
                goto fail;
@@ -550,7 +552,7 @@ static void __init pandora_wl1251_init(void)
                goto fail_irq;
 
        pandora_wl1251_pdata.use_eeprom = true;
-       ret = wl12xx_set_platform_data(&pandora_wl1251_pdata);
+       ret = wl1251_set_platform_data(&pandora_wl1251_pdata);
        if (ret < 0)
                goto fail_irq;
 
index 8760bbe3baab9234da17c5a55d3126de189a5778..ddfc8df83c6a9adf2c0d128c688d16e094708ac2 100644 (file)
@@ -84,7 +84,7 @@ enum {
        RX51_SPI_MIPID,         /* LCD panel */
 };
 
-static struct wl12xx_platform_data wl1251_pdata;
+static struct wl1251_platform_data wl1251_pdata;
 static struct tsc2005_platform_data tsc2005_pdata;
 
 #if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
@@ -1173,13 +1173,7 @@ static inline void board_smc91x_init(void)
 
 #endif
 
-static void rx51_wl1251_set_power(bool enable)
-{
-       gpio_set_value(RX51_WL1251_POWER_GPIO, enable);
-}
-
 static struct gpio rx51_wl1251_gpios[] __initdata = {
-       { RX51_WL1251_POWER_GPIO, GPIOF_OUT_INIT_LOW,   "wl1251 power"  },
        { RX51_WL1251_IRQ_GPIO,   GPIOF_IN,             "wl1251 irq"    },
 };
 
@@ -1196,17 +1190,16 @@ static void __init rx51_init_wl1251(void)
        if (irq < 0)
                goto err_irq;
 
-       wl1251_pdata.set_power = rx51_wl1251_set_power;
+       wl1251_pdata.power_gpio = RX51_WL1251_POWER_GPIO;
        rx51_peripherals_spi_board_info[RX51_SPI_WL1251].irq = irq;
 
        return;
 
 err_irq:
        gpio_free(RX51_WL1251_IRQ_GPIO);
-       gpio_free(RX51_WL1251_POWER_GPIO);
 error:
        printk(KERN_ERR "wl1251 board initialisation failed\n");
-       wl1251_pdata.set_power = NULL;
+       wl1251_pdata.power_gpio = -1;
 
        /*
         * Now rx51_peripherals_spi_board_info[1].irq is zero and
index 271b5e9715682ab40869a1ea8a02c9696eaebf07..6f879c319a9dbe9fc406946565da49a0e99f5eb8 100644 (file)
@@ -825,8 +825,8 @@ b_epilogue:
                        break;
                case BPF_S_ANC_RXHASH:
                        ctx->seen |= SEEN_SKB;
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
-                       off = offsetof(struct sk_buff, rxhash);
+                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+                       off = offsetof(struct sk_buff, hash);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_S_ANC_VLAN_TAG:
@@ -925,6 +925,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
 
        fp->bpf_func = (void *)ctx.target;
+       fp->jited = 1;
 out:
        kfree(ctx.offsets);
        return;
@@ -932,7 +933,7 @@ out:
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter)
+       if (fp->jited)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
 }
index 555034f8505e8d1d83bfeef3480a9bc058499754..808ce1cae21ab998439854e31eacaced66c044cb 100644 (file)
@@ -390,9 +390,9 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                                                          mark));
                        break;
                case BPF_S_ANC_RXHASH:
-                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
+                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
-                                                         rxhash));
+                                                         hash));
                        break;
                case BPF_S_ANC_VLAN_TAG:
                case BPF_S_ANC_VLAN_TAG_PRESENT:
@@ -689,6 +689,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                ((u64 *)image)[0] = (u64)code_base;
                ((u64 *)image)[1] = local_paca->kernel_toc;
                fp->bpf_func = (void *)image;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
@@ -697,7 +698,7 @@ out:
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter)
+       if (fp->jited)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
 }
index 708d60e4006676486b3892195b165ede007d2544..9c36dc398f9070afb4d6151d214623d1cbcf9f4a 100644 (file)
@@ -737,10 +737,10 @@ call_fn:  /* lg %r1,<d(function)>(%r13) */
                /* icm  %r5,3,<d(type)>(%r1) */
                EMIT4_DISP(0xbf531000, offsetof(struct net_device, type));
                break;
-       case BPF_S_ANC_RXHASH: /* A = skb->rxhash */
-               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
-               /* l %r5,<d(rxhash)>(%r2) */
-               EMIT4_DISP(0x58502000, offsetof(struct sk_buff, rxhash));
+       case BPF_S_ANC_RXHASH: /* A = skb->hash */
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+               /* l %r5,<d(hash)>(%r2) */
+               EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash));
                break;
        case BPF_S_ANC_VLAN_TAG:
        case BPF_S_ANC_VLAN_TAG_PRESENT:
@@ -877,6 +877,7 @@ void bpf_jit_compile(struct sk_filter *fp)
        if (jit.start) {
                set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *) jit.start;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
@@ -887,10 +888,12 @@ void bpf_jit_free(struct sk_filter *fp)
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;
 
-       if (fp->bpf_func == sk_run_filter)
+       if (!fp->jited)
                goto free_filter;
+
        set_memory_rw(addr, header->pages);
        module_free(NULL, header);
+
 free_filter:
        kfree(fp);
 }
index 01fe9946d388de469e140532b3f973f8a6db28fb..a82c6b2a9780cab8d882c2d13429f4889375229e 100644 (file)
@@ -618,7 +618,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                                emit_load16(r_A, struct net_device, type, r_A);
                                break;
                        case BPF_S_ANC_RXHASH:
-                               emit_skb_load32(rxhash, r_A);
+                               emit_skb_load32(hash, r_A);
                                break;
                        case BPF_S_ANC_VLAN_TAG:
                        case BPF_S_ANC_VLAN_TAG_PRESENT:
@@ -809,6 +809,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf];
        if (image) {
                bpf_flush_icache(image, image + proglen);
                fp->bpf_func = (void *)image;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
@@ -817,7 +818,7 @@ out:
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter)
+       if (fp->jited)
                module_free(NULL, fp->bpf_func);
        kfree(fp);
 }
index 39f186252e02521ccc33eb0fad6b450ccfb19d73..7d26d9c0b2fb85cd96ca25953f2bb84d4022be9d 100644 (file)
@@ -240,7 +240,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        spin_unlock_irqrestore(&lp->lock, flags);
 
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
index 4ed75dd81d052ff96cc832702d174ad4d63bffec..dc017735bb91b7b2ec61f333b091c63accdb921b 100644 (file)
@@ -553,13 +553,13 @@ void bpf_jit_compile(struct sk_filter *fp)
                                }
                                break;
                        case BPF_S_ANC_RXHASH:
-                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
-                               if (is_imm8(offsetof(struct sk_buff, rxhash))) {
+                               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+                               if (is_imm8(offsetof(struct sk_buff, hash))) {
                                        /* mov off8(%rdi),%eax */
-                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
+                                       EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
                                } else {
                                        EMIT2(0x8b, 0x87);
-                                       EMIT(offsetof(struct sk_buff, rxhash), 4);
+                                       EMIT(offsetof(struct sk_buff, hash), 4);
                                }
                                break;
                        case BPF_S_ANC_QUEUE:
@@ -772,6 +772,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                bpf_flush_icache(header, image + proglen);
                set_memory_ro((unsigned long)header, header->pages);
                fp->bpf_func = (void *)image;
+               fp->jited = 1;
        }
 out:
        kfree(addrs);
@@ -791,7 +792,7 @@ static void bpf_jit_free_deferred(struct work_struct *work)
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-       if (fp->bpf_func != sk_run_filter) {
+       if (fp->jited) {
                INIT_WORK(&fp->work, bpf_jit_free_deferred);
                schedule_work(&fp->work);
        } else {
index 86154eab95239fdd6300ddc665f03650b80f1a5b..604f6d99ab92ca859d46b7ff7d8ce4b5519ebb61 100644 (file)
@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
        uint64_t v;
 
        do {
-               start = u64_stats_fetch_begin_bh(&stat->syncp);
+               start = u64_stats_fetch_begin_irq(&stat->syncp);
                v = stat->cnt;
-       } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
+       } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
 
        return v;
 }
@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
        struct blkg_rwstat tmp;
 
        do {
-               start = u64_stats_fetch_begin_bh(&rwstat->syncp);
+               start = u64_stats_fetch_begin_irq(&rwstat->syncp);
                tmp = *rwstat;
-       } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
+       } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
 
        return tmp;
 }
index 62a76076b5482a3bacab4a9cf2a5ff2c269efddb..f1a9198dfe5a4966cbe7f433b357005501eb002a 100644 (file)
@@ -1925,7 +1925,7 @@ static int ucode_init(loader_block *lb, amb_dev *dev)
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
-  const char *errmsg = 0;
+  const char *errmsg = NULL;
   int res;
 
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
index b41c9481b67b94bf8331c9401b18123eb47ffaef..82f2ae0d7cc488a337772aaf8d7a19b3373bcd94 100644 (file)
@@ -736,8 +736,8 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
       
                        skb = td->skb;
                        if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
-                               wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
                                FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
+                               wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
                        }
                        td->dev->ntxpckts--;
 
@@ -1123,7 +1123,7 @@ static void fs_close(struct atm_vcc *atm_vcc)
                   this sleep_on, we'll lose any reference to these packets. Memory leak!
                   On the other hand, it's awfully convenient that we can abort a "close" that
                   is taking too long. Maybe just use non-interruptible sleep on? -- REW */
-               interruptible_sleep_on (& vcc->close_wait);
+               wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
        }
 
        txtp = &atm_vcc->qos.txtp;
@@ -2000,7 +2000,7 @@ static void firestream_remove_one(struct pci_dev *pdev)
 
                fs_dprintk (FS_DEBUG_CLEANUP, "Freeing irq%d.\n", dev->irq);
                free_irq (dev->irq, dev);
-               del_timer (&dev->timer);
+               del_timer_sync (&dev->timer);
 
                atm_dev_deregister(dev->atm_dev);
                free_queue (dev, &dev->hp_txq);
index 45d506363aba8f90c117bfa5c31d4ca046eb4630..909c95bd7be260ec970dc42f1299113333793b4e 100644 (file)
@@ -368,9 +368,9 @@ EXPORT_SYMBOL(idt77105_init);
 
 static void __exit idt77105_exit(void)
 {
-        /* turn off timers */
-        del_timer(&stats_timer);
-        del_timer(&restart_timer);
+       /* turn off timers */
+       del_timer_sync(&stats_timer);
+       del_timer_sync(&restart_timer);
 }
 
 module_exit(idt77105_exit);
index 9587e959ce1af0cefca7d814ad161aa1c709c9bb..9988ac98b6d83ee58fa2774f1c4ced4559dcee74 100644 (file)
@@ -639,9 +639,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
        card->hbnr.init = NUM_HB;
        card->hbnr.max = MAX_HB;
 
-       card->sm_handle = 0x00000000;
+       card->sm_handle = NULL;
        card->sm_addr = 0x00000000;
-       card->lg_handle = 0x00000000;
+       card->lg_handle = NULL;
        card->lg_addr = 0x00000000;
 
        card->efbie = 1;        /* To prevent push_rxbufs from enabling the interrupt */
@@ -979,7 +979,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
                                addr2 = card->sm_addr;
                                handle2 = card->sm_handle;
                                card->sm_addr = 0x00000000;
-                               card->sm_handle = 0x00000000;
+                               card->sm_handle = NULL;
                        } else {        /* (!sm_addr) */
 
                                card->sm_addr = addr1;
@@ -993,7 +993,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
                                addr2 = card->lg_addr;
                                handle2 = card->lg_handle;
                                card->lg_addr = 0x00000000;
-                               card->lg_handle = 0x00000000;
+                               card->lg_handle = NULL;
                        } else {        /* (!lg_addr) */
 
                                card->lg_addr = addr1;
@@ -1739,10 +1739,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
                }
 
                scq->full = 1;
-               spin_unlock_irqrestore(&scq->lock, flags);
-               interruptible_sleep_on_timeout(&scq->scqfull_waitq,
-                                              SCQFULL_TIMEOUT);
-               spin_lock_irqsave(&scq->lock, flags);
+               wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+                                                         scq->tail != scq->next,
+                                                         scq->lock,
+                                                         SCQFULL_TIMEOUT);
 
                if (scq->full) {
                        spin_unlock_irqrestore(&scq->lock, flags);
@@ -1789,10 +1789,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
                        scq->full = 1;
                        if (has_run++)
                                break;
-                       spin_unlock_irqrestore(&scq->lock, flags);
-                       interruptible_sleep_on_timeout(&scq->scqfull_waitq,
-                                                      SCQFULL_TIMEOUT);
-                       spin_lock_irqsave(&scq->lock, flags);
+                       wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+                                                                 scq->tail != scq->next,
+                                                                 scq->lock,
+                                                                 SCQFULL_TIMEOUT);
                }
 
                if (!scq->full) {
index e3fb496c71630a643a1dea0b472fab3238fa755e..943cf0d6abaf8b959e2a879ca901fedaea693b1e 100644 (file)
@@ -760,7 +760,7 @@ static irqreturn_t solos_irq(int irq, void *dev_id)
        return IRQ_RETVAL(handled);
 }
 
-void solos_bh(unsigned long card_arg)
+static void solos_bh(unsigned long card_arg)
 {
        struct solos_card *card = (void *)card_arg;
        uint32_t card_flags;
index 25f9887a35d08e89600e8aca86fc8c72062600f6..d7f81ad56b8af731ea10fb863460cebb717dddc0 100644 (file)
@@ -218,7 +218,14 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 #if IS_BUILTIN(CONFIG_BCMA_HOST_SOC)
        chip->to_irq            = bcma_gpio_to_irq;
 #endif
-       chip->ngpio             = 16;
+       switch (cc->core->bus->chipinfo.id) {
+       case BCMA_CHIP_ID_BCM5357:
+               chip->ngpio     = 32;
+               break;
+       default:
+               chip->ngpio     = 16;
+       }
+
        /* There is just one SoC in one device and its GPIO addresses should be
         * deterministic to address them more easily. The other buses could get
         * a random base number. */
index 106d1d8e16ad4f87ac510f5d02b53dc36e8f23ef..be571fef185da6a597fcdac3d15093ed8e47fb5b 100644 (file)
@@ -62,50 +62,54 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0CF3, 0x3000) },
 
        /* Atheros AR3011 with sflash firmware*/
+       { USB_DEVICE(0x0489, 0xE027) },
+       { USB_DEVICE(0x0489, 0xE03D) },
+       { USB_DEVICE(0x0930, 0x0215) },
        { USB_DEVICE(0x0CF3, 0x3002) },
        { USB_DEVICE(0x0CF3, 0xE019) },
        { USB_DEVICE(0x13d3, 0x3304) },
-       { USB_DEVICE(0x0930, 0x0215) },
-       { USB_DEVICE(0x0489, 0xE03D) },
-       { USB_DEVICE(0x0489, 0xE027) },
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03F0, 0x311D) },
 
        /* Atheros AR3012 with sflash firmware*/
-       { USB_DEVICE(0x0CF3, 0x0036) },
-       { USB_DEVICE(0x0CF3, 0x3004) },
-       { USB_DEVICE(0x0CF3, 0x3008) },
-       { USB_DEVICE(0x0CF3, 0x311D) },
-       { USB_DEVICE(0x0CF3, 0x817a) },
-       { USB_DEVICE(0x13d3, 0x3375) },
+       { USB_DEVICE(0x0489, 0xe04d) },
+       { USB_DEVICE(0x0489, 0xe04e) },
+       { USB_DEVICE(0x0489, 0xe057) },
+       { USB_DEVICE(0x0489, 0xe056) },
+       { USB_DEVICE(0x0489, 0xe05f) },
+       { USB_DEVICE(0x04c5, 0x1330) },
        { USB_DEVICE(0x04CA, 0x3004) },
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x04CA, 0x3006) },
        { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x04CA, 0x300b) },
-       { USB_DEVICE(0x13d3, 0x3362) },
-       { USB_DEVICE(0x0CF3, 0xE004) },
-       { USB_DEVICE(0x0CF3, 0xE005) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0930, 0x0220) },
-       { USB_DEVICE(0x0489, 0xe057) },
-       { USB_DEVICE(0x13d3, 0x3393) },
-       { USB_DEVICE(0x0489, 0xe04e) },
-       { USB_DEVICE(0x0489, 0xe056) },
-       { USB_DEVICE(0x0489, 0xe04d) },
-       { USB_DEVICE(0x04c5, 0x1330) },
-       { USB_DEVICE(0x13d3, 0x3402) },
+       { USB_DEVICE(0x0b05, 0x17d0) },
+       { USB_DEVICE(0x0CF3, 0x0036) },
+       { USB_DEVICE(0x0CF3, 0x3004) },
+       { USB_DEVICE(0x0CF3, 0x3005) },
+       { USB_DEVICE(0x0CF3, 0x3008) },
+       { USB_DEVICE(0x0CF3, 0x311D) },
+       { USB_DEVICE(0x0CF3, 0x311E) },
+       { USB_DEVICE(0x0CF3, 0x311F) },
        { USB_DEVICE(0x0cf3, 0x3121) },
+       { USB_DEVICE(0x0CF3, 0x817a) },
        { USB_DEVICE(0x0cf3, 0xe003) },
-       { USB_DEVICE(0x0489, 0xe05f) },
+       { USB_DEVICE(0x0CF3, 0xE004) },
+       { USB_DEVICE(0x0CF3, 0xE005) },
+       { USB_DEVICE(0x13d3, 0x3362) },
+       { USB_DEVICE(0x13d3, 0x3375) },
+       { USB_DEVICE(0x13d3, 0x3393) },
+       { USB_DEVICE(0x13d3, 0x3402) },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xE02C) },
 
        /* Atheros AR5BBU22 with sflash firmware */
-       { USB_DEVICE(0x0489, 0xE03C) },
        { USB_DEVICE(0x0489, 0xE036) },
+       { USB_DEVICE(0x0489, 0xE03C) },
 
        { }     /* Terminating entry */
 };
@@ -118,36 +122,40 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
 static const struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR3012 with sflash firmware*/
-       { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU22 with sflash firmware */
-       { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
 
        { }     /* Terminating entry */
 };
@@ -174,10 +182,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
        }
 
        memcpy(send_buf, firmware->data, 20);
-       if ((err = usb_control_msg(udev, pipe,
-                               USB_REQ_DFU_DNLOAD,
-                               USB_TYPE_VENDOR, 0, 0,
-                               send_buf, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+       err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
+                             0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT);
+       if (err < 0) {
                BT_ERR("Can't change to loading configuration err");
                goto error;
        }
@@ -360,7 +367,7 @@ static int ath3k_load_patch(struct usb_device *udev)
        }
 
        snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu",
-               fw_version.rom_version);
+               le32_to_cpu(fw_version.rom_version));
 
        ret = request_firmware(&firmware, filename, &udev->dev);
        if (ret < 0) {
@@ -422,7 +429,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
        }
 
        snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
-               fw_version.rom_version, clk_value, ".dfu");
+               le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
 
        ret = request_firmware(&firmware, filename, &udev->dev);
        if (ret < 0) {
index 31386998c9a7b4159f04194f2125ee10d73cc920..b2e7e94a67719945e9d9ef277aed9b694d3601d1 100644 (file)
@@ -131,8 +131,11 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)
 
        BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len);
 
-       if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
-               return -ENOMEM;
+       if (!urb) {
+               urb = usb_alloc_urb(0, GFP_ATOMIC);
+               if (!urb)
+                       return -ENOMEM;
+       }
 
        pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
 
@@ -218,8 +221,11 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)
 
        BT_DBG("bfusb %p urb %p", data, urb);
 
-       if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
-               return -ENOMEM;
+       if (!urb) {
+               urb = usb_alloc_urb(0, GFP_ATOMIC);
+               if (!urb)
+                       return -ENOMEM;
+       }
 
        skb = bt_skb_alloc(size, GFP_ATOMIC);
        if (!skb) {
index 57427de864a657530ec92f1a54fe9e0d5c52938e..dfa5043e68bacc9de66f6ce12c565fb8f410f4b1 100644 (file)
@@ -257,7 +257,8 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
                        ready_bit = XMIT_BUF_ONE_READY;
                }
 
-               if (!(skb = skb_dequeue(&(info->txq))))
+               skb = skb_dequeue(&(info->txq));
+               if (!skb)
                        break;
 
                if (bt_cb(skb)->pkt_type & 0x80) {
@@ -391,7 +392,8 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
                if (info->rx_skb == NULL) {
                        info->rx_state = RECV_WAIT_PACKET_TYPE;
                        info->rx_count = 0;
-                       if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+                       info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+                       if (!info->rx_skb) {
                                BT_ERR("Can't allocate mem for new packet");
                                return;
                        }
@@ -566,7 +568,8 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
        /* Ericsson baud rate command */
        unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
 
-       if (!(skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+       skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+       if (!skb) {
                BT_ERR("Can't allocate mem for new packet");
                return -1;
        }
@@ -898,7 +901,7 @@ static void bluecard_release(struct pcmcia_device *link)
 
        bluecard_close(info);
 
-       del_timer(&(info->timer));
+       del_timer_sync(&(info->timer));
 
        pcmcia_disable_device(link);
 }
index 73d87994d028ad3e64397a61e05a2e8c15f78321..1d82721cf9c67e05653f00f6ae93e20d482c9ff4 100644 (file)
@@ -193,8 +193,8 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
                if (!pcmcia_dev_present(info->p_dev))
                        break;
 
-
-               if (!(skb = skb_dequeue(&(info->txq)))) {
+               skb = skb_dequeue(&(info->txq));
+               if (!skb) {
                        clear_bit(XMIT_SENDING, &(info->tx_state));
                        break;
                }
@@ -238,7 +238,8 @@ static void bt3c_receive(bt3c_info_t *info)
                if (info->rx_skb == NULL) {
                        info->rx_state = RECV_WAIT_PACKET_TYPE;
                        info->rx_count = 0;
-                       if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+                       info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+                       if (!info->rx_skb) {
                                BT_ERR("Can't allocate mem for new packet");
                                return;
                        }
index 1e0320af00c63be9a1fdd808e8c0301dedf7ad0a..2c4997ce248484703a1b859c5e518396fcdbfa64 100644 (file)
@@ -59,12 +59,13 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
                        priv->btmrvl_dev.sendcmdflag = false;
                        priv->adapter->cmd_complete = true;
                        wake_up_interruptible(&priv->adapter->cmd_wait_q);
-               }
 
-               if (hci_opcode_ogf(opcode) == 0x3F) {
-                       BT_DBG("vendor event skipped: opcode=%#4.4x", opcode);
-                       kfree_skb(skb);
-                       return false;
+                       if (hci_opcode_ogf(opcode) == 0x3F) {
+                               BT_DBG("vendor event skipped: opcode=%#4.4x",
+                                      opcode);
+                               kfree_skb(skb);
+                               return false;
+                       }
                }
        }
 
index a03ecc22a561caf4c3d1bdc53489a5b9612745d8..fb948f02eda5b1747a7ca7604133daf88a719ef1 100644 (file)
@@ -149,7 +149,8 @@ static void btuart_write_wakeup(btuart_info_t *info)
                if (!pcmcia_dev_present(info->p_dev))
                        return;
 
-               if (!(skb = skb_dequeue(&(info->txq))))
+               skb = skb_dequeue(&(info->txq));
+               if (!skb)
                        break;
 
                /* Send frame */
@@ -190,7 +191,8 @@ static void btuart_receive(btuart_info_t *info)
                if (info->rx_skb == NULL) {
                        info->rx_state = RECV_WAIT_PACKET_TYPE;
                        info->rx_count = 0;
-                       if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+                       info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+                       if (!info->rx_skb) {
                                BT_ERR("Can't allocate mem for new packet");
                                return;
                        }
index baeaaed299e4e339ee455833c94c9d7aa24421c4..f338b0c5a8de507a153b6761886b943c702176a4 100644 (file)
@@ -101,21 +101,24 @@ static const struct usb_device_id btusb_table[] = {
        { USB_DEVICE(0x0c10, 0x0000) },
 
        /* Broadcom BCM20702A0 */
+       { USB_DEVICE(0x0489, 0xe042) },
+       { USB_DEVICE(0x04ca, 0x2003) },
        { USB_DEVICE(0x0b05, 0x17b5) },
        { USB_DEVICE(0x0b05, 0x17cb) },
-       { USB_DEVICE(0x04ca, 0x2003) },
-       { USB_DEVICE(0x0489, 0xe042) },
        { USB_DEVICE(0x413c, 0x8197) },
 
        /* Foxconn - Hon Hai */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) },
 
-       /*Broadcom devices with vendor specific id */
+       /* Broadcom devices with vendor specific id */
        { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
 
        /* Belkin F8065bf - Broadcom based */
        { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) },
 
+       /* IMC Networks - Broadcom based */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) },
+
        { }     /* Terminating entry */
 };
 
@@ -129,55 +132,59 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
 
        /* Atheros 3011 with sflash firmware */
+       { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+       { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
        { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
-       { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
-       { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
-       { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
 
        /* Atheros AR9285 Malbec with sflash firmware */
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
        /* Atheros 3012 with sflash firmware */
-       { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
 
        /* Atheros AR5BBU12 with sflash firmware */
        { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
 
        /* Atheros AR5BBU22 with sflash firmware */
-       { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
        /* Broadcom BCM2035 */
-       { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
-       { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
        { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
+       { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
+       { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
 
        /* Broadcom BCM2045 */
        { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU },
index 52eed1f3565dbf5868c094d246cf7dee4dcc86a4..2bd8fad172064af8f2211536fe8903e363ef33f1 100644 (file)
@@ -153,7 +153,8 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
                if (!pcmcia_dev_present(info->p_dev))
                        return;
 
-               if (!(skb = skb_dequeue(&(info->txq))))
+               skb = skb_dequeue(&(info->txq));
+               if (!skb)
                        break;
 
                /* Send frame */
@@ -215,13 +216,15 @@ static void dtl1_receive(dtl1_info_t *info)
                info->hdev->stat.byte_rx++;
 
                /* Allocate packet */
-               if (info->rx_skb == NULL)
-                       if (!(info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
+               if (info->rx_skb == NULL) {
+                       info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
+                       if (!info->rx_skb) {
                                BT_ERR("Can't allocate mem for new packet");
                                info->rx_state = RECV_WAIT_NSH;
                                info->rx_count = NSHL;
                                return;
                        }
+               }
 
                *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
                nsh = (nsh_t *)info->rx_skb->data;
index 0bc87f7abd958a262028cdd1f0cb7cb0b624c45e..21cc45b34f134fe8028606e2595d381a125e0926 100644 (file)
@@ -291,7 +291,8 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
        /* First of all, check for unreliable messages in the queue,
           since they have priority */
 
-       if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
+       skb = skb_dequeue(&bcsp->unrel);
+       if (skb != NULL) {
                struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
                if (nskb) {
                        kfree_skb(skb);
@@ -308,16 +309,20 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
 
        spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
 
-       if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
-               struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
-               if (nskb) {
-                       __skb_queue_tail(&bcsp->unack, skb);
-                       mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
-                       spin_unlock_irqrestore(&bcsp->unack.lock, flags);
-                       return nskb;
-               } else {
-                       skb_queue_head(&bcsp->rel, skb);
-                       BT_ERR("Could not dequeue pkt because alloc_skb failed");
+       if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
+               skb = skb_dequeue(&bcsp->rel);
+               if (skb != NULL) {
+                       struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
+                                                               bt_cb(skb)->pkt_type);
+                       if (nskb) {
+                               __skb_queue_tail(&bcsp->unack, skb);
+                               mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
+                               spin_unlock_irqrestore(&bcsp->unack.lock, flags);
+                               return nskb;
+                       } else {
+                               skb_queue_head(&bcsp->rel, skb);
+                               BT_ERR("Could not dequeue pkt because alloc_skb failed");
+                       }
                }
        }
 
@@ -715,6 +720,9 @@ static int bcsp_open(struct hci_uart *hu)
 static int bcsp_close(struct hci_uart *hu)
 {
        struct bcsp_struct *bcsp = hu->priv;
+
+       del_timer_sync(&bcsp->tbcsp);
+
        hu->priv = NULL;
 
        BT_DBG("hu %p", hu);
@@ -722,7 +730,6 @@ static int bcsp_close(struct hci_uart *hu)
        skb_queue_purge(&bcsp->unack);
        skb_queue_purge(&bcsp->rel);
        skb_queue_purge(&bcsp->unrel);
-       del_timer(&bcsp->tbcsp);
 
        kfree(bcsp);
        return 0;
index f6f4974505600a2884f56c01be22bd6a79492f04..04680ead9275c20566aeb8268a06e9393fc3107a 100644 (file)
@@ -206,12 +206,12 @@ static int h5_close(struct hci_uart *hu)
 {
        struct h5 *h5 = hu->priv;
 
+       del_timer_sync(&h5->timer);
+
        skb_queue_purge(&h5->unack);
        skb_queue_purge(&h5->rel);
        skb_queue_purge(&h5->unrel);
 
-       del_timer(&h5->timer);
-
        kfree(h5);
 
        return 0;
@@ -673,7 +673,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
                return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
        }
 
-       if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
+       skb = skb_dequeue(&h5->unrel);
+       if (skb != NULL) {
                nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
                                      skb->data, skb->len);
                if (nskb) {
@@ -690,7 +691,8 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
        if (h5->unack.qlen >= h5->tx_win)
                goto unlock;
 
-       if ((skb = skb_dequeue(&h5->rel)) != NULL) {
+       skb = skb_dequeue(&h5->rel);
+       if (skb != NULL) {
                nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
                                      skb->data, skb->len);
                if (nskb) {
index 6e06f6f6915296ad08d7da394175da19250364db..f1fbf4f1e5bec2e5b3104462c76d0556ddcb8f45 100644 (file)
@@ -271,7 +271,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
 
-       if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) {
+       hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL);
+       if (!hu) {
                BT_ERR("Can't allocate control structure");
                return -ENFILE;
        }
@@ -569,7 +570,8 @@ static int __init hci_uart_init(void)
        hci_uart_ldisc.write_wakeup     = hci_uart_tty_wakeup;
        hci_uart_ldisc.owner            = THIS_MODULE;
 
-       if ((err = tty_register_ldisc(N_HCI, &hci_uart_ldisc))) {
+       err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
+       if (err) {
                BT_ERR("HCI line discipline registration failed. (%d)", err);
                return err;
        }
@@ -614,7 +616,8 @@ static void __exit hci_uart_exit(void)
 #endif
 
        /* Release tty registration of line discipline */
-       if ((err = tty_unregister_ldisc(N_HCI)))
+       err = tty_unregister_ldisc(N_HCI);
+       if (err)
                BT_ERR("Can't unregister HCI line discipline (%d)", err);
 }
 
index 1ef6990a5c7e7c0387b4d9b6aafcb7a5c6f5cdc2..add1c6a720637a4009e35bb2b0a9b4e605fb59b8 100644 (file)
@@ -359,7 +359,7 @@ static const struct file_operations vhci_fops = {
 static struct miscdevice vhci_miscdev= {
        .name   = "vhci",
        .fops   = &vhci_fops,
-       .minor  = MISC_DYNAMIC_MINOR,
+       .minor  = VHCI_MINOR,
 };
 
 static int __init vhci_init(void)
@@ -385,3 +385,4 @@ MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("devname:vhci");
+MODULE_ALIAS_MISCDEV(VHCI_MINOR);
index a36749f1e44a869418e1bcab6481334948618391..9b0ea0a6e26ebd4b5f07360d923d1ad509c41d46 100644 (file)
@@ -139,7 +139,6 @@ static int cn_call_callback(struct sk_buff *skb)
        spin_unlock_bh(&dev->cbdev->queue_lock);
 
        if (cbq != NULL) {
-               err = 0;
                cbq->callback(msg, nsp);
                kfree_skb(skb);
                cn_queue_release_callback(cbq);
index d286bdebe2ab90fc613c7696d41690909247da52..7e98a58aacfd68f6d8af350b2483e225409df55c 100644 (file)
@@ -1647,6 +1647,15 @@ static inline int act_open_has_tid(int status)
               status != CPL_ERR_ARP_MISS;
 }
 
+/* Returns whether a CPL status conveys negative advice.
+ */
+static int is_neg_adv(unsigned int status)
+{
+       return status == CPL_ERR_RTX_NEG_ADVICE ||
+              status == CPL_ERR_PERSIST_NEG_ADVICE ||
+              status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
 #define ACT_OPEN_RETRY_COUNT 2
 
 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1835,7 +1844,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));
 
-       if (status == CPL_ERR_RTX_NEG_ADVICE) {
+       if (is_neg_adv(status)) {
                printk(KERN_WARNING MOD "Connection problems for atid %u\n",
                        atid);
                return 0;
@@ -2265,15 +2274,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
        return 0;
 }
 
-/*
- * Returns whether an ABORT_REQ_RSS message is a negative advice.
- */
-static int is_neg_adv_abort(unsigned int status)
-{
-       return status == CPL_ERR_RTX_NEG_ADVICE ||
-              status == CPL_ERR_PERSIST_NEG_ADVICE;
-}
-
 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
        struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -2287,7 +2287,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int tid = GET_TID(req);
 
        ep = lookup_tid(t, tid);
-       if (is_neg_adv_abort(req->status)) {
+       if (is_neg_adv(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                return 0;
@@ -3570,7 +3570,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
                kfree_skb(skb);
                return 0;
        }
-       if (is_neg_adv_abort(req->status)) {
+       if (is_neg_adv(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
                     ep->hwtid);
                kfree_skb(skb);
index 4a033853312e52c6ff026d8e7e7892f010826255..ba7335fd4ebfca7db505585ac7fbee792fd4d67e 100644 (file)
@@ -64,6 +64,10 @@ struct uld_ctx {
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
+#define DB_FC_RESUME_SIZE 64
+#define DB_FC_RESUME_DELAY 1
+#define DB_FC_DRAIN_THRESH 0
+
 static struct dentry *c4iw_debugfs_root;
 
 struct c4iw_debugfs_data {
@@ -282,7 +286,7 @@ static const struct file_operations stag_debugfs_fops = {
        .llseek  = default_llseek,
 };
 
-static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
+static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
 
 static int stats_show(struct seq_file *seq, void *v)
 {
@@ -311,9 +315,10 @@ static int stats_show(struct seq_file *seq, void *v)
        seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
        seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
        seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
-       seq_printf(seq, " DB State: %s Transitions %llu\n",
+       seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
                   db_state_str[dev->db_state],
-                  dev->rdev.stats.db_state_transitions);
+                  dev->rdev.stats.db_state_transitions,
+                  dev->rdev.stats.db_fc_interruptions);
        seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
        seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
                   dev->rdev.stats.act_ofld_conn_fails);
@@ -643,6 +648,12 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
                goto err4;
        }
+       rdev->status_page = (struct t4_dev_status_page *)
+                           __get_free_page(GFP_KERNEL);
+       if (!rdev->status_page) {
+               pr_err(MOD "error allocating status page\n");
+               goto err4;
+       }
        return 0;
 err4:
        c4iw_rqtpool_destroy(rdev);
@@ -656,6 +667,7 @@ err1:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
        c4iw_destroy_resource(&rdev->resource);
@@ -703,18 +715,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                pr_info("%s: On-Chip Queues not supported on this device.\n",
                        pci_name(infop->pdev));
 
-       if (!is_t4(infop->adapter_type)) {
-               if (!allow_db_fc_on_t5) {
-                       db_fc_threshold = 100000;
-                       pr_info("DB Flow Control Disabled.\n");
-               }
-
-               if (!allow_db_coalescing_on_t5) {
-                       db_coalescing_threshold = -1;
-                       pr_info("DB Coalescing Disabled.\n");
-               }
-       }
-
        devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
        if (!devp) {
                printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -749,6 +749,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        spin_lock_init(&devp->lock);
        mutex_init(&devp->rdev.stats.lock);
        mutex_init(&devp->db_mutex);
+       INIT_LIST_HEAD(&devp->db_fc_list);
 
        if (c4iw_debugfs_root) {
                devp->debugfs_root = debugfs_create_dir(
@@ -977,13 +978,16 @@ static int disable_qp_db(int id, void *p, void *data)
 
 static void stop_queues(struct uld_ctx *ctx)
 {
-       spin_lock_irq(&ctx->dev->lock);
-       if (ctx->dev->db_state == NORMAL) {
-               ctx->dev->rdev.stats.db_state_transitions++;
-               ctx->dev->db_state = FLOW_CONTROL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ctx->dev->lock, flags);
+       ctx->dev->rdev.stats.db_state_transitions++;
+       ctx->dev->db_state = STOPPED;
+       if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
                idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-       }
-       spin_unlock_irq(&ctx->dev->lock);
+       else
+               ctx->dev->rdev.status_page->db_off = 1;
+       spin_unlock_irqrestore(&ctx->dev->lock, flags);
 }
 
 static int enable_qp_db(int id, void *p, void *data)
@@ -994,15 +998,70 @@ static int enable_qp_db(int id, void *p, void *data)
        return 0;
 }
 
+static void resume_rc_qp(struct c4iw_qp *qp)
+{
+       spin_lock(&qp->lock);
+       t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
+       qp->wq.sq.wq_pidx_inc = 0;
+       t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
+       qp->wq.rq.wq_pidx_inc = 0;
+       spin_unlock(&qp->lock);
+}
+
+static void resume_a_chunk(struct uld_ctx *ctx)
+{
+       int i;
+       struct c4iw_qp *qp;
+
+       for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
+               qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
+                                     db_fc_entry);
+               list_del_init(&qp->db_fc_entry);
+               resume_rc_qp(qp);
+               if (list_empty(&ctx->dev->db_fc_list))
+                       break;
+       }
+}
+
 static void resume_queues(struct uld_ctx *ctx)
 {
        spin_lock_irq(&ctx->dev->lock);
-       if (ctx->dev->qpcnt <= db_fc_threshold &&
-           ctx->dev->db_state == FLOW_CONTROL) {
-               ctx->dev->db_state = NORMAL;
-               ctx->dev->rdev.stats.db_state_transitions++;
-               idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
+       if (ctx->dev->db_state != STOPPED)
+               goto out;
+       ctx->dev->db_state = FLOW_CONTROL;
+       while (1) {
+               if (list_empty(&ctx->dev->db_fc_list)) {
+                       WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
+                       ctx->dev->db_state = NORMAL;
+                       ctx->dev->rdev.stats.db_state_transitions++;
+                       if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
+                               idr_for_each(&ctx->dev->qpidr, enable_qp_db,
+                                            NULL);
+                       } else {
+                               ctx->dev->rdev.status_page->db_off = 0;
+                       }
+                       break;
+               } else {
+                       if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
+                           < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
+                              DB_FC_DRAIN_THRESH)) {
+                               resume_a_chunk(ctx);
+                       }
+                       if (!list_empty(&ctx->dev->db_fc_list)) {
+                               spin_unlock_irq(&ctx->dev->lock);
+                               if (DB_FC_RESUME_DELAY) {
+                                       set_current_state(TASK_UNINTERRUPTIBLE);
+                                       schedule_timeout(DB_FC_RESUME_DELAY);
+                               }
+                               spin_lock_irq(&ctx->dev->lock);
+                               if (ctx->dev->db_state != FLOW_CONTROL)
+                                       break;
+                       }
+               }
        }
+out:
+       if (ctx->dev->db_state != NORMAL)
+               ctx->dev->rdev.stats.db_fc_interruptions++;
        spin_unlock_irq(&ctx->dev->lock);
 }
 
@@ -1028,12 +1087,12 @@ static int count_qps(int id, void *p, void *data)
        return 0;
 }
 
-static void deref_qps(struct qp_list qp_list)
+static void deref_qps(struct qp_list *qp_list)
 {
        int idx;
 
-       for (idx = 0; idx < qp_list.idx; idx++)
-               c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
+       for (idx = 0; idx < qp_list->idx; idx++)
+               c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
 }
 
 static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
@@ -1044,17 +1103,22 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
        for (idx = 0; idx < qp_list->idx; idx++) {
                struct c4iw_qp *qp = qp_list->qps[idx];
 
+               spin_lock_irq(&qp->rhp->lock);
+               spin_lock(&qp->lock);
                ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
                                          qp->wq.sq.qid,
                                          t4_sq_host_wq_pidx(&qp->wq),
                                          t4_sq_wq_size(&qp->wq));
                if (ret) {
-                       printk(KERN_ERR MOD "%s: Fatal error - "
+                       pr_err(MOD "%s: Fatal error - "
                               "DB overflow recovery failed - "
                               "error syncing SQ qid %u\n",
                               pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
+                       spin_unlock(&qp->lock);
+                       spin_unlock_irq(&qp->rhp->lock);
                        return;
                }
+               qp->wq.sq.wq_pidx_inc = 0;
 
                ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
                                          qp->wq.rq.qid,
@@ -1062,12 +1126,17 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
                                          t4_rq_wq_size(&qp->wq));
 
                if (ret) {
-                       printk(KERN_ERR MOD "%s: Fatal error - "
+                       pr_err(MOD "%s: Fatal error - "
                               "DB overflow recovery failed - "
                               "error syncing RQ qid %u\n",
                               pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
+                       spin_unlock(&qp->lock);
+                       spin_unlock_irq(&qp->rhp->lock);
                        return;
                }
+               qp->wq.rq.wq_pidx_inc = 0;
+               spin_unlock(&qp->lock);
+               spin_unlock_irq(&qp->rhp->lock);
 
                /* Wait for the dbfifo to drain */
                while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
@@ -1083,36 +1152,22 @@ static void recover_queues(struct uld_ctx *ctx)
        struct qp_list qp_list;
        int ret;
 
-       /* lock out kernel db ringers */
-       mutex_lock(&ctx->dev->db_mutex);
-
-       /* put all queues in to recovery mode */
-       spin_lock_irq(&ctx->dev->lock);
-       ctx->dev->db_state = RECOVERY;
-       ctx->dev->rdev.stats.db_state_transitions++;
-       idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
-       spin_unlock_irq(&ctx->dev->lock);
-
        /* slow everybody down */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(usecs_to_jiffies(1000));
 
-       /* Wait for the dbfifo to completely drain. */
-       while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(usecs_to_jiffies(10));
-       }
-
        /* flush the SGE contexts */
        ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
        if (ret) {
                printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
                       pci_name(ctx->lldi.pdev));
-               goto out;
+               return;
        }
 
        /* Count active queues so we can build a list of queues to recover */
        spin_lock_irq(&ctx->dev->lock);
+       WARN_ON(ctx->dev->db_state != STOPPED);
+       ctx->dev->db_state = RECOVERY;
        idr_for_each(&ctx->dev->qpidr, count_qps, &count);
 
        qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
@@ -1120,7 +1175,7 @@ static void recover_queues(struct uld_ctx *ctx)
                printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
                       pci_name(ctx->lldi.pdev));
                spin_unlock_irq(&ctx->dev->lock);
-               goto out;
+               return;
        }
        qp_list.idx = 0;
 
@@ -1133,29 +1188,13 @@ static void recover_queues(struct uld_ctx *ctx)
        recover_lost_dbs(ctx, &qp_list);
 
        /* we're almost done!  deref the qps and clean up */
-       deref_qps(qp_list);
+       deref_qps(&qp_list);
        kfree(qp_list.qps);
 
-       /* Wait for the dbfifo to completely drain again */
-       while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(usecs_to_jiffies(10));
-       }
-
-       /* resume the queues */
        spin_lock_irq(&ctx->dev->lock);
-       if (ctx->dev->qpcnt > db_fc_threshold)
-               ctx->dev->db_state = FLOW_CONTROL;
-       else {
-               ctx->dev->db_state = NORMAL;
-               idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
-       }
-       ctx->dev->rdev.stats.db_state_transitions++;
+       WARN_ON(ctx->dev->db_state != RECOVERY);
+       ctx->dev->db_state = STOPPED;
        spin_unlock_irq(&ctx->dev->lock);
-
-out:
-       /* start up kernel db ringers again */
-       mutex_unlock(&ctx->dev->db_mutex);
 }
 
 static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
@@ -1165,9 +1204,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
        switch (control) {
        case CXGB4_CONTROL_DB_FULL:
                stop_queues(ctx);
-               mutex_lock(&ctx->dev->rdev.stats.lock);
                ctx->dev->rdev.stats.db_full++;
-               mutex_unlock(&ctx->dev->rdev.stats.lock);
                break;
        case CXGB4_CONTROL_DB_EMPTY:
                resume_queues(ctx);
index 23eaeabab93b50d483e279de2adb7175c7c29dfc..eb18f9be35e4f84ffb64d4d8e24d6fbba1dd087d 100644 (file)
@@ -109,6 +109,7 @@ struct c4iw_dev_ucontext {
 
 enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
+       T4_STATUS_PAGE_DISABLED = (1<<1),
 };
 
 struct c4iw_stat {
@@ -130,6 +131,7 @@ struct c4iw_stats {
        u64  db_empty;
        u64  db_drop;
        u64  db_state_transitions;
+       u64  db_fc_interruptions;
        u64  tcam_full;
        u64  act_ofld_conn_fails;
        u64  pas_ofld_conn_fails;
@@ -150,6 +152,7 @@ struct c4iw_rdev {
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
+       struct t4_dev_status_page *status_page;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -211,7 +214,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
-       RECOVERY = 2
+       RECOVERY = 2,
+       STOPPED = 3
 };
 
 struct c4iw_dev {
@@ -225,10 +229,10 @@ struct c4iw_dev {
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
-       int qpcnt;
        struct idr hwtid_idr;
        struct idr atid_idr;
        struct idr stid_idr;
+       struct list_head db_fc_list;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -432,6 +436,7 @@ struct c4iw_qp_attributes {
 
 struct c4iw_qp {
        struct ib_qp ibqp;
+       struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
index 7e94c9a656a1429d37c14bd2e7b8f4b8590d494e..79429256023a7e2e7be7f83e7f1b305233f5348a 100644 (file)
@@ -106,15 +106,56 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 {
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+       static int warned;
+       struct c4iw_alloc_ucontext_resp uresp;
+       int ret = 0;
+       struct c4iw_mm_entry *mm = NULL;
 
        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
-       if (!context)
-               return ERR_PTR(-ENOMEM);
+       if (!context) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
+
+       if (udata->outlen < sizeof(uresp)) {
+               if (!warned++)
+                       pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.\n");
+               rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
+       } else {
+               mm = kmalloc(sizeof(*mm), GFP_KERNEL);
+               if (!mm) {
+                       ret = -ENOMEM;
+                       goto err_free;
+               }
+
+               uresp.status_page_size = PAGE_SIZE;
+
+               spin_lock(&context->mmap_lock);
+               uresp.status_page_key = context->key;
+               context->key += PAGE_SIZE;
+               spin_unlock(&context->mmap_lock);
+
+               ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+               if (ret)
+                       goto err_mm;
+
+               mm->key = uresp.status_page_key;
+               mm->addr = virt_to_phys(rhp->rdev.status_page);
+               mm->len = PAGE_SIZE;
+               insert_mmap(context, mm);
+       }
        return &context->ibucontext;
+err_mm:
+       kfree(mm);
+err_free:
+       kfree(context);
+err:
+       return ERR_PTR(ret);
 }
 
 static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
index 582936708e6e492dfca46b88b5db98f16159c0db..3b62eb556a4780f56cfd5131ca69272a388523b4 100644 (file)
@@ -638,6 +638,46 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
                wake_up(&(to_c4iw_qp(qp)->wait));
 }
 
+static void add_to_fc_list(struct list_head *head, struct list_head *entry)
+{
+       if (list_empty(entry))
+               list_add_tail(entry, head);
+}
+
+static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&qhp->rhp->lock, flags);
+       spin_lock(&qhp->lock);
+       if (qhp->rhp->db_state == NORMAL) {
+               t4_ring_sq_db(&qhp->wq, inc);
+       } else {
+               add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+               qhp->wq.sq.wq_pidx_inc += inc;
+       }
+       spin_unlock(&qhp->lock);
+       spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+       return 0;
+}
+
+static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&qhp->rhp->lock, flags);
+       spin_lock(&qhp->lock);
+       if (qhp->rhp->db_state == NORMAL) {
+               t4_ring_rq_db(&qhp->wq, inc);
+       } else {
+               add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+               qhp->wq.rq.wq_pidx_inc += inc;
+       }
+       spin_unlock(&qhp->lock);
+       spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+       return 0;
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -750,9 +790,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                t4_sq_produce(&qhp->wq, len16);
                idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
        }
-       if (t4_wq_db_enabled(&qhp->wq))
+       if (!qhp->rhp->rdev.status_page->db_off) {
                t4_ring_sq_db(&qhp->wq, idx);
-       spin_unlock_irqrestore(&qhp->lock, flag);
+               spin_unlock_irqrestore(&qhp->lock, flag);
+       } else {
+               spin_unlock_irqrestore(&qhp->lock, flag);
+               ring_kernel_sq_db(qhp, idx);
+       }
        return err;
 }
 
@@ -812,9 +856,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                wr = wr->next;
                num_wrs--;
        }
-       if (t4_wq_db_enabled(&qhp->wq))
+       if (!qhp->rhp->rdev.status_page->db_off) {
                t4_ring_rq_db(&qhp->wq, idx);
-       spin_unlock_irqrestore(&qhp->lock, flag);
+               spin_unlock_irqrestore(&qhp->lock, flag);
+       } else {
+               spin_unlock_irqrestore(&qhp->lock, flag);
+               ring_kernel_rq_db(qhp, idx);
+       }
        return err;
 }
 
@@ -1200,35 +1248,6 @@ out:
        return ret;
 }
 
-/*
- * Called by the library when the qp has user dbs disabled due to
- * a DB_FULL condition.  This function will single-thread all user
- * DB rings to avoid overflowing the hw db-fifo.
- */
-static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
-{
-       int delay = db_delay_usecs;
-
-       mutex_lock(&qhp->rhp->db_mutex);
-       do {
-
-               /*
-                * The interrupt threshold is dbfifo_int_thresh << 6. So
-                * make sure we don't cross that and generate an interrupt.
-                */
-               if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
-                   (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
-                       writel(QID(qid) | PIDX(inc), qhp->wq.db);
-                       break;
-               }
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(usecs_to_jiffies(delay));
-               delay = min(delay << 1, 2000);
-       } while (1);
-       mutex_unlock(&qhp->rhp->db_mutex);
-       return 0;
-}
-
 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
@@ -1278,11 +1297,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        }
 
        if (mask & C4IW_QP_ATTR_SQ_DB) {
-               ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+               ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
                goto out;
        }
        if (mask & C4IW_QP_ATTR_RQ_DB) {
-               ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+               ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
                goto out;
        }
 
@@ -1465,14 +1484,6 @@ out:
        return ret;
 }
 
-static int enable_qp_db(int id, void *p, void *data)
-{
-       struct c4iw_qp *qp = p;
-
-       t4_enable_wq_db(&qp->wq);
-       return 0;
-}
-
 int c4iw_destroy_qp(struct ib_qp *ib_qp)
 {
        struct c4iw_dev *rhp;
@@ -1490,22 +1501,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
                c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);
 
-       spin_lock_irq(&rhp->lock);
-       remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
-       rhp->qpcnt--;
-       BUG_ON(rhp->qpcnt < 0);
-       if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
-               rhp->rdev.stats.db_state_transitions++;
-               rhp->db_state = NORMAL;
-               idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
-       }
-       if (db_coalescing_threshold >= 0)
-               if (rhp->qpcnt <= db_coalescing_threshold)
-                       cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
-       spin_unlock_irq(&rhp->lock);
+       remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
+       spin_lock_irq(&rhp->lock);
+       if (!list_empty(&qhp->db_fc_entry))
+               list_del_init(&qhp->db_fc_entry);
+       spin_unlock_irq(&rhp->lock);
+
        ucontext = ib_qp->uobject ?
                   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
        destroy_qp(&rhp->rdev, &qhp->wq,
@@ -1516,14 +1520,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        return 0;
 }
 
-static int disable_qp_db(int id, void *p, void *data)
-{
-       struct c4iw_qp *qp = p;
-
-       t4_disable_wq_db(&qp->wq);
-       return 0;
-}
-
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata)
 {
@@ -1610,20 +1606,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
 
-       spin_lock_irq(&rhp->lock);
-       if (rhp->db_state != NORMAL)
-               t4_disable_wq_db(&qhp->wq);
-       rhp->qpcnt++;
-       if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
-               rhp->rdev.stats.db_state_transitions++;
-               rhp->db_state = FLOW_CONTROL;
-               idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
-       }
-       if (db_coalescing_threshold >= 0)
-               if (rhp->qpcnt > db_coalescing_threshold)
-                       cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
-       ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
-       spin_unlock_irq(&rhp->lock);
+       ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
                goto err2;
 
@@ -1709,6 +1692,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
+       INIT_LIST_HEAD(&qhp->db_fc_entry);
        PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
             __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.sq.qid);
index e73ace739183a9864ab8fee70e374cdc9ce0d178..eeca8b1e63764cbbd7e8f1a0760028730bd9e944 100644 (file)
@@ -300,6 +300,7 @@ struct t4_sq {
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
+       u16 wq_pidx_inc;
        u16 flags;
        short flush_cidx;
 };
@@ -324,6 +325,7 @@ struct t4_rq {
        u16 cidx;
        u16 pidx;
        u16 wq_pidx;
+       u16 wq_pidx_inc;
 };
 
 struct t4_wq {
@@ -609,3 +611,7 @@ static inline void t4_set_cq_in_error(struct t4_cq *cq)
        ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
 }
 #endif
+
+struct t4_dev_status_page {
+       u8 db_off;
+};
index 32b754c35ab78f5f11f9e0014de8952df549e060..11ccd276e5d9c334d2cb3aa445ddaac1401483b7 100644 (file)
@@ -70,4 +70,9 @@ struct c4iw_create_qp_resp {
        __u32 qid_mask;
        __u32 flags;
 };
+
+struct c4iw_alloc_ucontext_resp {
+       __u64 status_page_key;
+       __u32 status_page_size;
+};
 #endif
index 2f215b93db6ba5a68b6d7e6fc38d21c91ff0cfa9..0eb141c41416cd89c99a3529df3e1dd0f2246234 100644 (file)
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                        continue;
 
                slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
-               if (slave_id >= dev->dev->num_slaves)
+               if (slave_id >= dev->dev->num_vfs + 1)
                        return;
                tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
                form_cache_ag = get_cached_alias_guid(dev, port_num,
index d1f5f1dd77b0db3d364fe447bd7f3259b4a0ff6d..56a593e0ae5d1f537db0f3615ee29eb99b0defc2 100644 (file)
@@ -61,6 +61,11 @@ struct cm_generic_msg {
        __be32 remote_comm_id;
 };
 
+struct cm_sidr_generic_msg {
+       struct ib_mad_hdr hdr;
+       __be32 request_id;
+};
+
 struct cm_req_msg {
        unsigned char unused[0x60];
        union ib_gid primary_path_sgid;
@@ -69,28 +74,62 @@ struct cm_req_msg {
 
 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
 {
-       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-       msg->local_comm_id = cpu_to_be32(cm_id);
+       if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+               struct cm_sidr_generic_msg *msg =
+                       (struct cm_sidr_generic_msg *)mad;
+               msg->request_id = cpu_to_be32(cm_id);
+       } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+               pr_err("trying to set local_comm_id in SIDR_REP\n");
+               return;
+       } else {
+               struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+               msg->local_comm_id = cpu_to_be32(cm_id);
+       }
 }
 
 static u32 get_local_comm_id(struct ib_mad *mad)
 {
-       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-
-       return be32_to_cpu(msg->local_comm_id);
+       if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+               struct cm_sidr_generic_msg *msg =
+                       (struct cm_sidr_generic_msg *)mad;
+               return be32_to_cpu(msg->request_id);
+       } else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+               pr_err("trying to get local_comm_id in SIDR_REP\n");
+               return -1;
+       } else {
+               struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+               return be32_to_cpu(msg->local_comm_id);
+       }
 }
 
 static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
 {
-       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-       msg->remote_comm_id = cpu_to_be32(cm_id);
+       if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+               struct cm_sidr_generic_msg *msg =
+                       (struct cm_sidr_generic_msg *)mad;
+               msg->request_id = cpu_to_be32(cm_id);
+       } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+               pr_err("trying to set remote_comm_id in SIDR_REQ\n");
+               return;
+       } else {
+               struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+               msg->remote_comm_id = cpu_to_be32(cm_id);
+       }
 }
 
 static u32 get_remote_comm_id(struct ib_mad *mad)
 {
-       struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-
-       return be32_to_cpu(msg->remote_comm_id);
+       if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
+               struct cm_sidr_generic_msg *msg =
+                       (struct cm_sidr_generic_msg *)mad;
+               return be32_to_cpu(msg->request_id);
+       } else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+               pr_err("trying to get remote_comm_id in SIDR_REQ\n");
+               return -1;
+       } else {
+               struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+               return be32_to_cpu(msg->remote_comm_id);
+       }
 }
 
 static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
@@ -282,19 +321,21 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
        u32 sl_cm_id;
        int pv_cm_id = -1;
 
-       sl_cm_id = get_local_comm_id(mad);
-
        if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
-                       mad->mad_hdr.attr_id == CM_REP_ATTR_ID) {
+                       mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
+                       mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
+               sl_cm_id = get_local_comm_id(mad);
                id = id_map_alloc(ibdev, slave_id, sl_cm_id);
                if (IS_ERR(id)) {
                        mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
                                __func__, slave_id, sl_cm_id);
                        return PTR_ERR(id);
                }
-       } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID) {
+       } else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
+                  mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
                return 0;
        } else {
+               sl_cm_id = get_local_comm_id(mad);
                id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
        }
 
@@ -315,14 +356,18 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 }
 
 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
-                                                            struct ib_mad *mad)
+                            struct ib_mad *mad)
 {
        u32 pv_cm_id;
        struct id_map_entry *id;
 
-       if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID) {
+       if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
+           mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
                union ib_gid gid;
 
+               if (!slave)
+                       return 0;
+
                gid = gid_from_req_msg(ibdev, mad);
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
@@ -341,7 +386,8 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                return -ENOENT;
        }
 
-       *slave = id->slave_id;
+       if (slave)
+               *slave = id->slave_id;
        set_remote_comm_id(mad, id->sl_cm_id);
 
        if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
index cc40f08ca8f1d353faafde91015ab13c247bad4b..5f640814cc81763e61fad3fb2bcdc50993a57a51 100644 (file)
@@ -564,7 +564,7 @@ static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 }
 
 static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
-                          unsigned tail, struct mlx4_cqe *cqe)
+                          unsigned tail, struct mlx4_cqe *cqe, int is_eth)
 {
        struct mlx4_ib_proxy_sqp_hdr *hdr;
 
@@ -574,12 +574,20 @@ static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct
                                   DMA_FROM_DEVICE);
        hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
        wc->pkey_index  = be16_to_cpu(hdr->tun.pkey_index);
-       wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
-       wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
        wc->src_qp      = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
        wc->wc_flags   |= (hdr->tun.g_ml_path & 0x80) ? (IB_WC_GRH) : 0;
        wc->dlid_path_bits = 0;
 
+       if (is_eth) {
+               wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
+               memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
+               memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
+               wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
+       } else {
+               wc->slid        = be16_to_cpu(hdr->tun.slid_mac_47_32);
+               wc->sl          = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
+       }
+
        return 0;
 }
 
@@ -594,6 +602,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
        struct mlx4_srq *msrq = NULL;
        int is_send;
        int is_error;
+       int is_eth;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;
        unsigned tail = 0;
@@ -778,11 +787,15 @@ repoll:
                        break;
                }
 
+               is_eth = (rdma_port_get_link_layer(wc->qp->device,
+                                                 (*cur_qp)->port) ==
+                         IB_LINK_LAYER_ETHERNET);
                if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
                        if ((*cur_qp)->mlx4_ib_qp_type &
                            (MLX4_IB_QPT_PROXY_SMI_OWNER |
                             MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
-                               return use_tunnel_data(*cur_qp, cq, wc, tail, cqe);
+                               return use_tunnel_data(*cur_qp, cq, wc, tail,
+                                                      cqe, is_eth);
                }
 
                wc->slid           = be16_to_cpu(cqe->rlid);
@@ -793,20 +806,21 @@ repoll:
                wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->wc_flags      |= mlx4_ib_ipoib_csum_ok(cqe->status,
                                        cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
-               if (rdma_port_get_link_layer(wc->qp->device,
-                               (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+               if (is_eth) {
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
-               else
-                       wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
-               if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
-                       wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
-                               MLX4_CQE_VID_MASK;
+                       if (be32_to_cpu(cqe->vlan_my_qpn) &
+                                       MLX4_CQE_VLAN_PRESENT_MASK) {
+                               wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+                                       MLX4_CQE_VID_MASK;
+                       } else {
+                               wc->vlan_id = 0xffff;
+                       }
+                       memcpy(wc->smac, cqe->smac, ETH_ALEN);
+                       wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
                } else {
+                       wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
                        wc->vlan_id = 0xffff;
                }
-               wc->wc_flags |= IB_WC_WITH_VLAN;
-               memcpy(wc->smac, cqe->smac, ETH_ALEN);
-               wc->wc_flags |= IB_WC_WITH_SMAC;
        }
 
        return 0;
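
With these hunks the poll loop reports L2 metadata for RoCE completions: vlan_id and smac are filled, and IB_WC_WITH_VLAN/IB_WC_WITH_SMAC are set, only when the port's link layer is Ethernet, while IB ports keep reporting sl as before. A consumer should therefore check the flags before trusting those fields; a minimal sketch, with wc_vlan_and_smac() being a hypothetical caller-side helper:

    #include <linux/etherdevice.h>
    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    /* Pull the L2 reply information out of a receive completion. */
    static u16 wc_vlan_and_smac(const struct ib_wc *wc, u8 *smac /* ETH_ALEN */)
    {
        eth_zero_addr(smac);
        if (wc->wc_flags & IB_WC_WITH_SMAC)
            memcpy(smac, wc->smac, ETH_ALEN);

        /* 0xffff means "no VLAN tag", matching the convention above */
        return (wc->wc_flags & IB_WC_WITH_VLAN) ? wc->vlan_id : 0xffff;
    }
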
index f2a3f48107e71e1d53b0abb61757f1f0ba38bb59..fd36ec67263208745170b28fce61e0ffec88844d 100644 (file)
@@ -467,6 +467,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
        int ret = 0;
        u16 tun_pkey_ix;
        u16 cached_pkey;
+       u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
        if (dest_qpt > IB_QPT_GSI)
                return -EINVAL;
@@ -509,6 +510,10 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
         * The driver will set the force loopback bit in post_send */
        memset(&attr, 0, sizeof attr);
        attr.port_num = port;
+       if (is_eth) {
+               memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
+               attr.ah_flags = IB_AH_GRH;
+       }
        ah = ib_create_ah(tun_ctx->pd, &attr);
        if (IS_ERR(ah))
                return -ENOMEM;
@@ -540,11 +545,36 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
 
        /* adjust tunnel data */
        tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
-       tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
-       tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
        tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
        tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
 
+       if (is_eth) {
+               u16 vlan = 0;
+               if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
+                                               NULL)) {
+                       /* VST mode */
+                       if (vlan != wc->vlan_id)
+                               /* Packet vlan is not the VST-assigned vlan.
+                                * Drop the packet.
+                                */
+                               goto out;
+                       else
+                               /* Remove the vlan tag before forwarding
+                                * the packet to the VF.
+                                */
+                               vlan = 0xffff;
+               } else {
+                       vlan = wc->vlan_id;
+               }
+
+               tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
+               memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
+               memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
+       } else {
+               tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
+               tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
+       }
+
        ib_dma_sync_single_for_device(&dev->ib_dev,
                                      tun_qp->tx_ring[tun_tx_ix].buf.map,
                                      sizeof (struct mlx4_rcv_tunnel_mad),
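
The VST branch above decides what goes into the tunnel header: when the hypervisor has assigned a default VLAN to the VF, only traffic tagged with that VLAN is forwarded and the tag is replaced by 0xffff (stripped) before the VF sees it; otherwise the received tag is passed through unchanged. The same decision as a standalone helper, a sketch only (tunnel_vlan_for_slave() is a hypothetical name; mlx4_get_slave_default_vlan() is the call used above and is assumed to be declared by the mlx4 core headers):

    /* Returns the vlan id to put in the tunnel header, or -1 to drop. */
    static int tunnel_vlan_for_slave(struct mlx4_dev *mdev, u8 port, int slave,
                                     u16 pkt_vlan)
    {
        u16 admin_vlan = 0;

        if (mlx4_get_slave_default_vlan(mdev, port, slave, &admin_vlan, NULL)) {
            /* VST: only the administratively assigned vlan may pass,
             * and the tag is stripped before tunnelling to the VF.
             */
            return pkt_vlan == admin_vlan ? 0xffff : -1;
        }
        return pkt_vlan;    /* no VST vlan configured: pass through */
    }
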
@@ -580,6 +610,41 @@ static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
        int err;
        int slave;
        u8 *slave_id;
+       int is_eth = 0;
+
+       if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+               is_eth = 0;
+       else
+               is_eth = 1;
+
+       if (is_eth) {
+               if (!(wc->wc_flags & IB_WC_GRH)) {
+                       mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
+                       return -EINVAL;
+               }
+               if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
+                       mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
+                       return -EINVAL;
+               }
+               if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
+                       mlx4_ib_warn(ibdev, "failed matching grh\n");
+                       return -ENOENT;
+               }
+               if (slave >= dev->dev->caps.sqp_demux) {
+                       mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed: %d\n",
+                                    slave, dev->dev->caps.sqp_demux);
+                       return -ENOENT;
+               }
+
+               if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
+                       return 0;
+
+               err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
+               if (err)
+                       pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
+                                slave, err);
+               return 0;
+       }
 
        /* Initially assume that this mad is for us */
        slave = mlx4_master_func_num(dev->dev);
@@ -1076,8 +1141,9 @@ static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
 
 
 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
-                        enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
-                        u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
+                        enum ib_qp_type dest_qpt, u16 pkey_index,
+                        u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
+                        u8 *s_mac, struct ib_mad *mad)
 {
        struct ib_sge list;
        struct ib_send_wr wr, *bad_wr;
@@ -1166,6 +1232,9 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
        wr.num_sge = 1;
        wr.opcode = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;
+       if (s_mac)
+               memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
+
 
        ret = ib_post_send(send_qp, &wr, &bad_wr);
 out:
@@ -1174,6 +1243,22 @@ out:
        return ret;
 }
 
+static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
+{
+       if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
+               return slave;
+       return mlx4_get_base_gid_ix(dev->dev, slave, port);
+}
+
+static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
+                                   struct ib_ah_attr *ah_attr)
+{
+       if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
+               ah_attr->grh.sgid_index = slave;
+       else
+               ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
+}
+
 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
 {
        struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
@@ -1184,6 +1269,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
        struct ib_ah_attr ah_attr;
        u8 *slave_id;
        int slave;
+       int port;
 
        /* Get slave that sent this packet */
        if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
@@ -1260,12 +1346,18 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
        memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
        ah.ibah.device = ctx->ib_dev;
        mlx4_ib_query_ah(&ah.ibah, &ah_attr);
-       if ((ah_attr.ah_flags & IB_AH_GRH) &&
-           (ah_attr.grh.sgid_index != slave)) {
-               mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
-                            slave, ah_attr.grh.sgid_index);
+       if (ah_attr.ah_flags & IB_AH_GRH)
+               fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
+
+       port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
+       if (port < 0)
                return;
-       }
+       ah_attr.port_num = port;
+       memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
+       ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
+       /* if the slave has a default vlan, use it */
+       mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
+                                   &ah_attr.vlan_id, &ah_attr.sl);
 
        mlx4_ib_send_to_wire(dev, slave, ctx->port,
                             is_proxy_qp0(dev, wc->src_qp, slave) ?
@@ -1273,7 +1365,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
                             be16_to_cpu(tunnel->hdr.pkey_index),
                             be32_to_cpu(tunnel->hdr.remote_qpn),
                             be32_to_cpu(tunnel->hdr.qkey),
-                            &ah_attr, &tunnel->mad);
+                            &ah_attr, wc->smac, &tunnel->mad);
 }
 
 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
@@ -1850,7 +1942,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
        ctx->port = port;
        ctx->ib_dev = &dev->ib_dev;
 
-       for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
+       for (i = 0;
+            i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+            i++) {
+               struct mlx4_active_ports actv_ports =
+                       mlx4_get_active_ports(dev->dev, i);
+
+               if (!test_bit(port - 1, actv_ports.ports))
+                       continue;
+
                ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
                if (ret) {
                        ret = -ENOMEM;
index f9c12e92fdd661a4e639790c9b2b2dd19ba7eb36..6cb85467dde7e53db3fdaddf9f626d056b671938 100644 (file)
@@ -1546,7 +1546,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
        iboe = &ibdev->iboe;
        spin_lock(&iboe->lock);
 
-       for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+       for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
                if ((netif_is_bond_master(real_dev) &&
                     (real_dev == iboe->masters[port - 1])) ||
                     (!netif_is_bond_master(real_dev) &&
@@ -1569,14 +1569,14 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
 
        iboe = &ibdev->iboe;
 
-       for (port = 1; port <= MLX4_MAX_PORTS; ++port)
+       for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
                if ((netif_is_bond_master(real_dev) &&
                     (real_dev == iboe->masters[port - 1])) ||
                     (!netif_is_bond_master(real_dev) &&
                     (real_dev == iboe->netdevs[port - 1])))
                        break;
 
-       if ((port == 0) || (port > MLX4_MAX_PORTS))
+       if ((port == 0) || (port > ibdev->dev->caps.num_ports))
                return 0;
        else
                return port;
@@ -1626,7 +1626,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
        union ib_gid gid;
 
 
-       if ((port == 0) || (port > MLX4_MAX_PORTS))
+       if ((port == 0) || (port > ibdev->dev->caps.num_ports))
                return;
 
        /* IPv4 gids */
@@ -1888,14 +1888,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
        pr_info_once("%s", mlx4_ib_version);
 
-       mlx4_foreach_non_ib_transport_port(i, dev)
-               num_ports++;
-
-       if (mlx4_is_mfunc(dev) && num_ports) {
-               dev_err(&dev->pdev->dev, "RoCE is not supported over SRIOV as yet\n");
-               return NULL;
-       }
-
        num_ports = 0;
        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;
@@ -2331,17 +2323,24 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
        struct mlx4_dev *dev = ibdev->dev;
        int i;
        unsigned long flags;
+       struct mlx4_active_ports actv_ports;
+       unsigned int ports;
+       unsigned int first_port;
 
        if (!mlx4_is_master(dev))
                return;
 
-       dm = kcalloc(dev->caps.num_ports, sizeof *dm, GFP_ATOMIC);
+       actv_ports = mlx4_get_active_ports(dev, slave);
+       ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+       first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+       dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
        if (!dm) {
                pr_err("failed to allocate memory for tunneling qp update\n");
                goto out;
        }
 
-       for (i = 0; i < dev->caps.num_ports; i++) {
+       for (i = 0; i < ports; i++) {
                dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
                if (!dm[i]) {
                        pr_err("failed to allocate memory for tunneling qp update work struct\n");
@@ -2353,9 +2352,9 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
                }
        }
        /* initialize or tear down tunnel QPs for the slave */
-       for (i = 0; i < dev->caps.num_ports; i++) {
+       for (i = 0; i < ports; i++) {
                INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
-               dm[i]->port = i + 1;
+               dm[i]->port = first_port + i + 1;
                dm[i]->slave = slave;
                dm[i]->do_init = do_init;
                dm[i]->dev = ibdev;
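
do_slave_init() now sizes and walks the per-slave work array by the ports the VF actually owns instead of caps.num_ports. The bitmap idiom in isolation, as a sketch (iterate_slave_ports() and its callback are hypothetical; mlx4_get_active_ports(), for_each_set_bit() and the 1-based port numbering are taken from the code above):

    #include <linux/bitops.h>
    /* assumes the mlx4 core headers for struct mlx4_active_ports */

    static void iterate_slave_ports(struct mlx4_dev *dev, int slave,
                                    void (*fn)(struct mlx4_dev *dev, int port))
    {
        struct mlx4_active_ports actv_ports =
            mlx4_get_active_ports(dev, slave);
        int bit;

        for_each_set_bit(bit, actv_ports.ports, dev->caps.num_ports)
            fn(dev, bit + 1);    /* port numbers are 1-based */
    }
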
index 25b2cdff00f8e4bcd76044e8916b2aa257a7276e..ed327e6c8fdca54baf19c3ded92d60776cc1adbf 100644 (file)
@@ -215,8 +215,9 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
        }
        mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
        spin_unlock(&dev->sm_lock);
-       return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
-                                   IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
+       return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+                                   ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+                                   &ah_attr, NULL, mad);
 }
 
 static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
index a230683af940663a214ca611f28f842d956270ba..f589522fddfd9efa4e32fdd0a7e8f63e49a54927 100644 (file)
@@ -241,6 +241,22 @@ struct mlx4_ib_proxy_sqp_hdr {
        struct mlx4_rcv_tunnel_hdr tun;
 }  __packed;
 
+struct mlx4_roce_smac_vlan_info {
+       u64 smac;
+       int smac_index;
+       int smac_port;
+       u64 candidate_smac;
+       int candidate_smac_index;
+       int candidate_smac_port;
+       u16 vid;
+       int vlan_index;
+       int vlan_port;
+       u16 candidate_vid;
+       int candidate_vlan_index;
+       int candidate_vlan_port;
+       int update_vid;
+};
+
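
mlx4_roce_smac_vlan_info tracks both the MAC/VLAN currently programmed for the QP and a candidate registered while a modify-QP is in flight, so the driver can commit or roll back once the firmware command completes. A sketch of how the "currently held" half is released (a hypothetical factoring of the unregister sequences that appear in the qp.c hunks below; mlx4_unregister_mac()/mlx4_unregister_vlan() are the calls the patch uses):

    static void release_roce_l2_res(struct mlx4_dev *mdev,
                                    struct mlx4_roce_smac_vlan_info *info)
    {
        if (info->smac) {
            mlx4_unregister_mac(mdev, info->smac_port, info->smac);
            info->smac = 0;
        }
        if (info->vid < 0x1000) {    /* a valid 802.1Q VID is registered */
            mlx4_unregister_vlan(mdev, info->vlan_port, info->vid);
            info->vid = 0xFFFF;
            info->candidate_vid = 0xFFFF;
            info->update_vid = 0;
        }
    }
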
 struct mlx4_ib_qp {
        struct ib_qp            ibqp;
        struct mlx4_qp          mqp;
@@ -273,8 +289,9 @@ struct mlx4_ib_qp {
        struct list_head        gid_list;
        struct list_head        steering_rules;
        struct mlx4_ib_buf      *sqp_proxy_rcv;
+       struct mlx4_roce_smac_vlan_info pri;
+       struct mlx4_roce_smac_vlan_info alt;
        u64                     reg_id;
-
 };
 
 struct mlx4_ib_srq {
@@ -720,9 +737,12 @@ void mlx4_ib_tunnels_update_work(struct work_struct *work);
 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                          enum ib_qp_type qpt, struct ib_wc *wc,
                          struct ib_grh *grh, struct ib_mad *mad);
+
 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
                         enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
-                        u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad);
+                        u32 qkey, struct ib_ah_attr *attr, u8 *s_mac,
+                        struct ib_mad *mad);
+
 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);
 
 int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
index d8f4d1fe849430ceb40eab47470d5af033884694..aadf7f82e1f388e387d053637da2e9b12fda22e2 100644 (file)
@@ -662,10 +662,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        if (!sqp)
                                return -ENOMEM;
                        qp = &sqp->qp;
+                       qp->pri.vid = 0xFFFF;
+                       qp->alt.vid = 0xFFFF;
                } else {
                        qp = kzalloc(sizeof (struct mlx4_ib_qp), GFP_KERNEL);
                        if (!qp)
                                return -ENOMEM;
+                       qp->pri.vid = 0xFFFF;
+                       qp->alt.vid = 0xFFFF;
                }
        } else
                qp = *caller_qp;
@@ -940,11 +944,32 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 {
        struct mlx4_ib_cq *send_cq, *recv_cq;
 
-       if (qp->state != IB_QPS_RESET)
+       if (qp->state != IB_QPS_RESET) {
                if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
                                   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
                        pr_warn("modify QP %06x to RESET failed.\n",
                               qp->mqp.qpn);
+               if (qp->pri.smac) {
+                       mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+                       qp->pri.smac = 0;
+               }
+               if (qp->alt.smac) {
+                       mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+                       qp->alt.smac = 0;
+               }
+               if (qp->pri.vid < 0x1000) {
+                       mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
+                       qp->pri.vid = 0xFFFF;
+                       qp->pri.candidate_vid = 0xFFFF;
+                       qp->pri.update_vid = 0;
+               }
+               if (qp->alt.vid < 0x1000) {
+                       mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
+                       qp->alt.vid = 0xFFFF;
+                       qp->alt.candidate_vid = 0xFFFF;
+                       qp->alt.update_vid = 0;
+               }
+       }
 
        get_cqs(qp, &send_cq, &recv_cq);
 
@@ -1057,6 +1082,8 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
                qp = kzalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);
+               qp->pri.vid = 0xFFFF;
+               qp->alt.vid = 0xFFFF;
                /* fall through */
        case IB_QPT_UD:
        {
@@ -1188,12 +1215,13 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 
 static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
                          u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
-                         u8 port)
+                         struct mlx4_roce_smac_vlan_info *smac_info, u8 port)
 {
        int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
                IB_LINK_LAYER_ETHERNET;
        int vidx;
        int smac_index;
+       int err;
 
 
        path->grh_mylmc     = ah->src_path_bits & 0x7f;
@@ -1223,61 +1251,103 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
        }
 
        if (is_eth) {
-               path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
-                       ((port - 1) << 6) | ((ah->sl & 7) << 3);
-
                if (!(ah->ah_flags & IB_AH_GRH))
                        return -1;
 
-               memcpy(path->dmac, ah->dmac, ETH_ALEN);
-               path->ackto = MLX4_IB_LINK_TYPE_ETH;
-               /* find the index  into MAC table for IBoE */
-               if (!is_zero_ether_addr((const u8 *)&smac)) {
-                       if (mlx4_find_cached_mac(dev->dev, port, smac,
-                                                &smac_index))
-                               return -ENOENT;
-               } else {
-                       smac_index = 0;
-               }
-
-               path->grh_mylmc &= 0x80 | smac_index;
+               path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
+                       ((port - 1) << 6) | ((ah->sl & 7) << 3);
 
                path->feup |= MLX4_FEUP_FORCE_ETH_UP;
                if (vlan_tag < 0x1000) {
-                       if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
-                               return -ENOENT;
-
-                       path->vlan_index = vidx;
-                       path->fl = 1 << 6;
+                       if (smac_info->vid < 0x1000) {
+                               /* both valid vlan ids */
+                               if (smac_info->vid != vlan_tag) {
+                                       /* different VIDs.  unreg old and reg new */
+                                       err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
+                                       if (err)
+                                               return err;
+                                       smac_info->candidate_vid = vlan_tag;
+                                       smac_info->candidate_vlan_index = vidx;
+                                       smac_info->candidate_vlan_port = port;
+                                       smac_info->update_vid = 1;
+                                       path->vlan_index = vidx;
+                               } else {
+                                       path->vlan_index = smac_info->vlan_index;
+                               }
+                       } else {
+                               /* no current vlan tag in qp */
+                               err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx);
+                               if (err)
+                                       return err;
+                               smac_info->candidate_vid = vlan_tag;
+                               smac_info->candidate_vlan_index = vidx;
+                               smac_info->candidate_vlan_port = port;
+                               smac_info->update_vid = 1;
+                               path->vlan_index = vidx;
+                       }
                        path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
+                       path->fl = 1 << 6;
+               } else {
+                       /* no new vlan tag; unregister the currently registered one once the modify-qp succeeds */
+                       if (smac_info->vid < 0x1000) {
+                               smac_info->candidate_vid = 0xFFFF;
+                               smac_info->update_vid = 1;
+                       }
                }
-       } else
+
+               /* Get the smac_index for RoCE use.
+                * If no smac has been assigned yet, register one.
+                * If one was already assigned but the new MAC differs,
+                * unregister the old one and register the new one.
+                */
+               if (!smac_info->smac || smac_info->smac != smac) {
+                       /* register the candidate now; unregister the old one, if needed, after success */
+                       smac_index = mlx4_register_mac(dev->dev, port, smac);
+                       if (smac_index >= 0) {
+                               smac_info->candidate_smac_index = smac_index;
+                               smac_info->candidate_smac = smac;
+                               smac_info->candidate_smac_port = port;
+                       } else {
+                               return -EINVAL;
+                       }
+               } else {
+                       smac_index = smac_info->smac_index;
+               }
+
+               memcpy(path->dmac, ah->dmac, 6);
+               path->ackto = MLX4_IB_LINK_TYPE_ETH;
+               /* put MAC table smac index for IBoE */
+               path->grh_mylmc = (u8) (smac_index) | 0x80;
+       } else {
                path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
                        ((port - 1) << 6) | ((ah->sl & 0xf) << 2);
+       }
 
        return 0;
 }
 
 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
                         enum ib_qp_attr_mask qp_attr_mask,
+                        struct mlx4_ib_qp *mqp,
                         struct mlx4_qp_path *path, u8 port)
 {
        return _mlx4_set_path(dev, &qp->ah_attr,
                              mlx4_mac_to_u64((u8 *)qp->smac),
                              (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
-                             path, port);
+                             path, &mqp->pri, port);
 }
 
 static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
                             const struct ib_qp_attr *qp,
                             enum ib_qp_attr_mask qp_attr_mask,
+                            struct mlx4_ib_qp *mqp,
                             struct mlx4_qp_path *path, u8 port)
 {
        return _mlx4_set_path(dev, &qp->alt_ah_attr,
                              mlx4_mac_to_u64((u8 *)qp->alt_smac),
                              (qp_attr_mask & IB_QP_ALT_VID) ?
                              qp->alt_vlan_id : 0xffff,
-                             path, port);
+                             path, &mqp->alt, port);
 }
 
 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
@@ -1292,6 +1362,37 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
        }
 }
 
+static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
+                                   struct mlx4_qp_context *context)
+{
+       struct net_device *ndev;
+       u64 u64_mac;
+       int smac_index;
+
+
+       ndev = dev->iboe.netdevs[qp->port - 1];
+       if (ndev) {
+               smac = ndev->dev_addr;
+               u64_mac = mlx4_mac_to_u64(smac);
+       } else {
+               u64_mac = dev->dev->caps.def_mac[qp->port];
+       }
+
+       context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
+       if (!qp->pri.smac) {
+               smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
+               if (smac_index >= 0) {
+                       qp->pri.candidate_smac_index = smac_index;
+                       qp->pri.candidate_smac = u64_mac;
+                       qp->pri.candidate_smac_port = qp->port;
+                       context->pri_path.grh_mylmc = 0x80 | (u8) smac_index;
+               } else {
+                       return -ENOENT;
+               }
+       }
+       return 0;
+}
+
 static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                               const struct ib_qp_attr *attr, int attr_mask,
                               enum ib_qp_state cur_state, enum ib_qp_state new_state)
@@ -1403,7 +1504,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
        }
 
        if (attr_mask & IB_QP_AV) {
-               if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
+               if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
                                  attr_mask & IB_QP_PORT ?
                                  attr->port_num : qp->port))
                        goto out;
@@ -1426,7 +1527,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                    dev->dev->caps.pkey_table_len[attr->alt_port_num])
                        goto out;
 
-               if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+               if (mlx4_set_alt_path(dev, attr, attr_mask, qp,
+                                     &context->alt_path,
                                      attr->alt_port_num))
                        goto out;
 
@@ -1532,6 +1634,20 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                                context->pri_path.fl = 0x80;
                        context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE;
                }
+               if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+                   IB_LINK_LAYER_ETHERNET) {
+                       if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI ||
+                           qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI)
+                               context->pri_path.feup = 1 << 7; /* don't fsm */
+                       /* handle smac_index */
+                       if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD ||
+                           qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
+                           qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
+                               err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
+                               if (err)
+                                       return -EINVAL;
+                       }
+               }
        }
 
        if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
@@ -1619,28 +1735,113 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
-       if (new_state == IB_QPS_RESET && !ibqp->uobject) {
-               mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
-                                ibqp->srq ? to_msrq(ibqp->srq): NULL);
-               if (send_cq != recv_cq)
-                       mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+       if (new_state == IB_QPS_RESET) {
+               if (!ibqp->uobject) {
+                       mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
+                                        ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+                       if (send_cq != recv_cq)
+                               mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+
+                       qp->rq.head = 0;
+                       qp->rq.tail = 0;
+                       qp->sq.head = 0;
+                       qp->sq.tail = 0;
+                       qp->sq_next_wqe = 0;
+                       if (qp->rq.wqe_cnt)
+                               *qp->db.db  = 0;
 
-               qp->rq.head = 0;
-               qp->rq.tail = 0;
-               qp->sq.head = 0;
-               qp->sq.tail = 0;
-               qp->sq_next_wqe = 0;
-               if (qp->rq.wqe_cnt)
-                       *qp->db.db  = 0;
+                       if (qp->flags & MLX4_IB_QP_NETIF)
+                               mlx4_ib_steer_qp_reg(dev, qp, 0);
+               }
+               if (qp->pri.smac) {
+                       mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+                       qp->pri.smac = 0;
+               }
+               if (qp->alt.smac) {
+                       mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+                       qp->alt.smac = 0;
+               }
+               if (qp->pri.vid < 0x1000) {
+                       mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid);
+                       qp->pri.vid = 0xFFFF;
+                       qp->pri.candidate_vid = 0xFFFF;
+                       qp->pri.update_vid = 0;
+               }
 
-               if (qp->flags & MLX4_IB_QP_NETIF)
-                       mlx4_ib_steer_qp_reg(dev, qp, 0);
+               if (qp->alt.vid < 0x1000) {
+                       mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid);
+                       qp->alt.vid = 0xFFFF;
+                       qp->alt.candidate_vid = 0xFFFF;
+                       qp->alt.update_vid = 0;
+               }
        }
-
 out:
        if (err && steer_qp)
                mlx4_ib_steer_qp_reg(dev, qp, 0);
        kfree(context);
+       if (qp->pri.candidate_smac) {
+               if (err) {
+                       mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
+               } else {
+                       if (qp->pri.smac)
+                               mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
+                       qp->pri.smac = qp->pri.candidate_smac;
+                       qp->pri.smac_index = qp->pri.candidate_smac_index;
+                       qp->pri.smac_port = qp->pri.candidate_smac_port;
+               }
+               qp->pri.candidate_smac = 0;
+               qp->pri.candidate_smac_index = 0;
+               qp->pri.candidate_smac_port = 0;
+       }
+       if (qp->alt.candidate_smac) {
+               if (err) {
+                       mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac);
+               } else {
+                       if (qp->alt.smac)
+                               mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
+                       qp->alt.smac = qp->alt.candidate_smac;
+                       qp->alt.smac_index = qp->alt.candidate_smac_index;
+                       qp->alt.smac_port = qp->alt.candidate_smac_port;
+               }
+               qp->alt.candidate_smac = 0;
+               qp->alt.candidate_smac_index = 0;
+               qp->alt.candidate_smac_port = 0;
+       }
+
+       if (qp->pri.update_vid) {
+               if (err) {
+                       if (qp->pri.candidate_vid < 0x1000)
+                               mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port,
+                                                    qp->pri.candidate_vid);
+               } else {
+                       if (qp->pri.vid < 0x1000)
+                               mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port,
+                                                    qp->pri.vid);
+                       qp->pri.vid = qp->pri.candidate_vid;
+                       qp->pri.vlan_port = qp->pri.candidate_vlan_port;
+                       qp->pri.vlan_index =  qp->pri.candidate_vlan_index;
+               }
+               qp->pri.candidate_vid = 0xFFFF;
+               qp->pri.update_vid = 0;
+       }
+
+       if (qp->alt.update_vid) {
+               if (err) {
+                       if (qp->alt.candidate_vid < 0x1000)
+                               mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port,
+                                                    qp->alt.candidate_vid);
+               } else {
+                       if (qp->alt.vid < 0x1000)
+                               mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port,
+                                                    qp->alt.vid);
+                       qp->alt.vid = qp->alt.candidate_vid;
+                       qp->alt.vlan_port = qp->alt.candidate_vlan_port;
+                       qp->alt.vlan_index =  qp->alt.candidate_vlan_index;
+               }
+               qp->alt.candidate_vid = 0xFFFF;
+               qp->alt.update_vid = 0;
+       }
+
        return err;
 }
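
The tail of __mlx4_ib_modify_qp() above resolves the candidates recorded by _mlx4_set_path(): on failure the candidate registration is undone, on success the old resource is released and the candidate becomes current. The pattern in isolation, as a hedged sketch (commit_candidate_smac() is a hypothetical helper; only the MAC half is shown, the VLAN half is symmetric):

    static void commit_candidate_smac(struct mlx4_dev *mdev,
                                      struct mlx4_roce_smac_vlan_info *info,
                                      int err)
    {
        if (!info->candidate_smac)
            return;

        if (err) {
            /* modify-QP failed: drop the MAC we registered speculatively */
            mlx4_unregister_mac(mdev, info->candidate_smac_port,
                                info->candidate_smac);
        } else {
            /* modify-QP succeeded: retire the old MAC, adopt the new one */
            if (info->smac)
                mlx4_unregister_mac(mdev, info->smac_port, info->smac);
            info->smac = info->candidate_smac;
            info->smac_index = info->candidate_smac_index;
            info->smac_port = info->candidate_smac_port;
        }
        info->candidate_smac = 0;
        info->candidate_smac_index = 0;
        info->candidate_smac_port = 0;
    }

Registering the candidate before the firmware command and swapping only on success leaves the MAC table untouched when the modify fails.
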
 
@@ -1842,9 +2043,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 {
        struct ib_device *ib_dev = sqp->qp.ibqp.device;
        struct mlx4_wqe_mlx_seg *mlx = wqe;
+       struct mlx4_wqe_ctrl_seg *ctrl = wqe;
        struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
        struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
-       struct net_device *ndev;
        union ib_gid sgid;
        u16 pkey;
        int send_size;
@@ -1868,12 +2069,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                        /* When multi-function is enabled, the ib_core gid
                         * indexes don't necessarily match the hw ones, so
                         * we must use our own cache */
-                       sgid.global.subnet_prefix =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                               subnet_prefix;
-                       sgid.global.interface_id =
-                               to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
-                               guid_cache[ah->av.ib.gid_index];
+                       err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
+                                                          be32_to_cpu(ah->av.ib.port_pd) >> 24,
+                                                          ah->av.ib.gid_index, &sgid.raw[0]);
+                       if (err)
+                               return err;
                } else  {
                        err = ib_get_cached_gid(ib_dev,
                                                be32_to_cpu(ah->av.ib.port_pd) >> 24,
@@ -1902,6 +2102,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                sqp->ud_header.grh.flow_label    =
                        ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
                sqp->ud_header.grh.hop_limit     = ah->av.ib.hop_limit;
+               if (is_eth)
+                       memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
+               else {
                if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
                        /* When multi-function is enabled, the ib_core gid
                         * indexes don't necessarily match the hw ones, so
@@ -1917,6 +2120,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
                                          be32_to_cpu(ah->av.ib.port_pd) >> 24,
                                          ah->av.ib.gid_index,
                                          &sqp->ud_header.grh.source_gid);
+               }
                memcpy(sqp->ud_header.grh.destination_gid.raw,
                       ah->av.ib.dgid, 16);
        }
@@ -1949,16 +2153,23 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 
        if (is_eth) {
                u8 *smac;
+               struct in6_addr in6;
+
                u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
 
                mlx->sched_prio = cpu_to_be16(pcp);
 
                memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
                /* FIXME: cache smac value? */
-               ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
-               if (!ndev)
-                       return -ENODEV;
-               smac = ndev->dev_addr;
+               memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2);
+               memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
+               memcpy(&in6, sgid.raw, sizeof(in6));
+
+               if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
+                       smac = to_mdev(sqp->qp.ibqp.device)->
+                               iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+               else    /* use the src mac of the tunnel */
+                       smac = ah->av.eth.s_mac;
                memcpy(sqp->ud_header.eth.smac_h, smac, 6);
                if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
                        mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
@@ -2190,6 +2401,8 @@ static void build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_
        hdr.remote_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
        hdr.pkey_index = cpu_to_be16(wr->wr.ud.pkey_index);
        hdr.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+       memcpy(hdr.mac, ah->av.eth.mac, 6);
+       hdr.vlan = ah->av.eth.vlan;
 
        spc = MLX4_INLINE_ALIGN -
                ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1));
index db2ea31df8325175d764051849f55631134148b5..5a38e43eca650c6cb736a5cee6e290e357aeb9ed 100644 (file)
@@ -627,6 +627,7 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
        int port;
        struct kobject *p, *t;
        struct mlx4_port *mport;
+       struct mlx4_active_ports actv_ports;
 
        get_name(dev, name, slave, sizeof name);
 
@@ -649,7 +650,11 @@ static int register_one_pkey_tree(struct mlx4_ib_dev *dev, int slave)
                goto err_ports;
        }
 
+       actv_ports = mlx4_get_active_ports(dev->dev, slave);
+
        for (port = 1; port <= dev->dev->caps.num_ports; ++port) {
+               if (!test_bit(port - 1, actv_ports.ports))
+                       continue;
                err = add_port(dev, port, slave);
                if (err)
                        goto err_add;
index b4147c0b14b79e40de0a0c613fce54fdffcc5976..c3a1b061838da5f5b42aee00f5087e1e11c87b71 100644 (file)
@@ -796,7 +796,7 @@ static void __exit act2000_exit(void)
        act2000_card *last;
        while (card) {
                unregister_card(card);
-               del_timer(&card->ptimer);
+               del_timer_sync(&card->ptimer);
                card = card->next;
        }
        card = cards;
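
del_timer() only removes a pending timer; a handler that is already running on another CPU keeps running while the module is being torn down. del_timer_sync() additionally waits for a running handler to finish, which is why these exit paths are converted. A minimal sketch of the pattern (my_card and its timer are illustrative):

    #include <linux/timer.h>

    struct my_card {
        struct timer_list ptimer;
        /* ... */
    };

    static void my_card_teardown(struct my_card *card)
    {
        /* Wait for a concurrently running handler before freeing anything
         * it might still touch. Must not be called from the handler itself
         * or while holding a lock the handler takes (deadlock).
         */
        del_timer_sync(&card->ptimer);
    }
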
index fb4f1bac0133faa6783b252432fd7ecfdafc3343..1c5dc345e7c50fe07196b5dc2b4963e4d9207b0b 100644 (file)
@@ -86,12 +86,13 @@ isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off)
        struct divert_info *inf;
        int len;
 
-       if (!*((struct divert_info **) file->private_data)) {
+       if (!(inf = *((struct divert_info **) file->private_data))) {
                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;
-               interruptible_sleep_on(&(rd_queue));
+               wait_event_interruptible(rd_queue, (inf =
+                       *((struct divert_info **) file->private_data)));
        }
-       if (!(inf = *((struct divert_info **) file->private_data)))
+       if (!inf)
                return (0);
 
        inf->usage_cnt--;       /* new usage count */
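
interruptible_sleep_on() is inherently racy: a wakeup that arrives between the caller's condition check and the actual sleep is lost, and the API was being removed from the kernel. wait_event_interruptible() takes the condition as an argument and re-checks it after putting the task on the waitqueue in the INTERRUPTIBLE state, so a racing wakeup cannot be missed. A generic sketch of the conversion, with my_queue/data_ready as illustrative names:

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_queue);
    static int data_ready;

    static int wait_for_data(void)
    {
        /* Old, racy form:
         *      if (!data_ready)
         *              interruptible_sleep_on(&my_queue);
         * New form: returns 0 once the condition became true,
         * -ERESTARTSYS if interrupted by a signal.
         */
        return wait_event_interruptible(my_queue, data_ready);
    }

    /* Producer side: make the condition true, then wake the waiters. */
    static void data_arrived(void)
    {
        data_ready = 1;
        wake_up_interruptible(&my_queue);
    }
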
index 2be1c8a3bb5f2b7fd84f0d3cd039fb8dc5da3dd0..d8ef64da26f1fe6e0a25c5515401079ed19ff3cb 100644 (file)
@@ -509,7 +509,8 @@ static void
 set_arcofi(struct IsdnCardState *cs, int bc) {
        cs->dc.isac.arcofi_bc = bc;
        arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5);
-       interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+       wait_event_interruptible(cs->dc.isac.arcofi_wait,
+                                cs->dc.isac.arcofi_state == ARCOFI_NOP);
 }
 
 static int
@@ -528,7 +529,8 @@ check_arcofi(struct IsdnCardState *cs)
                }
        cs->dc.isac.arcofi_bc = 0;
        arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION);
-       interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+       wait_event_interruptible(cs->dc.isac.arcofi_wait,
+                                cs->dc.isac.arcofi_state == ARCOFI_NOP);
        if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) {
                debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp);
                p = cs->dc.isac.mon_rx;
@@ -595,7 +597,8 @@ check_arcofi(struct IsdnCardState *cs)
                               Elsa_Types[cs->subtyp],
                               cs->hw.elsa.base + 8);
                arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0);
-               interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+               wait_event_interruptible(cs->dc.isac.arcofi_wait,
+                                cs->dc.isac.arcofi_state == ARCOFI_NOP);
                return (1);
        }
        return (0);
index 3f84dd8f1757d8b645af7bf6b3088b7ce084d320..a2a358c1dc8e59fe5b69f99891c8d3130717df8b 100644 (file)
@@ -573,7 +573,8 @@ modem_l2l1(struct PStack *st, int pr, void *arg)
                test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
                bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
                arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
-               interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
+               wait_event_interruptible(bcs->cs->dc.isac.arcofi_wait,
+                                bcs->cs->dc.isac.arcofi_state == ARCOFI_NOP);
                bcs->cs->hw.elsa.MFlag = 1;
        } else {
                printk(KERN_WARNING "ElsaSer: unknown pr %x\n", pr);
index b61e8d5e84ad022e566f5bfa191ddcb392f3df7b..7b5fd8fb1761d1912be615ee14571e420dc4c92b 100644 (file)
@@ -175,14 +175,15 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
        int len;
        hysdn_card *card = PDE_DATA(file_inode(file));
 
-       if (!*((struct log_data **) file->private_data)) {
+       if (!(inf = *((struct log_data **) file->private_data))) {
                struct procdata *pd = card->proclog;
                if (file->f_flags & O_NONBLOCK)
                        return (-EAGAIN);
 
-               interruptible_sleep_on(&(pd->rd_queue));
+               wait_event_interruptible(pd->rd_queue, (inf =
+                               *((struct log_data **) file->private_data)));
        }
-       if (!(inf = *((struct log_data **) file->private_data)))
+       if (!inf)
                return (0);
 
        inf->usage_cnt--;       /* new usage count */
index 9bb12ba3191fbdac568f9352f77dc05b66d051cc..9b856e1890d1ebc4f7c82ebb6b045357abe5324b 100644 (file)
@@ -777,7 +777,8 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
                return 0;
        if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) {
                if (sleep)
-                       interruptible_sleep_on(sleep);
+                       wait_event_interruptible(*sleep,
+                               !skb_queue_empty(&dev->drv[di]->rpqueue[channel]));
                else
                        return 0;
        }
@@ -1072,7 +1073,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
                                retval = -EAGAIN;
                                goto out;
                        }
-                       interruptible_sleep_on(&(dev->info_waitq));
+                       wait_event_interruptible(dev->info_waitq,
+                                                file->private_data);
                }
                p = isdn_statstr();
                file->private_data = NULL;
@@ -1128,7 +1130,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
                                retval = -EAGAIN;
                                goto out;
                        }
-                       interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq));
+                       wait_event_interruptible(dev->drv[drvidx]->st_waitq,
+                                                dev->drv[drvidx]->stavail);
                }
                if (dev->drv[drvidx]->interface->readstat) {
                        if (count > dev->drv[drvidx]->stavail)
@@ -1188,8 +1191,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
                        goto out;
                }
                chidx = isdn_minor2chan(minor);
-               while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0)
-                       interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
+               wait_event_interruptible(dev->drv[drvidx]->snd_waitq[chidx],
+                       (retval = isdn_writebuf_stub(drvidx, chidx, buf, count)));
                goto out;
        }
        if (minor <= ISDN_MINOR_CTRLMAX) {
@@ -2378,7 +2381,7 @@ static void __exit isdn_exit(void)
        }
        isdn_tty_exit();
        unregister_chrdev(ISDN_MAJOR, "isdn");
-       del_timer(&dev->timer);
+       del_timer_sync(&dev->timer);
        /* call vfree with interrupts enabled, else it will hang */
        vfree(dev);
        printk(KERN_NOTICE "ISDN-subsystem unloaded\n");
index 38ceac5053a0ba79df766c16889cf5f384b582f4..a5da511e3c9ae4f381ae1526d2ab2779039aba9a 100644 (file)
@@ -378,10 +378,15 @@ isdn_ppp_release(int min, struct file *file)
        is->slcomp = NULL;
 #endif
 #ifdef CONFIG_IPPP_FILTER
-       kfree(is->pass_filter);
-       is->pass_filter = NULL;
-       kfree(is->active_filter);
-       is->active_filter = NULL;
+       if (is->pass_filter) {
+               sk_unattached_filter_destroy(is->pass_filter);
+               is->pass_filter = NULL;
+       }
+
+       if (is->active_filter) {
+               sk_unattached_filter_destroy(is->active_filter);
+               is->active_filter = NULL;
+       }
 #endif
 
 /* TODO: if this was the previous master: link the stuff to the new master */
@@ -629,25 +634,41 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 #ifdef CONFIG_IPPP_FILTER
        case PPPIOCSPASS:
        {
+               struct sock_fprog fprog;
                struct sock_filter *code;
-               int len = get_filter(argp, &code);
+               int err, len = get_filter(argp, &code);
+
                if (len < 0)
                        return len;
-               kfree(is->pass_filter);
-               is->pass_filter = code;
-               is->pass_len = len;
-               break;
+
+               fprog.len = len;
+               fprog.filter = code;
+
+               if (is->pass_filter)
+                       sk_unattached_filter_destroy(is->pass_filter);
+               err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+               kfree(code);
+
+               return err;
        }
        case PPPIOCSACTIVE:
        {
+               struct sock_fprog fprog;
                struct sock_filter *code;
-               int len = get_filter(argp, &code);
+               int err, len = get_filter(argp, &code);
+
                if (len < 0)
                        return len;
-               kfree(is->active_filter);
-               is->active_filter = code;
-               is->active_len = len;
-               break;
+
+               fprog.len = len;
+               fprog.filter = code;
+
+               if (is->active_filter)
+                       sk_unattached_filter_destroy(is->active_filter);
+               err = sk_unattached_filter_create(&is->active_filter, &fprog);
+               kfree(code);
+
+               return err;
        }
 #endif /* CONFIG_IPPP_FILTER */
        default:
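
Instead of keeping a raw sock_filter array and running it with sk_run_filter(), the ioctl handlers above now build a struct sk_filter through sk_unattached_filter_create(), which performs the usual BPF validation (and allows JITing), and later run it with SK_RUN_FILTER() and free it with sk_unattached_filter_destroy(). A condensed sketch of the same sequence, assuming the caller has already copied the program from userspace the way get_filter() does above (install_filter() is a hypothetical name):

    #include <linux/filter.h>

    /* Replace an optional, previously installed filter with a validated one.
     * Ownership of 'code' stays with the caller.
     */
    static int install_filter(struct sk_filter **filterp,
                              struct sock_filter *code, int len)
    {
        struct sock_fprog fprog = {
            .len    = len,
            .filter = code,
        };

        if (*filterp) {
            sk_unattached_filter_destroy(*filterp);
            *filterp = NULL;
        }
        return sk_unattached_filter_create(filterp, &fprog);
    }

On the data path the verdict is then obtained with SK_RUN_FILTER(filter, skb); a return value of 0 is treated as "filtered out" in the hunks below.
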
@@ -1147,14 +1168,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
        }
 
        if (is->pass_filter
-           && sk_run_filter(skb, is->pass_filter) == 0) {
+           && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
                if (is->debug & 0x2)
                        printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
                kfree_skb(skb);
                return;
        }
        if (!(is->active_filter
-             && sk_run_filter(skb, is->active_filter) == 0)) {
+             && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
                if (is->debug & 0x2)
                        printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
                lp->huptimer = 0;
@@ -1293,14 +1314,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (ipt->pass_filter
-           && sk_run_filter(skb, ipt->pass_filter) == 0) {
+           && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
                if (ipt->debug & 0x4)
                        printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
                kfree_skb(skb);
                goto unlock;
        }
        if (!(ipt->active_filter
-             && sk_run_filter(skb, ipt->active_filter) == 0)) {
+             && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
                if (ipt->debug & 0x4)
                        printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
                lp->huptimer = 0;
@@ -1490,9 +1511,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
        }
 
        drop |= is->pass_filter
-               && sk_run_filter(skb, is->pass_filter) == 0;
+               && SK_RUN_FILTER(is->pass_filter, skb) == 0;
        drop |= is->active_filter
-               && sk_run_filter(skb, is->active_filter) == 0;
+               && SK_RUN_FILTER(is->active_filter, skb) == 0;
 
        skb_push(skb, IPPP_MAX_HEADER - 4);
        return drop;
index 1eaf622739037dc26960f4b3d2ae5ba165830d21..f02cc506fbfa795938fc9dcdfa6e8931df734575 100644 (file)
@@ -796,6 +796,7 @@ static void set_running_timeout(unsigned long ptr)
 #endif
        dev = (struct pcbit_dev *) ptr;
 
+       dev->l2_state = L2_DOWN;
        wake_up_interruptible(&dev->set_running_wq);
 }
 
@@ -818,7 +819,8 @@ static int set_protocol_running(struct pcbit_dev *dev)
 
        add_timer(&dev->set_running_timer);
 
-       interruptible_sleep_on(&dev->set_running_wq);
+       wait_event(dev->set_running_wq, dev->l2_state == L2_RUNNING ||
+                                       dev->l2_state == L2_DOWN);
 
        del_timer(&dev->set_running_timer);
 
@@ -842,8 +844,6 @@ static int set_protocol_running(struct pcbit_dev *dev)
                printk(KERN_DEBUG "pcbit: initialization failed\n");
                printk(KERN_DEBUG "pcbit: firmware not loaded\n");
 
-               dev->l2_state = L2_DOWN;
-
 #ifdef DEBUG
                printk(KERN_DEBUG "Bank3 = %02x\n",
                       readb(dev->sh_mem + BANK3));
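
The pcbit change replaces the racy interruptible_sleep_on() call with wait_event() on an explicit condition; the timeout handler now records L2_DOWN before waking the queue, so the waiter always has something to test and a wakeup that fires before the sleep cannot be lost. A sketch of the same pattern, with hypothetical names (my_dev, my_timeout, MY_TIMEOUT and the STATE_* values are illustrative only):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/wait.h>

    #define MY_TIMEOUT      (2 * HZ)

    enum { STATE_DOWN, STATE_STARTING, STATE_RUNNING };

    struct my_dev {
            wait_queue_head_t wq;
            int state;
            struct timer_list timer;        /* set up with my_timeout() */
    };

    /* old-style (unsigned long) timer callback, as in the driver above */
    static void my_timeout(unsigned long data)
    {
            struct my_dev *dev = (struct my_dev *)data;

            dev->state = STATE_DOWN;        /* publish the condition first */
            wake_up(&dev->wq);              /* then wake the waiter */
    }

    static int my_wait_running(struct my_dev *dev)
    {
            dev->state = STATE_STARTING;
            mod_timer(&dev->timer, jiffies + MY_TIMEOUT);

            /* wait_event() re-tests the condition, so an early wakeup is
             * never lost the way it could be with interruptible_sleep_on()
             */
            wait_event(dev->wq, dev->state == STATE_RUNNING ||
                                dev->state == STATE_DOWN);

            del_timer(&dev->timer);
            return dev->state == STATE_RUNNING ? 0 : -ETIMEDOUT;
    }
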
index 92acc81f844d161a7e47d9477c2e49e941929d58..d6f19b168e8a1a6101fd11dea5de140eed90dffb 100644 (file)
@@ -390,8 +390,8 @@ static void __exit sc_exit(void)
                /*
                 * kill the timers
                 */
-               del_timer(&(sc_adapter[i]->reset_timer));
-               del_timer(&(sc_adapter[i]->stat_timer));
+               del_timer_sync(&(sc_adapter[i]->reset_timer));
+               del_timer_sync(&(sc_adapter[i]->stat_timer));
 
                /*
                 * Tell I4L we're toast
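
The sc_exit() hunk switches del_timer() to del_timer_sync() before the adapter structures are torn down. del_timer() only removes a pending timer; del_timer_sync() additionally waits for a handler that is already running on another CPU, which is what an exit path needs before freeing the memory that handler uses. A small sketch (my_adapter and my_teardown are hypothetical):

    #include <linux/slab.h>
    #include <linux/timer.h>

    struct my_adapter {
            struct timer_list reset_timer;
            /* ... */
    };

    static void my_teardown(struct my_adapter *ad)
    {
            /* wait for a concurrently running handler to finish, so the
             * free below cannot race with it
             */
            del_timer_sync(&ad->reset_timer);
            kfree(ad);
    }
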
index 494b888a65681bde29911f13bea5ab02ed1c32d2..89402c3b64f8406ebc0e81a7d79fb341fa57df3d 100644 (file)
@@ -177,11 +177,6 @@ config NETCONSOLE_DYNAMIC
 config NETPOLL
        def_bool NETCONSOLE
 
-config NETPOLL_TRAP
-       bool "Netpoll traffic trapping"
-       default n
-       depends on NETPOLL
-
 config NET_POLL_CONTROLLER
        def_bool NETPOLL
 
index dcde56057fe14f7bb64a8221db03b2d72a22d842..b667a51ed21517a3ee6cf2be6ab4c7e306a713a2 100644 (file)
@@ -768,11 +768,11 @@ static int ad_lacpdu_send(struct port *port)
 
        lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
 
-       memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+       ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
        /* Note: source address is set to be the member's PERMANENT address,
         * because we use it to identify loopback lacpdus in receive.
         */
-       memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+       ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
        lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
 
        lacpdu_header->lacpdu = port->lacpdu;
@@ -810,11 +810,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
 
        marker_header = (struct bond_marker_header *)skb_put(skb, length);
 
-       memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+       ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
        /* Note: source address is set to be the member's PERMANENT address,
         * because we use it to identify loopback MARKERs in receive.
         */
-       memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+       ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
        marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
 
        marker_header->marker = *marker;
@@ -1079,7 +1079,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        /* detect loopback situation */
                        if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
                                              &(port->actor_system))) {
-                               pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+                               pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+                                      "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
                                       port->slave->bond->dev->name,
                                       port->slave->dev->name);
                                return;
@@ -1283,11 +1284,11 @@ static void ad_port_selection_logic(struct port *port)
                        /* meaning: the port was related to an aggregator
                         * but was not on the aggregator port list
                         */
-                       pr_warn("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
-                               port->slave->bond->dev->name,
-                               port->actor_port_number,
-                               port->slave->dev->name,
-                               port->aggregator->aggregator_identifier);
+                       pr_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+                                           port->slave->bond->dev->name,
+                                           port->actor_port_number,
+                                           port->slave->dev->name,
+                                           port->aggregator->aggregator_identifier);
                }
        }
        /* search on all aggregators for a suitable aggregator for this port */
@@ -1444,9 +1445,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
                break;
 
        default:
-               pr_warn("%s: Impossible agg select mode %d\n",
-                       curr->slave->bond->dev->name,
-                       __get_agg_selection_mode(curr->lag_ports));
+               pr_warn_ratelimited("%s: Impossible agg select mode %d\n",
+                                   curr->slave->bond->dev->name,
+                                   __get_agg_selection_mode(curr->lag_ports));
                break;
        }
 
@@ -1559,9 +1560,9 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 
                /* check if any partner replys */
                if (best->is_individual) {
-                       pr_warn("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
-                               best->slave ?
-                               best->slave->bond->dev->name : "NULL");
+                       pr_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+                                           best->slave ?
+                                           best->slave->bond->dev->name : "NULL");
                }
 
                best->is_active = 1;
@@ -1948,7 +1949,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                         * new aggregator
                         */
                        if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
-                               pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+                               pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
                                         aggregator->aggregator_identifier,
                                         new_aggregator->aggregator_identifier);
 
@@ -2080,8 +2081,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                /* select the active aggregator for the bond */
                if (port) {
                        if (!port->slave) {
-                               pr_warn("%s: Warning: bond's first port is uninitialized\n",
-                                       bond->dev->name);
+                               pr_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
+                                                   bond->dev->name);
                                goto re_arm;
                        }
 
@@ -2095,8 +2096,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        bond_for_each_slave_rcu(bond, slave, iter) {
                port = &(SLAVE_AD_INFO(slave).port);
                if (!port->slave) {
-                       pr_warn("%s: Warning: Found an uninitialized port\n",
-                               bond->dev->name);
+                       pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
+                                           bond->dev->name);
                        goto re_arm;
                }
 
@@ -2157,8 +2158,8 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
                port = &(SLAVE_AD_INFO(slave).port);
 
                if (!port->slave) {
-                       pr_warn("%s: Warning: port of slave %s is uninitialized\n",
-                               slave->dev->name, slave->bond->dev->name);
+                       pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+                                           slave->dev->name, slave->bond->dev->name);
                        return ret;
                }
 
@@ -2310,9 +2311,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->actor_oper_port_key = (port->actor_admin_port_key &=
                                             ~AD_SPEED_KEY_BITS);
        }
-       pr_debug("Port %d changed link status to %s",
-               port->actor_port_number,
-               (link == BOND_LINK_UP) ? "UP" : "DOWN");
+       pr_debug("Port %d changed link status to %s\n",
+                port->actor_port_number,
+                link == BOND_LINK_UP ? "UP" : "DOWN");
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2390,17 +2391,16 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
                }
        }
 
-       if (aggregator) {
-               ad_info->aggregator_id = aggregator->aggregator_identifier;
-               ad_info->ports = aggregator->num_of_ports;
-               ad_info->actor_key = aggregator->actor_oper_aggregator_key;
-               ad_info->partner_key = aggregator->partner_oper_aggregator_key;
-               memcpy(ad_info->partner_system,
-                      aggregator->partner_system.mac_addr_value, ETH_ALEN);
-               return 0;
-       }
+       if (!aggregator)
+               return -1;
 
-       return -1;
+       ad_info->aggregator_id = aggregator->aggregator_identifier;
+       ad_info->ports = aggregator->num_of_ports;
+       ad_info->actor_key = aggregator->actor_oper_aggregator_key;
+       ad_info->partner_key = aggregator->partner_oper_aggregator_key;
+       ether_addr_copy(ad_info->partner_system,
+                       aggregator->partner_system.mac_addr_value);
+       return 0;
 }
 
 /* Wrapper used to hold bond->lock so no slave manipulation can occur */
@@ -2479,7 +2479,7 @@ out:
        return NETDEV_TX_OK;
 err_free:
        /* no suitable interface, frame not sent */
-       kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        goto out;
 }
 
index f4dd9592ac62561fb8967843446d6a9c0463b7e7..bb03b1df2f3e799da32d82ca2a5226c8bd031647 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/netdevice.h>
 #include <linux/if_ether.h>
 
-// General definitions
+/* General definitions */
 #define PKT_TYPE_LACPDU         cpu_to_be16(ETH_P_SLOW)
 #define AD_TIMER_INTERVAL       100 /*msec*/
 
@@ -47,54 +47,54 @@ enum {
        BOND_AD_COUNT = 2,
 };
 
-// rx machine states(43.4.11 in the 802.3ad standard)
+/* rx machine states(43.4.11 in the 802.3ad standard) */
 typedef enum {
        AD_RX_DUMMY,
-       AD_RX_INITIALIZE,     // rx Machine
-       AD_RX_PORT_DISABLED,  // rx Machine
-       AD_RX_LACP_DISABLED,  // rx Machine
-       AD_RX_EXPIRED,        // rx Machine
-       AD_RX_DEFAULTED,      // rx Machine
-       AD_RX_CURRENT         // rx Machine
+       AD_RX_INITIALIZE,       /* rx Machine */
+       AD_RX_PORT_DISABLED,    /* rx Machine */
+       AD_RX_LACP_DISABLED,    /* rx Machine */
+       AD_RX_EXPIRED,          /* rx Machine */
+       AD_RX_DEFAULTED,        /* rx Machine */
+       AD_RX_CURRENT           /* rx Machine */
 } rx_states_t;
 
-// periodic machine states(43.4.12 in the 802.3ad standard)
+/* periodic machine states(43.4.12 in the 802.3ad standard) */
 typedef enum {
        AD_PERIODIC_DUMMY,
-       AD_NO_PERIODIC,        // periodic machine
-       AD_FAST_PERIODIC,      // periodic machine
-       AD_SLOW_PERIODIC,      // periodic machine
-       AD_PERIODIC_TX     // periodic machine
+       AD_NO_PERIODIC,         /* periodic machine */
+       AD_FAST_PERIODIC,       /* periodic machine */
+       AD_SLOW_PERIODIC,       /* periodic machine */
+       AD_PERIODIC_TX          /* periodic machine */
 } periodic_states_t;
 
-// mux machine states(43.4.13 in the 802.3ad standard)
+/* mux machine states(43.4.13 in the 802.3ad standard) */
 typedef enum {
        AD_MUX_DUMMY,
-       AD_MUX_DETACHED,       // mux machine
-       AD_MUX_WAITING,        // mux machine
-       AD_MUX_ATTACHED,       // mux machine
-       AD_MUX_COLLECTING_DISTRIBUTING // mux machine
+       AD_MUX_DETACHED,        /* mux machine */
+       AD_MUX_WAITING,         /* mux machine */
+       AD_MUX_ATTACHED,        /* mux machine */
+       AD_MUX_COLLECTING_DISTRIBUTING  /* mux machine */
 } mux_states_t;
 
-// tx machine states(43.4.15 in the 802.3ad standard)
+/* tx machine states(43.4.15 in the 802.3ad standard) */
 typedef enum {
        AD_TX_DUMMY,
-       AD_TRANSMIT        // tx Machine
+       AD_TRANSMIT             /* tx Machine */
 } tx_states_t;
 
-// rx indication types
+/* rx indication types */
 typedef enum {
-       AD_TYPE_LACPDU = 1,    // type lacpdu
-       AD_TYPE_MARKER     // type marker
+       AD_TYPE_LACPDU = 1,     /* type lacpdu */
+       AD_TYPE_MARKER          /* type marker */
 } pdu_type_t;
 
-// rx marker indication types
+/* rx marker indication types */
 typedef enum {
-       AD_MARKER_INFORMATION_SUBTYPE = 1, // marker imformation subtype
-       AD_MARKER_RESPONSE_SUBTYPE     // marker response subtype
+       AD_MARKER_INFORMATION_SUBTYPE = 1,      /* marker information subtype */
+       AD_MARKER_RESPONSE_SUBTYPE              /* marker response subtype */
 } bond_marker_subtype_t;
 
-// timers types(43.4.9 in the 802.3ad standard)
+/* timers types(43.4.9 in the 802.3ad standard) */
 typedef enum {
        AD_CURRENT_WHILE_TIMER,
        AD_ACTOR_CHURN_TIMER,
@@ -105,35 +105,35 @@ typedef enum {
 
 #pragma pack(1)
 
-// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
+/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
 typedef struct lacpdu {
-       u8 subtype;                  // = LACP(= 0x01)
+       u8 subtype;             /* = LACP(= 0x01) */
        u8 version_number;
-       u8 tlv_type_actor_info;       // = actor information(type/length/value)
-       u8 actor_information_length; // = 20
+       u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+       u8 actor_information_length;    /* = 20 */
        __be16 actor_system_priority;
        struct mac_addr actor_system;
        __be16 actor_key;
        __be16 actor_port_priority;
        __be16 actor_port;
        u8 actor_state;
-       u8 reserved_3_1[3];          // = 0
-       u8 tlv_type_partner_info;     // = partner information
-       u8 partner_information_length;   // = 20
+       u8 reserved_3_1[3];             /* = 0 */
+       u8 tlv_type_partner_info;       /* = partner information */
+       u8 partner_information_length;  /* = 20 */
        __be16 partner_system_priority;
        struct mac_addr partner_system;
        __be16 partner_key;
        __be16 partner_port_priority;
        __be16 partner_port;
        u8 partner_state;
-       u8 reserved_3_2[3];          // = 0
-       u8 tlv_type_collector_info;       // = collector information
-       u8 collector_information_length; // = 16
+       u8 reserved_3_2[3];             /* = 0 */
+       u8 tlv_type_collector_info;     /* = collector information */
+       u8 collector_information_length;/* = 16 */
        __be16 collector_max_delay;
        u8 reserved_12[12];
-       u8 tlv_type_terminator;      // = terminator
-       u8 terminator_length;        // = 0
-       u8 reserved_50[50];          // = 0
+       u8 tlv_type_terminator;         /* = terminator */
+       u8 terminator_length;           /* = 0 */
+       u8 reserved_50[50];             /* = 0 */
 } __packed lacpdu_t;
 
 typedef struct lacpdu_header {
@@ -141,20 +141,20 @@ typedef struct lacpdu_header {
        struct lacpdu lacpdu;
 } __packed lacpdu_header_t;
 
-// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
+/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
 typedef struct bond_marker {
-       u8 subtype;              //  = 0x02  (marker PDU)
-       u8 version_number;       //  = 0x01
-       u8 tlv_type;             //  = 0x01  (marker information)
-       //  = 0x02  (marker response information)
-       u8 marker_length;        //  = 0x16
-       u16 requester_port;      //   The number assigned to the port by the requester
-       struct mac_addr requester_system;      //   The requester's system id
-       u32 requester_transaction_id;   //   The transaction id allocated by the requester,
-       u16 pad;                 //  = 0
-       u8 tlv_type_terminator;      //  = 0x00
-       u8 terminator_length;        //  = 0x00
-       u8 reserved_90[90];          //  = 0
+       u8 subtype;             /* = 0x02  (marker PDU) */
+       u8 version_number;      /* = 0x01 */
+       u8 tlv_type;            /* = 0x01  (marker information) */
+       /* = 0x02  (marker response information) */
+       u8 marker_length;       /* = 0x16 */
+       u16 requester_port;     /* The number assigned to the port by the requester */
+       struct mac_addr requester_system;       /* The requester's system id */
+       u32 requester_transaction_id;           /* The transaction id allocated by the requester, */
+       u16 pad;                /* = 0 */
+       u8 tlv_type_terminator; /* = 0x00 */
+       u8 terminator_length;   /* = 0x00 */
+       u8 reserved_90[90];     /* = 0 */
 } __packed bond_marker_t;
 
 typedef struct bond_marker_header {
@@ -173,7 +173,7 @@ struct port;
 #pragma pack(8)
 #endif
 
-// aggregator structure(43.4.5 in the 802.3ad standard)
+/* aggregator structure(43.4.5 in the 802.3ad standard) */
 typedef struct aggregator {
        struct mac_addr aggregator_mac_address;
        u16 aggregator_identifier;
@@ -183,12 +183,12 @@ typedef struct aggregator {
        struct mac_addr partner_system;
        u16 partner_system_priority;
        u16 partner_oper_aggregator_key;
-       u16 receive_state;              // BOOLEAN
-       u16 transmit_state;             // BOOLEAN
+       u16 receive_state;      /* BOOLEAN */
+       u16 transmit_state;     /* BOOLEAN */
        struct port *lag_ports;
-       // ****** PRIVATE PARAMETERS ******
-       struct slave *slave;        // pointer to the bond slave that this aggregator belongs to
-       u16 is_active;      // BOOLEAN. Indicates if this aggregator is active
+       /* ****** PRIVATE PARAMETERS ****** */
+       struct slave *slave;    /* pointer to the bond slave that this aggregator belongs to */
+       u16 is_active;          /* BOOLEAN. Indicates if this aggregator is active */
        u16 num_of_ports;
 } aggregator_t;
 
@@ -201,12 +201,12 @@ struct port_params {
        u16 port_state;
 };
 
-// port structure(43.4.6 in the 802.3ad standard)
+/* port structure(43.4.6 in the 802.3ad standard) */
 typedef struct port {
        u16 actor_port_number;
        u16 actor_port_priority;
-       struct mac_addr actor_system;          // This parameter is added here although it is not specified in the standard, just for simplification
-       u16 actor_system_priority;       // This parameter is added here although it is not specified in the standard, just for simplification
+       struct mac_addr actor_system;   /* This parameter is added here although it is not specified in the standard, just for simplification */
+       u16 actor_system_priority;      /* This parameter is added here although it is not specified in the standard, just for simplification */
        u16 actor_port_aggregator_identifier;
        bool ntt;
        u16 actor_admin_port_key;
@@ -219,24 +219,24 @@ typedef struct port {
 
        bool is_enabled;
 
-       // ****** PRIVATE PARAMETERS ******
-       u16 sm_vars;          // all state machines variables for this port
-       rx_states_t sm_rx_state;        // state machine rx state
-       u16 sm_rx_timer_counter;    // state machine rx timer counter
-       periodic_states_t sm_periodic_state;// state machine periodic state
-       u16 sm_periodic_timer_counter;  // state machine periodic timer counter
-       mux_states_t sm_mux_state;      // state machine mux state
-       u16 sm_mux_timer_counter;   // state machine mux timer counter
-       tx_states_t sm_tx_state;        // state machine tx state
-       u16 sm_tx_timer_counter;    // state machine tx timer counter(allways on - enter to transmit state 3 time per second)
-       struct slave *slave;        // pointer to the bond slave that this port belongs to
-       struct aggregator *aggregator;     // pointer to an aggregator that this port related to
-       struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator
-       u32 transaction_id;         // continuous number for identification of Marker PDU's;
-       struct lacpdu lacpdu;          // the lacpdu that will be sent for this port
+       /* ****** PRIVATE PARAMETERS ****** */
+       u16 sm_vars;            /* all state machines variables for this port */
+       rx_states_t sm_rx_state;        /* state machine rx state */
+       u16 sm_rx_timer_counter;        /* state machine rx timer counter */
+       periodic_states_t sm_periodic_state;    /* state machine periodic state */
+       u16 sm_periodic_timer_counter;  /* state machine periodic timer counter */
+       mux_states_t sm_mux_state;      /* state machine mux state */
+       u16 sm_mux_timer_counter;       /* state machine mux timer counter */
+       tx_states_t sm_tx_state;        /* state machine tx state */
+       u16 sm_tx_timer_counter;        /* state machine tx timer counter (always on - enters the transmit state 3 times per second) */
+       struct slave *slave;            /* pointer to the bond slave that this port belongs to */
+       struct aggregator *aggregator;  /* pointer to an aggregator that this port related to */
+       struct port *next_port_in_aggregator;   /* Next port on the linked list of the parent aggregator */
+       u32 transaction_id;             /* continuous number for identification of Marker PDU's; */
+       struct lacpdu lacpdu;           /* the lacpdu that will be sent for this port */
 } port_t;
 
-// system structure
+/* system structure */
 struct ad_system {
        u16 sys_priority;
        struct mac_addr sys_mac_addr;
@@ -246,27 +246,26 @@ struct ad_system {
 #pragma pack()
 #endif
 
-// ================= AD Exported structures to the main bonding code ==================
+/* ========== AD Exported structures to the main bonding code ========== */
 #define BOND_AD_INFO(bond)   ((bond)->ad_info)
 #define SLAVE_AD_INFO(slave) ((slave)->ad_info)
 
 struct ad_bond_info {
-       struct ad_system system;            /* 802.3ad system structure */
-       u32 agg_select_timer;       // Timer to select aggregator after all adapter's hand shakes
+       struct ad_system system;        /* 802.3ad system structure */
+       u32 agg_select_timer;           /* Timer to select aggregator after all adapter's hand shakes */
        u16 aggregator_identifier;
 };
 
 struct ad_slave_info {
-       struct aggregator aggregator;       // 802.3ad aggregator structure
-       struct port port;                   // 802.3ad port structure
-       spinlock_t state_machine_lock; /* mutex state machines vs.
-                                         incoming LACPDU */
+       struct aggregator aggregator;   /* 802.3ad aggregator structure */
+       struct port port;               /* 802.3ad port structure */
+       spinlock_t state_machine_lock;  /* mutex state machines vs. incoming LACPDU */
        u16 id;
 };
 
-// ================= AD Exported functions to the main bonding code ==================
+/* ========== AD Exported functions to the main bonding code ========== */
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-void  bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
 void bond_3ad_unbind_slave(struct slave *slave);
 void bond_3ad_state_machine_handler(struct work_struct *);
 void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
@@ -281,5 +280,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
 void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif //__BOND_3AD_H__
+#endif /* __BOND_3AD_H__ */
 
index e8f133e926aae720d09d76117bd0c84dcc90d775..9f69e818b0009db7881b3f8c862393836e5a604b 100644 (file)
@@ -93,9 +93,8 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
        int i;
        u8 hash = 0;
 
-       for (i = 0; i < hash_size; i++) {
+       for (i = 0; i < hash_size; i++)
                hash ^= hash_start[i];
-       }
 
        return hash;
 }
@@ -190,9 +189,8 @@ static int tlb_initialize(struct bonding *bond)
 
        bond_info->tx_hashtbl = new_hashtbl;
 
-       for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
+       for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
                tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
-       }
 
        _unlock_tx_hashtbl_bh(bond);
 
@@ -264,9 +262,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
                        hash_table[hash_index].next = next_index;
                        hash_table[hash_index].prev = TLB_NULL_INDEX;
 
-                       if (next_index != TLB_NULL_INDEX) {
+                       if (next_index != TLB_NULL_INDEX)
                                hash_table[next_index].prev = hash_index;
-                       }
 
                        slave_info->head = hash_index;
                        slave_info->load +=
@@ -274,9 +271,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
                }
        }
 
-       if (assigned_slave) {
+       if (assigned_slave)
                hash_table[hash_index].tx_bytes += skb_len;
-       }
 
        return assigned_slave;
 }
@@ -329,7 +325,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 
        _lock_rx_hashtbl_bh(bond);
 
-       hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+       hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
        client_info = &(bond_info->rx_hashtbl[hash_index]);
 
        if ((client_info->assigned) &&
@@ -337,7 +333,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
            (client_info->ip_dst == arp->ip_src) &&
            (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
                /* update the clients MAC address */
-               memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
+               ether_addr_copy(client_info->mac_dst, arp->mac_src);
                client_info->ntt = 1;
                bond_info->rx_ntt = 1;
        }
@@ -451,9 +447,8 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
  */
 static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 {
-       if (!bond->curr_active_slave) {
+       if (!bond->curr_active_slave)
                return;
-       }
 
        if (!bond->alb_info.primary_is_promisc) {
                if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
@@ -513,9 +508,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 
        write_lock_bh(&bond->curr_slave_lock);
 
-       if (slave != bond->curr_active_slave) {
+       if (slave != bond->curr_active_slave)
                rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
-       }
 
        write_unlock_bh(&bond->curr_slave_lock);
 }
@@ -524,9 +518,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 {
        int i;
 
-       if (!client_info->slave) {
+       if (!client_info->slave)
                return;
-       }
 
        for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
                struct sk_buff *skb;
@@ -574,9 +567,8 @@ static void rlb_update_rx_clients(struct bonding *bond)
                client_info = &(bond_info->rx_hashtbl[hash_index]);
                if (client_info->ntt) {
                        rlb_update_client(client_info);
-                       if (bond_info->rlb_update_retry_counter == 0) {
+                       if (bond_info->rlb_update_retry_counter == 0)
                                client_info->ntt = 0;
-                       }
                }
        }
 
@@ -610,10 +602,10 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
                }
        }
 
-       // update the team's flag only after the whole iteration
+       /* update the team's flag only after the whole iteration */
        if (ntt) {
                bond_info->rx_ntt = 1;
-               //fasten the change
+               /* fasten the change */
                bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
        }
 
@@ -677,9 +669,9 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                        /* the entry is already assigned to this client */
                        if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
                                /* update mac address from arp */
-                               memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+                               ether_addr_copy(client_info->mac_dst, arp->mac_dst);
                        }
-                       memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+                       ether_addr_copy(client_info->mac_src, arp->mac_src);
 
                        assigned_slave = client_info->slave;
                        if (assigned_slave) {
@@ -719,8 +711,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                 * will be updated with clients actual unicast mac address
                 * upon receiving an arp reply.
                 */
-               memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
-               memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+               ether_addr_copy(client_info->mac_dst, arp->mac_dst);
+               ether_addr_copy(client_info->mac_src, arp->mac_src);
                client_info->slave = assigned_slave;
 
                if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -770,9 +762,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                * rx channel
                */
                tx_slave = rlb_choose_channel(skb, bond);
-               if (tx_slave) {
-                       memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
-               }
+               if (tx_slave)
+                       ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
                pr_debug("Server sent ARP Reply packet\n");
        } else if (arp->op_code == htons(ARPOP_REQUEST)) {
                /* Create an entry in the rx_hashtbl for this client as a
@@ -824,9 +815,8 @@ static void rlb_rebalance(struct bonding *bond)
        }
 
        /* update the team's flag only after the whole iteration */
-       if (ntt) {
+       if (ntt)
                bond_info->rx_ntt = 1;
-       }
        _unlock_rx_hashtbl_bh(bond);
 }
 
@@ -923,7 +913,7 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+       u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
        u32 index;
 
        _lock_rx_hashtbl_bh(bond);
@@ -957,9 +947,8 @@ static int rlb_initialize(struct bonding *bond)
 
        bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
 
-       for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
+       for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
                rlb_init_table_entry(bond_info->rx_hashtbl + i);
-       }
 
        _unlock_rx_hashtbl_bh(bond);
 
@@ -1014,9 +1003,9 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        char *data;
 
        memset(&pkt, 0, size);
-       memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
-       memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
-       pkt.type = cpu_to_be16(ETH_P_LOOP);
+       ether_addr_copy(pkt.mac_dst, mac_addr);
+       ether_addr_copy(pkt.mac_src, mac_addr);
+       pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
 
        skb = dev_alloc_skb(size);
        if (!skb)
@@ -1097,7 +1086,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
 {
        u8 tmp_mac_addr[ETH_ALEN];
 
-       memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
        alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
        alb_set_slave_mac_addr(slave2, tmp_mac_addr);
 
@@ -1254,9 +1243,9 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        if (free_mac_slave) {
                alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
 
-               pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
-                          bond->dev->name, slave->dev->name,
-                          free_mac_slave->dev->name);
+               pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+                       bond->dev->name, slave->dev->name,
+                       free_mac_slave->dev->name);
 
        } else if (has_bond_addr) {
                pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
@@ -1294,12 +1283,12 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
 
        bond_for_each_slave(bond, slave, iter) {
                /* save net_device's current hw address */
-               memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(tmp_addr, slave->dev->dev_addr);
 
                res = dev_set_mac_address(slave->dev, addr);
 
                /* restore net_device's hw address */
-               memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+               ether_addr_copy(slave->dev->dev_addr, tmp_addr);
 
                if (res)
                        goto unwind;
@@ -1315,9 +1304,9 @@ unwind:
        bond_for_each_slave(bond, rollback_slave, iter) {
                if (rollback_slave == slave)
                        break;
-               memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
                dev_set_mac_address(rollback_slave->dev, &sa);
-               memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+               ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
        }
 
        return res;
@@ -1330,9 +1319,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
        int res;
 
        res = tlb_initialize(bond);
-       if (res) {
+       if (res)
                return res;
-       }
 
        if (rlb_enabled) {
                bond->alb_info.rlb_enabled = 1;
@@ -1355,9 +1343,8 @@ void bond_alb_deinitialize(struct bonding *bond)
 
        tlb_deinitialize(bond);
 
-       if (bond_info->rlb_enabled) {
+       if (bond_info->rlb_enabled)
                rlb_deinitialize(bond);
-       }
 }
 
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -1436,14 +1423,13 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                        break;
                }
 
-               hash_start = (char*)eth_data->h_dest;
+               hash_start = (char *)eth_data->h_dest;
                hash_size = ETH_ALEN;
                break;
        case ETH_P_ARP:
                do_tx_balance = 0;
-               if (bond_info->rlb_enabled) {
+               if (bond_info->rlb_enabled)
                        tx_slave = rlb_arp_xmit(skb, bond);
-               }
                break;
        default:
                do_tx_balance = 0;
@@ -1463,23 +1449,22 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 
        if (tx_slave && SLAVE_IS_OK(tx_slave)) {
                if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
-                       memcpy(eth_data->h_source,
-                              tx_slave->dev->dev_addr,
-                              ETH_ALEN);
+                       ether_addr_copy(eth_data->h_source,
+                                       tx_slave->dev->dev_addr);
                }
 
                bond_dev_queue_xmit(bond, skb, tx_slave->dev);
                goto out;
-       } else {
-               if (tx_slave) {
-                       _lock_tx_hashtbl(bond);
-                       __tlb_clear_slave(bond, tx_slave, 0);
-                       _unlock_tx_hashtbl(bond);
-               }
+       }
+
+       if (tx_slave) {
+               _lock_tx_hashtbl(bond);
+               __tlb_clear_slave(bond, tx_slave, 0);
+               _unlock_tx_hashtbl(bond);
        }
 
        /* no suitable interface, frame not sent */
-       kfree_skb(skb);
+       dev_kfree_skb_any(skb);
 out:
        return NETDEV_TX_OK;
 }
@@ -1577,11 +1562,10 @@ void bond_alb_monitor(struct work_struct *work)
                                --bond_info->rlb_update_delay_counter;
                        } else {
                                rlb_update_rx_clients(bond);
-                               if (bond_info->rlb_update_retry_counter) {
+                               if (bond_info->rlb_update_retry_counter)
                                        --bond_info->rlb_update_retry_counter;
-                               } else {
+                               else
                                        bond_info->rx_ntt = 0;
-                               }
                        }
                }
        }
@@ -1598,23 +1582,20 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
        int res;
 
        res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
-       if (res) {
+       if (res)
                return res;
-       }
 
        res = alb_handle_addr_collision_on_attach(bond, slave);
-       if (res) {
+       if (res)
                return res;
-       }
 
        tlb_init_slave(slave);
 
        /* order a rebalance ASAP */
        bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
 
-       if (bond->alb_info.rlb_enabled) {
+       if (bond->alb_info.rlb_enabled)
                bond->alb_info.rlb_rebalance = 1;
-       }
 
        return 0;
 }
@@ -1645,9 +1626,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
 
        if (link == BOND_LINK_DOWN) {
                tlb_clear_slave(bond, slave, 0);
-               if (bond->alb_info.rlb_enabled) {
+               if (bond->alb_info.rlb_enabled)
                        rlb_clear_slave(bond, slave);
-               }
        } else if (link == BOND_LINK_UP) {
                /* order a rebalance ASAP */
                bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
@@ -1723,14 +1703,14 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
                struct sockaddr sa;
                u8 tmp_addr[ETH_ALEN];
 
-               memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
 
                memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
                sa.sa_family = bond->dev->type;
                /* we don't care if it can't change its mac, best effort */
                dev_set_mac_address(new_slave->dev, &sa);
 
-               memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+               ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
        }
 
        /* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1759,14 +1739,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
        struct slave *swap_slave;
        int res;
 
-       if (!is_valid_ether_addr(sa->sa_data)) {
+       if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;
-       }
 
        res = alb_set_mac_address(bond, addr);
-       if (res) {
+       if (res)
                return res;
-       }
 
        memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
 
@@ -1774,9 +1752,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
         * Otherwise we'll need to pass the new address to it and handle
         * duplications.
         */
-       if (!bond->curr_active_slave) {
+       if (!bond->curr_active_slave)
                return 0;
-       }
 
        swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
 
@@ -1800,8 +1777,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 
 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
 {
-       if (bond->alb_info.rlb_enabled) {
+       if (bond->alb_info.rlb_enabled)
                rlb_clear_vlan(bond, vlan_id);
-       }
 }
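
bond_alb_xmit() (like the 802.3ad transmit path earlier) now drops unsendable frames with dev_kfree_skb_any() instead of kfree_skb(). An ndo_start_xmit handler can be entered from netpoll with hard interrupts disabled, and dev_kfree_skb_any() picks the freeing routine that is legal for the current context. A sketch of the drop path; my_xmit and its slave argument are hypothetical:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *slave)
    {
            if (!slave) {
                    /* no suitable interface: free in a context-safe way */
                    dev_kfree_skb_any(skb);
                    return NETDEV_TX_OK;    /* dropped, not requeued */
            }

            skb->dev = slave;
            dev_queue_xmit(skb);
            return NETDEV_TX_OK;
    }
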
 
index 5fc4c2351478b21f98ba8ed2d2c329275e30ed39..2d3f7fa541ffe755fc1bf5f9e51aeaa464b4e032 100644 (file)
@@ -69,7 +69,7 @@ void bond_debug_register(struct bonding *bond)
                debugfs_create_dir(bond->dev->name, bonding_debug_root);
 
        if (!bond->debug_dir) {
-               pr_warning("%s: Warning: failed to register to debugfs\n",
+               pr_warn("%s: Warning: failed to register to debugfs\n",
                        bond->dev->name);
                return;
        }
@@ -98,9 +98,8 @@ void bond_debug_reregister(struct bonding *bond)
        if (d) {
                bond->debug_dir = d;
        } else {
-               pr_warning("%s: Warning: failed to reregister, "
-                               "so just unregister old one\n",
-                               bond->dev->name);
+               pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
+                       bond->dev->name);
                bond_debug_unregister(bond);
        }
 }
@@ -110,8 +109,7 @@ void bond_create_debugfs(void)
        bonding_debug_root = debugfs_create_dir("bonding", NULL);
 
        if (!bonding_debug_root) {
-               pr_warning("Warning: Cannot create bonding directory"
-                               " in debugfs\n");
+               pr_warn("Warning: Cannot create bonding directory in debugfs\n");
        }
 }
 
index e5628fc725c3fc3885b7deac79074caa41edbae4..95a6ca7d9e51950078d5c5f6a2e7281ab18621e9 100644 (file)
@@ -673,12 +673,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                write_unlock_bh(&bond->curr_slave_lock);
 
                if (old_active) {
-                       memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
-                       memcpy(saddr.sa_data, old_active->dev->dev_addr,
-                              ETH_ALEN);
+                       ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+                       ether_addr_copy(saddr.sa_data,
+                                       old_active->dev->dev_addr);
                        saddr.sa_family = new_active->dev->type;
                } else {
-                       memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
+                       ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
                        saddr.sa_family = bond->dev->type;
                }
 
@@ -692,7 +692,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                if (!old_active)
                        goto out;
 
-               memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
+               ether_addr_copy(saddr.sa_data, tmp_mac);
                saddr.sa_family = old_active->dev->type;
 
                rv = dev_set_mac_address(old_active->dev, &saddr);
@@ -798,11 +798,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                return;
 
        if (new_active) {
-               new_active->jiffies = jiffies;
+               new_active->last_link_up = jiffies;
 
                if (new_active->link == BOND_LINK_BACK) {
                        if (USES_PRIMARY(bond->params.mode)) {
-                               pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+                               pr_info("%s: making interface %s the new active one %d ms earlier\n",
                                        bond->dev->name, new_active->dev->name,
                                        (bond->params.updelay - new_active->delay) * bond->params.miimon);
                        }
@@ -817,7 +817,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                                bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
                } else {
                        if (USES_PRIMARY(bond->params.mode)) {
-                               pr_info("%s: making interface %s the new active one.\n",
+                               pr_info("%s: making interface %s the new active one\n",
                                        bond->dev->name, new_active->dev->name);
                        }
                }
@@ -910,7 +910,7 @@ void bond_select_active_slave(struct bonding *bond)
                        pr_info("%s: first active interface up!\n",
                                bond->dev->name);
                } else {
-                       pr_info("%s: now running without any active interface !\n",
+                       pr_info("%s: now running without any active interface!\n",
                                bond->dev->name);
                }
        }
@@ -922,12 +922,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
        struct netpoll *np;
        int err = 0;
 
-       np = kzalloc(sizeof(*np), GFP_ATOMIC);
+       np = kzalloc(sizeof(*np), GFP_KERNEL);
        err = -ENOMEM;
        if (!np)
                goto out;
 
-       err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
+       err = __netpoll_setup(np, slave->dev);
        if (err) {
                kfree(np);
                goto out;
@@ -946,14 +946,6 @@ static inline void slave_disable_netpoll(struct slave *slave)
        slave->np = NULL;
        __netpoll_free_async(np);
 }
-static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
-{
-       if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
-               return false;
-       if (!slave_dev->netdev_ops->ndo_poll_controller)
-               return false;
-       return true;
-}
 
 static void bond_poll_controller(struct net_device *bond_dev)
 {
@@ -970,7 +962,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
                        slave_disable_netpoll(slave);
 }
 
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
        struct bonding *bond = netdev_priv(dev);
        struct list_head *iter;
@@ -1119,9 +1111,6 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
        slave = bond_slave_get_rcu(skb->dev);
        bond = slave->bond;
 
-       if (bond->params.arp_interval)
-               slave->dev->last_rx = jiffies;
-
        recv_probe = ACCESS_ONCE(bond->recv_probe);
        if (recv_probe) {
                ret = recv_probe(skb, bond, slave);
@@ -1146,7 +1135,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                        kfree_skb(skb);
                        return RX_HANDLER_CONSUMED;
                }
-               memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
        }
 
        return ret;
@@ -1187,13 +1176,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        if (!bond->params.use_carrier &&
            slave_dev->ethtool_ops->get_link == NULL &&
            slave_ops->ndo_do_ioctl == NULL) {
-               pr_warning("%s: Warning: no link monitoring support for %s\n",
-                          bond_dev->name, slave_dev->name);
+               pr_warn("%s: Warning: no link monitoring support for %s\n",
+                       bond_dev->name, slave_dev->name);
        }
 
        /* already enslaved */
        if (slave_dev->flags & IFF_SLAVE) {
-               pr_debug("Error, Device was already enslaved\n");
+               pr_debug("Error: Device was already enslaved\n");
                return -EBUSY;
        }
 
@@ -1211,9 +1200,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                               bond_dev->name, slave_dev->name, bond_dev->name);
                        return -EPERM;
                } else {
-                       pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
-                                  bond_dev->name, slave_dev->name,
-                                  slave_dev->name, bond_dev->name);
+                       pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+                               bond_dev->name, slave_dev->name,
+                               slave_dev->name, bond_dev->name);
                }
        } else {
                pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
@@ -1226,7 +1215,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * enslaving it; the old ifenslave will not.
         */
        if ((slave_dev->flags & IFF_UP)) {
-               pr_err("%s is up. This may be due to an out of date ifenslave.\n",
+               pr_err("%s is up - this may be due to an out of date ifenslave\n",
                       slave_dev->name);
                res = -EPERM;
                goto err_undo_flags;
@@ -1270,24 +1259,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                                 bond_dev);
                }
        } else if (bond_dev->type != slave_dev->type) {
-               pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
-                      slave_dev->name,
-                      slave_dev->type, bond_dev->type);
+               pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+                      slave_dev->name, slave_dev->type, bond_dev->type);
                res = -EINVAL;
                goto err_undo_flags;
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
                if (!bond_has_slaves(bond)) {
-                       pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+                       pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
                                bond_dev->name);
                        if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
                                bond->params.fail_over_mac = BOND_FOM_ACTIVE;
-                               pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+                               pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
                                        bond_dev->name);
                        }
                } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
-                       pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+                       pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
                               bond_dev->name);
                        res = -EOPNOTSUPP;
                        goto err_undo_flags;
@@ -1326,7 +1314,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * that need it, and for restoring it upon release, and then
         * set it to the master's address
         */
-       memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
 
        if (!bond->params.fail_over_mac ||
            bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
@@ -1410,10 +1398,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        bond_update_speed_duplex(new_slave);
 
-       new_slave->last_arp_rx = jiffies -
+       new_slave->last_rx = jiffies -
                (msecs_to_jiffies(bond->params.arp_interval) + 1);
        for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
-               new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
+               new_slave->target_last_arp_rx[i] = new_slave->last_rx;
 
        if (bond->params.miimon && !bond->params.use_carrier) {
                link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1428,12 +1416,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                         * supported); thus, we don't need to change
                         * the messages for netif_carrier.
                         */
-                       pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
-                              bond_dev->name, slave_dev->name);
+                       pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+                               bond_dev->name, slave_dev->name);
                } else if (link_reporting == -1) {
                        /* unable get link status using mii/ethtool */
-                       pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
-                                  bond_dev->name, slave_dev->name);
+                       pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+                               bond_dev->name, slave_dev->name);
                }
        }
 
@@ -1457,10 +1445,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        if (new_slave->link != BOND_LINK_DOWN)
-               new_slave->jiffies = jiffies;
+               new_slave->last_link_up = jiffies;
        pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
-               new_slave->link == BOND_LINK_DOWN ? "DOWN" :
-                       (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+                new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+                (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
        if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
@@ -1520,9 +1508,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        slave_dev->npinfo = bond->dev->npinfo;
        if (slave_dev->npinfo) {
                if (slave_enable_netpoll(new_slave)) {
-                       pr_info("Error, %s: master_dev is using netpoll, "
-                                "but new slave device does not support netpoll.\n",
-                                bond_dev->name);
+                       pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
+                               bond_dev->name);
                        res = -EBUSY;
                        goto err_detach;
                }
@@ -1560,10 +1547,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                unblock_netpoll_tx();
        }
 
-       pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+       pr_info("%s: Enslaving %s as %s interface with %s link\n",
                bond_dev->name, slave_dev->name,
-               bond_is_active_slave(new_slave) ? "n active" : " backup",
-               new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+               bond_is_active_slave(new_slave) ? "an active" : "a backup",
+               new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
 
        /* enslave is successful */
        return 0;
@@ -1603,7 +1590,7 @@ err_restore_mac:
                 * MAC if this slave's MAC is in use by the bond, or at
                 * least print a warning.
                 */
-               memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+               ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
                addr.sa_family = slave_dev->type;
                dev_set_mac_address(slave_dev, &addr);
        }
@@ -1648,7 +1635,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
            !netdev_has_upper_dev(slave_dev, bond_dev)) {
-               pr_err("%s: Error: cannot release %s.\n",
+               pr_err("%s: Error: cannot release %s\n",
                       bond_dev->name, slave_dev->name);
                return -EINVAL;
        }
@@ -1679,7 +1666,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        write_unlock_bh(&bond->lock);
 
-       pr_info("%s: releasing %s interface %s\n",
+       pr_info("%s: Releasing %s interface %s\n",
                bond_dev->name,
                bond_is_active_slave(slave) ? "active" : "backup",
                slave_dev->name);
@@ -1692,10 +1679,10 @@ static int __bond_release_one(struct net_device *bond_dev,
                     bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
-                       pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
-                                  bond_dev->name, slave_dev->name,
-                                  slave->perm_hwaddr,
-                                  bond_dev->name, slave_dev->name);
+                       pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+                               bond_dev->name, slave_dev->name,
+                               slave->perm_hwaddr,
+                               bond_dev->name, slave_dev->name);
        }
 
        if (bond->primary_slave == slave)
@@ -1736,10 +1723,10 @@ static int __bond_release_one(struct net_device *bond_dev,
                eth_hw_addr_random(bond_dev);
 
                if (vlan_uses_dev(bond_dev)) {
-                       pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
-                                  bond_dev->name, bond_dev->name);
-                       pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
-                                  bond_dev->name);
+                       pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
+                               bond_dev->name, bond_dev->name);
+                       pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
+                               bond_dev->name);
                }
        }
 
@@ -1755,7 +1742,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        bond_compute_features(bond);
        if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
            (old_features & NETIF_F_VLAN_CHALLENGED))
-               pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+               pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
                        bond_dev->name, slave_dev->name, bond_dev->name);
 
        /* must do this from outside any spinlocks */
@@ -1790,7 +1777,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
            bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
                /* restore original ("permanent") mac address */
-               memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+               ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
                addr.sa_family = slave_dev->type;
                dev_set_mac_address(slave_dev, &addr);
        }
@@ -1823,7 +1810,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        ret = bond_release(bond_dev, slave_dev);
        if (ret == 0 && !bond_has_slaves(bond)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-               pr_info("%s: destroying bond %s.\n",
+               pr_info("%s: Destroying bond %s\n",
                        bond_dev->name, bond_dev->name);
                unregister_netdevice(bond_dev);
        }
@@ -1837,9 +1824,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
        info->bond_mode = bond->params.mode;
        info->miimon = bond->params.miimon;
 
-       read_lock(&bond->lock);
        info->num_slaves = bond->slave_cnt;
-       read_unlock(&bond->lock);
 
        return 0;
 }
@@ -1851,7 +1836,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
        int i = 0, res = -ENODEV;
        struct slave *slave;
 
-       read_lock(&bond->lock);
        bond_for_each_slave(bond, slave, iter) {
                if (i++ == (int)info->slave_id) {
                        res = 0;
@@ -1862,7 +1846,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
                        break;
                }
        }
-       read_unlock(&bond->lock);
 
        return res;
 }
@@ -1892,7 +1875,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        slave->link = BOND_LINK_FAIL;
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
-                               pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+                               pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
                                        bond->dev->name,
                                        (bond->params.mode ==
                                         BOND_MODE_ACTIVEBACKUP) ?
@@ -1908,8 +1891,8 @@ static int bond_miimon_inspect(struct bonding *bond)
                                 * recovered before downdelay expired
                                 */
                                slave->link = BOND_LINK_UP;
-                               slave->jiffies = jiffies;
-                               pr_info("%s: link status up again after %d ms for interface %s.\n",
+                               slave->last_link_up = jiffies;
+                               pr_info("%s: link status up again after %d ms for interface %s\n",
                                        bond->dev->name,
                                        (bond->params.downdelay - slave->delay) *
                                        bond->params.miimon,
@@ -1934,7 +1917,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
-                               pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+                               pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
                                        bond->dev->name, slave->dev->name,
                                        ignore_updelay ? 0 :
                                        bond->params.updelay *
@@ -1944,7 +1927,7 @@ static int bond_miimon_inspect(struct bonding *bond)
                case BOND_LINK_BACK:
                        if (!link_state) {
                                slave->link = BOND_LINK_DOWN;
-                               pr_info("%s: link status down again after %d ms for interface %s.\n",
+                               pr_info("%s: link status down again after %d ms for interface %s\n",
                                        bond->dev->name,
                                        (bond->params.updelay - slave->delay) *
                                        bond->params.miimon,
@@ -1983,7 +1966,7 @@ static void bond_miimon_commit(struct bonding *bond)
 
                case BOND_LINK_UP:
                        slave->link = BOND_LINK_UP;
-                       slave->jiffies = jiffies;
+                       slave->last_link_up = jiffies;
 
                        if (bond->params.mode == BOND_MODE_8023AD) {
                                /* prevent it from being the active one */
@@ -1996,7 +1979,7 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_set_backup_slave(slave);
                        }
 
-                       pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+                       pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
                                bond->dev->name, slave->dev->name,
                                slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
                                slave->duplex ? "full" : "half");
@@ -2141,24 +2124,40 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
  * switches in VLAN mode (especially if ports are configured as
  * "native" to a VLAN) might not pass non-tagged frames.
  */
-static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id)
+static void bond_arp_send(struct net_device *slave_dev, int arp_op,
+                         __be32 dest_ip, __be32 src_ip,
+                         struct bond_vlan_tag *inner,
+                         struct bond_vlan_tag *outer)
 {
        struct sk_buff *skb;
 
-       pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
-                slave_dev->name, &dest_ip, &src_ip, vlan_id);
+       pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
+                arp_op, slave_dev->name, &dest_ip, &src_ip);
 
        skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
                         NULL, slave_dev->dev_addr, NULL);
 
        if (!skb) {
-               pr_err("ARP packet allocation failed\n");
+               net_err_ratelimited("ARP packet allocation failed\n");
                return;
        }
-       if (vlan_id) {
-               skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
+       if (outer->vlan_id) {
+               if (inner->vlan_id) {
+                       pr_debug("inner tag: proto %X vid %X\n",
+                                ntohs(inner->vlan_proto), inner->vlan_id);
+                       skb = __vlan_put_tag(skb, inner->vlan_proto,
+                                            inner->vlan_id);
+                       if (!skb) {
+                               net_err_ratelimited("failed to insert inner VLAN tag\n");
+                               return;
+                       }
+               }
+
+               pr_debug("outer tag: proto %X vid %X\n",
+                        ntohs(outer->vlan_proto), outer->vlan_id);
+               skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id);
                if (!skb) {
-                       pr_err("failed to insert VLAN tag\n");
+                       net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
                }
        }
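
The reworked bond_arp_send() above can tag an ARP probe twice for stacked-VLAN (QinQ) setups: when both tags are present, the inner 802.1Q header is pushed first and the outer one last, so the outer tag ends up outermost on the wire. A minimal user-space sketch of that ordering, with stand-in types and a hypothetical push_tag() helper (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_tag { uint16_t proto; uint16_t id; };  /* stand-in for struct bond_vlan_tag */

/* prepend a simplified 4-byte VLAN header in front of the current payload */
static size_t push_tag(uint8_t *frame, size_t len, struct vlan_tag t)
{
	memmove(frame + 4, frame, len);
	frame[0] = t.proto >> 8;
	frame[1] = t.proto & 0xff;
	frame[2] = t.id >> 8;                  /* TCI simplified to just the VID */
	frame[3] = t.id & 0xff;
	return len + 4;
}

int main(void)
{
	uint8_t frame[64] = { 0xde, 0xad };    /* pretend ARP payload */
	size_t len = 2;
	struct vlan_tag inner = { 0x8100, 100 };
	struct vlan_tag outer = { 0x88a8, 200 };

	if (outer.id) {
		if (inner.id)
			len = push_tag(frame, len, inner);  /* inner goes in first...    */
		len = push_tag(frame, len, outer);          /* ...outer ends up outermost */
	}
	printf("first tag on the wire: proto 0x%04x vid %u, frame length %zu\n",
	       (unsigned)((frame[0] << 8) | frame[1]),
	       (unsigned)(((frame[2] << 8) | frame[3]) & 0x0fff), len);
	return 0;
}
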
@@ -2171,23 +2170,32 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
        struct net_device *upper, *vlan_upper;
        struct list_head *iter, *vlan_iter;
        struct rtable *rt;
+       struct bond_vlan_tag inner, outer;
        __be32 *targets = bond->params.arp_targets, addr;
-       int i, vlan_id;
+       int i;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
                pr_debug("basa: target %pI4\n", &targets[i]);
+               inner.vlan_proto = 0;
+               inner.vlan_id = 0;
+               outer.vlan_proto = 0;
+               outer.vlan_id = 0;
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
                                     RTO_ONLINK, 0);
                if (IS_ERR(rt)) {
-                       pr_debug("%s: no route to arp_ip_target %pI4\n",
-                                bond->dev->name, &targets[i]);
+                       /* there's no route to target - try to send arp
+                        * probe to generate any traffic (arp_validate=0)
+                        */
+                       if (bond->params.arp_validate)
+                               net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
+                                                    bond->dev->name,
+                                                    &targets[i]);
+                       bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer);
                        continue;
                }
 
-               vlan_id = 0;
-
                /* bond device itself */
                if (rt->dst.dev == bond->dev)
                        goto found;
@@ -2197,17 +2205,30 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 * found we verify its upper dev list, searching for the
                 * rt->dst.dev. If found we save the tag of the vlan and
                 * proceed to send the packet.
-                *
-                * TODO: QinQ?
                 */
                netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
                                                  vlan_iter) {
                        if (!is_vlan_dev(vlan_upper))
                                continue;
+
+                       if (vlan_upper == rt->dst.dev) {
+                               outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
+                               outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
+                               rcu_read_unlock();
+                               goto found;
+                       }
                        netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
                                                          iter) {
                                if (upper == rt->dst.dev) {
-                                       vlan_id = vlan_dev_vlan_id(vlan_upper);
+                                       /* If the upper dev is a vlan dev too,
+                                        * set its tag as the inner tag.
+                                        */
+                                       if (is_vlan_dev(upper)) {
+                                               inner.vlan_proto = vlan_dev_vlan_proto(upper);
+                                               inner.vlan_id = vlan_dev_vlan_id(upper);
+                                       }
+                                       outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper);
+                                       outer.vlan_id = vlan_dev_vlan_id(vlan_upper);
                                        rcu_read_unlock();
                                        goto found;
                                }
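
The walk above derives the two tags from the bond's upper-device stack: a VLAN created directly on the bond supplies only the outer tag, while a VLAN stacked on such a VLAN supplies the inner tag, with the VLAN closest to the bond becoming the outer one. A toy user-space model of that decision for a bond0 <- VLAN 200 <- VLAN 100 stack (hypothetical types, not the kernel netdev API):

#include <stdio.h>

struct fake_dev {
	const char *name;
	struct fake_dev *lower;   /* next device towards the bond, NULL for the bond itself */
	unsigned short vlan_id;   /* 0 if not a VLAN device */
};

int main(void)
{
	struct fake_dev bond  = { "bond0", NULL, 0 };
	struct fake_dev svlan = { "bond0.200", &bond, 200 };
	struct fake_dev cvlan = { "bond0.200.100", &svlan, 100 };
	struct fake_dev *route_dev = &cvlan;   /* plays the role of rt->dst.dev */

	unsigned short inner_id = 0, outer_id = 0;

	if (route_dev->lower == &bond) {
		/* VLAN directly on the bond: outer tag only */
		outer_id = route_dev->vlan_id;
	} else if (route_dev->lower && route_dev->lower->lower == &bond) {
		/* VLAN on VLAN on bond: the upper VLAN becomes the inner tag,
		 * the VLAN sitting on the bond becomes the outer tag */
		inner_id = route_dev->vlan_id;
		outer_id = route_dev->lower->vlan_id;
	}
	printf("%s: inner vid %u, outer vid %u\n",
	       route_dev->name, inner_id, outer_id);
	return 0;
}
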
@@ -2220,10 +2241,6 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 */
                netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                        if (upper == rt->dst.dev) {
-                               /* if it's a vlan - get its VID */
-                               if (is_vlan_dev(upper))
-                                       vlan_id = vlan_dev_vlan_id(upper);
-
                                rcu_read_unlock();
                                goto found;
                        }
@@ -2242,7 +2259,7 @@ found:
                addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
-                             addr, vlan_id);
+                             addr, &inner, &outer);
        }
 }
 
@@ -2260,7 +2277,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
                pr_debug("bva: sip %pI4 not found in targets\n", &sip);
                return;
        }
-       slave->last_arp_rx = jiffies;
+       slave->last_rx = jiffies;
        slave->target_last_arp_rx[i] = jiffies;
 }
 
@@ -2268,17 +2285,19 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
                 struct slave *slave)
 {
        struct arphdr *arp = (struct arphdr *)skb->data;
+       struct slave *curr_active_slave;
        unsigned char *arp_ptr;
        __be32 sip, tip;
-       int alen;
+       int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
 
-       if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
+       if (!slave_do_arp_validate(bond, slave)) {
+               if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
+                   !slave_do_arp_validate_only(bond, slave))
+                       slave->last_rx = jiffies;
                return RX_HANDLER_ANOTHER;
-
-       read_lock(&bond->lock);
-
-       if (!slave_do_arp_validate(bond, slave))
-               goto out_unlock;
+       } else if (!is_arp) {
+               return RX_HANDLER_ANOTHER;
+       }
 
        alen = arp_hdr_len(bond->dev);
 
@@ -2312,6 +2331,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
                 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
                 &sip, &tip);
 
+       curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
        /*
         * Backup slaves won't see the ARP reply, but do come through
         * here for each ARP probe (so we swap the sip/tip to validate
@@ -2325,15 +2346,15 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
         * is done to avoid endless looping when we can't reach the
         * arp_ip_target and fool ourselves with our own arp requests.
         */
+
        if (bond_is_active_slave(slave))
                bond_validate_arp(bond, slave, sip, tip);
-       else if (bond->curr_active_slave &&
-                time_after(slave_last_rx(bond, bond->curr_active_slave),
-                           bond->curr_active_slave->jiffies))
+       else if (curr_active_slave &&
+                time_after(slave_last_rx(bond, curr_active_slave),
+                           curr_active_slave->last_link_up))
                bond_validate_arp(bond, slave, tip, sip);
 
 out_unlock:
-       read_unlock(&bond->lock);
        if (arp != (struct arphdr *)skb->data)
                kfree(arp);
        return RX_HANDLER_ANOTHER;
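
With the new filter* arp_validate options, bond_arp_rcv() above now updates last_rx itself for slaves that do not validate (for any traffic, or only for ARP in the filter modes), and keeps the old asymmetry for slaves that do validate: an active slave checks the reply's sip/tip as received, while a backup slave checks the bond's own probe with sip and tip swapped, and only once the current active slave has received something after it last came up, so the bond cannot keep itself alive with its own requests. A small user-space sketch of that decision, with hypothetical names (after() follows the kernel's time_after() idea):

#include <stdbool.h>
#include <stdio.h>

/* wraparound-safe "a is later than b", same idea as the kernel's time_after() */
static bool after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

/* returns true if the ARP should be validated at all; *swap_sip_tip says
 * whether to validate it as our own probe (backup slave) or as a reply */
static bool validate_direction(bool slave_is_active,
			       unsigned long active_last_rx,
			       unsigned long active_last_link_up,
			       bool *swap_sip_tip)
{
	if (slave_is_active) {
		*swap_sip_tip = false;   /* validate sip -> tip as received */
		return true;
	}
	if (after(active_last_rx, active_last_link_up)) {
		*swap_sip_tip = true;    /* backup slave sees our own probe */
		return true;
	}
	return false;                    /* don't trust our own probes yet  */
}

int main(void)
{
	bool swap = false;
	bool ok = validate_direction(false, 1000, 900, &swap);

	printf("backup slave, active alive: validate=%d swap=%d\n", ok, swap);
	return 0;
}
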
@@ -2376,9 +2397,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
        oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
        /* see if any of the previous devices are up now (i.e. they have
         * xmt and rcv traffic). the curr_active_slave does not come into
-        * the picture unless it is null. also, slave->jiffies is not needed
-        * here because we send an arp on each slave and give a slave as
-        * long as it needs to get the tx/rx within the delta.
+        * the picture unless it is null. also, slave->last_link_up is not
+        * needed here because we send an arp on each slave and give a slave
+        * as long as it needs to get the tx/rx within the delta.
         * TODO: what about up/down delay in arp mode? it wasn't here before
         *       so it can wait
         */
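
The comment above describes the load-balancing monitor's two-sided test: a down slave is brought up only when both its last transmit and its last receive look recent, and an up slave is taken down when either looks stale (roughly one and two monitor intervals; the exact window comes from bond_time_in_interval()), with the receive timestamp now read from slave->last_rx instead of the net_device. A wraparound-safe sketch of such a window test (hypothetical helper, user space):

#include <stdbool.h>
#include <stdio.h>

/* "stamp is no older than span ticks before now", safe across counter wrap */
static bool in_window(unsigned long now, unsigned long stamp, unsigned long span)
{
	return (long)(now - stamp) <= (long)span;
}

int main(void)
{
	unsigned long arp_interval = 100;               /* ticks per monitor run */
	unsigned long now = 10000, last_tx = 9950, last_rx = 9990;

	bool bring_up  = in_window(now, last_tx, arp_interval) &&
			 in_window(now, last_rx, arp_interval);
	bool take_down = !in_window(now, last_tx, 2 * arp_interval) ||
			 !in_window(now, last_rx, 2 * arp_interval);

	printf("bring_up=%d take_down=%d\n", bring_up, take_down);
	return 0;
}
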
@@ -2387,7 +2408,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 
                if (slave->link != BOND_LINK_UP) {
                        if (bond_time_in_interval(bond, trans_start, 1) &&
-                           bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
+                           bond_time_in_interval(bond, slave->last_rx, 1)) {
 
                                slave->link  = BOND_LINK_UP;
                                slave_state_changed = 1;
@@ -2398,7 +2419,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                 * is closed.
                                 */
                                if (!oldcurrent) {
-                                       pr_info("%s: link status definitely up for interface %s",
+                                       pr_info("%s: link status definitely up for interface %s\n",
                                                bond->dev->name,
                                                slave->dev->name);
                                        do_failover = 1;
@@ -2416,7 +2437,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                         * if we don't know our ip yet
                         */
                        if (!bond_time_in_interval(bond, trans_start, 2) ||
-                           !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
+                           !bond_time_in_interval(bond, slave->last_rx, 2)) {
 
                                slave->link  = BOND_LINK_DOWN;
                                slave_state_changed = 1;
@@ -2424,9 +2445,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                if (slave->link_failure_count < UINT_MAX)
                                        slave->link_failure_count++;
 
-                               pr_info("%s: interface %s is now down.\n",
-                                       bond->dev->name,
-                                       slave->dev->name);
+                               pr_info("%s: interface %s is now down\n",
+                                       bond->dev->name, slave->dev->name);
 
                                if (slave == oldcurrent)
                                        do_failover = 1;
@@ -2505,7 +2525,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
                 * active.  This avoids bouncing, as the last receive
                 * times need a full ARP monitor cycle to be updated.
                 */
-               if (bond_time_in_interval(bond, slave->jiffies, 2))
+               if (bond_time_in_interval(bond, slave->last_link_up, 2))
                        continue;
 
                /*
@@ -2576,7 +2596,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
                                        bond->current_arp_slave = NULL;
                                }
 
-                               pr_info("%s: link status definitely up for interface %s.\n",
+                               pr_info("%s: link status definitely up for interface %s\n",
                                        bond->dev->name, slave->dev->name);
 
                                if (!bond->curr_active_slave ||
@@ -2682,7 +2702,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_LATER);
 
-                       pr_info("%s: backup interface %s is now down.\n",
+                       pr_info("%s: backup interface %s is now down\n",
                                bond->dev->name, slave->dev->name);
                }
                if (slave == curr_arp_slave)
@@ -2698,7 +2718,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        new_slave->link = BOND_LINK_BACK;
        bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
        bond_arp_send_all(bond, new_slave);
-       new_slave->jiffies = jiffies;
+       new_slave->last_link_up = jiffies;
        rcu_assign_pointer(bond->current_arp_slave, new_slave);
 
 check_state:
@@ -2879,9 +2899,9 @@ static int bond_slave_netdev_event(unsigned long event,
                        break;
                }
 
-               pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
-                       bond->dev->name, bond->primary_slave ? slave_dev->name :
-                                                              "none");
+               pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
+                       bond->dev->name,
+                       bond->primary_slave ? slave_dev->name : "none");
 
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
@@ -2917,8 +2937,7 @@ static int bond_netdev_event(struct notifier_block *this,
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
        pr_debug("event_dev: %s, event: %lx\n",
-                event_dev ? event_dev->name : "None",
-                event);
+                event_dev ? event_dev->name : "None", event);
 
        if (!(event_dev->priv_flags & IFF_BONDING))
                return NOTIFY_DONE;
@@ -2967,7 +2986,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
        fk->ports = 0;
        noff = skb_network_offset(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
-               if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+               if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
                        return false;
                iph = ip_hdr(skb);
                fk->src = iph->saddr;
@@ -2976,7 +2995,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
                if (!ip_is_fragment(iph))
                        proto = iph->protocol;
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
-               if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+               if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
                        return false;
                iph6 = ipv6_hdr(skb);
                fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
@@ -3087,8 +3106,7 @@ static int bond_open(struct net_device *bond_dev)
 
        if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
                queue_delayed_work(bond->wq, &bond->arp_work, 0);
-               if (bond->params.arp_validate)
-                       bond->recv_probe = bond_arp_rcv;
+               bond->recv_probe = bond_arp_rcv;
        }
 
        if (bond->params.mode == BOND_MODE_8023AD) {
@@ -3375,8 +3393,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
        struct list_head *iter;
        int res = 0;
 
-       pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
-                (bond_dev ? bond_dev->name : "None"), new_mtu);
+       pr_debug("bond=%p, name=%s, new_mtu=%d\n",
+                bond, bond_dev ? bond_dev->name : "None", new_mtu);
 
        /* Can't hold bond->lock with bh disabled here since
         * some base drivers panic. On the other hand we can't
@@ -3395,8 +3413,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 
        bond_for_each_slave(bond, slave, iter) {
                pr_debug("s %p c_m %p\n",
-                        slave,
-                        slave->dev->netdev_ops->ndo_change_mtu);
+                        slave, slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
 
@@ -3484,15 +3501,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         */
 
        bond_for_each_slave(bond, slave, iter) {
-               const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
                pr_debug("slave %p %s\n", slave, slave->dev->name);
-
-               if (slave_ops->ndo_set_mac_address == NULL) {
-                       res = -EOPNOTSUPP;
-                       pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
-                       goto unwind;
-               }
-
                res = dev_set_mac_address(slave->dev, addr);
                if (res) {
                        /* TODO: consider downing the slave
@@ -3568,7 +3577,7 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
                }
        }
        /* no slave that can tx has been found */
-       kfree_skb(skb);
+       dev_kfree_skb_any(skb);
 }
 
 /**
@@ -3644,7 +3653,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
        if (slave)
                bond_dev_queue_xmit(bond, skb, slave->dev);
        else
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
@@ -3676,8 +3685,8 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
                        struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 
                        if (!skb2) {
-                               pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
-                                      bond_dev->name);
+                               net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
+                                                   bond_dev->name, __func__);
                                continue;
                        }
                        /* bond_dev_queue_xmit always returns 0 */
@@ -3687,7 +3696,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
        if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
                bond_dev_queue_xmit(bond, skb, slave->dev);
        else
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
@@ -3774,7 +3783,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
                pr_err("%s: Error: Unknown bonding mode %d\n",
                       dev->name, bond->params.mode);
                WARN_ON_ONCE(1);
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 }
@@ -3788,14 +3797,14 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * If we risk deadlock from transmitting this in the
         * netpoll path, tell netpoll to queue the frame for later tx
         */
-       if (is_netpoll_tx_blocked(dev))
+       if (unlikely(is_netpoll_tx_blocked(dev)))
                return NETDEV_TX_BUSY;
 
        rcu_read_lock();
        if (bond_has_slaves(bond))
                ret = __bond_start_xmit(skb, dev);
        else
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
        rcu_read_unlock();
 
        return ret;
@@ -3958,7 +3967,7 @@ static void bond_uninit(struct net_device *bond_dev)
        /* Release the bonded slaves */
        bond_for_each_slave(bond, slave, iter)
                __bond_release_one(bond_dev, slave->dev, true);
-       pr_info("%s: released all slaves\n", bond_dev->name);
+       pr_info("%s: Released all slaves\n", bond_dev->name);
 
        list_del(&bond->bond_list);
 
@@ -3967,56 +3976,11 @@ static void bond_uninit(struct net_device *bond_dev)
 
 /*------------------------- Module initialization ---------------------------*/
 
-int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl)
-{
-       int i;
-
-       for (i = 0; tbl[i].modename; i++)
-               if (mode == tbl[i].mode)
-                       return tbl[i].mode;
-
-       return -1;
-}
-
-static int bond_parm_tbl_lookup_name(const char *modename,
-                                    const struct bond_parm_tbl *tbl)
-{
-       int i;
-
-       for (i = 0; tbl[i].modename; i++)
-               if (strcmp(modename, tbl[i].modename) == 0)
-                       return tbl[i].mode;
-
-       return -1;
-}
-
-/*
- * Convert string input module parms.  Accept either the
- * number of the mode or its string name.  A bit complicated because
- * some mode names are substrings of other names, and calls from sysfs
- * may have whitespace in the name (trailing newlines, for example).
- */
-int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
-{
-       int modeint;
-       char *p, modestr[BOND_MAX_MODENAME_LEN + 1];
-
-       for (p = (char *)buf; *p; p++)
-               if (!(isdigit(*p) || isspace(*p)))
-                       break;
-
-       if (*p && sscanf(buf, "%20s", modestr) != 0)
-               return bond_parm_tbl_lookup_name(modestr, tbl);
-       else if (sscanf(buf, "%d", &modeint) != 0)
-               return bond_parm_tbl_lookup(modeint, tbl);
-
-       return -1;
-}
-
 static int bond_check_params(struct bond_params *params)
 {
        int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
-       struct bond_opt_value newval, *valptr;
+       struct bond_opt_value newval;
+       const struct bond_opt_value *valptr;
        int arp_all_targets_value;
 
        /*
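
The removed bond_parse_parm()/bond_parm_tbl_lookup() helpers accepted either a mode name or its number and tolerated the whitespace that sysfs writes carry; that job now falls to the generic option API (bond_opt_initstr()/bond_opt_parse(), used in the hunks that follow). For reference, a stand-alone user-space sketch of the removed behaviour, with an abridged, hypothetical mode table:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

struct parm { const char *name; int mode; };

static const struct parm tbl[] = {
	{ "balance-rr",    0 },
	{ "active-backup", 1 },
	{ "balance-xor",   2 },
	{ NULL,           -1 },
};

/* accept "active-backup", "active-backup\n", "1", "1\n", ... */
static int parse_parm(const char *buf)
{
	char name[21];
	const char *p;
	int i, v;

	for (p = buf; *p; p++)   /* does the input contain anything non-numeric? */
		if (!isdigit((unsigned char)*p) && !isspace((unsigned char)*p))
			break;

	if (*p && sscanf(buf, "%20s", name) == 1) {
		for (i = 0; tbl[i].name; i++)
			if (!strcmp(name, tbl[i].name))
				return tbl[i].mode;
	} else if (sscanf(buf, "%d", &v) == 1) {
		for (i = 0; tbl[i].name; i++)
			if (v == tbl[i].mode)
				return tbl[i].mode;
	}
	return -1;
}

int main(void)
{
	printf("%d %d %d\n", parse_parm("active-backup\n"),
	       parse_parm("2\n"), parse_parm("bogus"));
	return 0;
}
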
@@ -4036,7 +4000,7 @@ static int bond_check_params(struct bond_params *params)
                if ((bond_mode != BOND_MODE_XOR) &&
                    (bond_mode != BOND_MODE_8023AD)) {
                        pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
-                              bond_mode_name(bond_mode));
+                               bond_mode_name(bond_mode));
                } else {
                        bond_opt_initstr(&newval, xmit_hash_policy);
                        valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
@@ -4077,74 +4041,71 @@ static int bond_check_params(struct bond_params *params)
                }
                params->ad_select = valptr->value;
                if (bond_mode != BOND_MODE_8023AD)
-                       pr_warning("ad_select param only affects 802.3ad mode\n");
+                       pr_warn("ad_select param only affects 802.3ad mode\n");
        } else {
                params->ad_select = BOND_AD_STABLE;
        }
 
        if (max_bonds < 0) {
-               pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
-                          max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+               pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+                       max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
                max_bonds = BOND_DEFAULT_MAX_BONDS;
        }
 
        if (miimon < 0) {
-               pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
-                          miimon, INT_MAX);
+               pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+                       miimon, INT_MAX);
                miimon = 0;
        }
 
        if (updelay < 0) {
-               pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
-                          updelay, INT_MAX);
+               pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+                       updelay, INT_MAX);
                updelay = 0;
        }
 
        if (downdelay < 0) {
-               pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
-                          downdelay, INT_MAX);
+               pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+                       downdelay, INT_MAX);
                downdelay = 0;
        }
 
        if ((use_carrier != 0) && (use_carrier != 1)) {
-               pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
-                          use_carrier);
+               pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+                       use_carrier);
                use_carrier = 1;
        }
 
        if (num_peer_notif < 0 || num_peer_notif > 255) {
-               pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
-                          num_peer_notif);
+               pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
+                       num_peer_notif);
                num_peer_notif = 1;
        }
 
        /* reset values for 802.3ad/TLB/ALB */
        if (BOND_NO_USES_ARP(bond_mode)) {
                if (!miimon) {
-                       pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
-                       pr_warning("Forcing miimon to 100msec\n");
+                       pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
+                       pr_warn("Forcing miimon to 100msec\n");
                        miimon = BOND_DEFAULT_MIIMON;
                }
        }
 
        if (tx_queues < 1 || tx_queues > 255) {
-               pr_warning("Warning: tx_queues (%d) should be between "
-                          "1 and 255, resetting to %d\n",
-                          tx_queues, BOND_DEFAULT_TX_QUEUES);
+               pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
+                       tx_queues, BOND_DEFAULT_TX_QUEUES);
                tx_queues = BOND_DEFAULT_TX_QUEUES;
        }
 
        if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
-               pr_warning("Warning: all_slaves_active module parameter (%d), "
-                          "not of valid value (0/1), so it was set to "
-                          "0\n", all_slaves_active);
+               pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
+                       all_slaves_active);
                all_slaves_active = 0;
        }
 
        if (resend_igmp < 0 || resend_igmp > 255) {
-               pr_warning("Warning: resend_igmp (%d) should be between "
-                          "0 and 255, resetting to %d\n",
-                          resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+               pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
+                       resend_igmp, BOND_DEFAULT_RESEND_IGMP);
                resend_igmp = BOND_DEFAULT_RESEND_IGMP;
        }
 
@@ -4165,37 +4126,36 @@ static int bond_check_params(struct bond_params *params)
                        /* just warn the user the up/down delay will have
                         * no effect since miimon is zero...
                         */
-                       pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
-                                  updelay, downdelay);
+                       pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+                               updelay, downdelay);
                }
        } else {
                /* don't allow arp monitoring */
                if (arp_interval) {
-                       pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
-                                  miimon, arp_interval);
+                       pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+                               miimon, arp_interval);
                        arp_interval = 0;
                }
 
                if ((updelay % miimon) != 0) {
-                       pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
-                                  updelay, miimon,
-                                  (updelay / miimon) * miimon);
+                       pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+                               updelay, miimon, (updelay / miimon) * miimon);
                }
 
                updelay /= miimon;
 
                if ((downdelay % miimon) != 0) {
-                       pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
-                                  downdelay, miimon,
-                                  (downdelay / miimon) * miimon);
+                       pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+                               downdelay, miimon,
+                               (downdelay / miimon) * miimon);
                }
 
                downdelay /= miimon;
        }
 
        if (arp_interval < 0) {
-               pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
-                          arp_interval, INT_MAX);
+               pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+                       arp_interval, INT_MAX);
                arp_interval = 0;
        }
 
@@ -4206,30 +4166,26 @@ static int bond_check_params(struct bond_params *params)
                __be32 ip;
                if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
                    IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
-                       pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
-                                  arp_ip_target[i]);
+                       pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+                               arp_ip_target[i]);
                        arp_interval = 0;
                } else {
                        if (bond_get_targets_ip(arp_target, ip) == -1)
                                arp_target[arp_ip_count++] = ip;
                        else
-                               pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
-                                          &ip);
+                               pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
+                                       &ip);
                }
        }
 
        if (arp_interval && !arp_ip_count) {
                /* don't allow arping if no arp_ip_target given... */
-               pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
-                          arp_interval);
+               pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+                       arp_interval);
                arp_interval = 0;
        }
 
        if (arp_validate) {
-               if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
-                       pr_err("arp_validate only supported in active-backup mode\n");
-                       return -EINVAL;
-               }
                if (!arp_interval) {
                        pr_err("arp_validate requires arp_interval\n");
                        return -EINVAL;
@@ -4271,23 +4227,23 @@ static int bond_check_params(struct bond_params *params)
                        arp_interval, valptr->string, arp_ip_count);
 
                for (i = 0; i < arp_ip_count; i++)
-                       pr_info(" %s", arp_ip_target[i]);
+                       pr_cont(" %s", arp_ip_target[i]);
 
-               pr_info("\n");
+               pr_cont("\n");
 
        } else if (max_bonds) {
                /* miimon and arp_interval not set, we need one so things
                 * work as expected, see bonding.txt for details
                 */
-               pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+               pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
        }
 
        if (primary && !USES_PRIMARY(bond_mode)) {
                /* currently, using a primary only makes sense
                 * in active backup, TLB or ALB modes
                 */
-               pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
-                          primary, bond_mode_name(bond_mode));
+               pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
+                       primary, bond_mode_name(bond_mode));
                primary = NULL;
        }
 
@@ -4316,14 +4272,14 @@ static int bond_check_params(struct bond_params *params)
                }
                fail_over_mac_value = valptr->value;
                if (bond_mode != BOND_MODE_ACTIVEBACKUP)
-                       pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
+                       pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
        } else {
                fail_over_mac_value = BOND_FOM_NONE;
        }
 
        if (lp_interval == 0) {
-               pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
-                          INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+               pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
+                       INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
                lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
        }
 
index 70651f8e8e3b9791c5feb285483e983fd52ffd75..f847e165d252fb2a4528fe396b2c4f0553e81ac3 100644 (file)
@@ -181,7 +181,7 @@ static int bond_changelink(struct net_device *bond_dev,
                int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
 
                if (arp_interval && miimon) {
-                       pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+                       pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
                               bond->dev->name);
                        return -EINVAL;
                }
@@ -199,7 +199,7 @@ static int bond_changelink(struct net_device *bond_dev,
                nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
                        __be32 target = nla_get_be32(attr);
 
-                       bond_opt_initval(&newval, target);
+                       bond_opt_initval(&newval, (__force u64)target);
                        err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
                                             &newval);
                        if (err)
@@ -207,7 +207,7 @@ static int bond_changelink(struct net_device *bond_dev,
                        i++;
                }
                if (i == 0 && bond->params.arp_interval)
-                       pr_warn("%s: removing last arp target with arp_interval on\n",
+                       pr_warn("%s: Removing last arp target with arp_interval on\n",
                                bond->dev->name);
                if (err)
                        return err;
@@ -216,7 +216,7 @@ static int bond_changelink(struct net_device *bond_dev,
                int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
 
                if (arp_validate && miimon) {
-                       pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+                       pr_err("%s: ARP validating cannot be used with MII monitoring\n",
                               bond->dev->name);
                        return -EINVAL;
                }
index 298c26509095cdd6a6306aa954fe79b1ba46622b..724e30fa20b9fa70166b5d9b25ed9029fab6db73 100644 (file)
 #include <linux/inet.h>
 #include "bonding.h"
 
-static struct bond_opt_value bond_mode_tbl[] = {
+static int bond_option_active_slave_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval);
+static int bond_option_miimon_set(struct bonding *bond,
+                                 const struct bond_opt_value *newval);
+static int bond_option_updelay_set(struct bonding *bond,
+                                  const struct bond_opt_value *newval);
+static int bond_option_downdelay_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval);
+static int bond_option_use_carrier_set(struct bonding *bond,
+                                      const struct bond_opt_value *newval);
+static int bond_option_arp_interval_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval);
+static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
+static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
+static int bond_option_arp_ip_targets_set(struct bonding *bond,
+                                         const struct bond_opt_value *newval);
+static int bond_option_arp_validate_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval);
+static int bond_option_arp_all_targets_set(struct bonding *bond,
+                                          const struct bond_opt_value *newval);
+static int bond_option_primary_set(struct bonding *bond,
+                                  const struct bond_opt_value *newval);
+static int bond_option_primary_reselect_set(struct bonding *bond,
+                                           const struct bond_opt_value *newval);
+static int bond_option_fail_over_mac_set(struct bonding *bond,
+                                        const struct bond_opt_value *newval);
+static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+                                           const struct bond_opt_value *newval);
+static int bond_option_resend_igmp_set(struct bonding *bond,
+                                      const struct bond_opt_value *newval);
+static int bond_option_num_peer_notif_set(struct bonding *bond,
+                                         const struct bond_opt_value *newval);
+static int bond_option_all_slaves_active_set(struct bonding *bond,
+                                            const struct bond_opt_value *newval);
+static int bond_option_min_links_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval);
+static int bond_option_lp_interval_set(struct bonding *bond,
+                                      const struct bond_opt_value *newval);
+static int bond_option_pps_set(struct bonding *bond,
+                              const struct bond_opt_value *newval);
+static int bond_option_lacp_rate_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval);
+static int bond_option_ad_select_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval);
+static int bond_option_queue_id_set(struct bonding *bond,
+                                   const struct bond_opt_value *newval);
+static int bond_option_mode_set(struct bonding *bond,
+                               const struct bond_opt_value *newval);
+static int bond_option_slaves_set(struct bonding *bond,
+                                 const struct bond_opt_value *newval);
+
+
+static const struct bond_opt_value bond_mode_tbl[] = {
        { "balance-rr",    BOND_MODE_ROUNDROBIN,   BOND_VALFLAG_DEFAULT},
        { "active-backup", BOND_MODE_ACTIVEBACKUP, 0},
        { "balance-xor",   BOND_MODE_XOR,          0},
@@ -31,13 +83,13 @@ static struct bond_opt_value bond_mode_tbl[] = {
        { NULL,            -1,                     0},
 };
 
-static struct bond_opt_value bond_pps_tbl[] = {
+static const struct bond_opt_value bond_pps_tbl[] = {
        { "default", 1,         BOND_VALFLAG_DEFAULT},
        { "maxval",  USHRT_MAX, BOND_VALFLAG_MAX},
        { NULL,      -1,        0},
 };
 
-static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
+static const struct bond_opt_value bond_xmit_hashtype_tbl[] = {
        { "layer2",   BOND_XMIT_POLICY_LAYER2, BOND_VALFLAG_DEFAULT},
        { "layer3+4", BOND_XMIT_POLICY_LAYER34, 0},
        { "layer2+3", BOND_XMIT_POLICY_LAYER23, 0},
@@ -46,85 +98,88 @@ static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
        { NULL,       -1,                       0},
 };
 
-static struct bond_opt_value bond_arp_validate_tbl[] = {
-       { "none",   BOND_ARP_VALIDATE_NONE,   BOND_VALFLAG_DEFAULT},
-       { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
-       { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
-       { "all",    BOND_ARP_VALIDATE_ALL,    0},
-       { NULL,     -1,                       0},
+static const struct bond_opt_value bond_arp_validate_tbl[] = {
+       { "none",               BOND_ARP_VALIDATE_NONE,         BOND_VALFLAG_DEFAULT},
+       { "active",             BOND_ARP_VALIDATE_ACTIVE,       0},
+       { "backup",             BOND_ARP_VALIDATE_BACKUP,       0},
+       { "all",                BOND_ARP_VALIDATE_ALL,          0},
+       { "filter",             BOND_ARP_FILTER,                0},
+       { "filter_active",      BOND_ARP_FILTER_ACTIVE,         0},
+       { "filter_backup",      BOND_ARP_FILTER_BACKUP,         0},
+       { NULL,                 -1,                             0},
 };
 
-static struct bond_opt_value bond_arp_all_targets_tbl[] = {
+static const struct bond_opt_value bond_arp_all_targets_tbl[] = {
        { "any", BOND_ARP_TARGETS_ANY, BOND_VALFLAG_DEFAULT},
        { "all", BOND_ARP_TARGETS_ALL, 0},
        { NULL,  -1,                   0},
 };
 
-static struct bond_opt_value bond_fail_over_mac_tbl[] = {
+static const struct bond_opt_value bond_fail_over_mac_tbl[] = {
        { "none",   BOND_FOM_NONE,   BOND_VALFLAG_DEFAULT},
        { "active", BOND_FOM_ACTIVE, 0},
        { "follow", BOND_FOM_FOLLOW, 0},
        { NULL,     -1,              0},
 };
 
-static struct bond_opt_value bond_intmax_tbl[] = {
+static const struct bond_opt_value bond_intmax_tbl[] = {
        { "off",     0,       BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
 };
 
-static struct bond_opt_value bond_lacp_rate_tbl[] = {
+static const struct bond_opt_value bond_lacp_rate_tbl[] = {
        { "slow", AD_LACP_SLOW, 0},
        { "fast", AD_LACP_FAST, 0},
        { NULL,   -1,           0},
 };
 
-static struct bond_opt_value bond_ad_select_tbl[] = {
+static const struct bond_opt_value bond_ad_select_tbl[] = {
        { "stable",    BOND_AD_STABLE,    BOND_VALFLAG_DEFAULT},
        { "bandwidth", BOND_AD_BANDWIDTH, 0},
        { "count",     BOND_AD_COUNT,     0},
        { NULL,        -1,                0},
 };
 
-static struct bond_opt_value bond_num_peer_notif_tbl[] = {
+static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
        { "off",     0,   0},
        { "maxval",  255, BOND_VALFLAG_MAX},
        { "default", 1,   BOND_VALFLAG_DEFAULT},
        { NULL,      -1,  0}
 };
 
-static struct bond_opt_value bond_primary_reselect_tbl[] = {
+static const struct bond_opt_value bond_primary_reselect_tbl[] = {
        { "always",  BOND_PRI_RESELECT_ALWAYS,  BOND_VALFLAG_DEFAULT},
        { "better",  BOND_PRI_RESELECT_BETTER,  0},
        { "failure", BOND_PRI_RESELECT_FAILURE, 0},
        { NULL,      -1},
 };
 
-static struct bond_opt_value bond_use_carrier_tbl[] = {
+static const struct bond_opt_value bond_use_carrier_tbl[] = {
        { "off", 0,  0},
        { "on",  1,  BOND_VALFLAG_DEFAULT},
        { NULL,  -1, 0}
 };
 
-static struct bond_opt_value bond_all_slaves_active_tbl[] = {
+static const struct bond_opt_value bond_all_slaves_active_tbl[] = {
        { "off", 0,  BOND_VALFLAG_DEFAULT},
        { "on",  1,  0},
        { NULL,  -1, 0}
 };
 
-static struct bond_opt_value bond_resend_igmp_tbl[] = {
+static const struct bond_opt_value bond_resend_igmp_tbl[] = {
        { "off",     0,   0},
        { "maxval",  255, BOND_VALFLAG_MAX},
        { "default", 1,   BOND_VALFLAG_DEFAULT},
        { NULL,      -1,  0}
 };
 
-static struct bond_opt_value bond_lp_interval_tbl[] = {
+static const struct bond_opt_value bond_lp_interval_tbl[] = {
        { "minval",  1,       BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
        { "maxval",  INT_MAX, BOND_VALFLAG_MAX},
        { NULL,      -1,      0},
 };
 
-static struct bond_option bond_opts[] = {
+static const struct bond_option bond_opts[] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
                .name = "mode",
@@ -152,7 +207,8 @@ static struct bond_option bond_opts[] = {
                .id = BOND_OPT_ARP_VALIDATE,
                .name = "arp_validate",
                .desc = "validate src/dst of ARP probes",
-               .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+               .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+                              BIT(BOND_MODE_ALB),
                .values = bond_arp_validate_tbl,
                .set = bond_option_arp_validate_set
        },
@@ -312,9 +368,9 @@ static struct bond_option bond_opts[] = {
 };
 
 /* Searches for a value in opt's values[] table */
-struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
 {
-       struct bond_option *opt;
+       const struct bond_option *opt;
        int i;
 
        opt = bond_opt_get(option);
@@ -328,7 +384,7 @@ struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
 }
 
 /* Searches for a value in opt's values[] table which matches the flagmask */
-static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
+static const struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
                                                 u32 flagmask)
 {
        int i;
@@ -345,7 +401,7 @@ static struct bond_opt_value *bond_opt_get_flags(const struct bond_option *opt,
  */
 static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
 {
-       struct bond_opt_value *minval, *maxval;
+       const struct bond_opt_value *minval, *maxval;
 
        minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
        maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
@@ -365,11 +421,12 @@ static bool bond_opt_check_range(const struct bond_option *opt, u64 val)
  * or the struct_opt_value that matched. It also strips the new line from
  * @val->string if it's present.
  */
-struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
-                                     struct bond_opt_value *val)
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+                                           struct bond_opt_value *val)
 {
        char *p, valstr[BOND_OPT_MAX_NAMELEN + 1] = { 0, };
-       struct bond_opt_value *tbl, *ret = NULL;
+       const struct bond_opt_value *tbl;
+       const struct bond_opt_value *ret = NULL;
        bool checkval;
        int i, rv;
 
@@ -448,7 +505,7 @@ static int bond_opt_check_deps(struct bonding *bond,
 static void bond_opt_dep_print(struct bonding *bond,
                               const struct bond_option *opt)
 {
-       struct bond_opt_value *modeval;
+       const struct bond_opt_value *modeval;
        struct bond_params *params;
 
        params = &bond->params;
@@ -461,9 +518,9 @@ static void bond_opt_dep_print(struct bonding *bond,
 
 static void bond_opt_error_interpret(struct bonding *bond,
                                     const struct bond_option *opt,
-                                    int error, struct bond_opt_value *val)
+                                    int error, const struct bond_opt_value *val)
 {
-       struct bond_opt_value *minval, *maxval;
+       const struct bond_opt_value *minval, *maxval;
        char *p;
 
        switch (error) {
@@ -474,10 +531,10 @@ static void bond_opt_error_interpret(struct bonding *bond,
                                p = strchr(val->string, '\n');
                                if (p)
                                        *p = '\0';
-                               pr_err("%s: option %s: invalid value (%s).\n",
+                               pr_err("%s: option %s: invalid value (%s)\n",
                                       bond->dev->name, opt->name, val->string);
                        } else {
-                               pr_err("%s: option %s: invalid value (%llu).\n",
+                               pr_err("%s: option %s: invalid value (%llu)\n",
                                       bond->dev->name, opt->name, val->value);
                        }
                }
@@ -485,7 +542,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
                maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
                if (!maxval)
                        break;
-               pr_err("%s: option %s: allowed values %llu - %llu.\n",
+               pr_err("%s: option %s: allowed values %llu - %llu\n",
                       bond->dev->name, opt->name, minval ? minval->value : 0,
                       maxval->value);
                break;
@@ -493,11 +550,11 @@ static void bond_opt_error_interpret(struct bonding *bond,
                bond_opt_dep_print(bond, opt);
                break;
        case -ENOTEMPTY:
-               pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+               pr_err("%s: option %s: unable to set because the bond device has slaves\n",
                       bond->dev->name, opt->name);
                break;
        case -EBUSY:
-               pr_err("%s: option %s: unable to set because the bond device is up.\n",
+               pr_err("%s: option %s: unable to set because the bond device is up\n",
                       bond->dev->name, opt->name);
                break;
        default:
@@ -518,7 +575,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
 int __bond_opt_set(struct bonding *bond,
                   unsigned int option, struct bond_opt_value *val)
 {
-       struct bond_opt_value *retval = NULL;
+       const struct bond_opt_value *retval = NULL;
        const struct bond_option *opt;
        int ret = -ENOENT;
 
@@ -573,7 +630,7 @@ int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf)
  * This function checks if option is valid and if so returns a pointer
  * to its entry in the bond_opts[] option array.
  */
-struct bond_option *bond_opt_get(unsigned int option)
+const struct bond_option *bond_opt_get(unsigned int option)
 {
        if (!BOND_OPT_VALID(option))
                return NULL;
@@ -581,7 +638,7 @@ struct bond_option *bond_opt_get(unsigned int option)
        return &bond_opts[option];
 }
 
-int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
+int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
 {
        if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) {
                pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
@@ -590,7 +647,7 @@ int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
                bond->params.arp_interval = 0;
                /* set miimon to default value */
                bond->params.miimon = BOND_DEFAULT_MIIMON;
-               pr_info("%s: Setting MII monitoring interval to %d.\n",
+               pr_info("%s: Setting MII monitoring interval to %d\n",
                        bond->dev->name, bond->params.miimon);
        }
 
@@ -619,8 +676,8 @@ struct net_device *bond_option_active_slave_get(struct bonding *bond)
        return __bond_option_active_slave_get(bond, bond->curr_active_slave);
 }
 
-int bond_option_active_slave_set(struct bonding *bond,
-                                struct bond_opt_value *newval)
+static int bond_option_active_slave_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval)
 {
        char ifname[IFNAMSIZ] = { 0, };
        struct net_device *slave_dev;
@@ -637,13 +694,13 @@ int bond_option_active_slave_set(struct bonding *bond,
 
        if (slave_dev) {
                if (!netif_is_bond_slave(slave_dev)) {
-                       pr_err("Device %s is not bonding slave.\n",
+                       pr_err("Device %s is not bonding slave\n",
                               slave_dev->name);
                        return -EINVAL;
                }
 
                if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
-                       pr_err("%s: Device %s is not our slave.\n",
+                       pr_err("%s: Device %s is not our slave\n",
                               bond->dev->name, slave_dev->name);
                        return -EINVAL;
                }
@@ -654,9 +711,8 @@ int bond_option_active_slave_set(struct bonding *bond,
 
        /* check to see if we are clearing active */
        if (!slave_dev) {
-               pr_info("%s: Clearing current active slave.\n",
-               bond->dev->name);
-               rcu_assign_pointer(bond->curr_active_slave, NULL);
+               pr_info("%s: Clearing current active slave\n", bond->dev->name);
+               RCU_INIT_POINTER(bond->curr_active_slave, NULL);
                bond_select_active_slave(bond);
        } else {
                struct slave *old_active = bond->curr_active_slave;
@@ -666,16 +722,16 @@ int bond_option_active_slave_set(struct bonding *bond,
 
                if (new_active == old_active) {
                        /* do nothing */
-                       pr_info("%s: %s is already the current active slave.\n",
+                       pr_info("%s: %s is already the current active slave\n",
                                bond->dev->name, new_active->dev->name);
                } else {
                        if (old_active && (new_active->link == BOND_LINK_UP) &&
                            IS_UP(new_active->dev)) {
-                               pr_info("%s: Setting %s as active slave.\n",
+                               pr_info("%s: Setting %s as active slave\n",
                                        bond->dev->name, new_active->dev->name);
                                bond_change_active_slave(bond, new_active);
                        } else {
-                               pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+                               pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
                                       bond->dev->name, new_active->dev->name,
                                       new_active->dev->name);
                                ret = -EINVAL;
@@ -689,21 +745,22 @@ int bond_option_active_slave_set(struct bonding *bond,
        return ret;
 }
 
-int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_miimon_set(struct bonding *bond,
+                                 const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting MII monitoring interval to %llu.\n",
+       pr_info("%s: Setting MII monitoring interval to %llu\n",
                bond->dev->name, newval->value);
        bond->params.miimon = newval->value;
        if (bond->params.updelay)
-               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
                        bond->dev->name,
                        bond->params.updelay * bond->params.miimon);
        if (bond->params.downdelay)
-               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
                        bond->dev->name,
                        bond->params.downdelay * bond->params.miimon);
        if (newval->value && bond->params.arp_interval) {
-               pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+               pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
                        bond->dev->name);
                bond->params.arp_interval = 0;
                if (bond->params.arp_validate)
@@ -726,7 +783,8 @@ int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
        return 0;
 }
 
-int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_updelay_set(struct bonding *bond,
+                                  const struct bond_opt_value *newval)
 {
        int value = newval->value;
 
@@ -743,15 +801,14 @@ int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
                        bond->params.miimon);
        }
        bond->params.updelay = value / bond->params.miimon;
-       pr_info("%s: Setting up delay to %d.\n",
-               bond->dev->name,
-               bond->params.updelay * bond->params.miimon);
+       pr_info("%s: Setting up delay to %d\n",
+               bond->dev->name, bond->params.updelay * bond->params.miimon);
 
        return 0;
 }
 
-int bond_option_downdelay_set(struct bonding *bond,
-                             struct bond_opt_value *newval)
+static int bond_option_downdelay_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval)
 {
        int value = newval->value;
 
@@ -768,37 +825,36 @@ int bond_option_downdelay_set(struct bonding *bond,
                        bond->params.miimon);
        }
        bond->params.downdelay = value / bond->params.miimon;
-       pr_info("%s: Setting down delay to %d.\n",
-               bond->dev->name,
-               bond->params.downdelay * bond->params.miimon);
+       pr_info("%s: Setting down delay to %d\n",
+               bond->dev->name, bond->params.downdelay * bond->params.miimon);
 
        return 0;
 }
 
-int bond_option_use_carrier_set(struct bonding *bond,
-                               struct bond_opt_value *newval)
+static int bond_option_use_carrier_set(struct bonding *bond,
+                                      const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting use_carrier to %llu.\n",
+       pr_info("%s: Setting use_carrier to %llu\n",
                bond->dev->name, newval->value);
        bond->params.use_carrier = newval->value;
 
        return 0;
 }
 
-int bond_option_arp_interval_set(struct bonding *bond,
-                                struct bond_opt_value *newval)
+static int bond_option_arp_interval_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+       pr_info("%s: Setting ARP monitoring interval to %llu\n",
                bond->dev->name, newval->value);
        bond->params.arp_interval = newval->value;
        if (newval->value) {
                if (bond->params.miimon) {
-                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
                                bond->dev->name, bond->dev->name);
                        bond->params.miimon = 0;
                }
                if (!bond->params.arp_targets[0])
-                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
                                bond->dev->name);
        }
        if (bond->dev->flags & IFF_UP) {
@@ -813,8 +869,7 @@ int bond_option_arp_interval_set(struct bonding *bond,
                        cancel_delayed_work_sync(&bond->arp_work);
                } else {
                        /* arp_validate can be set only in active-backup mode */
-                       if (bond->params.arp_validate)
-                               bond->recv_probe = bond_arp_rcv;
+                       bond->recv_probe = bond_arp_rcv;
                        cancel_delayed_work_sync(&bond->mii_work);
                        queue_delayed_work(bond->wq, &bond->arp_work, 0);
                }
@@ -857,19 +912,18 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
 
        ind = bond_get_targets_ip(targets, 0); /* first free slot */
        if (ind == -1) {
-               pr_err("%s: ARP target table is full!\n",
-                      bond->dev->name);
+               pr_err("%s: ARP target table is full!\n", bond->dev->name);
                return -EINVAL;
        }
 
-       pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+       pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
 
        _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
 
        return 0;
 }
 
-int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
+static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
 {
        int ret;
 
@@ -881,7 +935,7 @@ int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
        return ret;
 }
 
-int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
+static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
 {
        __be32 *targets = bond->params.arp_targets;
        struct list_head *iter;
@@ -897,17 +951,16 @@ int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
 
        ind = bond_get_targets_ip(targets, target);
        if (ind == -1) {
-               pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+               pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
                       bond->dev->name, &target);
                return -EINVAL;
        }
 
        if (ind == 0 && !targets[1] && bond->params.arp_interval)
-               pr_warn("%s: removing last arp target with arp_interval on\n",
+               pr_warn("%s: Removing last arp target with arp_interval on\n",
                        bond->dev->name);
 
-       pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
-               &target);
+       pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
 
        /* not to race with bond_arp_rcv */
        write_lock_bh(&bond->lock);
@@ -938,8 +991,8 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond)
        write_unlock_bh(&bond->lock);
 }
 
-int bond_option_arp_ip_targets_set(struct bonding *bond,
-                                  struct bond_opt_value *newval)
+static int bond_option_arp_ip_targets_set(struct bonding *bond,
+                                         const struct bond_opt_value *newval)
 {
        int ret = -EPERM;
        __be32 target;
@@ -955,7 +1008,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
                else if (newval->string[0] == '-')
                        ret = bond_option_arp_ip_target_rem(bond, target);
                else
-                       pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+                       pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
                               bond->dev->name);
        } else {
                target = newval->value;
@@ -965,10 +1018,10 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
        return ret;
 }
 
-int bond_option_arp_validate_set(struct bonding *bond,
-                                struct bond_opt_value *newval)
+static int bond_option_arp_validate_set(struct bonding *bond,
+                                       const struct bond_opt_value *newval)
 {
-       pr_info("%s: setting arp_validate to %s (%llu).\n",
+       pr_info("%s: Setting arp_validate to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
 
        if (bond->dev->flags & IFF_UP) {
@@ -982,17 +1035,18 @@ int bond_option_arp_validate_set(struct bonding *bond,
        return 0;
 }
 
-int bond_option_arp_all_targets_set(struct bonding *bond,
-                                   struct bond_opt_value *newval)
+static int bond_option_arp_all_targets_set(struct bonding *bond,
+                                          const struct bond_opt_value *newval)
 {
-       pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+       pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
        bond->params.arp_all_targets = newval->value;
 
        return 0;
 }
 
-int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_primary_set(struct bonding *bond,
+                                  const struct bond_opt_value *newval)
 {
        char *p, *primary = newval->string;
        struct list_head *iter;
@@ -1007,8 +1061,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
                *p = '\0';
        /* check to see if we are clearing primary */
        if (!strlen(primary)) {
-               pr_info("%s: Setting primary slave to None.\n",
-                       bond->dev->name);
+               pr_info("%s: Setting primary slave to None\n", bond->dev->name);
                bond->primary_slave = NULL;
                memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
@@ -1017,7 +1070,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
 
        bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
-                       pr_info("%s: Setting %s as primary slave.\n",
+                       pr_info("%s: Setting %s as primary slave\n",
                                bond->dev->name, slave->dev->name);
                        bond->primary_slave = slave;
                        strcpy(bond->params.primary, slave->dev->name);
@@ -1027,15 +1080,14 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
        }
 
        if (bond->primary_slave) {
-               pr_info("%s: Setting primary slave to None.\n",
-                       bond->dev->name);
+               pr_info("%s: Setting primary slave to None\n", bond->dev->name);
                bond->primary_slave = NULL;
                bond_select_active_slave(bond);
        }
        strncpy(bond->params.primary, primary, IFNAMSIZ);
        bond->params.primary[IFNAMSIZ - 1] = 0;
 
-       pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+       pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
                bond->dev->name, primary, bond->dev->name);
 
 out:
@@ -1046,10 +1098,10 @@ out:
        return 0;
 }
 
-int bond_option_primary_reselect_set(struct bonding *bond,
-                                    struct bond_opt_value *newval)
+static int bond_option_primary_reselect_set(struct bonding *bond,
+                                           const struct bond_opt_value *newval)
 {
-       pr_info("%s: setting primary_reselect to %s (%llu).\n",
+       pr_info("%s: Setting primary_reselect to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
        bond->params.primary_reselect = newval->value;
 
@@ -1062,46 +1114,46 @@ int bond_option_primary_reselect_set(struct bonding *bond,
        return 0;
 }
 
-int bond_option_fail_over_mac_set(struct bonding *bond,
-                                 struct bond_opt_value *newval)
+static int bond_option_fail_over_mac_set(struct bonding *bond,
+                                        const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+       pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
        bond->params.fail_over_mac = newval->value;
 
        return 0;
 }
 
-int bond_option_xmit_hash_policy_set(struct bonding *bond,
-                                    struct bond_opt_value *newval)
+static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+                                           const struct bond_opt_value *newval)
 {
-       pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+       pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
        bond->params.xmit_policy = newval->value;
 
        return 0;
 }
 
-int bond_option_resend_igmp_set(struct bonding *bond,
-                               struct bond_opt_value *newval)
+static int bond_option_resend_igmp_set(struct bonding *bond,
+                                      const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting resend_igmp to %llu.\n",
+       pr_info("%s: Setting resend_igmp to %llu\n",
                bond->dev->name, newval->value);
        bond->params.resend_igmp = newval->value;
 
        return 0;
 }
 
-int bond_option_num_peer_notif_set(struct bonding *bond,
-                                  struct bond_opt_value *newval)
+static int bond_option_num_peer_notif_set(struct bonding *bond,
+                                  const struct bond_opt_value *newval)
 {
        bond->params.num_peer_notif = newval->value;
 
        return 0;
 }
 
-int bond_option_all_slaves_active_set(struct bonding *bond,
-                                     struct bond_opt_value *newval)
+static int bond_option_all_slaves_active_set(struct bonding *bond,
+                                            const struct bond_opt_value *newval)
 {
        struct list_head *iter;
        struct slave *slave;
@@ -1121,8 +1173,8 @@ int bond_option_all_slaves_active_set(struct bonding *bond,
        return 0;
 }
 
-int bond_option_min_links_set(struct bonding *bond,
-                             struct bond_opt_value *newval)
+static int bond_option_min_links_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval)
 {
        pr_info("%s: Setting min links value to %llu\n",
                bond->dev->name, newval->value);
@@ -1131,15 +1183,16 @@ int bond_option_min_links_set(struct bonding *bond,
        return 0;
 }
 
-int bond_option_lp_interval_set(struct bonding *bond,
-                               struct bond_opt_value *newval)
+static int bond_option_lp_interval_set(struct bonding *bond,
+                                      const struct bond_opt_value *newval)
 {
        bond->params.lp_interval = newval->value;
 
        return 0;
 }
 
-int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_pps_set(struct bonding *bond,
+                              const struct bond_opt_value *newval)
 {
        bond->params.packets_per_slave = newval->value;
        if (newval->value > 0) {
@@ -1156,10 +1209,10 @@ int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
        return 0;
 }
 
-int bond_option_lacp_rate_set(struct bonding *bond,
-                             struct bond_opt_value *newval)
+static int bond_option_lacp_rate_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting LACP rate to %s (%llu).\n",
+       pr_info("%s: Setting LACP rate to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
        bond->params.lacp_fast = newval->value;
        bond_3ad_update_lacp_rate(bond);
@@ -1167,18 +1220,18 @@ int bond_option_lacp_rate_set(struct bonding *bond,
        return 0;
 }
 
-int bond_option_ad_select_set(struct bonding *bond,
-                             struct bond_opt_value *newval)
+static int bond_option_ad_select_set(struct bonding *bond,
+                                    const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting ad_select to %s (%llu).\n",
+       pr_info("%s: Setting ad_select to %s (%llu)\n",
                bond->dev->name, newval->string, newval->value);
        bond->params.ad_select = newval->value;
 
        return 0;
 }
 
-int bond_option_queue_id_set(struct bonding *bond,
-                            struct bond_opt_value *newval)
+static int bond_option_queue_id_set(struct bonding *bond,
+                                   const struct bond_opt_value *newval)
 {
        struct slave *slave, *update_slave;
        struct net_device *sdev;
@@ -1200,8 +1253,7 @@ int bond_option_queue_id_set(struct bonding *bond,
                goto err_no_cmd;
 
        /* Check buffer length, valid ifname and queue id */
-       if (strlen(newval->string) > IFNAMSIZ ||
-           !dev_valid_name(newval->string) ||
+       if (!dev_valid_name(newval->string) ||
            qid > bond->dev->real_num_tx_queues)
                goto err_no_cmd;
 
@@ -1233,14 +1285,14 @@ out:
        return ret;
 
 err_no_cmd:
-       pr_info("invalid input for queue_id set for %s.\n",
-               bond->dev->name);
+       pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
        ret = -EPERM;
        goto out;
 
 }
 
-int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
+static int bond_option_slaves_set(struct bonding *bond,
+                                 const struct bond_opt_value *newval)
 {
        char command[IFNAMSIZ + 1] = { 0, };
        struct net_device *dev;
@@ -1255,7 +1307,7 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
 
        dev = __dev_get_by_name(dev_net(bond->dev), ifname);
        if (!dev) {
-               pr_info("%s: Interface %s does not exist!\n",
+               pr_info("%s: interface %s does not exist!\n",
                        bond->dev->name, ifname);
                ret = -ENODEV;
                goto out;
@@ -1263,12 +1315,12 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
 
        switch (command[0]) {
        case '+':
-               pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+               pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
                ret = bond_enslave(bond->dev, dev);
                break;
 
        case '-':
-               pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+               pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
                ret = bond_release(bond->dev, dev);
                break;
 
@@ -1280,7 +1332,7 @@ out:
        return ret;
 
 err_no_cmd:
-       pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+       pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
               bond->dev->name);
        ret = -EPERM;
        goto out;
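
With the bonding option changes above, the value tables and all lookup results are const and the per-option setters are static, so runtime changes funnel through the generic option interface only. A minimal caller sketch, assuming the caller already holds RTNL and using only helpers visible in this diff (the example_* name is illustrative, not part of the patch):

    /* Switch a bond to active-backup through the generic option API.
     * bond_opt_initstr() fills a bond_opt_value from a string input and
     * __bond_opt_set() parses it against the (now const) mode table before
     * dispatching to the static setter.  It may fail with -ENOTEMPTY or
     * -EBUSY when the bond has slaves or is up, per the error handling above.
     */
    static int example_set_active_backup(struct bonding *bond)
    {
            struct bond_opt_value newval;

            bond_opt_initstr(&newval, "active-backup");

            return __bond_opt_set(bond, BOND_OPT_MODE, &newval);
    }
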
index 433d37f6940b997649147ba4d8c0f415bdba47c9..12be9e1bfb0c0d048229a1698c2b384847fbd794 100644 (file)
@@ -81,8 +81,8 @@ struct bonding;
 
 struct bond_option {
        int id;
-       char *name;
-       char *desc;
+       const char *name;
+       const char *desc;
        u32 flags;
 
        /* unsuppmodes is used to denote modes in which the option isn't
@@ -92,18 +92,19 @@ struct bond_option {
        /* supported values which this option can have, can be a subset of
         * BOND_OPTVAL_RANGE's value range
         */
-       struct bond_opt_value *values;
+       const struct bond_opt_value *values;
 
-       int (*set)(struct bonding *bond, struct bond_opt_value *val);
+       int (*set)(struct bonding *bond, const struct bond_opt_value *val);
 };
 
 int __bond_opt_set(struct bonding *bond, unsigned int option,
                   struct bond_opt_value *val);
 int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf);
-struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
-                                     struct bond_opt_value *val);
-struct bond_option *bond_opt_get(unsigned int option);
-struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
+
+const struct bond_opt_value *bond_opt_parse(const struct bond_option *opt,
+                                           struct bond_opt_value *val);
+const struct bond_option *bond_opt_get(unsigned int option);
+const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val);
 
 /* This helper is used to initialize a bond_opt_value structure for parameter
  * passing. There should be either a valid string or value, but not both.
@@ -122,49 +123,6 @@ static inline void __bond_opt_init(struct bond_opt_value *optval,
 #define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value)
 #define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX)
 
-int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_xmit_hash_policy_set(struct bonding *bond,
-                                    struct bond_opt_value *newval);
-int bond_option_arp_validate_set(struct bonding *bond,
-                                struct bond_opt_value *newval);
-int bond_option_arp_all_targets_set(struct bonding *bond,
-                                   struct bond_opt_value *newval);
-int bond_option_fail_over_mac_set(struct bonding *bond,
-                                 struct bond_opt_value *newval);
-int bond_option_arp_interval_set(struct bonding *bond,
-                                struct bond_opt_value *newval);
-int bond_option_arp_ip_targets_set(struct bonding *bond,
-                                  struct bond_opt_value *newval);
 void bond_option_arp_ip_targets_clear(struct bonding *bond);
-int bond_option_downdelay_set(struct bonding *bond,
-                             struct bond_opt_value *newval);
-int bond_option_updelay_set(struct bonding *bond,
-                           struct bond_opt_value *newval);
-int bond_option_lacp_rate_set(struct bonding *bond,
-                             struct bond_opt_value *newval);
-int bond_option_min_links_set(struct bonding *bond,
-                             struct bond_opt_value *newval);
-int bond_option_ad_select_set(struct bonding *bond,
-                             struct bond_opt_value *newval);
-int bond_option_num_peer_notif_set(struct bonding *bond,
-                                  struct bond_opt_value *newval);
-int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval);
-int bond_option_primary_set(struct bonding *bond,
-                           struct bond_opt_value *newval);
-int bond_option_primary_reselect_set(struct bonding *bond,
-                                    struct bond_opt_value *newval);
-int bond_option_use_carrier_set(struct bonding *bond,
-                               struct bond_opt_value *newval);
-int bond_option_active_slave_set(struct bonding *bond,
-                                struct bond_opt_value *newval);
-int bond_option_queue_id_set(struct bonding *bond,
-                            struct bond_opt_value *newval);
-int bond_option_all_slaves_active_set(struct bonding *bond,
-                                     struct bond_opt_value *newval);
-int bond_option_resend_igmp_set(struct bonding *bond,
-                               struct bond_opt_value *newval);
-int bond_option_lp_interval_set(struct bonding *bond,
-                               struct bond_opt_value *newval);
-int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval);
+
 #endif /* _BOND_OPTIONS_H */
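
In the other direction, reads use the const lookup helpers declared above. A short sketch of translating a stored numeric parameter back to its user-visible name (illustrative; the sysfs and procfs hunks further down do essentially the same thing):

    /* Map the numeric mode parameter back to its option-table string. */
    static const char *example_mode_string(const struct bonding *bond)
    {
            const struct bond_opt_value *val;

            val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);

            return val ? val->string : "unknown";
    }
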
index 3ac20e78eafc628bf87c852b9e2fd9c2b20bf992..013fdd0f45e94340917ee2aeecc8529d386862e2 100644 (file)
@@ -65,13 +65,11 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
 static void bond_info_show_master(struct seq_file *seq)
 {
        struct bonding *bond = seq->private;
-       struct bond_opt_value *optval;
+       const struct bond_opt_value *optval;
        struct slave *curr;
        int i;
 
-       read_lock(&bond->curr_slave_lock);
-       curr = bond->curr_active_slave;
-       read_unlock(&bond->curr_slave_lock);
+       curr = rcu_dereference(bond->curr_active_slave);
 
        seq_printf(seq, "Bonding Mode: %s",
                   bond_mode_name(bond->params.mode));
@@ -254,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
                                                    S_IRUGO, bn->proc_dir,
                                                    &bond_info_fops, bond);
                if (bond->proc_entry == NULL)
-                       pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
-                                  DRV_NAME, bond_dev->name);
+                       pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
+                               DRV_NAME, bond_dev->name);
                else
                        memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
        }
@@ -281,8 +279,8 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
        if (!bn->proc_dir) {
                bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
                if (!bn->proc_dir)
-                       pr_warning("Warning: cannot create /proc/net/%s\n",
-                                  DRV_NAME);
+                       pr_warn("Warning: Cannot create /proc/net/%s\n",
+                               DRV_NAME);
        }
 }
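
The procfs hunk above replaces the curr_slave_lock read lock with an RCU dereference of curr_active_slave; the seq_file start handler in that file is assumed to hold rcu_read_lock() already, which is why only the dereference appears. As a standalone pattern it would look roughly like this (names are illustrative):

    /* RCU read-side peek at the current active slave. */
    static bool example_is_current_active(struct bonding *bond,
                                          const struct net_device *dev)
    {
            struct slave *curr;
            bool match;

            rcu_read_lock();
            curr = rcu_dereference(bond->curr_active_slave);
            match = curr && curr->dev == dev;
            rcu_read_unlock();

            return match;
    }
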
 
index 643fcc110299b7b5c9f728703a849d8fd13d5ce8..0e8b268da0a08f58c4443c6c36aa62bb9ef0f071 100644 (file)
@@ -117,9 +117,9 @@ static ssize_t bonding_store_bonds(struct class *cls,
                rv = bond_create(bn->net, ifname);
                if (rv) {
                        if (rv == -EEXIST)
-                               pr_info("%s already exists.\n", ifname);
+                               pr_info("%s already exists\n", ifname);
                        else
-                               pr_info("%s creation failed.\n", ifname);
+                               pr_info("%s creation failed\n", ifname);
                        res = rv;
                }
        } else if (command[0] == '-') {
@@ -144,7 +144,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
        return res;
 
 err_no_cmd:
-       pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
+       pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
        return -EPERM;
 }
 
@@ -220,7 +220,7 @@ static ssize_t bonding_show_mode(struct device *d,
                                 struct device_attribute *attr, char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_MODE, bond->params.mode);
 
@@ -251,7 +251,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
                                      char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
 
@@ -282,7 +282,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
                                         char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
                               bond->params.arp_validate);
@@ -314,7 +314,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
                                         char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
                               bond->params.arp_all_targets);
@@ -348,7 +348,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
                                          char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
                               bond->params.fail_over_mac);
@@ -505,7 +505,7 @@ static ssize_t bonding_show_lacp(struct device *d,
                                 char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
 
@@ -558,7 +558,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
                                      char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
 
@@ -686,7 +686,7 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
                                             char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct bond_opt_value *val;
+       const struct bond_opt_value *val;
 
        val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
                               bond->params.primary_reselect);
@@ -1135,7 +1135,7 @@ int bond_create_sysfs(struct bond_net *bn)
                /* Is someone being kinky and naming a device bonding_master? */
                if (__dev_get_by_name(bn->net,
                                      class_attr_bonding_masters.attr.name))
-                       pr_err("network device named %s already exists in sysfs",
+                       pr_err("network device named %s already exists in sysfs\n",
                               class_attr_bonding_masters.attr.name);
                ret = 0;
        }
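
On the sysfs side the show handlers above now only hold const pointers into the option tables; store handlers are expected to hand the raw userspace string to bond_opt_tryset_rtnl(), which acquires RTNL itself. A hypothetical store handler sketch (the name and the (char *) cast are assumptions, not taken from this diff):

    static ssize_t example_store_mode(struct device *d,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
    {
            struct bonding *bond = to_bond(d);
            int ret;

            ret = bond_opt_tryset_rtnl(bond, BOND_OPT_MODE, (char *)buf);

            return ret ? ret : count;
    }
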
index 2b0fdec695f78fbf77d9cb7c9cd43cb5b7adb30b..b8bdd0acc8f334ac97bca2ddfea602f473c3272f 100644 (file)
@@ -188,8 +188,9 @@ struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
        int    delay;
-       unsigned long jiffies;
-       unsigned long last_arp_rx;
+       /* all three in jiffies */
+       unsigned long last_link_up;
+       unsigned long last_rx;
        unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
        s8     link;    /* one of BOND_LINK_XXXX */
        s8     new_link;
@@ -265,6 +266,11 @@ struct bonding {
 #define bond_slave_get_rtnl(dev) \
        ((struct slave *) rtnl_dereference(dev->rx_handler_data))
 
+struct bond_vlan_tag {
+       __be16          vlan_proto;
+       unsigned short  vlan_id;
+};
+
 /**
  * Returns NULL if the net_device does not belong to any of the bond's slaves
  *
@@ -292,7 +298,7 @@ static inline void bond_set_active_slave(struct slave *slave)
 {
        if (slave->backup) {
                slave->backup = 0;
-               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
        }
 }
 
@@ -300,7 +306,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
 {
        if (!slave->backup) {
                slave->backup = 1;
-               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
        }
 }
 
@@ -312,7 +318,7 @@ static inline void bond_set_slave_state(struct slave *slave,
 
        slave->backup = slave_state;
        if (notify) {
-               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+               rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
                slave->should_notify = 0;
        } else {
                if (slave->should_notify)
@@ -342,7 +348,7 @@ static inline void bond_slave_state_notify(struct bonding *bond)
 
        bond_for_each_slave(bond, tmp, iter) {
                if (tmp->should_notify) {
-                       rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+                       rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_ATOMIC);
                        tmp->should_notify = 0;
                }
        }
@@ -374,6 +380,11 @@ static inline bool bond_is_active_slave(struct slave *slave)
 #define BOND_ARP_VALIDATE_BACKUP       (1 << BOND_STATE_BACKUP)
 #define BOND_ARP_VALIDATE_ALL          (BOND_ARP_VALIDATE_ACTIVE | \
                                         BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER                        (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE         (BOND_ARP_VALIDATE_ACTIVE | \
+                                        BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP         (BOND_ARP_VALIDATE_BACKUP | \
+                                        BOND_ARP_FILTER)
 
 #define BOND_SLAVE_NOTIFY_NOW          true
 #define BOND_SLAVE_NOTIFY_LATER                false
@@ -384,6 +395,12 @@ static inline int slave_do_arp_validate(struct bonding *bond,
        return bond->params.arp_validate & (1 << bond_slave_state(slave));
 }
 
+static inline int slave_do_arp_validate_only(struct bonding *bond,
+                                            struct slave *slave)
+{
+       return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
 /* Get the oldest arp which we've received on this slave for bond's
  * arp_targets.
  */
@@ -403,14 +420,10 @@ static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
 static inline unsigned long slave_last_rx(struct bonding *bond,
                                        struct slave *slave)
 {
-       if (slave_do_arp_validate(bond, slave)) {
-               if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
-                       return slave_oldest_target_arp_rx(bond, slave);
-               else
-                       return slave->last_arp_rx;
-       }
+       if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+               return slave_oldest_target_arp_rx(bond, slave);
 
-       return slave->dev->last_rx;
+       return slave->last_rx;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -487,8 +500,6 @@ void bond_sysfs_slave_del(struct slave *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
-int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
-int bond_parm_tbl_lookup(int mode, const struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_create_debugfs(void);
@@ -501,8 +512,6 @@ void bond_setup(struct net_device *bond_dev);
 unsigned int bond_get_num_tx_queues(void);
 int bond_netlink_init(void);
 void bond_netlink_fini(void);
-int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
-int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
 struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
 struct net_device *bond_option_active_slave_get(struct bonding *bond);
 const char *bond_slave_link_status(s8 link);
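
The new BOND_ARP_FILTER* values above extend the existing arp_validate bitmask rather than replacing it: assuming BOND_STATE_ACTIVE is 0 and BOND_STATE_BACKUP is 1 (their usual uapi values), the settings decompose as active=0x1, backup=0x2, all=0x3, filter=0x4, filter_active=0x5 and filter_backup=0x6, so ARP filtering can be enabled with or without per-state validation. A one-line illustrative check:

    /* Illustrative only: true for "filter", "filter_active" and
     * "filter_backup"; the low bits still select per-state validation.
     */
    static inline bool example_arp_filter_enabled(const struct bonding *bond)
    {
            return bond->params.arp_validate & BOND_ARP_FILTER;
    }
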
index 88a6a5810ec6820a1e8f2b674021a4a142804885..fc73865bb83a705f9fa1d1f3d70e1c6191952b9b 100644 (file)
@@ -204,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
 
        skb->protocol = htons(ETH_P_CAIF);
        skb_reset_mac_header(skb);
-       skb->dev = ser->dev;
        debugfs_rx(ser, data, count);
        /* Push received packet up the stack. */
        ret = netif_rx_ni(skb);
index 155db68e13bae83ce006d7835bdbc93d38cdd878..ff54c0eb2052137d8f1561448f3c654f34d354df 100644 (file)
@@ -554,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
 
                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);
-               skb->dev = cfspi->ndev;
 
                /*
                 * Push received packet up the stack.
index 6efe27458116cf7f5af0c48f68f7cc8a68a55cdd..f07fa89b5fd5b294a5a234e949985080a0617a1d 100644 (file)
@@ -420,7 +420,11 @@ static void at91_chip_start(struct net_device *dev)
        at91_transceiver_switch(priv, 1);
 
        /* enable chip */
-       at91_write(priv, AT91_MR, AT91_MR_CANEN);
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+               reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
+       else
+               reg_mr = AT91_MR_CANEN;
+       at91_write(priv, AT91_MR, reg_mr);
 
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
@@ -1190,6 +1194,7 @@ static const struct net_device_ops at91_netdev_ops = {
        .ndo_open       = at91_open,
        .ndo_stop       = at91_close,
        .ndo_start_xmit = at91_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
@@ -1341,7 +1346,8 @@ static int at91_can_probe(struct platform_device *pdev)
        priv->can.bittiming_const = &at91_bittiming_const;
        priv->can.do_set_mode = at91_set_mode;
        priv->can.do_get_berr_counter = at91_get_berr_counter;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+               CAN_CTRLMODE_LISTENONLY;
        priv->dev = dev;
        priv->reg_base = addr;
        priv->devtype_data = *devtype_data;
index 8d2b89a12e09b5f19c35757fc7fddb75689395e5..543ecceb33e91341ca0a6651e92d3dcbce1d0978 100644 (file)
@@ -528,6 +528,7 @@ static const struct net_device_ops bfin_can_netdev_ops = {
        .ndo_open               = bfin_can_open,
        .ndo_stop               = bfin_can_close,
        .ndo_start_xmit         = bfin_can_start_xmit,
+       .ndo_change_mtu         = can_change_mtu,
 };
 
 static int bfin_can_probe(struct platform_device *pdev)
index 951bfede8f3d80b7026b2bfba219d5c3e092f210..9c32e9ef76942192fd94fca634a6565468458946 100644 (file)
@@ -1277,6 +1277,7 @@ static const struct net_device_ops c_can_netdev_ops = {
        .ndo_open = c_can_open,
        .ndo_stop = c_can_close,
        .ndo_start_xmit = c_can_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 int register_c_can_dev(struct net_device *dev)
index 0f12abf6591ce79bdc30b1f69844f7d811ac5d1e..d8379278d648a2160f63d386eb5056dc47d70b7a 100644 (file)
@@ -823,6 +823,7 @@ static const struct net_device_ops cc770_netdev_ops = {
        .ndo_open = cc770_open,
        .ndo_stop = cc770_close,
        .ndo_start_xmit = cc770_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 int register_cc770dev(struct net_device *dev)
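
The at91, bfin, c_can and cc770 hunks above all follow the same pattern: each driver points .ndo_change_mtu at the new shared helper, which only accepts CAN_MTU (or CANFD_MTU on CAN FD capable devices, where it toggles CAN_CTRLMODE_FD) and only while the interface is down; the helper itself, can_change_mtu(), is added in the generic CAN device code that follows. A minimal sketch for a hypothetical driver, with all foo_can_* names assumed:

    static const struct net_device_ops foo_can_netdev_ops = {
            .ndo_open       = foo_can_open,
            .ndo_stop       = foo_can_close,
            .ndo_start_xmit = foo_can_start_xmit,
            .ndo_change_mtu = can_change_mtu,   /* shared CAN/CAN FD helper */
    };

From userspace the switch is then just an MTU change, e.g. "ip link set can0 mtu 72" for CANFD_MTU while the link is down; the helper rejects anything other than CAN_MTU/CANFD_MTU.
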
index fc59bc6f040b623fcc57ceb4af0bccad3a38281e..c7a260478749ad163ec133df88e7a0086b220a73 100644 (file)
@@ -99,10 +99,10 @@ static int can_update_spt(const struct can_bittiming_const *btc,
        return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
 }
 
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+                             const struct can_bittiming_const *btc)
 {
        struct can_priv *priv = netdev_priv(dev);
-       const struct can_bittiming_const *btc = priv->bittiming_const;
        long rate, best_rate = 0;
        long best_error = 1000000000, error = 0;
        int best_tseg = 0, best_brp = 0, brp = 0;
@@ -110,9 +110,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
        int spt_error = 1000, spt = 0, sampl_pt;
        u64 v64;
 
-       if (!priv->bittiming_const)
-               return -ENOTSUPP;
-
        /* Use CIA recommended sample points */
        if (bt->sample_point) {
                sampl_pt = bt->sample_point;
@@ -204,7 +201,8 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
        return 0;
 }
 #else /* !CONFIG_CAN_CALC_BITTIMING */
-static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
+                             const struct can_bittiming_const *btc)
 {
        netdev_err(dev, "bit-timing calculation not available\n");
        return -EINVAL;
@@ -217,16 +215,13 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt)
  * prescaler value brp. You can find more information in the header
  * file linux/can/netlink.h.
  */
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
+                              const struct can_bittiming_const *btc)
 {
        struct can_priv *priv = netdev_priv(dev);
-       const struct can_bittiming_const *btc = priv->bittiming_const;
        int tseg1, alltseg;
        u64 brp64;
 
-       if (!priv->bittiming_const)
-               return -ENOTSUPP;
-
        tseg1 = bt->prop_seg + bt->phase_seg1;
        if (!bt->sjw)
                bt->sjw = 1;
@@ -254,26 +249,29 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt)
        return 0;
 }
 
-static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt)
+static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+                            const struct can_bittiming_const *btc)
 {
-       struct can_priv *priv = netdev_priv(dev);
        int err;
 
        /* Check if the CAN device has bit-timing parameters */
-       if (priv->bittiming_const) {
+       if (!btc)
+               return -ENOTSUPP;
 
-               /* Non-expert mode? Check if the bitrate has been pre-defined */
-               if (!bt->tq)
-                       /* Determine bit-timing parameters */
-                       err = can_calc_bittiming(dev, bt);
-               else
-                       /* Check bit-timing params and calculate proper brp */
-                       err = can_fixup_bittiming(dev, bt);
-               if (err)
-                       return err;
-       }
+       /*
+        * Depending on the given can_bittiming parameter structure the CAN
+        * timing parameters are calculated based on the provided bitrate OR
+        * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
+        * provided directly which are then checked and fixed up.
+        */
+       if (!bt->tq && bt->bitrate)
+               err = can_calc_bittiming(dev, bt, btc);
+       else if (bt->tq && !bt->bitrate)
+               err = can_fixup_bittiming(dev, bt, btc);
+       else
+               err = -EINVAL;
 
-       return 0;
+       return err;
 }
 
 /*
@@ -317,7 +315,9 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
        BUG_ON(idx >= priv->echo_skb_max);
 
        /* check flag whether this packet has to be looped back */
-       if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
+       if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
+           (skb->protocol != htons(ETH_P_CAN) &&
+            skb->protocol != htons(ETH_P_CANFD))) {
                kfree_skb(skb);
                return;
        }
@@ -329,7 +329,6 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
                        return;
 
                /* make settings for echo to reduce code in irq context */
-               skb->protocol = htons(ETH_P_CAN);
                skb->pkt_type = PACKET_BROADCAST;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->dev = dev;
@@ -512,6 +511,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
 }
 EXPORT_SYMBOL_GPL(alloc_can_skb);
 
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+                               struct canfd_frame **cfd)
+{
+       struct sk_buff *skb;
+
+       skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+                              sizeof(struct canfd_frame));
+       if (unlikely(!skb))
+               return NULL;
+
+       skb->protocol = htons(ETH_P_CANFD);
+       skb->pkt_type = PACKET_BROADCAST;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       can_skb_reserve(skb);
+       can_skb_prv(skb)->ifindex = dev->ifindex;
+
+       *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+       memset(*cfd, 0, sizeof(struct canfd_frame));
+
+       return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
 struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
 {
        struct sk_buff *skb;
@@ -571,6 +594,39 @@ void free_candev(struct net_device *dev)
 }
 EXPORT_SYMBOL_GPL(free_candev);
 
+/*
+ * changing MTU and control mode for CAN/CANFD devices
+ */
+int can_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct can_priv *priv = netdev_priv(dev);
+
+       /* Do not allow changing the MTU while running */
+       if (dev->flags & IFF_UP)
+               return -EBUSY;
+
+       /* allow change of MTU according to the CANFD ability of the device */
+       switch (new_mtu) {
+       case CAN_MTU:
+               priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+               break;
+
+       case CANFD_MTU:
+               if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
+                       return -EINVAL;
+
+               priv->ctrlmode |= CAN_CTRLMODE_FD;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       dev->mtu = new_mtu;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(can_change_mtu);
+
 /*
  * Common open function when the device gets opened.
  *
@@ -581,11 +637,19 @@ int open_candev(struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
 
-       if (!priv->bittiming.tq && !priv->bittiming.bitrate) {
+       if (!priv->bittiming.bitrate) {
                netdev_err(dev, "bit-timing not yet defined\n");
                return -EINVAL;
        }
 
+       /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
+       if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
+           (!priv->data_bittiming.bitrate ||
+            (priv->data_bittiming.bitrate < priv->bittiming.bitrate))) {
+               netdev_err(dev, "incorrect/missing data bit-timing\n");
+               return -EINVAL;
+       }
+
        /* Switch carrier on if device was stopped while in bus-off state */
        if (!netif_carrier_ok(dev))
                netif_carrier_on(dev);
@@ -624,6 +688,10 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
                                = { .len = sizeof(struct can_bittiming_const) },
        [IFLA_CAN_CLOCK]        = { .len = sizeof(struct can_clock) },
        [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
+       [IFLA_CAN_DATA_BITTIMING]
+                               = { .len = sizeof(struct can_bittiming) },
+       [IFLA_CAN_DATA_BITTIMING_CONST]
+                               = { .len = sizeof(struct can_bittiming_const) },
 };
 
 static int can_changelink(struct net_device *dev,
@@ -642,9 +710,7 @@ static int can_changelink(struct net_device *dev,
                if (dev->flags & IFF_UP)
                        return -EBUSY;
                memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
-               if ((!bt.bitrate && !bt.tq) || (bt.bitrate && bt.tq))
-                       return -EINVAL;
-               err = can_get_bittiming(dev, &bt);
+               err = can_get_bittiming(dev, &bt, priv->bittiming_const);
                if (err)
                        return err;
                memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -668,6 +734,12 @@ static int can_changelink(struct net_device *dev,
                        return -EOPNOTSUPP;
                priv->ctrlmode &= ~cm->mask;
                priv->ctrlmode |= cm->flags;
+
+               /* CAN_CTRLMODE_FD can only be set when driver supports FD */
+               if (priv->ctrlmode & CAN_CTRLMODE_FD)
+                       dev->mtu = CANFD_MTU;
+               else
+                       dev->mtu = CAN_MTU;
        }
 
        if (data[IFLA_CAN_RESTART_MS]) {
@@ -686,6 +758,27 @@ static int can_changelink(struct net_device *dev,
                        return err;
        }
 
+       if (data[IFLA_CAN_DATA_BITTIMING]) {
+               struct can_bittiming dbt;
+
+               /* Do not allow changing bittiming while running */
+               if (dev->flags & IFF_UP)
+                       return -EBUSY;
+               memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
+                      sizeof(dbt));
+               err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
+               if (err)
+                       return err;
+               memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+
+               if (priv->do_set_data_bittiming) {
+                       /* Finally, set the bit-timing registers */
+                       err = priv->do_set_data_bittiming(dev);
+                       if (err)
+                               return err;
+               }
+       }
+
        return 0;
 }
 
@@ -694,7 +787,8 @@ static size_t can_get_size(const struct net_device *dev)
        struct can_priv *priv = netdev_priv(dev);
        size_t size = 0;
 
-       size += nla_total_size(sizeof(struct can_bittiming));   /* IFLA_CAN_BITTIMING */
+       if (priv->bittiming.bitrate)                            /* IFLA_CAN_BITTIMING */
+               size += nla_total_size(sizeof(struct can_bittiming));
        if (priv->bittiming_const)                              /* IFLA_CAN_BITTIMING_CONST */
                size += nla_total_size(sizeof(struct can_bittiming_const));
        size += nla_total_size(sizeof(struct can_clock));       /* IFLA_CAN_CLOCK */
@@ -703,6 +797,10 @@ static size_t can_get_size(const struct net_device *dev)
        size += nla_total_size(sizeof(u32));                    /* IFLA_CAN_RESTART_MS */
        if (priv->do_get_berr_counter)                          /* IFLA_CAN_BERR_COUNTER */
                size += nla_total_size(sizeof(struct can_berr_counter));
+       if (priv->data_bittiming.bitrate)                       /* IFLA_CAN_DATA_BITTIMING */
+               size += nla_total_size(sizeof(struct can_bittiming));
+       if (priv->data_bittiming_const)                         /* IFLA_CAN_DATA_BITTIMING_CONST */
+               size += nla_total_size(sizeof(struct can_bittiming_const));
 
        return size;
 }
@@ -716,19 +814,34 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (priv->do_get_state)
                priv->do_get_state(dev, &state);
-       if (nla_put(skb, IFLA_CAN_BITTIMING,
-                   sizeof(priv->bittiming), &priv->bittiming) ||
+
+       if ((priv->bittiming.bitrate &&
+            nla_put(skb, IFLA_CAN_BITTIMING,
+                    sizeof(priv->bittiming), &priv->bittiming)) ||
+
            (priv->bittiming_const &&
             nla_put(skb, IFLA_CAN_BITTIMING_CONST,
                     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+
            nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
            nla_put_u32(skb, IFLA_CAN_STATE, state) ||
            nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
            nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
+
            (priv->do_get_berr_counter &&
             !priv->do_get_berr_counter(dev, &bec) &&
-            nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)))
+            nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
+
+           (priv->data_bittiming.bitrate &&
+            nla_put(skb, IFLA_CAN_DATA_BITTIMING,
+                    sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
+
+           (priv->data_bittiming_const &&
+            nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
+                    sizeof(*priv->data_bittiming_const),
+                    priv->data_bittiming_const)))
                return -EMSGSIZE;
+
        return 0;
 }
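
For context, the CAN core changes above (can_change_mtu(), the data_bittiming / data_bittiming_const / do_set_data_bittiming plumbing and the CAN_CTRLMODE_FD handling) are consumed by drivers roughly as sketched below. This is only an illustration: the "foo" driver, its callbacks and its timing limits are hypothetical placeholders, not part of this series, and a real driver would allocate the netdev with alloc_candev() and program real registers.

#include <linux/netdevice.h>
#include <linux/can/dev.h>

struct foo_priv {
	struct can_priv can;	/* must stay first, the CAN core relies on it */
	/* driver specific state would follow here */
};

static int foo_set_data_bittiming(struct net_device *dev)
{
	/* program the data-phase (CAN FD) timing registers here */
	return 0;
}

static int foo_open(struct net_device *dev)
{
	return open_candev(dev);
}

static int foo_close(struct net_device *dev)
{
	close_candev(dev);
	return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real driver queues the frame to hardware; just drop it here */
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* placeholder limits, not taken from any real controller */
static const struct can_bittiming_const foo_data_bittiming_const = {
	.name = "foo",
	.tseg1_min = 1, .tseg1_max = 16,
	.tseg2_min = 1, .tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1, .brp_max = 64, .brp_inc = 1,
};

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_close,
	.ndo_start_xmit	= foo_start_xmit,
	.ndo_change_mtu	= can_change_mtu,	/* CAN_MTU <-> CANFD_MTU */
};

static void foo_setup_canfd(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
	priv->can.data_bittiming_const = &foo_data_bittiming_const;
	priv->can.do_set_data_bittiming = foo_set_data_bittiming;
	dev->netdev_ops = &foo_netdev_ops;
}

The many one-line hunks further down that add ".ndo_change_mtu = can_change_mtu" to flexcan, grcan, janz-ican3, mcp251x, mscan, pch_can, sja1000, softing, ti_hecc and the CAN USB adapters follow exactly this pattern for the classic-CAN case, where none of the FD fields are needed.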
 
index 61376abdab395cd941f9a118c46e9912e17f4452..f425ec2c7839de4abe1a481cf94d0fd03435c845 100644 (file)
@@ -1011,6 +1011,7 @@ static const struct net_device_ops flexcan_netdev_ops = {
        .ndo_open       = flexcan_open,
        .ndo_stop       = flexcan_close,
        .ndo_start_xmit = flexcan_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static int register_flexcandev(struct net_device *dev)
@@ -1132,9 +1133,9 @@ static int flexcan_probe(struct platform_device *pdev)
        of_id = of_match_device(flexcan_of_match, &pdev->dev);
        if (of_id) {
                devtype_data = of_id->data;
-       } else if (pdev->id_entry->driver_data) {
+       } else if (platform_get_device_id(pdev)->driver_data) {
                devtype_data = (struct flexcan_devtype_data *)
-                       pdev->id_entry->driver_data;
+                       platform_get_device_id(pdev)->driver_data;
        } else {
                return -ENODEV;
        }
@@ -1201,8 +1202,7 @@ static int flexcan_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int flexcan_suspend(struct device *device)
+static int __maybe_unused flexcan_suspend(struct device *device)
 {
        struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
@@ -1221,7 +1221,7 @@ static int flexcan_suspend(struct device *device)
        return 0;
 }
 
-static int flexcan_resume(struct device *device)
+static int __maybe_unused flexcan_resume(struct device *device)
 {
        struct net_device *dev = dev_get_drvdata(device);
        struct flexcan_priv *priv = netdev_priv(dev);
@@ -1233,7 +1233,6 @@ static int flexcan_resume(struct device *device)
        }
        return flexcan_chip_enable(priv);
 }
-#endif /* CONFIG_PM_SLEEP */
 
 static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
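
The suspend/resume change above trades the #ifdef CONFIG_PM_SLEEP guards for __maybe_unused. A minimal sketch of the idiom (the foo_* names are placeholders): SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is enabled, so without the annotation a !CONFIG_PM_SLEEP build would warn about defined-but-unused functions.

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/* referenced from the platform/spi driver as .driver.pm = &foo_pm_ops */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

The mcp251x hunks later in this series apply the same conversion.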
 
index ab506d6cab37743a6e207e6bfc3899db749b5148..3fd9fd942c6ef0b38ddaca66492c115f5f5ea771 100644 (file)
@@ -1578,6 +1578,7 @@ static const struct net_device_ops grcan_netdev_ops = {
        .ndo_open       = grcan_open,
        .ndo_stop       = grcan_close,
        .ndo_start_xmit = grcan_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static int grcan_setup_netdev(struct platform_device *ofdev,
index 71594e5676fdc31fc422a542cb1786013a0f4ea4..2382c04dc7807d9c513e64bc1b4f4a2c0ea181e1 100644 (file)
@@ -198,9 +198,6 @@ struct ican3_dev {
        struct net_device *ndev;
        struct napi_struct napi;
 
-       /* Device for printing */
-       struct device *dev;
-
        /* module number */
        unsigned int num;
 
@@ -295,7 +292,7 @@ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
        xord = locl ^ peer;
 
        if ((xord & MSYNC_RB_MASK) == 0x00) {
-               dev_dbg(mod->dev, "no mbox for reading\n");
+               netdev_dbg(mod->ndev, "no mbox for reading\n");
                return -ENOMEM;
        }
 
@@ -340,7 +337,7 @@ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
        xord = locl ^ peer;
 
        if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) {
-               dev_err(mod->dev, "no mbox for writing\n");
+               netdev_err(mod->ndev, "no mbox for writing\n");
                return -ENOMEM;
        }
 
@@ -542,7 +539,7 @@ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
        memcpy_fromio(&desc, desc_addr, sizeof(desc));
 
        if (!(desc.control & DESC_VALID)) {
-               dev_dbg(mod->dev, "%s: no free buffers\n", __func__);
+               netdev_dbg(mod->ndev, "%s: no free buffers\n", __func__);
                return -ENOMEM;
        }
 
@@ -573,7 +570,7 @@ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
        memcpy_fromio(&desc, desc_addr, sizeof(desc));
 
        if (!(desc.control & DESC_VALID)) {
-               dev_dbg(mod->dev, "%s: no buffers to recv\n", __func__);
+               netdev_dbg(mod->ndev, "%s: no buffers to recv\n", __func__);
                return -ENOMEM;
        }
 
@@ -883,7 +880,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
  */
 static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg)
 {
-       dev_dbg(mod->dev, "IDVERS response: %s\n", msg->data);
+       netdev_dbg(mod->ndev, "IDVERS response: %s\n", msg->data);
 }
 
 static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
@@ -899,7 +896,7 @@ static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
         * error frame for userspace
         */
        if (msg->spec == MSG_MSGLOST) {
-               dev_err(mod->dev, "lost %d control messages\n", msg->data[0]);
+               netdev_err(mod->ndev, "lost %d control messages\n", msg->data[0]);
                return;
        }
 
@@ -939,13 +936,13 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
 
        /* we can only handle the SJA1000 part */
        if (msg->data[1] != CEVTIND_CHIP_SJA1000) {
-               dev_err(mod->dev, "unable to handle errors on non-SJA1000\n");
+               netdev_err(mod->ndev, "unable to handle errors on non-SJA1000\n");
                return -ENODEV;
        }
 
        /* check the message length for sanity */
        if (le16_to_cpu(msg->len) < 6) {
-               dev_err(mod->dev, "error message too short\n");
+               netdev_err(mod->ndev, "error message too short\n");
                return -EINVAL;
        }
 
@@ -967,7 +964,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
         */
        if (isrc == CEVTIND_BEI) {
                int ret;
-               dev_dbg(mod->dev, "bus error interrupt\n");
+               netdev_dbg(mod->ndev, "bus error interrupt\n");
 
                /* TX error */
                if (!(ecc & ECC_DIR)) {
@@ -983,7 +980,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
                 */
                ret = ican3_set_buserror(mod, 1);
                if (ret) {
-                       dev_err(mod->dev, "unable to re-enable bus-error\n");
+                       netdev_err(mod->ndev, "unable to re-enable bus-error\n");
                        return ret;
                }
 
@@ -998,7 +995,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
 
        /* data overrun interrupt */
        if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
-               dev_dbg(mod->dev, "data overrun interrupt\n");
+               netdev_dbg(mod->ndev, "data overrun interrupt\n");
                cf->can_id |= CAN_ERR_CRTL;
                cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
                stats->rx_over_errors++;
@@ -1007,7 +1004,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
 
        /* error warning + passive interrupt */
        if (isrc == CEVTIND_EI) {
-               dev_dbg(mod->dev, "error warning + passive interrupt\n");
+               netdev_dbg(mod->ndev, "error warning + passive interrupt\n");
                if (status & SR_BS) {
                        state = CAN_STATE_BUS_OFF;
                        cf->can_id |= CAN_ERR_BUSOFF;
@@ -1088,7 +1085,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
                complete(&mod->termination_comp);
                break;
        default:
-               dev_err(mod->dev, "received an unknown inquiry response\n");
+               netdev_err(mod->ndev, "received an unknown inquiry response\n");
                break;
        }
 }
@@ -1096,7 +1093,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
 static void ican3_handle_unknown_message(struct ican3_dev *mod,
                                        struct ican3_msg *msg)
 {
-       dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n",
+       netdev_warn(mod->ndev, "received unknown message: spec 0x%.2x length %d\n",
                           msg->spec, le16_to_cpu(msg->len));
 }
 
@@ -1105,7 +1102,7 @@ static void ican3_handle_unknown_message(struct ican3_dev *mod,
  */
 static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
 {
-       dev_dbg(mod->dev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
+       netdev_dbg(mod->ndev, "%s: modno %d spec 0x%.2x len %d bytes\n", __func__,
                           mod->num, msg->spec, le16_to_cpu(msg->len));
 
        switch (msg->spec) {
@@ -1406,7 +1403,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
                msleep(10);
        } while (time_before(jiffies, start + HZ / 4));
 
-       dev_err(mod->dev, "failed to reset CAN module\n");
+       netdev_err(mod->ndev, "failed to reset CAN module\n");
        return -ETIMEDOUT;
 }
 
@@ -1425,7 +1422,7 @@ static int ican3_startup_module(struct ican3_dev *mod)
 
        ret = ican3_reset_module(mod);
        if (ret) {
-               dev_err(mod->dev, "unable to reset module\n");
+               netdev_err(mod->ndev, "unable to reset module\n");
                return ret;
        }
 
@@ -1434,41 +1431,41 @@ static int ican3_startup_module(struct ican3_dev *mod)
 
        ret = ican3_msg_connect(mod);
        if (ret) {
-               dev_err(mod->dev, "unable to connect to module\n");
+               netdev_err(mod->ndev, "unable to connect to module\n");
                return ret;
        }
 
        ican3_init_new_host_interface(mod);
        ret = ican3_msg_newhostif(mod);
        if (ret) {
-               dev_err(mod->dev, "unable to switch to new-style interface\n");
+               netdev_err(mod->ndev, "unable to switch to new-style interface\n");
                return ret;
        }
 
        /* default to "termination on" */
        ret = ican3_set_termination(mod, true);
        if (ret) {
-               dev_err(mod->dev, "unable to enable termination\n");
+               netdev_err(mod->ndev, "unable to enable termination\n");
                return ret;
        }
 
        /* default to "bus errors enabled" */
        ret = ican3_set_buserror(mod, 1);
        if (ret) {
-               dev_err(mod->dev, "unable to set bus-error\n");
+               netdev_err(mod->ndev, "unable to set bus-error\n");
                return ret;
        }
 
        ican3_init_fast_host_interface(mod);
        ret = ican3_msg_fasthostif(mod);
        if (ret) {
-               dev_err(mod->dev, "unable to switch to fast host interface\n");
+               netdev_err(mod->ndev, "unable to switch to fast host interface\n");
                return ret;
        }
 
        ret = ican3_set_id_filter(mod, true);
        if (ret) {
-               dev_err(mod->dev, "unable to set acceptance filter\n");
+               netdev_err(mod->ndev, "unable to set acceptance filter\n");
                return ret;
        }
 
@@ -1487,14 +1484,14 @@ static int ican3_open(struct net_device *ndev)
        /* open the CAN layer */
        ret = open_candev(ndev);
        if (ret) {
-               dev_err(mod->dev, "unable to start CAN layer\n");
+               netdev_err(mod->ndev, "unable to start CAN layer\n");
                return ret;
        }
 
        /* bring the bus online */
        ret = ican3_set_bus_state(mod, true);
        if (ret) {
-               dev_err(mod->dev, "unable to set bus-on\n");
+               netdev_err(mod->ndev, "unable to set bus-on\n");
                close_candev(ndev);
                return ret;
        }
@@ -1518,7 +1515,7 @@ static int ican3_stop(struct net_device *ndev)
        /* bring the bus offline, stop receiving packets */
        ret = ican3_set_bus_state(mod, false);
        if (ret) {
-               dev_err(mod->dev, "unable to set bus-off\n");
+               netdev_err(mod->ndev, "unable to set bus-off\n");
                return ret;
        }
 
@@ -1545,7 +1542,7 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* check that we can actually transmit */
        if (!ican3_txok(mod)) {
-               dev_err(mod->dev, "BUG: no free descriptors\n");
+               netdev_err(mod->ndev, "BUG: no free descriptors\n");
                spin_unlock_irqrestore(&mod->lock, flags);
                return NETDEV_TX_BUSY;
        }
@@ -1597,6 +1594,7 @@ static const struct net_device_ops ican3_netdev_ops = {
        .ndo_open       = ican3_open,
        .ndo_stop       = ican3_stop,
        .ndo_start_xmit = ican3_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 /*
@@ -1657,7 +1655,7 @@ static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
        /* bring the bus online */
        ret = ican3_set_bus_state(mod, true);
        if (ret) {
-               dev_err(mod->dev, "unable to set bus-on\n");
+               netdev_err(ndev, "unable to set bus-on\n");
                return ret;
        }
 
@@ -1682,7 +1680,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
 
        ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
        if (ret == 0) {
-               dev_info(mod->dev, "%s timed out\n", __func__);
+               netdev_info(mod->ndev, "%s timed out\n", __func__);
                return -ETIMEDOUT;
        }
 
@@ -1708,7 +1706,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
 
        ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
        if (ret == 0) {
-               dev_info(mod->dev, "%s timed out\n", __func__);
+               netdev_info(mod->ndev, "%s timed out\n", __func__);
                return -ETIMEDOUT;
        }
 
@@ -1778,7 +1776,6 @@ static int ican3_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, ndev);
        mod = netdev_priv(ndev);
        mod->ndev = ndev;
-       mod->dev = &pdev->dev;
        mod->num = pdata->modno;
        netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
        skb_queue_head_init(&mod->echoq);
index cdb9808d12dbc9883250ccceb5ac156be6a4c84d..28c11f81524524fe521eb0c24177fffa76323885 100644 (file)
@@ -601,10 +601,10 @@ static int mcp251x_do_set_bittiming(struct net_device *net)
                          (bt->prop_seg - 1));
        mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
                           (bt->phase_seg2 - 1));
-       dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
-                mcp251x_read_reg(spi, CNF1),
-                mcp251x_read_reg(spi, CNF2),
-                mcp251x_read_reg(spi, CNF3));
+       dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
+               mcp251x_read_reg(spi, CNF1),
+               mcp251x_read_reg(spi, CNF2),
+               mcp251x_read_reg(spi, CNF3));
 
        return 0;
 }
@@ -672,7 +672,7 @@ static int mcp251x_hw_probe(struct spi_device *spi)
 
 static int mcp251x_power_enable(struct regulator *reg, int enable)
 {
-       if (IS_ERR(reg))
+       if (IS_ERR_OR_NULL(reg))
                return 0;
 
        if (enable)
@@ -996,6 +996,7 @@ static const struct net_device_ops mcp251x_netdev_ops = {
        .ndo_open = mcp251x_open,
        .ndo_stop = mcp251x_stop,
        .ndo_start_xmit = mcp251x_hard_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static const struct of_device_id mcp251x_of_match[] = {
@@ -1155,8 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi)
 
        devm_can_led_init(net);
 
-       dev_info(&spi->dev, "probed\n");
-
        return ret;
 
 error_probe:
@@ -1197,9 +1196,7 @@ static int mcp251x_can_remove(struct spi_device *spi)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-
-static int mcp251x_can_suspend(struct device *dev)
+static int __maybe_unused mcp251x_can_suspend(struct device *dev)
 {
        struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1221,7 +1218,7 @@ static int mcp251x_can_suspend(struct device *dev)
                priv->after_suspend = AFTER_SUSPEND_DOWN;
        }
 
-       if (!IS_ERR(priv->power)) {
+       if (!IS_ERR_OR_NULL(priv->power)) {
                regulator_disable(priv->power);
                priv->after_suspend |= AFTER_SUSPEND_POWER;
        }
@@ -1229,7 +1226,7 @@ static int mcp251x_can_suspend(struct device *dev)
        return 0;
 }
 
-static int mcp251x_can_resume(struct device *dev)
+static int __maybe_unused mcp251x_can_resume(struct device *dev)
 {
        struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -1249,7 +1246,6 @@ static int mcp251x_can_resume(struct device *dev)
        enable_irq(spi->irq);
        return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
        mcp251x_can_resume);
index b9f3faabb0f30ecbc466664ec6e5ca564fa52409..e0c9be5e2ab74676e8e3bb9fe026b145d3f4d3ab 100644 (file)
@@ -647,9 +647,10 @@ static int mscan_close(struct net_device *dev)
 }
 
 static const struct net_device_ops mscan_netdev_ops = {
-       .ndo_open               = mscan_open,
-       .ndo_stop               = mscan_close,
-       .ndo_start_xmit         = mscan_start_xmit,
+       .ndo_open       = mscan_open,
+       .ndo_stop       = mscan_close,
+       .ndo_start_xmit = mscan_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 int register_mscandev(struct net_device *dev, int mscan_clksrc)
index 6c077eb87b5e90c98b9431bd5dcb3238685ee1ef..6472562efedc921468212f8b8300b88dff6f2e95 100644 (file)
@@ -950,6 +950,7 @@ static const struct net_device_ops pch_can_netdev_ops = {
        .ndo_open               = pch_can_open,
        .ndo_stop               = pch_close,
        .ndo_start_xmit         = pch_xmit,
+       .ndo_change_mtu         = can_change_mtu,
 };
 
 static void pch_can_remove(struct pci_dev *pdev)
index ff2ba86cd4a495cdd29020f02561a1f31926ff11..4b18b87655231c3f5ee445a30dc8b85fcddb265c 100644 (file)
@@ -17,16 +17,9 @@ config CAN_SJA1000_PLATFORM
          the "platform bus" (Linux abstraction for directly to the
          processor attached devices).  Which can be found on various
          boards from Phytec (http://www.phytec.de) like the PCM027,
-         PCM038.
-
-config CAN_SJA1000_OF_PLATFORM
-       tristate "Generic OF Platform Bus based SJA1000 driver"
-       depends on OF
-       ---help---
-         This driver adds support for the SJA1000 chips connected to
-         the OpenFirmware "platform bus" found on embedded systems with
-         OpenFirmware bindings, e.g. if you have a PowerPC based system
-         you may want to enable this option.
+         PCM038. It also supports SJA1000 chips on the OpenFirmware
+         "platform bus" found on embedded systems with OpenFirmware
+         bindings, e.g. on PowerPC based systems.
 
 config CAN_EMS_PCMCIA
        tristate "EMS CPC-CARD Card"
index b3d05cbfec3668cfc8df21989adc8b6643465ae3..531d5fcc97e58bded41ac3f3022a2a84a41ba6f0 100644 (file)
@@ -5,7 +5,6 @@
 obj-$(CONFIG_CAN_SJA1000) += sja1000.o
 obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
 obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
 obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
 obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
 obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
index d790b874ca79ba4cb4d522a8664f24c99ea3ebb8..fd13dbf07d9c0cb3d912c99f326b6e31be965366 100644 (file)
@@ -323,6 +323,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
                        priv->cdr = EMS_PCI_CDR;
 
                        SET_NETDEV_DEV(dev, &pdev->dev);
+                       dev->dev_id = i;
 
                        if (card->version == 1)
                                /* reset int flag of pita */
index 9e535f2ef52bb03365cc6ccf6f5ac2ababc6e7dc..381de998d2f1655b955bc701add77a30fe44ac21 100644 (file)
@@ -211,6 +211,7 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
                priv = netdev_priv(dev);
                priv->priv = card;
                SET_NETDEV_DEV(dev, &pdev->dev);
+               dev->dev_id = i;
 
                priv->irq_flags = IRQF_SHARED;
                dev->irq = pdev->irq;
index c96eb14699d5ad8a00655d71d80c8bd7fcbcf1c9..23b8e1324e25ef9e31a78a8c7591a1c968c01c29 100644 (file)
@@ -270,6 +270,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
                 priv->reg_base, board->conf_addr, dev->irq);
 
        SET_NETDEV_DEV(dev, &pdev->dev);
+       dev->dev_id = channel;
 
        /* Register SJA1000 device */
        err = register_sja1000dev(dev);
index 065ca49eb45e72c48c9d1cc5f8fbda0256fdb195..c540e3d12e3d826260dbb590e8045c92fa4efb2b 100644 (file)
@@ -642,6 +642,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                icr |= chan->icr_mask;
 
                SET_NETDEV_DEV(dev, &pdev->dev);
+               dev->dev_id = i;
 
                /* Create chain of SJA1000 devices */
                chan->prev_dev = pci_get_drvdata(pdev);
index f7ad754dd2aa1427380b79eabf6c661d6114ba48..dd56133cc4616180bc5841bda60dd576e3ebcd75 100644 (file)
@@ -550,6 +550,7 @@ static int pcan_add_channels(struct pcan_pccard *card)
                priv = netdev_priv(netdev);
                priv->priv = card;
                SET_NETDEV_DEV(netdev, &pdev->dev);
+               netdev->dev_id = i;
 
                priv->irq_flags = IRQF_SHARED;
                netdev->irq = pdev->irq;
index fbb61a0d901fb3c367d2d1dc15a5a524dab43b93..ec39b7cb2287b4764ffbfadffa29ed5286c5c545 100644 (file)
@@ -587,6 +587,7 @@ static int plx_pci_add_card(struct pci_dev *pdev,
                        priv->cdr = ci->cdr;
 
                        SET_NETDEV_DEV(dev, &pdev->dev);
+                       dev->dev_id = i;
 
                        /* Register SJA1000 device */
                        err = register_sja1000dev(dev);
index f17c3018b7c7ffb3f7d75785c3ddd2218b0f3f79..f31499a32d7dcf4fde4eb1a0864af4f15bfb6118 100644 (file)
@@ -106,8 +106,7 @@ static int sja1000_probe_chip(struct net_device *dev)
        struct sja1000_priv *priv = netdev_priv(dev);
 
        if (priv->reg_base && sja1000_is_absent(priv)) {
-               printk(KERN_INFO "%s: probing @0x%lX failed\n",
-                      DRV_NAME, dev->base_addr);
+               netdev_err(dev, "probing failed\n");
                return 0;
        }
        return -1;
@@ -643,9 +642,10 @@ void free_sja1000dev(struct net_device *dev)
 EXPORT_SYMBOL_GPL(free_sja1000dev);
 
 static const struct net_device_ops sja1000_netdev_ops = {
-       .ndo_open               = sja1000_open,
-       .ndo_stop               = sja1000_close,
-       .ndo_start_xmit         = sja1000_start_xmit,
+       .ndo_open       = sja1000_open,
+       .ndo_stop       = sja1000_close,
+       .ndo_start_xmit = sja1000_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 int register_sja1000dev(struct net_device *dev)
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
deleted file mode 100644 (file)
index 2f6e245..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
- *
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
- * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
- * definition in your flattened device tree source (DTS) file similar to:
- *
- *   can@3,100 {
- *           compatible = "nxp,sja1000";
- *           reg = <3 0x100 0x80>;
- *           interrupts = <2 0>;
- *           interrupt-parent = <&mpic>;
- *           nxp,external-clock-frequency = <16000000>;
- *   };
- *
- * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
- * information.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/can/dev.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include "sja1000.h"
-
-#define DRV_NAME "sja1000_of_platform"
-
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
-MODULE_LICENSE("GPL v2");
-
-#define SJA1000_OFP_CAN_CLOCK  (16000000 / 2)
-
-#define SJA1000_OFP_OCR        OCR_TX0_PULLDOWN
-#define SJA1000_OFP_CDR        (CDR_CBP | CDR_CLK_OFF)
-
-static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
-{
-       return ioread8(priv->reg_base + reg);
-}
-
-static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
-                                 int reg, u8 val)
-{
-       iowrite8(val, priv->reg_base + reg);
-}
-
-static int sja1000_ofp_remove(struct platform_device *ofdev)
-{
-       struct net_device *dev = platform_get_drvdata(ofdev);
-       struct sja1000_priv *priv = netdev_priv(dev);
-       struct device_node *np = ofdev->dev.of_node;
-       struct resource res;
-
-       unregister_sja1000dev(dev);
-       free_sja1000dev(dev);
-       iounmap(priv->reg_base);
-       irq_dispose_mapping(dev->irq);
-
-       of_address_to_resource(np, 0, &res);
-       release_mem_region(res.start, resource_size(&res));
-
-       return 0;
-}
-
-static int sja1000_ofp_probe(struct platform_device *ofdev)
-{
-       struct device_node *np = ofdev->dev.of_node;
-       struct net_device *dev;
-       struct sja1000_priv *priv;
-       struct resource res;
-       u32 prop;
-       int err, irq, res_size;
-       void __iomem *base;
-
-       err = of_address_to_resource(np, 0, &res);
-       if (err) {
-               dev_err(&ofdev->dev, "invalid address\n");
-               return err;
-       }
-
-       res_size = resource_size(&res);
-
-       if (!request_mem_region(res.start, res_size, DRV_NAME)) {
-               dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
-               return -EBUSY;
-       }
-
-       base = ioremap_nocache(res.start, res_size);
-       if (!base) {
-               dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
-               err = -ENOMEM;
-               goto exit_release_mem;
-       }
-
-       irq = irq_of_parse_and_map(np, 0);
-       if (irq == 0) {
-               dev_err(&ofdev->dev, "no irq found\n");
-               err = -ENODEV;
-               goto exit_unmap_mem;
-       }
-
-       dev = alloc_sja1000dev(0);
-       if (!dev) {
-               err = -ENOMEM;
-               goto exit_dispose_irq;
-       }
-
-       priv = netdev_priv(dev);
-
-       priv->read_reg = sja1000_ofp_read_reg;
-       priv->write_reg = sja1000_ofp_write_reg;
-
-       err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
-       if (!err)
-               priv->can.clock.freq = prop / 2;
-       else
-               priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
-
-       err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
-       if (!err)
-               priv->ocr |= prop & OCR_MODE_MASK;
-       else
-               priv->ocr |= OCR_MODE_NORMAL; /* default */
-
-       err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
-       if (!err)
-               priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
-       else
-               priv->ocr |= OCR_TX0_PULLDOWN; /* default */
-
-       err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
-       if (!err && prop) {
-               u32 divider = priv->can.clock.freq * 2 / prop;
-
-               if (divider > 1)
-                       priv->cdr |= divider / 2 - 1;
-               else
-                       priv->cdr |= CDR_CLKOUT_MASK;
-       } else {
-               priv->cdr |= CDR_CLK_OFF; /* default */
-       }
-
-       if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
-               priv->cdr |= CDR_CBP; /* default */
-
-       priv->irq_flags = IRQF_SHARED;
-       priv->reg_base = base;
-
-       dev->irq = irq;
-
-       dev_info(&ofdev->dev,
-                "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
-                priv->reg_base, dev->irq, priv->can.clock.freq,
-                priv->ocr, priv->cdr);
-
-       platform_set_drvdata(ofdev, dev);
-       SET_NETDEV_DEV(dev, &ofdev->dev);
-
-       err = register_sja1000dev(dev);
-       if (err) {
-               dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
-                       DRV_NAME, err);
-               goto exit_free_sja1000;
-       }
-
-       return 0;
-
-exit_free_sja1000:
-       free_sja1000dev(dev);
-exit_dispose_irq:
-       irq_dispose_mapping(irq);
-exit_unmap_mem:
-       iounmap(base);
-exit_release_mem:
-       release_mem_region(res.start, res_size);
-
-       return err;
-}
-
-static struct of_device_id sja1000_ofp_table[] = {
-       {.compatible = "nxp,sja1000"},
-       {},
-};
-MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-
-static struct platform_driver sja1000_ofp_driver = {
-       .driver = {
-               .owner = THIS_MODULE,
-               .name = DRV_NAME,
-               .of_match_table = sja1000_ofp_table,
-       },
-       .probe = sja1000_ofp_probe,
-       .remove = sja1000_ofp_remove,
-};
-
-module_platform_driver(sja1000_ofp_driver);
index 943df645b45905d26df84052184c53e2085a414f..95a844a7ee7b0befa46e7d6dd9704c9ad99fac8b 100644 (file)
 #include <linux/can/dev.h>
 #include <linux/can/platform/sja1000.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 
 #include "sja1000.h"
 
 #define DRV_NAME "sja1000_platform"
+#define SP_CAN_CLOCK  (16000000 / 2)
 
 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
 MODULE_ALIAS("platform:" DRV_NAME);
 MODULE_LICENSE("GPL v2");
@@ -66,59 +70,16 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
        iowrite8(val, priv->reg_base + reg * 4);
 }
 
-static int sp_probe(struct platform_device *pdev)
+static void sp_populate(struct sja1000_priv *priv,
+                       struct sja1000_platform_data *pdata,
+                       unsigned long resource_mem_flags)
 {
-       int err;
-       void __iomem *addr;
-       struct net_device *dev;
-       struct sja1000_priv *priv;
-       struct resource *res_mem, *res_irq;
-       struct sja1000_platform_data *pdata;
-
-       pdata = dev_get_platdata(&pdev->dev);
-       if (!pdata) {
-               dev_err(&pdev->dev, "No platform data provided!\n");
-               err = -ENODEV;
-               goto exit;
-       }
-
-       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res_mem || !res_irq) {
-               err = -ENODEV;
-               goto exit;
-       }
-
-       if (!request_mem_region(res_mem->start, resource_size(res_mem),
-                               DRV_NAME)) {
-               err = -EBUSY;
-               goto exit;
-       }
-
-       addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
-       if (!addr) {
-               err = -ENOMEM;
-               goto exit_release;
-       }
-
-       dev = alloc_sja1000dev(0);
-       if (!dev) {
-               err = -ENOMEM;
-               goto exit_iounmap;
-       }
-       priv = netdev_priv(dev);
-
-       dev->irq = res_irq->start;
-       priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
-       if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
-               priv->irq_flags |= IRQF_SHARED;
-       priv->reg_base = addr;
        /* The CAN clock frequency is half the oscillator clock frequency */
        priv->can.clock.freq = pdata->osc_freq / 2;
        priv->ocr = pdata->ocr;
        priv->cdr = pdata->cdr;
 
-       switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+       switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) {
        case IORESOURCE_MEM_32BIT:
                priv->read_reg = sp_read_reg32;
                priv->write_reg = sp_write_reg32;
@@ -133,6 +94,124 @@ static int sp_probe(struct platform_device *pdev)
                priv->write_reg = sp_write_reg8;
                break;
        }
+}
+
+static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
+{
+       int err;
+       u32 prop;
+
+       err = of_property_read_u32(of, "reg-io-width", &prop);
+       if (err)
+               prop = 1; /* 8 bit is default */
+
+       switch (prop) {
+       case 4:
+               priv->read_reg = sp_read_reg32;
+               priv->write_reg = sp_write_reg32;
+               break;
+       case 2:
+               priv->read_reg = sp_read_reg16;
+               priv->write_reg = sp_write_reg16;
+               break;
+       case 1: /* fallthrough */
+       default:
+               priv->read_reg = sp_read_reg8;
+               priv->write_reg = sp_write_reg8;
+       }
+
+       err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+       if (!err)
+               priv->can.clock.freq = prop / 2;
+       else
+               priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+
+       err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
+       if (!err)
+               priv->ocr |= prop & OCR_MODE_MASK;
+       else
+               priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+       err = of_property_read_u32(of, "nxp,tx-output-config", &prop);
+       if (!err)
+               priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+       else
+               priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+       err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop);
+       if (!err && prop) {
+               u32 divider = priv->can.clock.freq * 2 / prop;
+
+               if (divider > 1)
+                       priv->cdr |= divider / 2 - 1;
+               else
+                       priv->cdr |= CDR_CLKOUT_MASK;
+       } else {
+               priv->cdr |= CDR_CLK_OFF; /* default */
+       }
+
+       if (!of_property_read_bool(of, "nxp,no-comparator-bypass"))
+               priv->cdr |= CDR_CBP; /* default */
+}
+
+static int sp_probe(struct platform_device *pdev)
+{
+       int err, irq = 0;
+       void __iomem *addr;
+       struct net_device *dev;
+       struct sja1000_priv *priv;
+       struct resource *res_mem, *res_irq = NULL;
+       struct sja1000_platform_data *pdata;
+       struct device_node *of = pdev->dev.of_node;
+
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata && !of) {
+               dev_err(&pdev->dev, "No platform data provided!\n");
+               return -ENODEV;
+       }
+
+       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res_mem)
+               return -ENODEV;
+
+       if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+                                    resource_size(res_mem), DRV_NAME))
+               return -EBUSY;
+
+       addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+                                   resource_size(res_mem));
+       if (!addr)
+               return -ENOMEM;
+
+       if (of)
+               irq = irq_of_parse_and_map(of, 0);
+       else
+               res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+       if (!irq && !res_irq)
+               return -ENODEV;
+
+       dev = alloc_sja1000dev(0);
+       if (!dev)
+               return -ENOMEM;
+       priv = netdev_priv(dev);
+
+       if (res_irq) {
+               irq = res_irq->start;
+               priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+               if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+                       priv->irq_flags |= IRQF_SHARED;
+       } else {
+               priv->irq_flags = IRQF_SHARED;
+       }
+
+       dev->irq = irq;
+       priv->reg_base = addr;
+
+       if (of)
+               sp_populate_of(priv, of);
+       else
+               sp_populate(priv, pdata, res_mem->flags);
 
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
@@ -150,39 +229,32 @@ static int sp_probe(struct platform_device *pdev)
 
  exit_free:
        free_sja1000dev(dev);
- exit_iounmap:
-       iounmap(addr);
- exit_release:
-       release_mem_region(res_mem->start, resource_size(res_mem));
- exit:
        return err;
 }
 
 static int sp_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
-       struct sja1000_priv *priv = netdev_priv(dev);
-       struct resource *res;
 
        unregister_sja1000dev(dev);
-
-       if (priv->reg_base)
-               iounmap(priv->reg_base);
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(res->start, resource_size(res));
-
        free_sja1000dev(dev);
 
        return 0;
 }
 
+static struct of_device_id sp_of_table[] = {
+       {.compatible = "nxp,sja1000"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, sp_of_table);
+
 static struct platform_driver sp_driver = {
        .probe = sp_probe,
        .remove = sp_remove,
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
+               .of_match_table = sp_of_table,
        },
 };
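
With the OF-specific driver removed above, this consolidated platform driver now matches "nxp,sja1000" directly and parses the same properties in sp_populate_of(), plus the optional reg-io-width register access width. For reference, a matching node would look roughly like the example that used to live in the deleted driver's header comment; addresses, interrupts and widths below are placeholders:

/*
 *   can@3,100 {
 *           compatible = "nxp,sja1000";
 *           reg = <3 0x100 0x80>;
 *           interrupts = <2 0>;
 *           interrupt-parent = <&mpic>;
 *           reg-io-width = <1>;
 *           nxp,external-clock-frequency = <16000000>;
 *   };
 */
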
 
index 3fcdae266377a8141546f239ff805160d6cadbb9..f5b16e0e3a125f4e38a93408e92b8218808339af 100644 (file)
@@ -411,10 +411,16 @@ static void slc_free_netdev(struct net_device *dev)
        slcan_devs[i] = NULL;
 }
 
+static int slcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+       return -EINVAL;
+}
+
 static const struct net_device_ops slc_netdev_ops = {
        .ndo_open               = slc_open,
        .ndo_stop               = slc_close,
        .ndo_start_xmit         = slc_xmit,
+       .ndo_change_mtu         = slcan_change_mtu,
 };
 
 static void slc_setup(struct net_device *dev)
index 9ea0dcde94ce0fe948fd3f059a9084516c8beaee..7d8c8f3672dd993119a28f478e5cc43514aa131c 100644 (file)
@@ -628,6 +628,7 @@ static const struct net_device_ops softing_netdev_ops = {
        .ndo_open = softing_netdev_open,
        .ndo_stop = softing_netdev_stop,
        .ndo_start_xmit = softing_netdev_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static const struct can_bittiming_const softing_btr_const = {
@@ -832,6 +833,7 @@ static int softing_pdev_probe(struct platform_device *pdev)
                        ret = -ENOMEM;
                        goto netdev_failed;
                }
+               netdev->dev_id = j;
                priv = netdev_priv(card->net[j]);
                priv->index = j;
                ret = softing_netdev_register(netdev);
index 2c62fe6c8fa967382796e99bbe1b85c37cd12916..258b9c4856ec6c1726c0e78a64becf19ae8c40d9 100644 (file)
@@ -871,6 +871,7 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
        .ndo_open               = ti_hecc_open,
        .ndo_stop               = ti_hecc_close,
        .ndo_start_xmit         = ti_hecc_xmit,
+       .ndo_change_mtu         = can_change_mtu,
 };
 
 static int ti_hecc_probe(struct platform_device *pdev)
index 52c42fd49510014f909b63efefa9ba1891dc8e11..00f2534dde736f1dd8cda9b016bd6a78f79eeab0 100644 (file)
@@ -883,6 +883,7 @@ static const struct net_device_ops ems_usb_netdev_ops = {
        .ndo_open = ems_usb_open,
        .ndo_stop = ems_usb_close,
        .ndo_start_xmit = ems_usb_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static const struct can_bittiming_const ems_usb_bittiming_const = {
index 7fbe85935f1d1071e7efbd917797a3da19938a70..b7c9e8b11460a3d6bd0e4fe7836f0e07e73eb586 100644 (file)
@@ -888,6 +888,7 @@ static const struct net_device_ops esd_usb2_netdev_ops = {
        .ndo_open = esd_usb2_open,
        .ndo_stop = esd_usb2_close,
        .ndo_start_xmit = esd_usb2_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static const struct can_bittiming_const esd_usb2_bittiming_const = {
@@ -1024,6 +1025,7 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
        netdev->netdev_ops = &esd_usb2_netdev_ops;
 
        SET_NETDEV_DEV(netdev, &intf->dev);
+       netdev->dev_id = index;
 
        err = register_candev(netdev);
        if (err) {
index e77d11049747047a3824d7e9a1dc32fc004f552d..4ca46edc061d761169a85fc58937dee449a4776f 100644 (file)
@@ -1388,6 +1388,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
        .ndo_open = kvaser_usb_open,
        .ndo_stop = kvaser_usb_close,
        .ndo_start_xmit = kvaser_usb_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static const struct can_bittiming_const kvaser_usb_bittiming_const = {
@@ -1529,6 +1530,7 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
        netdev->netdev_ops = &kvaser_usb_netdev_ops;
 
        SET_NETDEV_DEV(netdev, &intf->dev);
+       netdev->dev_id = channel;
 
        dev->nets[channel] = priv;
 
index 0b7a4c3b01a2976176878607bd457a9ea282e21e..644e6ab8a489b162ba399a01faa10cc5b09cf3bb 100644 (file)
@@ -702,6 +702,7 @@ static const struct net_device_ops peak_usb_netdev_ops = {
        .ndo_open = peak_usb_ndo_open,
        .ndo_stop = peak_usb_ndo_stop,
        .ndo_start_xmit = peak_usb_ndo_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 /*
@@ -769,6 +770,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        usb_set_intfdata(intf, dev);
 
        SET_NETDEV_DEV(netdev, &intf->dev);
+       netdev->dev_id = ctrl_idx;
 
        err = register_candev(netdev);
        if (err) {
index a0fa1fd5092ba8243dcba8c6abfa67eac0f60694..cde263459932aff184d919b1881de5c9885831fa 100644 (file)
@@ -887,6 +887,7 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
        .ndo_open = usb_8dev_open,
        .ndo_stop = usb_8dev_close,
        .ndo_start_xmit = usb_8dev_start_xmit,
+       .ndo_change_mtu = can_change_mtu,
 };
 
 static const struct can_bittiming_const usb_8dev_bittiming_const = {
index bd8f84b0b894ebfe616a85b365598347d648200f..0932ffbf381b5b5c877b9988318912fcea930473 100644 (file)
@@ -63,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
 
                dstats = per_cpu_ptr(dev->dstats, i);
                do {
-                       start = u64_stats_fetch_begin_bh(&dstats->syncp);
+                       start = u64_stats_fetch_begin_irq(&dstats->syncp);
                        tbytes = dstats->tx_bytes;
                        tpackets = dstats->tx_packets;
-               } while (u64_stats_fetch_retry_bh(&dstats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
                stats->tx_bytes += tbytes;
                stats->tx_packets += tpackets;
        }
@@ -88,16 +88,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
-       int i;
-       dev->dstats = alloc_percpu(struct pcpu_dstats);
+       dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
        if (!dev->dstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_dstats *dstats;
-               dstats = per_cpu_ptr(dev->dstats, i);
-               u64_stats_init(&dstats->syncp);
-       }
        return 0;
 }
 
index c53384d41c96a974df073a383a2ea15b5373a3da..35df0b9e6848b0f1bd964f7b93ed2f18988c6b08 100644 (file)
@@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        spin_unlock_irqrestore(&lp->lock, flags);
 
-       dev_kfree_skb (skb);
+       dev_consume_skb_any (skb);
 
        /* Clear the Tx status stack. */
        {
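
The el3_start_xmit() change above is part of a wider dev_kfree_skb() -> dev_consume_skb_any() sweep: the consume variant tells drop monitors (the kfree_skb tracepoint used by dropwatch) that the packet was delivered rather than dropped, and the _any variants are safe regardless of IRQ context. A tiny illustrative helper to contrast the two calls (my_tx_done() is hypothetical, not from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void my_tx_done(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_any(skb);	/* delivered: not reported as a drop */
	else
		dev_kfree_skb_any(skb);		/* discarded: visible to drop monitoring */
}
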
index 5992860a39c97747e64befccd939f2e1f99e78f4..063557e037f21b8b43fa3d1dfe64a19c870d4789 100644 (file)
@@ -1,23 +1,24 @@
-/*======================================================================
-
-    A PCMCIA ethernet driver for the 3com 3c589 card.
-
-    Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
-
-    3c589_cs.c 1.162 2001/10/13 00:08:50
-
-    The network driver code is based on Donald Becker's 3c589 code:
-
-    Written 1994 by Donald Becker.
-    Copyright 1993 United States Government as represented by the
-    Director, National Security Agency.  This software may be used and
-    distributed according to the terms of the GNU General Public License,
-    incorporated herein by reference.
-    Donald Becker may be reached at becker@scyld.com
-
-    Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-======================================================================*/
+/* ======================================================================
+ *
+ * A PCMCIA ethernet driver for the 3com 3c589 card.
+ *
+ * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+ *
+ * 3c589_cs.c 1.162 2001/10/13 00:08:50
+ *
+ * The network driver code is based on Donald Becker's 3c589 code:
+ *
+ * Written 1994 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.  This software may be used and
+ * distributed according to the terms of the GNU General Public License,
+ * incorporated herein by reference.
+ * Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * ======================================================================
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/ioport.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
 #include <pcmcia/cistpl.h>
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ciscode.h>
 #include <pcmcia/ds.h>
 
-#include <asm/uaccess.h>
-#include <asm/io.h>
 
 /* To minimize the size of the driver source I only define operating
-   constants if they are used several times.  You'll need the manual
-   if you want to understand driver details. */
+ * constants if they are used several times. You'll need the manual
+ * if you want to understand driver details.
+ */
+
 /* Offsets from base I/O address. */
 #define EL3_DATA       0x00
 #define EL3_TIMER      0x0a
@@ -65,7 +68,9 @@
 #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
 
 /* The top five bits written to EL3_CMD are a command, the lower
-   11 bits are the parameter, if applicable. */
+ * 11 bits are the parameter, if applicable.
+ */
+
 enum c509cmd {
        TotalReset      = 0<<11,
        SelectWindow    = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
 
 static int tc589_probe(struct pcmcia_device *link)
 {
-    struct el3_private *lp;
-    struct net_device *dev;
+       struct el3_private *lp;
+       struct net_device *dev;
 
-    dev_dbg(&link->dev, "3c589_attach()\n");
+       dev_dbg(&link->dev, "3c589_attach()\n");
 
-    /* Create new ethernet device */
-    dev = alloc_etherdev(sizeof(struct el3_private));
-    if (!dev)
-        return -ENOMEM;
-    lp = netdev_priv(dev);
-    link->priv = dev;
-    lp->p_dev = link;
+       /* Create new ethernet device */
+       dev = alloc_etherdev(sizeof(struct el3_private));
+       if (!dev)
+               return -ENOMEM;
+       lp = netdev_priv(dev);
+       link->priv = dev;
+       lp->p_dev = link;
 
-    spin_lock_init(&lp->lock);
-    link->resource[0]->end = 16;
-    link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+       spin_lock_init(&lp->lock);
+       link->resource[0]->end = 16;
+       link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
 
-    link->config_flags |= CONF_ENABLE_IRQ;
-    link->config_index = 1;
+       link->config_flags |= CONF_ENABLE_IRQ;
+       link->config_index = 1;
 
-    dev->netdev_ops = &el3_netdev_ops;
-    dev->watchdog_timeo = TX_TIMEOUT;
+       dev->netdev_ops = &el3_netdev_ops;
+       dev->watchdog_timeo = TX_TIMEOUT;
 
-    SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+       SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 
-    return tc589_config(link);
+       return tc589_config(link);
 }
 
 static void tc589_detach(struct pcmcia_device *link)
 {
-    struct net_device *dev = link->priv;
+       struct net_device *dev = link->priv;
 
-    dev_dbg(&link->dev, "3c589_detach\n");
+       dev_dbg(&link->dev, "3c589_detach\n");
 
-    unregister_netdev(dev);
+       unregister_netdev(dev);
 
-    tc589_release(link);
+       tc589_release(link);
 
-    free_netdev(dev);
+       free_netdev(dev);
 } /* tc589_detach */
 
 static int tc589_config(struct pcmcia_device *link)
 {
-    struct net_device *dev = link->priv;
-    __be16 *phys_addr;
-    int ret, i, j, multi = 0, fifo;
-    unsigned int ioaddr;
-    static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
-    u8 *buf;
-    size_t len;
-
-    dev_dbg(&link->dev, "3c589_config\n");
-
-    phys_addr = (__be16 *)dev->dev_addr;
-    /* Is this a 3c562? */
-    if (link->manf_id != MANFID_3COM)
-           dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
-    multi = (link->card_id == PRODID_3COM_3C562);
-
-    link->io_lines = 16;
-
-    /* For the 3c562, the base address must be xx00-xx7f */
-    for (i = j = 0; j < 0x400; j += 0x10) {
-       if (multi && (j & 0x80)) continue;
-       link->resource[0]->start = j ^ 0x300;
-       i = pcmcia_request_io(link);
-       if (i == 0)
-               break;
-    }
-    if (i != 0)
-       goto failed;
-
-    ret = pcmcia_request_irq(link, el3_interrupt);
-    if (ret)
-           goto failed;
-
-    ret = pcmcia_enable_device(link);
-    if (ret)
-           goto failed;
-
-    dev->irq = link->irq;
-    dev->base_addr = link->resource[0]->start;
-    ioaddr = dev->base_addr;
-    EL3WINDOW(0);
-
-    /* The 3c589 has an extra EEPROM for configuration info, including
-       the hardware address.  The 3c562 puts the address in the CIS. */
-    len = pcmcia_get_tuple(link, 0x88, &buf);
-    if (buf && len >= 6) {
-           for (i = 0; i < 3; i++)
-                   phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
-           kfree(buf);
-    } else {
-       kfree(buf); /* 0 < len < 6 */
-       for (i = 0; i < 3; i++)
-           phys_addr[i] = htons(read_eeprom(ioaddr, i));
-       if (phys_addr[0] == htons(0x6060)) {
-           dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
-                   dev->base_addr, dev->base_addr+15);
-           goto failed;
+       struct net_device *dev = link->priv;
+       __be16 *phys_addr;
+       int ret, i, j, multi = 0, fifo;
+       unsigned int ioaddr;
+       static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+       u8 *buf;
+       size_t len;
+
+       dev_dbg(&link->dev, "3c589_config\n");
+
+       phys_addr = (__be16 *)dev->dev_addr;
+       /* Is this a 3c562? */
+       if (link->manf_id != MANFID_3COM)
+               dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
+       multi = (link->card_id == PRODID_3COM_3C562);
+
+       link->io_lines = 16;
+
+       /* For the 3c562, the base address must be xx00-xx7f */
+       for (i = j = 0; j < 0x400; j += 0x10) {
+               if (multi && (j & 0x80))
+                       continue;
+               link->resource[0]->start = j ^ 0x300;
+               i = pcmcia_request_io(link);
+               if (i == 0)
+                       break;
        }
-    }
-
-    /* The address and resource configuration register aren't loaded from
-       the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
-    outw(0x3f00, ioaddr + 8);
-    fifo = inl(ioaddr);
-
-    /* The if_port symbol can be set when the module is loaded */
-    if ((if_port >= 0) && (if_port <= 3))
-       dev->if_port = if_port;
-    else
-       dev_err(&link->dev, "invalid if_port requested\n");
-
-    SET_NETDEV_DEV(dev, &link->dev);
-
-    if (register_netdev(dev) != 0) {
-           dev_err(&link->dev, "register_netdev() failed\n");
-       goto failed;
-    }
-
-    netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
-               (multi ? "562" : "589"), dev->base_addr, dev->irq,
-               dev->dev_addr);
-    netdev_info(dev, "  %dK FIFO split %s Rx:Tx, %s xcvr\n",
-               (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
-               if_names[dev->if_port]);
-    return 0;
+       if (i != 0)
+               goto failed;
+
+       ret = pcmcia_request_irq(link, el3_interrupt);
+       if (ret)
+               goto failed;
+
+       ret = pcmcia_enable_device(link);
+       if (ret)
+               goto failed;
+
+       dev->irq = link->irq;
+       dev->base_addr = link->resource[0]->start;
+       ioaddr = dev->base_addr;
+       EL3WINDOW(0);
+
+       /* The 3c589 has an extra EEPROM for configuration info, including
+        * the hardware address.  The 3c562 puts the address in the CIS.
+        */
+       len = pcmcia_get_tuple(link, 0x88, &buf);
+       if (buf && len >= 6) {
+               for (i = 0; i < 3; i++)
+                       phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+               kfree(buf);
+       } else {
+               kfree(buf); /* 0 < len < 6 */
+               for (i = 0; i < 3; i++)
+                       phys_addr[i] = htons(read_eeprom(ioaddr, i));
+               if (phys_addr[0] == htons(0x6060)) {
+                       dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+                                       dev->base_addr, dev->base_addr+15);
+                       goto failed;
+               }
+       }
+
+       /* The address and resource configuration register aren't loaded from
+        * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
+        */
+
+       outw(0x3f00, ioaddr + 8);
+       fifo = inl(ioaddr);
+
+       /* The if_port symbol can be set when the module is loaded */
+       if ((if_port >= 0) && (if_port <= 3))
+               dev->if_port = if_port;
+       else
+               dev_err(&link->dev, "invalid if_port requested\n");
+
+       SET_NETDEV_DEV(dev, &link->dev);
+
+       if (register_netdev(dev) != 0) {
+               dev_err(&link->dev, "register_netdev() failed\n");
+               goto failed;
+       }
+
+       netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+                       (multi ? "562" : "589"), dev->base_addr, dev->irq,
+                       dev->dev_addr);
+       netdev_info(dev, "  %dK FIFO split %s Rx:Tx, %s xcvr\n",
+                       (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+                       if_names[dev->if_port]);
+       return 0;
 
 failed:
-    tc589_release(link);
-    return -ENODEV;
+       tc589_release(link);
+       return -ENODEV;
 } /* tc589_config */
 
 static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
 
 /*====================================================================*/
 
-/*
-  Use this for commands that may take time to finish
-*/
+/* Use this for commands that may take time to finish */
+
 static void tc589_wait_for_completion(struct net_device *dev, int cmd)
 {
-    int i = 100;
-    outw(cmd, dev->base_addr + EL3_CMD);
-    while (--i > 0)
-       if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
-    if (i == 0)
-       netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
+       int i = 100;
+       outw(cmd, dev->base_addr + EL3_CMD);
+       while (--i > 0)
+               if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
+                       break;
+       if (i == 0)
+               netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
 }
 
-/*
-  Read a word from the EEPROM using the regular EEPROM access register.
-  Assume that we are in register window zero.
-*/
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ * Assume that we are in register window zero.
+ */
+
 static u16 read_eeprom(unsigned int ioaddr, int index)
 {
-    int i;
-    outw(EEPROM_READ + index, ioaddr + 10);
-    /* Reading the eeprom takes 162 us */
-    for (i = 1620; i >= 0; i--)
-       if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
-           break;
-    return inw(ioaddr + 12);
+       int i;
+       outw(EEPROM_READ + index, ioaddr + 10);
+       /* Reading the eeprom takes 162 us */
+       for (i = 1620; i >= 0; i--)
+               if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+                       break;
+       return inw(ioaddr + 12);
 }
 
-/*
-  Set transceiver type, perhaps to something other than what the user
-  specified in dev->if_port.
-*/
+/* Set transceiver type, perhaps to something other than what the user
+ * specified in dev->if_port.
+ */
+
 static void tc589_set_xcvr(struct net_device *dev, int if_port)
 {
-    struct el3_private *lp = netdev_priv(dev);
-    unsigned int ioaddr = dev->base_addr;
-
-    EL3WINDOW(0);
-    switch (if_port) {
-    case 0: case 1: outw(0, ioaddr + 6); break;
-    case 2: outw(3<<14, ioaddr + 6); break;
-    case 3: outw(1<<14, ioaddr + 6); break;
-    }
-    /* On PCMCIA, this just turns on the LED */
-    outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
-    /* 10baseT interface, enable link beat and jabber check. */
-    EL3WINDOW(4);
-    outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
-    EL3WINDOW(1);
-    if (if_port == 2)
-       lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
-    else
-       lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+       struct el3_private *lp = netdev_priv(dev);
+       unsigned int ioaddr = dev->base_addr;
+
+       EL3WINDOW(0);
+       switch (if_port) {
+       case 0:
+       case 1:
+               outw(0, ioaddr + 6);
+               break;
+       case 2:
+               outw(3<<14, ioaddr + 6);
+               break;
+       case 3:
+               outw(1<<14, ioaddr + 6);
+               break;
+       }
+       /* On PCMCIA, this just turns on the LED */
+       outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+       /* 10baseT interface, enable link beat and jabber check. */
+       EL3WINDOW(4);
+       outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+       EL3WINDOW(1);
+       if (if_port == 2)
+               lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+       else
+               lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
 }
 
 static void dump_status(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-    EL3WINDOW(1);
-    netdev_info(dev, "  irq status %04x, rx status %04x, tx status %02x  tx free %04x\n",
-               inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
-               inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
-    EL3WINDOW(4);
-    netdev_info(dev, "  diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
-               inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
-               inw(ioaddr+0x0a));
-    EL3WINDOW(1);
+       unsigned int ioaddr = dev->base_addr;
+       EL3WINDOW(1);
+       netdev_info(dev, "  irq status %04x, rx status %04x, tx status %02x  tx free %04x\n",
+                       inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+                       inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
+       EL3WINDOW(4);
+       netdev_info(dev, "  diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+                       inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+                       inw(ioaddr+0x0a));
+       EL3WINDOW(1);
 }
 
 /* Reset and restore all of the 3c589 registers. */
 static void tc589_reset(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-    int i;
-
-    EL3WINDOW(0);
-    outw(0x0001, ioaddr + 4);                  /* Activate board. */
-    outw(0x3f00, ioaddr + 8);                  /* Set the IRQ line. */
-
-    /* Set the station address in window 2. */
-    EL3WINDOW(2);
-    for (i = 0; i < 6; i++)
-       outb(dev->dev_addr[i], ioaddr + i);
-
-    tc589_set_xcvr(dev, dev->if_port);
-
-    /* Switch to the stats window, and clear all stats by reading. */
-    outw(StatsDisable, ioaddr + EL3_CMD);
-    EL3WINDOW(6);
-    for (i = 0; i < 9; i++)
-       inb(ioaddr+i);
-    inw(ioaddr + 10);
-    inw(ioaddr + 12);
-
-    /* Switch to register set 1 for normal use. */
-    EL3WINDOW(1);
-
-    set_rx_mode(dev);
-    outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
-    outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
-    outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
-    /* Allow status bits to be seen. */
-    outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
-    /* Ack all pending events, and set active indicator mask. */
-    outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+       unsigned int ioaddr = dev->base_addr;
+       int i;
+
+       EL3WINDOW(0);
+       outw(0x0001, ioaddr + 4);                       /* Activate board. */
+       outw(0x3f00, ioaddr + 8);                       /* Set the IRQ line. */
+
+       /* Set the station address in window 2. */
+       EL3WINDOW(2);
+       for (i = 0; i < 6; i++)
+               outb(dev->dev_addr[i], ioaddr + i);
+
+       tc589_set_xcvr(dev, dev->if_port);
+
+       /* Switch to the stats window, and clear all stats by reading. */
+       outw(StatsDisable, ioaddr + EL3_CMD);
+       EL3WINDOW(6);
+       for (i = 0; i < 9; i++)
+               inb(ioaddr+i);
+       inw(ioaddr + 10);
+       inw(ioaddr + 12);
+
+       /* Switch to register set 1 for normal use. */
+       EL3WINDOW(1);
+
+       set_rx_mode(dev);
+       outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+       outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+       outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+       /* Allow status bits to be seen. */
+       outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+       /* Ack all pending events, and set active indicator mask. */
+       outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
         ioaddr + EL3_CMD);
-    outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+       outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
         | AdapterFailure, ioaddr + EL3_CMD);
 }
 
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 
 static int el3_config(struct net_device *dev, struct ifmap *map)
 {
-    if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
-       if (map->port <= 3) {
-           dev->if_port = map->port;
-           netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
-           tc589_set_xcvr(dev, dev->if_port);
-       } else
-           return -EINVAL;
-    }
-    return 0;
+       if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+               if (map->port <= 3) {
+                       dev->if_port = map->port;
+                       netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+                       tc589_set_xcvr(dev, dev->if_port);
+               } else {
+                       return -EINVAL;
+               }
+       }
+       return 0;
 }
 
 static int el3_open(struct net_device *dev)
 {
-    struct el3_private *lp = netdev_priv(dev);
-    struct pcmcia_device *link = lp->p_dev;
+       struct el3_private *lp = netdev_priv(dev);
+       struct pcmcia_device *link = lp->p_dev;
 
-    if (!pcmcia_dev_present(link))
-       return -ENODEV;
+       if (!pcmcia_dev_present(link))
+               return -ENODEV;
 
-    link->open++;
-    netif_start_queue(dev);
+       link->open++;
+       netif_start_queue(dev);
 
-    tc589_reset(dev);
-    init_timer(&lp->media);
-    lp->media.function = media_check;
-    lp->media.data = (unsigned long) dev;
-    lp->media.expires = jiffies + HZ;
-    add_timer(&lp->media);
+       tc589_reset(dev);
+       init_timer(&lp->media);
+       lp->media.function = media_check;
+       lp->media.data = (unsigned long) dev;
+       lp->media.expires = jiffies + HZ;
+       add_timer(&lp->media);
 
-    dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+       dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
          dev->name, inw(dev->base_addr + EL3_STATUS));
 
-    return 0;
+       return 0;
 }
 
 static void el3_tx_timeout(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-
-    netdev_warn(dev, "Transmit timed out!\n");
-    dump_status(dev);
-    dev->stats.tx_errors++;
-    dev->trans_start = jiffies; /* prevent tx timeout */
-    /* Issue TX_RESET and TX_START commands. */
-    tc589_wait_for_completion(dev, TxReset);
-    outw(TxEnable, ioaddr + EL3_CMD);
-    netif_wake_queue(dev);
+       unsigned int ioaddr = dev->base_addr;
+
+       netdev_warn(dev, "Transmit timed out!\n");
+       dump_status(dev);
+       dev->stats.tx_errors++;
+       dev->trans_start = jiffies; /* prevent tx timeout */
+       /* Issue TX_RESET and TX_START commands. */
+       tc589_wait_for_completion(dev, TxReset);
+       outw(TxEnable, ioaddr + EL3_CMD);
+       netif_wake_queue(dev);
 }
 
 static void pop_tx_status(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-    int i;
-
-    /* Clear the Tx status stack. */
-    for (i = 32; i > 0; i--) {
-       u_char tx_status = inb(ioaddr + TX_STATUS);
-       if (!(tx_status & 0x84)) break;
-       /* reset transmitter on jabber error or underrun */
-       if (tx_status & 0x30)
-               tc589_wait_for_completion(dev, TxReset);
-       if (tx_status & 0x38) {
-               netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
-               outw(TxEnable, ioaddr + EL3_CMD);
-               dev->stats.tx_aborted_errors++;
+       unsigned int ioaddr = dev->base_addr;
+       int i;
+
+       /* Clear the Tx status stack. */
+       for (i = 32; i > 0; i--) {
+               u_char tx_status = inb(ioaddr + TX_STATUS);
+               if (!(tx_status & 0x84))
+                       break;
+               /* reset transmitter on jabber error or underrun */
+               if (tx_status & 0x30)
+                       tc589_wait_for_completion(dev, TxReset);
+               if (tx_status & 0x38) {
+                       netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+                       outw(TxEnable, ioaddr + EL3_CMD);
+                       dev->stats.tx_aborted_errors++;
+               }
+               outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
        }
-       outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
-    }
 }
 
 static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
                                        struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-    struct el3_private *priv = netdev_priv(dev);
-    unsigned long flags;
+       unsigned int ioaddr = dev->base_addr;
+       struct el3_private *priv = netdev_priv(dev);
+       unsigned long flags;
 
-    netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+       netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
               (long)skb->len, inw(ioaddr + EL3_STATUS));
 
-    spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irqsave(&priv->lock, flags);
 
-    dev->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += skb->len;
 
-    /* Put out the doubleword header... */
-    outw(skb->len, ioaddr + TX_FIFO);
-    outw(0x00, ioaddr + TX_FIFO);
-    /* ... and the packet rounded to a doubleword. */
-    outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+       /* Put out the doubleword header... */
+       outw(skb->len, ioaddr + TX_FIFO);
+       outw(0x00, ioaddr + TX_FIFO);
+       /* ... and the packet rounded to a doubleword. */
+       outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
 
-    if (inw(ioaddr + TX_FREE) <= 1536) {
-       netif_stop_queue(dev);
-       /* Interrupt us when the FIFO has room for max-sized packet. */
-       outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
-    }
+       if (inw(ioaddr + TX_FREE) <= 1536) {
+               netif_stop_queue(dev);
+               /* Interrupt us when the FIFO has room for max-sized packet. */
+               outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+       }
 
-    pop_tx_status(dev);
-    spin_unlock_irqrestore(&priv->lock, flags);
-    dev_kfree_skb(skb);
+       pop_tx_status(dev);
+       spin_unlock_irqrestore(&priv->lock, flags);
+       dev_kfree_skb(skb);
 
-    return NETDEV_TX_OK;
+       return NETDEV_TX_OK;
 }
 
 /* The EL3 interrupt handler. */
 static irqreturn_t el3_interrupt(int irq, void *dev_id)
 {
-    struct net_device *dev = (struct net_device *) dev_id;
-    struct el3_private *lp = netdev_priv(dev);
-    unsigned int ioaddr;
-    __u16 status;
-    int i = 0, handled = 1;
+       struct net_device *dev = (struct net_device *) dev_id;
+       struct el3_private *lp = netdev_priv(dev);
+       unsigned int ioaddr;
+       __u16 status;
+       int i = 0, handled = 1;
 
-    if (!netif_device_present(dev))
-       return IRQ_NONE;
+       if (!netif_device_present(dev))
+               return IRQ_NONE;
 
-    ioaddr = dev->base_addr;
+       ioaddr = dev->base_addr;
 
-    netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
+       netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
 
-    spin_lock(&lp->lock);
-    while ((status = inw(ioaddr + EL3_STATUS)) &
+       spin_lock(&lp->lock);
+       while ((status = inw(ioaddr + EL3_STATUS)) &
        (IntLatch | RxComplete | StatsFull)) {
-       if ((status & 0xe000) != 0x2000) {
-               netdev_dbg(dev, "interrupt from dead card\n");
-               handled = 0;
-               break;
-       }
-       if (status & RxComplete)
-               el3_rx(dev);
-       if (status & TxAvailable) {
-               netdev_dbg(dev, "    TX room bit was handled.\n");
-               /* There's room in the FIFO for a full-sized packet. */
-               outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
-               netif_wake_queue(dev);
-       }
-       if (status & TxComplete)
-               pop_tx_status(dev);
-       if (status & (AdapterFailure | RxEarly | StatsFull)) {
-           /* Handle all uncommon interrupts. */
-           if (status & StatsFull)             /* Empty statistics. */
-               update_stats(dev);
-           if (status & RxEarly) {             /* Rx early is unused. */
-               el3_rx(dev);
-               outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
-           }
-           if (status & AdapterFailure) {
-               u16 fifo_diag;
-               EL3WINDOW(4);
-               fifo_diag = inw(ioaddr + 4);
-               EL3WINDOW(1);
-               netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+               if ((status & 0xe000) != 0x2000) {
+                       netdev_dbg(dev, "interrupt from dead card\n");
+                       handled = 0;
+                       break;
+               }
+               if (status & RxComplete)
+                       el3_rx(dev);
+               if (status & TxAvailable) {
+                       netdev_dbg(dev, "    TX room bit was handled.\n");
+                       /* There's room in the FIFO for a full-sized packet. */
+                       outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+                       netif_wake_queue(dev);
+               }
+               if (status & TxComplete)
+                       pop_tx_status(dev);
+               if (status & (AdapterFailure | RxEarly | StatsFull)) {
+                       /* Handle all uncommon interrupts. */
+                       if (status & StatsFull)         /* Empty statistics. */
+                               update_stats(dev);
+                       if (status & RxEarly) {
+                               /* Rx early is unused. */
+                               el3_rx(dev);
+                               outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+                       }
+                       if (status & AdapterFailure) {
+                               u16 fifo_diag;
+                               EL3WINDOW(4);
+                               fifo_diag = inw(ioaddr + 4);
+                               EL3WINDOW(1);
+                               netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
                            fifo_diag);
-               if (fifo_diag & 0x0400) {
-                   /* Tx overrun */
-                   tc589_wait_for_completion(dev, TxReset);
-                   outw(TxEnable, ioaddr + EL3_CMD);
+                               if (fifo_diag & 0x0400) {
+                                       /* Tx overrun */
+                                       tc589_wait_for_completion(dev, TxReset);
+                                       outw(TxEnable, ioaddr + EL3_CMD);
+                               }
+                               if (fifo_diag & 0x2000) {
+                                       /* Rx underrun */
+                                       tc589_wait_for_completion(dev, RxReset);
+                                       set_rx_mode(dev);
+                                       outw(RxEnable, ioaddr + EL3_CMD);
+                               }
+                               outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+                       }
                }
-               if (fifo_diag & 0x2000) {
-                   /* Rx underrun */
-                   tc589_wait_for_completion(dev, RxReset);
-                   set_rx_mode(dev);
-                   outw(RxEnable, ioaddr + EL3_CMD);
+               if (++i > 10) {
+                       netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+                                       status);
+                       /* Clear all interrupts */
+                       outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+                       break;
                }
-               outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
-           }
+               /* Acknowledge the IRQ. */
+               outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
        }
-       if (++i > 10) {
-               netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
-                          status);
-               /* Clear all interrupts */
-               outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
-               break;
-       }
-       /* Acknowledge the IRQ. */
-       outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
-    }
-    lp->last_irq = jiffies;
-    spin_unlock(&lp->lock);
-    netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
-              inw(ioaddr + EL3_STATUS));
-    return IRQ_RETVAL(handled);
+       lp->last_irq = jiffies;
+       spin_unlock(&lp->lock);
+       netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+                       inw(ioaddr + EL3_STATUS));
+       return IRQ_RETVAL(handled);
 }
 
 static void media_check(unsigned long arg)
 {
-    struct net_device *dev = (struct net_device *)(arg);
-    struct el3_private *lp = netdev_priv(dev);
-    unsigned int ioaddr = dev->base_addr;
-    u16 media, errs;
-    unsigned long flags;
+       struct net_device *dev = (struct net_device *)(arg);
+       struct el3_private *lp = netdev_priv(dev);
+       unsigned int ioaddr = dev->base_addr;
+       u16 media, errs;
+       unsigned long flags;
 
-    if (!netif_device_present(dev)) goto reschedule;
+       if (!netif_device_present(dev))
+               goto reschedule;
 
-    /* Check for pending interrupt with expired latency timer: with
-       this, we can limp along even if the interrupt is blocked */
-    if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+       /* Check for pending interrupt with expired latency timer: with
+        * this, we can limp along even if the interrupt is blocked
+        */
+       if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
        (inb(ioaddr + EL3_TIMER) == 0xff)) {
-       if (!lp->fast_poll)
-               netdev_warn(dev, "interrupt(s) dropped!\n");
-
-       local_irq_save(flags);
-       el3_interrupt(dev->irq, dev);
-       local_irq_restore(flags);
-
-       lp->fast_poll = HZ;
-    }
-    if (lp->fast_poll) {
-       lp->fast_poll--;
-       lp->media.expires = jiffies + HZ/100;
-       add_timer(&lp->media);
-       return;
-    }
-
-    /* lp->lock guards the EL3 window. Window should always be 1 except
-       when the lock is held */
-    spin_lock_irqsave(&lp->lock, flags);
-    EL3WINDOW(4);
-    media = inw(ioaddr+WN4_MEDIA) & 0xc810;
-
-    /* Ignore collisions unless we've had no irq's recently */
-    if (time_before(jiffies, lp->last_irq + HZ)) {
-       media &= ~0x0010;
-    } else {
-       /* Try harder to detect carrier errors */
-       EL3WINDOW(6);
-       outw(StatsDisable, ioaddr + EL3_CMD);
-       errs = inb(ioaddr + 0);
-       outw(StatsEnable, ioaddr + EL3_CMD);
-       dev->stats.tx_carrier_errors += errs;
-       if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
-    }
+               if (!lp->fast_poll)
+                       netdev_warn(dev, "interrupt(s) dropped!\n");
+
+               local_irq_save(flags);
+               el3_interrupt(dev->irq, dev);
+               local_irq_restore(flags);
+
+               lp->fast_poll = HZ;
+       }
+       if (lp->fast_poll) {
+               lp->fast_poll--;
+               lp->media.expires = jiffies + HZ/100;
+               add_timer(&lp->media);
+               return;
+       }
+
+       /* lp->lock guards the EL3 window. Window should always be 1 except
+        * when the lock is held
+        */
+
+       spin_lock_irqsave(&lp->lock, flags);
+       EL3WINDOW(4);
+       media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+       /* Ignore collisions unless we've had no irq's recently */
+       if (time_before(jiffies, lp->last_irq + HZ)) {
+               media &= ~0x0010;
+       } else {
+               /* Try harder to detect carrier errors */
+               EL3WINDOW(6);
+               outw(StatsDisable, ioaddr + EL3_CMD);
+               errs = inb(ioaddr + 0);
+               outw(StatsEnable, ioaddr + EL3_CMD);
+               dev->stats.tx_carrier_errors += errs;
+               if (errs || (lp->media_status & 0x0010))
+                       media |= 0x0010;
+       }
 
-    if (media != lp->media_status) {
-       if ((media & lp->media_status & 0x8000) &&
-           ((lp->media_status ^ media) & 0x0800))
+       if (media != lp->media_status) {
+               if ((media & lp->media_status & 0x8000) &&
+                               ((lp->media_status ^ media) & 0x0800))
                netdev_info(dev, "%s link beat\n",
-                           (lp->media_status & 0x0800 ? "lost" : "found"));
-       else if ((media & lp->media_status & 0x4000) &&
+                               (lp->media_status & 0x0800 ? "lost" : "found"));
+               else if ((media & lp->media_status & 0x4000) &&
                 ((lp->media_status ^ media) & 0x0010))
                netdev_info(dev, "coax cable %s\n",
-                           (lp->media_status & 0x0010 ? "ok" : "problem"));
-       if (dev->if_port == 0) {
-           if (media & 0x8000) {
-               if (media & 0x0800)
-                       netdev_info(dev, "flipped to 10baseT\n");
-               else
+                               (lp->media_status & 0x0010 ? "ok" : "problem"));
+               if (dev->if_port == 0) {
+                       if (media & 0x8000) {
+                               if (media & 0x0800)
+                                       netdev_info(dev, "flipped to 10baseT\n");
+                               else
                        tc589_set_xcvr(dev, 2);
-           } else if (media & 0x4000) {
-               if (media & 0x0010)
-                   tc589_set_xcvr(dev, 1);
-               else
-                   netdev_info(dev, "flipped to 10base2\n");
-           }
+                       } else if (media & 0x4000) {
+                               if (media & 0x0010)
+                                       tc589_set_xcvr(dev, 1);
+                               else
+                                       netdev_info(dev, "flipped to 10base2\n");
+                       }
+               }
+               lp->media_status = media;
        }
-       lp->media_status = media;
-    }
 
-    EL3WINDOW(1);
-    spin_unlock_irqrestore(&lp->lock, flags);
+       EL3WINDOW(1);
+       spin_unlock_irqrestore(&lp->lock, flags);
 
 reschedule:
-    lp->media.expires = jiffies + HZ;
-    add_timer(&lp->media);
+       lp->media.expires = jiffies + HZ;
+       add_timer(&lp->media);
 }
 
 static struct net_device_stats *el3_get_stats(struct net_device *dev)
 {
-    struct el3_private *lp = netdev_priv(dev);
-    unsigned long flags;
-    struct pcmcia_device *link = lp->p_dev;
+       struct el3_private *lp = netdev_priv(dev);
+       unsigned long flags;
+       struct pcmcia_device *link = lp->p_dev;
 
-    if (pcmcia_dev_present(link)) {
-       spin_lock_irqsave(&lp->lock, flags);
-       update_stats(dev);
-       spin_unlock_irqrestore(&lp->lock, flags);
-    }
-    return &dev->stats;
+       if (pcmcia_dev_present(link)) {
+               spin_lock_irqsave(&lp->lock, flags);
+               update_stats(dev);
+               spin_unlock_irqrestore(&lp->lock, flags);
+       }
+       return &dev->stats;
 }
 
-/*
-  Update statistics.  We change to register window 6, so this should be run
-  single-threaded if the device is active. This is expected to be a rare
-  operation, and it's simpler for the rest of the driver to assume that
-  window 1 is always valid rather than use a special window-state variable.
-
-  Caller must hold the lock for this
+/* Update statistics.  We change to register window 6, so this should be run
+ * single-threaded if the device is active. This is expected to be a rare
+ * operation, and it's simpler for the rest of the driver to assume that
+ * window 1 is always valid rather than use a special window-state variable.
+ *
+ * Caller must hold the lock for this
 */
+
 static void update_stats(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-
-    netdev_dbg(dev, "updating the statistics.\n");
-    /* Turn off statistics updates while reading. */
-    outw(StatsDisable, ioaddr + EL3_CMD);
-    /* Switch to the stats window, and read everything. */
-    EL3WINDOW(6);
-    dev->stats.tx_carrier_errors       += inb(ioaddr + 0);
-    dev->stats.tx_heartbeat_errors     += inb(ioaddr + 1);
-    /* Multiple collisions. */         inb(ioaddr + 2);
-    dev->stats.collisions              += inb(ioaddr + 3);
-    dev->stats.tx_window_errors                += inb(ioaddr + 4);
-    dev->stats.rx_fifo_errors          += inb(ioaddr + 5);
-    dev->stats.tx_packets              += inb(ioaddr + 6);
-    /* Rx packets   */                 inb(ioaddr + 7);
-    /* Tx deferrals */                 inb(ioaddr + 8);
-    /* Rx octets */                    inw(ioaddr + 10);
-    /* Tx octets */                    inw(ioaddr + 12);
-
-    /* Back to window 1, and turn statistics back on. */
-    EL3WINDOW(1);
-    outw(StatsEnable, ioaddr + EL3_CMD);
+       unsigned int ioaddr = dev->base_addr;
+
+       netdev_dbg(dev, "updating the statistics.\n");
+       /* Turn off statistics updates while reading. */
+       outw(StatsDisable, ioaddr + EL3_CMD);
+       /* Switch to the stats window, and read everything. */
+       EL3WINDOW(6);
+       dev->stats.tx_carrier_errors    += inb(ioaddr + 0);
+       dev->stats.tx_heartbeat_errors  += inb(ioaddr + 1);
+       /* Multiple collisions. */
+       inb(ioaddr + 2);
+       dev->stats.collisions           += inb(ioaddr + 3);
+       dev->stats.tx_window_errors             += inb(ioaddr + 4);
+       dev->stats.rx_fifo_errors               += inb(ioaddr + 5);
+       dev->stats.tx_packets           += inb(ioaddr + 6);
+       /* Rx packets   */
+       inb(ioaddr + 7);
+       /* Tx deferrals */
+       inb(ioaddr + 8);
+       /* Rx octets */
+       inw(ioaddr + 10);
+       /* Tx octets */
+       inw(ioaddr + 12);
+
+       /* Back to window 1, and turn statistics back on. */
+       EL3WINDOW(1);
+       outw(StatsEnable, ioaddr + EL3_CMD);
 }
 
 static int el3_rx(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-    int worklimit = 32;
-    short rx_status;
+       unsigned int ioaddr = dev->base_addr;
+       int worklimit = 32;
+       short rx_status;
 
-    netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+       netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
               inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
-    while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+       while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
                    worklimit > 0) {
-       worklimit--;
-       if (rx_status & 0x4000) { /* Error, update stats. */
-           short error = rx_status & 0x3800;
-           dev->stats.rx_errors++;
-           switch (error) {
-           case 0x0000:        dev->stats.rx_over_errors++; break;
-           case 0x0800:        dev->stats.rx_length_errors++; break;
-           case 0x1000:        dev->stats.rx_frame_errors++; break;
-           case 0x1800:        dev->stats.rx_length_errors++; break;
-           case 0x2000:        dev->stats.rx_frame_errors++; break;
-           case 0x2800:        dev->stats.rx_crc_errors++; break;
-           }
-       } else {
-           short pkt_len = rx_status & 0x7ff;
-           struct sk_buff *skb;
-
-           skb = netdev_alloc_skb(dev, pkt_len + 5);
-
-           netdev_dbg(dev, "    Receiving packet size %d status %4.4x.\n",
+               worklimit--;
+               if (rx_status & 0x4000) { /* Error, update stats. */
+                       short error = rx_status & 0x3800;
+                       dev->stats.rx_errors++;
+                       switch (error) {
+                       case 0x0000:
+                               dev->stats.rx_over_errors++;
+                               break;
+                       case 0x0800:
+                               dev->stats.rx_length_errors++;
+                               break;
+                       case 0x1000:
+                               dev->stats.rx_frame_errors++;
+                               break;
+                       case 0x1800:
+                               dev->stats.rx_length_errors++;
+                               break;
+                       case 0x2000:
+                               dev->stats.rx_frame_errors++;
+                               break;
+                       case 0x2800:
+                               dev->stats.rx_crc_errors++;
+                               break;
+                       }
+               } else {
+                       short pkt_len = rx_status & 0x7ff;
+                       struct sk_buff *skb;
+
+                       skb = netdev_alloc_skb(dev, pkt_len + 5);
+
+                       netdev_dbg(dev, "    Receiving packet size %d status %4.4x.\n",
                       pkt_len, rx_status);
-           if (skb != NULL) {
-               skb_reserve(skb, 2);
-               insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+                       if (skb != NULL) {
+                               skb_reserve(skb, 2);
+                               insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
                        (pkt_len+3)>>2);
-               skb->protocol = eth_type_trans(skb, dev);
-               netif_rx(skb);
-               dev->stats.rx_packets++;
-               dev->stats.rx_bytes += pkt_len;
-           } else {
-               netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+                               skb->protocol = eth_type_trans(skb, dev);
+                               netif_rx(skb);
+                               dev->stats.rx_packets++;
+                               dev->stats.rx_bytes += pkt_len;
+                       } else {
+                               netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
                           pkt_len);
-               dev->stats.rx_dropped++;
-           }
+                               dev->stats.rx_dropped++;
+                       }
+               }
+               /* Pop the top of the Rx FIFO */
+               tc589_wait_for_completion(dev, RxDiscard);
        }
-       /* Pop the top of the Rx FIFO */
-       tc589_wait_for_completion(dev, RxDiscard);
-    }
-    if (worklimit == 0)
-       netdev_warn(dev, "too much work in el3_rx!\n");
-    return 0;
+       if (worklimit == 0)
+               netdev_warn(dev, "too much work in el3_rx!\n");
+       return 0;
 }
 
 static void set_rx_mode(struct net_device *dev)
 {
-    unsigned int ioaddr = dev->base_addr;
-    u16 opts = SetRxFilter | RxStation | RxBroadcast;
-
-    if (dev->flags & IFF_PROMISC)
-       opts |= RxMulticast | RxProm;
-    else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
-       opts |= RxMulticast;
-    outw(opts, ioaddr + EL3_CMD);
+       unsigned int ioaddr = dev->base_addr;
+       u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+       if (dev->flags & IFF_PROMISC)
+               opts |= RxMulticast | RxProm;
+       else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+               opts |= RxMulticast;
+       outw(opts, ioaddr + EL3_CMD);
 }
 
 static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
 
 static int el3_close(struct net_device *dev)
 {
-    struct el3_private *lp = netdev_priv(dev);
-    struct pcmcia_device *link = lp->p_dev;
-    unsigned int ioaddr = dev->base_addr;
-
-    dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+       struct el3_private *lp = netdev_priv(dev);
+       struct pcmcia_device *link = lp->p_dev;
+       unsigned int ioaddr = dev->base_addr;
+
+       dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+       if (pcmcia_dev_present(link)) {
+               /* Turn off statistics ASAP.  We update dev->stats below. */
+               outw(StatsDisable, ioaddr + EL3_CMD);
+
+               /* Disable the receiver and transmitter. */
+               outw(RxDisable, ioaddr + EL3_CMD);
+               outw(TxDisable, ioaddr + EL3_CMD);
+
+               if (dev->if_port == 2)
+                       /* Turn off thinnet power.  Green! */
+                       outw(StopCoax, ioaddr + EL3_CMD);
+               else if (dev->if_port == 1) {
+                       /* Disable link beat and jabber */
+                       EL3WINDOW(4);
+                       outw(0, ioaddr + WN4_MEDIA);
+               }
 
-    if (pcmcia_dev_present(link)) {
-       /* Turn off statistics ASAP.  We update dev->stats below. */
-       outw(StatsDisable, ioaddr + EL3_CMD);
+               /* Switching back to window 0 disables the IRQ. */
+               EL3WINDOW(0);
+               /* But we explicitly zero the IRQ line select anyway. */
+               outw(0x0f00, ioaddr + WN0_IRQ);
 
-       /* Disable the receiver and transmitter. */
-       outw(RxDisable, ioaddr + EL3_CMD);
-       outw(TxDisable, ioaddr + EL3_CMD);
-
-       if (dev->if_port == 2)
-           /* Turn off thinnet power.  Green! */
-           outw(StopCoax, ioaddr + EL3_CMD);
-       else if (dev->if_port == 1) {
-           /* Disable link beat and jabber */
-           EL3WINDOW(4);
-           outw(0, ioaddr + WN4_MEDIA);
+               /* Check if the card still exists */
+               if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+                       update_stats(dev);
        }
 
-       /* Switching back to window 0 disables the IRQ. */
-       EL3WINDOW(0);
-       /* But we explicitly zero the IRQ line select anyway. */
-       outw(0x0f00, ioaddr + WN0_IRQ);
-
-       /* Check if the card still exists */
-       if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
-           update_stats(dev);
-    }
-
-    link->open--;
-    netif_stop_queue(dev);
-    del_timer_sync(&lp->media);
+       link->open--;
+       netif_stop_queue(dev);
+       del_timer_sync(&lp->media);
 
-    return 0;
+       return 0;
 }
 
 static const struct pcmcia_device_id tc589_ids[] = {
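For reference, the register-window scheme the 3c589 hunks above keep switching works as the earlier comment describes: the top five bits written to EL3_CMD encode a command and the low 11 bits its parameter, so EL3WINDOW(n) is simply SelectWindow (1<<11) plus the window number. A minimal sketch of the pattern the driver depends on, with window 1 restored before the private lock is released; the function name is hypothetical and ioaddr stands in for dev->base_addr:

	static void example_read_stats(struct net_device *dev)
	{
		struct el3_private *lp = netdev_priv(dev);
		unsigned int ioaddr = dev->base_addr;	/* used by the EL3WINDOW() macro */
		unsigned long flags;

		spin_lock_irqsave(&lp->lock, flags);
		EL3WINDOW(6);	/* expands to outw(SelectWindow + 6, ioaddr + EL3_CMD) */
		/* ... read the window-6 statistics registers here ... */
		EL3WINDOW(1);	/* back to the window the rest of the driver assumes */
		spin_unlock_irqrestore(&lp->lock, flags);
	}
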
index 238ccea965c8a1528c10d7477753fcc1ad068ca0..61477b8e8d24caa9941bc3e706518145f216722a 100644 (file)
@@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
                /* ... and the packet rounded to a doubleword. */
                skb_tx_timestamp(skb);
                iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
-               dev_kfree_skb (skb);
+               dev_consume_skb_any (skb);
                if (ioread16(ioaddr + TxFree) > 1536) {
                        netif_start_queue (dev);        /* AKPM: redundant? */
                } else {
index d2cd80444ade7b5e5a6bdfe1f5e5207f8c02d798..599311f0e05c18eccad98511b3d7d73737dbd1d5 100644 (file)
@@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
        spin_unlock(&ei_local->page_lock);
        enable_irq_lockdep_irqrestore(dev->irq, &flags);
        skb_tx_timestamp(skb);
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
        dev->stats.tx_bytes += send_length;
 
        return NETDEV_TX_OK;
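The dev_kfree_skb() to dev_consume_skb_any() conversions in these transmit paths (and in several drivers below) free successfully transmitted buffers through the "consumed" path instead of the "dropped" one, so drop-tracing tools no longer flag normal completions. A hedged sketch of the intended split; hw_queue_frame() is a hypothetical stand-in for the driver-specific transmit step:

	static bool hw_queue_frame(struct sk_buff *skb);	/* hypothetical helper */

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (hw_queue_frame(skb)) {
			dev->stats.tx_bytes += skb->len;
			dev_consume_skb_any(skb);	/* normal completion, not a drop */
		} else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);		/* genuine drop stays visible to tracing */
		}
		return NETDEV_TX_OK;
	}
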
index 506b0248c4001b48382f3ef6be2ccbc47724c8fa..39b26fe28d1051ff916faceb747da7a64dac711f 100644 (file)
@@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kconfig"
 source "drivers/net/ethernet/aeroflex/Kconfig"
 source "drivers/net/ethernet/allwinner/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
+source "drivers/net/ethernet/altera/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
@@ -149,6 +150,7 @@ config S6GMAC
          To compile this driver as a module, choose M here. The module
          will be called s6gmac.
 
+source "drivers/net/ethernet/samsung/Kconfig"
 source "drivers/net/ethernet/seeq/Kconfig"
 source "drivers/net/ethernet/silan/Kconfig"
 source "drivers/net/ethernet/sis/Kconfig"
index c0b8789952e711fb77e44fc214d06ee8cac5e8f3..545d0b3b9cb422b2fefa7122b074cd869a9085c2 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
 obj-$(CONFIG_GRETH) += aeroflex/
 obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
+obj-$(CONFIG_ALTERA_TSE) += altera/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
@@ -60,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
 obj-$(CONFIG_SH_ETH) += renesas/
 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
 obj-$(CONFIG_S6GMAC) += s6gmac.o
+obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
 obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
 obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
 obj-$(CONFIG_NET_VENDOR_SIS) += sis/
index c0f68dcd1dc125c422d4d1d084b331a01beee2b3..7ae74d450e8fb8786dae5f0e05e981685c92ef28 100644 (file)
@@ -307,11 +307,6 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
        return bfin_mdio_poll();
 }
 
-static int bfin_mdiobus_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static void bfin_mac_adjust_link(struct net_device *dev)
 {
        struct bfin_mac_local *lp = netdev_priv(dev);
@@ -1040,6 +1035,7 @@ static struct ptp_clock_info bfin_ptp_caps = {
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = bfin_ptp_adjfreq,
        .adjtime        = bfin_ptp_adjtime,
@@ -1086,7 +1082,7 @@ static inline void _tx_reclaim_skb(void)
                tx_list_head->desc_a.config &= ~DMAEN;
                tx_list_head->status.status_word = 0;
                if (tx_list_head->skb) {
-                       dev_kfree_skb(tx_list_head->skb);
+                       dev_consume_skb_any(tx_list_head->skb);
                        tx_list_head->skb = NULL;
                }
                tx_list_head = tx_list_head->next;
@@ -1823,7 +1819,6 @@ static int bfin_mii_bus_probe(struct platform_device *pdev)
                goto out_err_alloc;
        miibus->read = bfin_mdiobus_read;
        miibus->write = bfin_mdiobus_write;
-       miibus->reset = bfin_mdiobus_reset;
 
        miibus->parent = &pdev->dev;
        miibus->name = "bfin_mii_bus";
index c5d75e7aeeb6e7140af25eb7b3b16797dae4f62e..23578dfee249879064c1b86526ee80aead68b79f 100644 (file)
@@ -1213,11 +1213,6 @@ static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
        return 0;
 }
 
-static int greth_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static void greth_link_change(struct net_device *dev)
 {
        struct greth_private *greth = netdev_priv(dev);
@@ -1332,7 +1327,6 @@ static int greth_mdio_init(struct greth_private *greth)
        snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
        greth->mdio->read = greth_mdio_read;
        greth->mdio->write = greth_mdio_write;
-       greth->mdio->reset = greth_mdio_reset;
        greth->mdio->priv = greth;
 
        greth->mdio->irq = greth->mdio_irqs;
index 511f6eecd58bc8153ef9cbf6a0ae021e91d8d889..fcaeeb8a4929dbe2364752fe71f283dd46640ce3 100644 (file)
@@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
        spin_unlock_irqrestore(&db->lock, flags);
 
        /* free this SKB */
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
new file mode 100644 (file)
index 0000000..80c1ab7
--- /dev/null
@@ -0,0 +1,8 @@
+config ALTERA_TSE
+       tristate "Altera Triple-Speed Ethernet MAC support"
+       select PHYLIB
+       ---help---
+         This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
+
+         To compile this driver as a module, choose M here. The module
+         will be called alteratse.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
new file mode 100644 (file)
index 0000000..d4a187e
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the Altera device drivers.
+#
+
+obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
+altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
+altera_msgdma.o altera_sgdma.o altera_utils.o
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
new file mode 100644 (file)
index 0000000..3df1866
--- /dev/null
@@ -0,0 +1,202 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/netdevice.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_msgdmahw.h"
+
+/* No initialization work to do for MSGDMA */
+int msgdma_initialize(struct altera_tse_private *priv)
+{
+       return 0;
+}
+
+void msgdma_uninitialize(struct altera_tse_private *priv)
+{
+}
+
+void msgdma_reset(struct altera_tse_private *priv)
+{
+       int counter;
+       struct msgdma_csr *txcsr =
+               (struct msgdma_csr *)priv->tx_dma_csr;
+       struct msgdma_csr *rxcsr =
+               (struct msgdma_csr *)priv->rx_dma_csr;
+
+       /* Reset Rx mSGDMA */
+       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+       iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
+
+       counter = 0;
+       while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+               if (tse_bit_is_clear(&rxcsr->status,
+                                    MSGDMA_CSR_STAT_RESETTING))
+                       break;
+               udelay(1);
+       }
+
+       if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
+               netif_warn(priv, drv, priv->dev,
+                          "TSE Rx mSGDMA resetting bit never cleared!\n");
+
+       /* clear all status bits */
+       iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
+
+       /* Reset Tx mSGDMA */
+       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+       iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
+
+       counter = 0;
+       while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+               if (tse_bit_is_clear(&txcsr->status,
+                                    MSGDMA_CSR_STAT_RESETTING))
+                       break;
+               udelay(1);
+       }
+
+       if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
+               netif_warn(priv, drv, priv->dev,
+                          "TSE Tx mSGDMA resetting bit never cleared!\n");
+
+       /* clear all status bits */
+       iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
+}
+
+void msgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+       struct msgdma_csr *csr = priv->rx_dma_csr;
+       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+       struct msgdma_csr *csr = priv->rx_dma_csr;
+       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_disable_txirq(struct altera_tse_private *priv)
+{
+       struct msgdma_csr *csr = priv->tx_dma_csr;
+       tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_enable_txirq(struct altera_tse_private *priv)
+{
+       struct msgdma_csr *csr = priv->tx_dma_csr;
+       tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
+}
+
+void msgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+       struct msgdma_csr *csr = priv->rx_dma_csr;
+       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+}
+
+void msgdma_clear_txirq(struct altera_tse_private *priv)
+{
+       struct msgdma_csr *csr = priv->tx_dma_csr;
+       iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
+}
+
+/* return 0 to indicate transmit is pending */
+int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+       struct msgdma_extended_desc *desc = priv->tx_dma_desc;
+
+       iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
+       iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
+       iowrite32(0, &desc->write_addr_lo);
+       iowrite32(0, &desc->write_addr_hi);
+       iowrite32(buffer->len, &desc->len);
+       iowrite32(0, &desc->burst_seq_num);
+       iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
+       iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
+       return 0;
+}
+
+u32 msgdma_tx_completions(struct altera_tse_private *priv)
+{
+       u32 ready = 0;
+       u32 inuse;
+       u32 status;
+       struct msgdma_csr *txcsr =
+               (struct msgdma_csr *)priv->tx_dma_csr;
+
+       /* Get number of sent descriptors */
+       inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
+
+       if (inuse) { /* Tx FIFO is not empty */
+               ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+       } else {
+               /* Check for buffered last packet */
+               status = ioread32(&txcsr->status);
+               if (status & MSGDMA_CSR_STAT_BUSY)
+                       ready = priv->tx_prod - priv->tx_cons - 1;
+               else
+                       ready = priv->tx_prod - priv->tx_cons;
+       }
+       return ready;
+}
+
+/* Put buffer to the mSGDMA RX FIFO
+ */
+int msgdma_add_rx_desc(struct altera_tse_private *priv,
+                       struct tse_buffer *rxbuffer)
+{
+       struct msgdma_extended_desc *desc = priv->rx_dma_desc;
+       u32 len = priv->rx_dma_buf_sz;
+       dma_addr_t dma_addr = rxbuffer->dma_addr;
+       u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
+                       | MSGDMA_DESC_CTL_END_ON_LEN
+                       | MSGDMA_DESC_CTL_TR_COMP_IRQ
+                       | MSGDMA_DESC_CTL_EARLY_IRQ
+                       | MSGDMA_DESC_CTL_TR_ERR_IRQ
+                       | MSGDMA_DESC_CTL_GO);
+
+       iowrite32(0, &desc->read_addr_lo);
+       iowrite32(0, &desc->read_addr_hi);
+       iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
+       iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
+       iowrite32(len, &desc->len);
+       iowrite32(0, &desc->burst_seq_num);
+       iowrite32(0x00010001, &desc->stride);
+       iowrite32(control, &desc->control);
+       return 1;
+}
+
+/* status is returned on upper 16 bits,
+ * length is returned in lower 16 bits
+ */
+u32 msgdma_rx_status(struct altera_tse_private *priv)
+{
+       u32 rxstatus = 0;
+       u32 pktlength;
+       u32 pktstatus;
+       struct msgdma_csr *rxcsr =
+               (struct msgdma_csr *)priv->rx_dma_csr;
+       struct msgdma_response *rxresp =
+               (struct msgdma_response *)priv->rx_dma_resp;
+
+       if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
+               pktlength = ioread32(&rxresp->bytes_transferred);
+               pktstatus = ioread32(&rxresp->status);
+               rxstatus = pktstatus;
+               rxstatus = rxstatus << 16;
+               rxstatus |= (pktlength & 0xffff);
+       }
+       return rxstatus;
+}
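Both DMA variants return RX completion information in this packed form; the caller in the main driver file (not part of this hunk) presumably splits it back apart along these lines. A minimal sketch with illustrative names:

#include <stdint.h>

/* Split the word produced by msgdma_rx_status()/sgdma_rx_status():
 * bits 31:16 carry the DMA status, bits 15:0 the packet length in bytes.
 */
static inline void tse_unpack_rx_status(uint32_t rxstatus,
                                        uint16_t *status, uint16_t *length)
{
        *status = (uint16_t)(rxstatus >> 16);
        *length = (uint16_t)(rxstatus & 0xffff);
}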
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
new file mode 100644 (file)
index 0000000..7f0f5bf
--- /dev/null
@@ -0,0 +1,34 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_MSGDMA_H__
+#define __ALTERA_MSGDMA_H__
+
+void msgdma_reset(struct altera_tse_private *);
+void msgdma_enable_txirq(struct altera_tse_private *);
+void msgdma_enable_rxirq(struct altera_tse_private *);
+void msgdma_disable_rxirq(struct altera_tse_private *);
+void msgdma_disable_txirq(struct altera_tse_private *);
+void msgdma_clear_rxirq(struct altera_tse_private *);
+void msgdma_clear_txirq(struct altera_tse_private *);
+u32 msgdma_tx_completions(struct altera_tse_private *);
+int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
+u32 msgdma_rx_status(struct altera_tse_private *);
+int msgdma_initialize(struct altera_tse_private *);
+void msgdma_uninitialize(struct altera_tse_private *);
+
+#endif /*  __ALTERA_MSGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
new file mode 100644 (file)
index 0000000..d7b59ba
--- /dev/null
@@ -0,0 +1,167 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_MSGDMAHW_H__
+#define __ALTERA_MSGDMAHW_H__
+
+/* mSGDMA standard descriptor format
+ */
+struct msgdma_desc {
+       u32 read_addr;  /* data buffer source address */
+       u32 write_addr; /* data buffer destination address */
+       u32 len;        /* the number of bytes to transfer per descriptor */
+       u32 control;    /* characteristics of the transfer */
+};
+
+/* mSGDMA extended descriptor format
+ */
+struct msgdma_extended_desc {
+       u32 read_addr_lo;       /* data buffer source address low bits */
+       u32 write_addr_lo;      /* data buffer destination address low bits */
+       u32 len;                /* the number of bytes to transfer
+                                * per descriptor
+                                */
+       u32 burst_seq_num;      /* bit 31:24 write burst
+                                * bit 23:16 read burst
+                                * bit 15:0  sequence number
+                                */
+       u32 stride;             /* bit 31:16 write stride
+                                * bit 15:0  read stride
+                                */
+       u32 read_addr_hi;       /* data buffer source address high bits */
+       u32 write_addr_hi;      /* data buffer destination address high bits */
+       u32 control;            /* characteristics of the transfer */
+};
+
+/* mSGDMA descriptor control field bit definitions
+ */
+#define MSGDMA_DESC_CTL_SET_CH(x)      ((x) & 0xff)
+#define MSGDMA_DESC_CTL_GEN_SOP                BIT(8)
+#define MSGDMA_DESC_CTL_GEN_EOP                BIT(9)
+#define MSGDMA_DESC_CTL_PARK_READS     BIT(10)
+#define MSGDMA_DESC_CTL_PARK_WRITES    BIT(11)
+#define MSGDMA_DESC_CTL_END_ON_EOP     BIT(12)
+#define MSGDMA_DESC_CTL_END_ON_LEN     BIT(13)
+#define MSGDMA_DESC_CTL_TR_COMP_IRQ    BIT(14)
+#define MSGDMA_DESC_CTL_EARLY_IRQ      BIT(15)
+#define MSGDMA_DESC_CTL_TR_ERR_IRQ     (0xff << 16)
+#define MSGDMA_DESC_CTL_EARLY_DONE     BIT(24)
+/* Writing '1' to the 'go' bit commits the entire descriptor into the
+ * descriptor FIFO(s)
+ */
+#define MSGDMA_DESC_CTL_GO             BIT(31)
+
+/* Tx buffer control flags
+ */
+#define MSGDMA_DESC_CTL_TX_FIRST       (MSGDMA_DESC_CTL_GEN_SOP |      \
+                                        MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
+                                        MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_MIDDLE      (MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
+                                        MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_LAST                (MSGDMA_DESC_CTL_GEN_EOP |      \
+                                        MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
+                                        MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
+                                        MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_TX_SINGLE      (MSGDMA_DESC_CTL_GEN_SOP |      \
+                                        MSGDMA_DESC_CTL_GEN_EOP |      \
+                                        MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
+                                        MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
+                                        MSGDMA_DESC_CTL_GO)
+
+#define MSGDMA_DESC_CTL_RX_SINGLE      (MSGDMA_DESC_CTL_END_ON_EOP |   \
+                                        MSGDMA_DESC_CTL_END_ON_LEN |   \
+                                        MSGDMA_DESC_CTL_TR_COMP_IRQ |  \
+                                        MSGDMA_DESC_CTL_EARLY_IRQ |    \
+                                        MSGDMA_DESC_CTL_TR_ERR_IRQ |   \
+                                        MSGDMA_DESC_CTL_GO)
+
+/* mSGDMA extended descriptor stride definitions
+ */
+#define MSGDMA_DESC_TX_STRIDE          (0x00010001)
+#define MSGDMA_DESC_RX_STRIDE          (0x00010001)
+
+/* mSGDMA dispatcher control and status register map
+ */
+struct msgdma_csr {
+       u32 status;             /* Read/Clear */
+       u32 control;            /* Read/Write */
+       u32 rw_fill_level;      /* bit 31:16 - write fill level
+                                * bit 15:0  - read fill level
+                                */
+       u32 resp_fill_level;    /* bit 15:0 */
+       u32 rw_seq_num;         /* bit 31:16 - write sequence number
+                                * bit 15:0  - read sequence number
+                                */
+       u32 pad[3];             /* reserved */
+};
+
+/* mSGDMA CSR status register bit definitions
+ */
+#define MSGDMA_CSR_STAT_BUSY                   BIT(0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY         BIT(1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL          BIT(2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY         BIT(3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL          BIT(4)
+#define MSGDMA_CSR_STAT_STOPPED                        BIT(5)
+#define MSGDMA_CSR_STAT_RESETTING              BIT(6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR         BIT(7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY       BIT(8)
+#define MSGDMA_CSR_STAT_IRQ                    BIT(9)
+#define MSGDMA_CSR_STAT_MASK                   0x3FF
+#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ       0x1FF
+
+#define MSGDMA_CSR_STAT_BUSY_GET(v)                    GET_BIT_VALUE(v, 0)
+#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v)          GET_BIT_VALUE(v, 1)
+#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v)           GET_BIT_VALUE(v, 2)
+#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v)          GET_BIT_VALUE(v, 3)
+#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v)           GET_BIT_VALUE(v, 4)
+#define MSGDMA_CSR_STAT_STOPPED_GET(v)                 GET_BIT_VALUE(v, 5)
+#define MSGDMA_CSR_STAT_RESETTING_GET(v)               GET_BIT_VALUE(v, 6)
+#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v)          GET_BIT_VALUE(v, 7)
+#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v)                GET_BIT_VALUE(v, 8)
+#define MSGDMA_CSR_STAT_IRQ_GET(v)                     GET_BIT_VALUE(v, 9)
+
+/* mSGDMA CSR control register bit definitions
+ */
+#define MSGDMA_CSR_CTL_STOP                    BIT(0)
+#define MSGDMA_CSR_CTL_RESET                   BIT(1)
+#define MSGDMA_CSR_CTL_STOP_ON_ERR             BIT(2)
+#define MSGDMA_CSR_CTL_STOP_ON_EARLY           BIT(3)
+#define MSGDMA_CSR_CTL_GLOBAL_INTR             BIT(4)
+#define MSGDMA_CSR_CTL_STOP_DESCS              BIT(5)
+
+/* mSGDMA CSR fill level bits
+ */
+#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)                (((v) & 0xffff0000) >> 16)
+#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)                ((v) & 0x0000ffff)
+#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)      ((v) & 0x0000ffff)
+
+/* mSGDMA response register map
+ */
+struct msgdma_response {
+       u32 bytes_transferred;
+       u32 status;
+};
+
+/* mSGDMA response register bit definitions
+ */
+#define MSGDMA_RESP_EARLY_TERM BIT(8)
+#define MSGDMA_RESP_ERR_MASK   0xFF
+
+#endif /* __ALTERA_MSGDMAHW_H__ */
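The TX control-word variants defined above (FIRST/MIDDLE/LAST/SINGLE) cover multi-descriptor frames, although the code in this patch only ever uses MSGDMA_DESC_CTL_TX_SINGLE. A sketch of how a scatter-gather transmit might choose the control word per fragment (illustrative only, not part of the driver):

/* Pick the mSGDMA control word for fragment 'idx' of an 'nfrags'-fragment
 * frame. Purely illustrative; the driver in this patch maps one frame to
 * one descriptor and always uses MSGDMA_DESC_CTL_TX_SINGLE.
 */
static u32 tx_desc_control(int idx, int nfrags)
{
        if (nfrags == 1)
                return MSGDMA_DESC_CTL_TX_SINGLE;
        if (idx == 0)
                return MSGDMA_DESC_CTL_TX_FIRST;
        if (idx == nfrags - 1)
                return MSGDMA_DESC_CTL_TX_LAST;
        return MSGDMA_DESC_CTL_TX_MIDDLE;
}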
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644 (file)
index 0000000..0ee9663
--- /dev/null
@@ -0,0 +1,509 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/list.h>
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdmahw.h"
+#include "altera_sgdma.h"
+
+static void sgdma_descrip(struct sgdma_descrip *desc,
+                         struct sgdma_descrip *ndesc,
+                         dma_addr_t ndesc_phys,
+                         dma_addr_t raddr,
+                         dma_addr_t waddr,
+                         u16 length,
+                         int generate_eop,
+                         int rfixed,
+                         int wfixed);
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+                             struct sgdma_descrip *desc);
+
+static int sgdma_async_read(struct altera_tse_private *priv);
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+                struct sgdma_descrip *desc);
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+                struct sgdma_descrip *desc);
+
+static int sgdma_txbusy(struct altera_tse_private *priv);
+
+static int sgdma_rxbusy(struct altera_tse_private *priv);
+
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);
+
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv);
+
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv);
+
+int sgdma_initialize(struct altera_tse_private *priv)
+{
+       priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+
+       priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+                     SGDMA_CTRLREG_ILASTD;
+
+       INIT_LIST_HEAD(&priv->txlisthd);
+       INIT_LIST_HEAD(&priv->rxlisthd);
+
+       priv->rxdescphys = (dma_addr_t) 0;
+       priv->txdescphys = (dma_addr_t) 0;
+
+       priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+                                         priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(priv->device, priv->rxdescphys)) {
+               sgdma_uninitialize(priv);
+               netdev_err(priv->dev, "error mapping rx descriptor memory\n");
+               return -EINVAL;
+       }
+
+       priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+                                         priv->txdescmem, DMA_TO_DEVICE);
+
+       if (dma_mapping_error(priv->device, priv->txdescphys)) {
+               sgdma_uninitialize(priv);
+               netdev_err(priv->dev, "error mapping tx descriptor memory\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void sgdma_uninitialize(struct altera_tse_private *priv)
+{
+       if (priv->rxdescphys)
+               dma_unmap_single(priv->device, priv->rxdescphys,
+                                priv->rxdescmem, DMA_BIDIRECTIONAL);
+
+       if (priv->txdescphys)
+               dma_unmap_single(priv->device, priv->txdescphys,
+                                priv->txdescmem, DMA_TO_DEVICE);
+}
+
+/* This function resets the SGDMA controller and clears the
+ * descriptor memory used for transmits and receives.
+ */
+void sgdma_reset(struct altera_tse_private *priv)
+{
+       u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
+       u32 txdescriplen   = priv->txdescmem;
+       u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
+       u32 rxdescriplen   = priv->rxdescmem;
+       struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
+       struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
+
+       /* Initialize descriptor memory to 0 */
+       memset(ptxdescripmem, 0, txdescriplen);
+       memset(prxdescripmem, 0, rxdescriplen);
+
+       iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
+       iowrite32(0, &ptxsgdma->control);
+
+       iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
+       iowrite32(0, &prxsgdma->control);
+}
+
+void sgdma_enable_rxirq(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
+       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+void sgdma_enable_txirq(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+       priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
+       tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
+}
+
+/* SGDMA RX interrupts stay enabled once enabled, so this is a no-op */
+void sgdma_disable_rxirq(struct altera_tse_private *priv)
+{
+}
+
+/* SGDMA TX interrupts stay enabled once enabled, so this is a no-op */
+void sgdma_disable_txirq(struct altera_tse_private *priv)
+{
+}
+
+void sgdma_clear_rxirq(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+void sgdma_clear_txirq(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+       tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+}
+
+/* Transmits a buffer through the SGDMA. Returns the number of buffers
+ * transmitted, or 0 if the transmit is not possible.
+ *
+ * tx_lock is held by the caller
+ */
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+       int pktstx = 0;
+       struct sgdma_descrip *descbase =
+               (struct sgdma_descrip *)priv->tx_dma_desc;
+
+       struct sgdma_descrip *cdesc = &descbase[0];
+       struct sgdma_descrip *ndesc = &descbase[1];
+
+       /* wait until the TX SGDMA is ready for the next transmit request */
+       if (sgdma_txbusy(priv))
+               return 0;
+
+       sgdma_descrip(cdesc,                    /* current descriptor */
+                     ndesc,                    /* next descriptor */
+                     sgdma_txphysaddr(priv, ndesc),
+                     buffer->dma_addr,         /* address of packet to xmit */
+                     0,                        /* write addr 0 for tx dma */
+                     buffer->len,              /* length of packet */
+                     SGDMA_CONTROL_EOP,        /* Generate EOP */
+                     0,                        /* read fixed */
+                     SGDMA_CONTROL_WR_FIXED);  /* write fixed */
+
+       pktstx = sgdma_async_write(priv, cdesc);
+
+       /* enqueue the request to the pending transmit queue */
+       queue_tx(priv, buffer);
+
+       return 1;
+}
+
+
+/* tx_lock held to protect access to queued tx list
+ */
+u32 sgdma_tx_completions(struct altera_tse_private *priv)
+{
+       u32 ready = 0;
+       struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
+
+       if (!sgdma_txbusy(priv) &&
+           ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+           (dequeue_tx(priv))) {
+               ready = 1;
+       }
+
+       return ready;
+}
+
+int sgdma_add_rx_desc(struct altera_tse_private *priv,
+                     struct tse_buffer *rxbuffer)
+{
+       queue_rx(priv, rxbuffer);
+       return sgdma_async_read(priv);
+}
+
+/* status is returned in the upper 16 bits,
+ * length is returned in the lower 16 bits
+ */
+u32 sgdma_rx_status(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
+       struct sgdma_descrip *desc = NULL;
+       int pktsrx;
+       unsigned int rxstatus = 0;
+       unsigned int pktlength = 0;
+       unsigned int pktstatus = 0;
+       struct tse_buffer *rxbuffer = NULL;
+
+       dma_sync_single_for_cpu(priv->device,
+                               priv->rxdescphys,
+                               priv->rxdescmem,
+                               DMA_BIDIRECTIONAL);
+
+       desc = &base[0];
+       if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
+           (desc->status & SGDMA_STATUS_EOP)) {
+               pktlength = desc->bytes_xferred;
+               pktstatus = desc->status & 0x3f;
+               rxstatus = pktstatus;
+               rxstatus = rxstatus << 16;
+               rxstatus |= (pktlength & 0xffff);
+
+               desc->status = 0;
+
+               rxbuffer = dequeue_rx(priv);
+               if (rxbuffer == NULL)
+                       netdev_err(priv->dev,
+                                  "sgdma rx and rx queue empty!\n");
+
+               /* kick the rx sgdma after reaping this descriptor */
+               pktsrx = sgdma_async_read(priv);
+       }
+
+       return rxstatus;
+}
+
+
+/* Private functions */
+static void sgdma_descrip(struct sgdma_descrip *desc,
+                         struct sgdma_descrip *ndesc,
+                         dma_addr_t ndesc_phys,
+                         dma_addr_t raddr,
+                         dma_addr_t waddr,
+                         u16 length,
+                         int generate_eop,
+                         int rfixed,
+                         int wfixed)
+{
+       /* Clear the next descriptor as not owned by hardware */
+       u32 ctrl = ndesc->control;
+       ctrl &= ~SGDMA_CONTROL_HW_OWNED;
+       ndesc->control = ctrl;
+
+       ctrl = 0;
+       ctrl = SGDMA_CONTROL_HW_OWNED;
+       ctrl |= generate_eop;
+       ctrl |= rfixed;
+       ctrl |= wfixed;
+
+       /* Channel is implicitly zero, initialized to 0 by default */
+
+       desc->raddr = raddr;
+       desc->waddr = waddr;
+       desc->next = lower_32_bits(ndesc_phys);
+       desc->control = ctrl;
+       desc->status = 0;
+       desc->rburst = 0;
+       desc->wburst = 0;
+       desc->bytes = length;
+       desc->bytes_xferred = 0;
+}
+
+/* If the hardware is busy, don't restart the async read.
+ * If the status register is 0 (the initial state), restart the async read;
+ * this typically happens when a receive buffer is populated the first time.
+ * If the status shows the controller is not busy but has status bits set,
+ * clear them and restart the async DMA read.
+ */
+static int sgdma_async_read(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       struct sgdma_descrip *descbase =
+               (struct sgdma_descrip *)priv->rx_dma_desc;
+
+       struct sgdma_descrip *cdesc = &descbase[0];
+       struct sgdma_descrip *ndesc = &descbase[1];
+
+       unsigned int sts = ioread32(&csr->status);
+       struct tse_buffer *rxbuffer = NULL;
+
+       if (!sgdma_rxbusy(priv)) {
+               rxbuffer = queue_rx_peekhead(priv);
+               if (rxbuffer == NULL)
+                       return 0;
+
+               sgdma_descrip(cdesc,            /* current descriptor */
+                             ndesc,            /* next descriptor */
+                             sgdma_rxphysaddr(priv, ndesc),
+                             0,                /* read addr 0 for rx dma */
+                             rxbuffer->dma_addr, /* write addr for rx dma */
+                             0,                /* read 'til EOP */
+                             0,                /* EOP: NA for rx dma */
+                             0,                /* read fixed: NA for rx dma */
+                             0);               /* SOP: NA for rx DMA */
+
+               /* clear control and status */
+               iowrite32(0, &csr->control);
+
+               /* If status available, clear those bits */
+               if (sts & 0xf)
+                       iowrite32(0xf, &csr->status);
+
+               dma_sync_single_for_device(priv->device,
+                                          priv->rxdescphys,
+                                          priv->rxdescmem,
+                                          DMA_BIDIRECTIONAL);
+
+               iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                         &csr->next_descrip);
+
+               iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                         &csr->control);
+
+               return 1;
+       }
+
+       return 0;
+}
+
+static int sgdma_async_write(struct altera_tse_private *priv,
+                            struct sgdma_descrip *desc)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+       if (sgdma_txbusy(priv))
+               return 0;
+
+       /* clear control and status */
+       iowrite32(0, &csr->control);
+       iowrite32(0x1f, &csr->status);
+
+       dma_sync_single_for_device(priv->device, priv->txdescphys,
+                                  priv->txdescmem, DMA_TO_DEVICE);
+
+       iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+                 &csr->next_descrip);
+
+       iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
+                 &csr->control);
+
+       return 1;
+}
+
+static dma_addr_t
+sgdma_txphysaddr(struct altera_tse_private *priv,
+                struct sgdma_descrip *desc)
+{
+       dma_addr_t paddr = priv->txdescmem_busaddr;
+       uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
+       return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+static dma_addr_t
+sgdma_rxphysaddr(struct altera_tse_private *priv,
+                struct sgdma_descrip *desc)
+{
+       dma_addr_t paddr = priv->rxdescmem_busaddr;
+       uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
+       return (dma_addr_t)((uintptr_t)paddr + offs);
+}
+
+#define list_remove_head(list, entry, type, member)                    \
+       do {                                                            \
+               entry = NULL;                                           \
+               if (!list_empty(list)) {                                \
+                       entry = list_entry((list)->next, type, member); \
+                       list_del_init(&entry->member);                  \
+               }                                                       \
+       } while (0)
+
+#define list_peek_head(list, entry, type, member)                      \
+       do {                                                            \
+               entry = NULL;                                           \
+               if (!list_empty(list)) {                                \
+                       entry = list_entry((list)->next, type, member); \
+               }                                                       \
+       } while (0)
+
+/* adds a tse_buffer to the tail of a tx buffer list.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+       list_add_tail(&buffer->lh, &priv->txlisthd);
+}
+
+
+/* adds a tse_buffer to the tail of a rx buffer list
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static void
+queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
+{
+       list_add_tail(&buffer->lh, &priv->rxlisthd);
+}
+
+/* dequeues a tse_buffer from the transmit buffer list, otherwise
+ * returns NULL if empty.
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_tx(struct altera_tse_private *priv)
+{
+       struct tse_buffer *buffer = NULL;
+       list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
+       return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list.
+ */
+static struct tse_buffer *
+dequeue_rx(struct altera_tse_private *priv)
+{
+       struct tse_buffer *buffer = NULL;
+       list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+       return buffer;
+}
+
+/* dequeues a tse_buffer from the receive buffer list, otherwise
+ * returns NULL if empty
+ * assumes the caller is managing and holding a mutual exclusion
+ * primitive to avoid simultaneous pushes/pops to the list while the
+ * head is being examined.
+ */
+static struct tse_buffer *
+queue_rx_peekhead(struct altera_tse_private *priv)
+{
+       struct tse_buffer *buffer = NULL;
+       list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
+       return buffer;
+}
+
+/* check and return rx sgdma status without polling
+ */
+static int sgdma_rxbusy(struct altera_tse_private *priv)
+{
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
+       return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+}
+
+/* waits for the tx sgdma to finish its current operation, returns 0
+ * when it transitions to nonbusy, returns 1 if the operation times out
+ */
+static int sgdma_txbusy(struct altera_tse_private *priv)
+{
+       int delay = 0;
+       struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
+
+       /* if DMA is busy, wait for the current transaction to finish */
+       while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+               udelay(1);
+
+       if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+               netdev_err(priv->dev, "timeout waiting for tx dma\n");
+               return 1;
+       }
+       return 0;
+}
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644 (file)
index 0000000..07d4717
--- /dev/null
@@ -0,0 +1,35 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMA_H__
+#define __ALTERA_SGDMA_H__
+
+void sgdma_reset(struct altera_tse_private *);
+void sgdma_enable_txirq(struct altera_tse_private *);
+void sgdma_enable_rxirq(struct altera_tse_private *);
+void sgdma_disable_rxirq(struct altera_tse_private *);
+void sgdma_disable_txirq(struct altera_tse_private *);
+void sgdma_clear_rxirq(struct altera_tse_private *);
+void sgdma_clear_txirq(struct altera_tse_private *);
+int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
+u32 sgdma_tx_completions(struct altera_tse_private *);
+int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_status(struct altera_tse_private *);
+u32 sgdma_rx_status(struct altera_tse_private *);
+int sgdma_initialize(struct altera_tse_private *);
+void sgdma_uninitialize(struct altera_tse_private *);
+
+#endif /*  __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644 (file)
index 0000000..ba3334f
--- /dev/null
@@ -0,0 +1,124 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_SGDMAHW_H__
+#define __ALTERA_SGDMAHW_H__
+
+/* SGDMA descriptor structure */
+struct sgdma_descrip {
+       unsigned int    raddr; /* address of data to be read */
+       unsigned int    pad1;
+       unsigned int    waddr;
+       unsigned int    pad2;
+       unsigned int    next;
+       unsigned int    pad3;
+       unsigned short  bytes;
+       unsigned char   rburst;
+       unsigned char   wburst;
+       unsigned short  bytes_xferred;  /* 16 bits, bytes xferred */
+
+       /* bit 0: error
+        * bit 1: length error
+        * bit 2: crc error
+        * bit 3: truncated error
+        * bit 4: phy error
+        * bit 5: collision error
+        * bit 6: reserved
+        * bit 7: status eop for recv case
+        */
+       unsigned char   status;
+
+       /* bit 0: eop
+        * bit 1: read_fixed
+        * bit 2: write fixed
+        * bits 3,4,5,6: Channel (always 0)
+        * bit 7: hardware owned
+        */
+       unsigned char   control;
+} __packed;
+
+
+#define SGDMA_STATUS_ERR               BIT(0)
+#define SGDMA_STATUS_LENGTH_ERR                BIT(1)
+#define SGDMA_STATUS_CRC_ERR           BIT(2)
+#define SGDMA_STATUS_TRUNC_ERR         BIT(3)
+#define SGDMA_STATUS_PHY_ERR           BIT(4)
+#define SGDMA_STATUS_COLL_ERR          BIT(5)
+#define SGDMA_STATUS_EOP               BIT(7)
+
+#define SGDMA_CONTROL_EOP              BIT(0)
+#define SGDMA_CONTROL_RD_FIXED         BIT(1)
+#define SGDMA_CONTROL_WR_FIXED         BIT(2)
+
+/* Channel is always 0, so just zero initialize it */
+
+#define SGDMA_CONTROL_HW_OWNED         BIT(7)
+
+/* SGDMA register space */
+struct sgdma_csr {
+       /* bit 0: error
+        * bit 1: eop
+        * bit 2: descriptor completed
+        * bit 3: chain completed
+        * bit 4: busy
+        * remainder reserved
+        */
+       u32     status;
+       u32     pad1[3];
+
+       /* bit 0: interrupt on error
+        * bit 1: interrupt on eop
+        * bit 2: interrupt after every descriptor
+        * bit 3: interrupt after last descrip in a chain
+        * bit 4: global interrupt enable
+        * bit 5: starts descriptor processing
+        * bit 6: stop core on dma error
+        * bit 7: interrupt on max descriptors
+        * bits 8-15: max descriptors to generate interrupt
+        * bit 16: Software reset
+        * bit 17: clears owned by hardware if 0, does not clear otherwise
+        * bit 18: enables descriptor polling mode
+        * bit 19-26: clocks before polling again
+        * bit 27-30: reserved
+        * bit 31: clear interrupt
+        */
+       u32     control;
+       u32     pad2[3];
+       u32     next_descrip;
+       u32     pad3[3];
+};
+
+
+#define SGDMA_STSREG_ERR       BIT(0) /* Error */
+#define SGDMA_STSREG_EOP       BIT(1) /* EOP */
+#define SGDMA_STSREG_DESCRIP   BIT(2) /* Descriptor completed */
+#define SGDMA_STSREG_CHAIN     BIT(3) /* Chain completed */
+#define SGDMA_STSREG_BUSY      BIT(4) /* Controller busy */
+
+#define SGDMA_CTRLREG_IOE      BIT(0) /* Interrupt on error */
+#define SGDMA_CTRLREG_IOEOP    BIT(1) /* Interrupt on EOP */
+#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */
+#define SGDMA_CTRLREG_ILASTD   BIT(3) /* Interrupt after last descriptor */
+#define SGDMA_CTRLREG_INTEN    BIT(4) /* Global Interrupt enable */
+#define SGDMA_CTRLREG_START    BIT(5) /* starts descriptor processing */
+#define SGDMA_CTRLREG_STOPERR  BIT(6) /* stop on dma error */
+#define SGDMA_CTRLREG_INTMAX   BIT(7) /* Interrupt on max descriptors */
+#define SGDMA_CTRLREG_RESET    BIT(16)/* Software reset */
+#define SGDMA_CTRLREG_COBHW    BIT(17)/* Clears owned by hardware */
+#define SGDMA_CTRLREG_POLL     BIT(18)/* enables descriptor polling mode */
+#define SGDMA_CTRLREG_CLRINT   BIT(31)/* Clears interrupt */
+
+#endif /* __ALTERA_SGDMAHW_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
new file mode 100644 (file)
index 0000000..8feeed0
--- /dev/null
@@ -0,0 +1,486 @@
+/* Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ *   Dalon Westergreen
+ *   Thomas Chou
+ *   Ian Abbott
+ *   Yuriy Kozlov
+ *   Tobias Klauser
+ *   Andriy Smolskyy
+ *   Roman Bulgakov
+ *   Dmytro Mytarchuk
+ *   Matthew Gerlach
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ALTERA_TSE_H__
+#define __ALTERA_TSE_H__
+
+#define ALTERA_TSE_RESOURCE_NAME       "altera_tse"
+
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR      10000
+#define ALTERA_TSE_MAC_FIFO_WIDTH              4       /* TX/RX FIFO width in
+                                                        * bytes
+                                                        */
+/* Rx FIFO default settings */
+#define ALTERA_TSE_RX_SECTION_EMPTY    16
+#define ALTERA_TSE_RX_SECTION_FULL     0
+#define ALTERA_TSE_RX_ALMOST_EMPTY     8
+#define ALTERA_TSE_RX_ALMOST_FULL      8
+
+/* Tx FIFO default settings */
+#define ALTERA_TSE_TX_SECTION_EMPTY    16
+#define ALTERA_TSE_TX_SECTION_FULL     0
+#define ALTERA_TSE_TX_ALMOST_EMPTY     8
+#define ALTERA_TSE_TX_ALMOST_FULL      3
+
+/* MAC function configuration default settings */
+#define ALTERA_TSE_TX_IPG_LENGTH       12
+
+#define GET_BIT_VALUE(v, bit)          (((v) >> (bit)) & 0x1)
+
+/* MAC Command_Config Register Bit Definitions
+ */
+#define MAC_CMDCFG_TX_ENA                      BIT(0)
+#define MAC_CMDCFG_RX_ENA                      BIT(1)
+#define MAC_CMDCFG_XON_GEN                     BIT(2)
+#define MAC_CMDCFG_ETH_SPEED                   BIT(3)
+#define MAC_CMDCFG_PROMIS_EN                   BIT(4)
+#define MAC_CMDCFG_PAD_EN                      BIT(5)
+#define MAC_CMDCFG_CRC_FWD                     BIT(6)
+#define MAC_CMDCFG_PAUSE_FWD                   BIT(7)
+#define MAC_CMDCFG_PAUSE_IGNORE                        BIT(8)
+#define MAC_CMDCFG_TX_ADDR_INS                 BIT(9)
+#define MAC_CMDCFG_HD_ENA                      BIT(10)
+#define MAC_CMDCFG_EXCESS_COL                  BIT(11)
+#define MAC_CMDCFG_LATE_COL                    BIT(12)
+#define MAC_CMDCFG_SW_RESET                    BIT(13)
+#define MAC_CMDCFG_MHASH_SEL                   BIT(14)
+#define MAC_CMDCFG_LOOP_ENA                    BIT(15)
+#define MAC_CMDCFG_TX_ADDR_SEL(v)              (((v) & 0x7) << 16)
+#define MAC_CMDCFG_MAGIC_ENA                   BIT(19)
+#define MAC_CMDCFG_SLEEP                       BIT(20)
+#define MAC_CMDCFG_WAKEUP                      BIT(21)
+#define MAC_CMDCFG_XOFF_GEN                    BIT(22)
+#define MAC_CMDCFG_CNTL_FRM_ENA                        BIT(23)
+#define MAC_CMDCFG_NO_LGTH_CHECK               BIT(24)
+#define MAC_CMDCFG_ENA_10                      BIT(25)
+#define MAC_CMDCFG_RX_ERR_DISC                 BIT(26)
+#define MAC_CMDCFG_DISABLE_READ_TIMEOUT                BIT(27)
+#define MAC_CMDCFG_CNT_RESET                   BIT(31)
+
+#define MAC_CMDCFG_TX_ENA_GET(v)               GET_BIT_VALUE(v, 0)
+#define MAC_CMDCFG_RX_ENA_GET(v)               GET_BIT_VALUE(v, 1)
+#define MAC_CMDCFG_XON_GEN_GET(v)              GET_BIT_VALUE(v, 2)
+#define MAC_CMDCFG_ETH_SPEED_GET(v)            GET_BIT_VALUE(v, 3)
+#define MAC_CMDCFG_PROMIS_EN_GET(v)            GET_BIT_VALUE(v, 4)
+#define MAC_CMDCFG_PAD_EN_GET(v)               GET_BIT_VALUE(v, 5)
+#define MAC_CMDCFG_CRC_FWD_GET(v)              GET_BIT_VALUE(v, 6)
+#define MAC_CMDCFG_PAUSE_FWD_GET(v)            GET_BIT_VALUE(v, 7)
+#define MAC_CMDCFG_PAUSE_IGNORE_GET(v)         GET_BIT_VALUE(v, 8)
+#define MAC_CMDCFG_TX_ADDR_INS_GET(v)          GET_BIT_VALUE(v, 9)
+#define MAC_CMDCFG_HD_ENA_GET(v)               GET_BIT_VALUE(v, 10)
+#define MAC_CMDCFG_EXCESS_COL_GET(v)           GET_BIT_VALUE(v, 11)
+#define MAC_CMDCFG_LATE_COL_GET(v)             GET_BIT_VALUE(v, 12)
+#define MAC_CMDCFG_SW_RESET_GET(v)             GET_BIT_VALUE(v, 13)
+#define MAC_CMDCFG_MHASH_SEL_GET(v)            GET_BIT_VALUE(v, 14)
+#define MAC_CMDCFG_LOOP_ENA_GET(v)             GET_BIT_VALUE(v, 15)
+#define MAC_CMDCFG_TX_ADDR_SEL_GET(v)          (((v) >> 16) & 0x7)
+#define MAC_CMDCFG_MAGIC_ENA_GET(v)            GET_BIT_VALUE(v, 19)
+#define MAC_CMDCFG_SLEEP_GET(v)                        GET_BIT_VALUE(v, 20)
+#define MAC_CMDCFG_WAKEUP_GET(v)               GET_BIT_VALUE(v, 21)
+#define MAC_CMDCFG_XOFF_GEN_GET(v)             GET_BIT_VALUE(v, 22)
+#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v)         GET_BIT_VALUE(v, 23)
+#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v)                GET_BIT_VALUE(v, 24)
+#define MAC_CMDCFG_ENA_10_GET(v)               GET_BIT_VALUE(v, 25)
+#define MAC_CMDCFG_RX_ERR_DISC_GET(v)          GET_BIT_VALUE(v, 26)
+#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
+#define MAC_CMDCFG_CNT_RESET_GET(v)            GET_BIT_VALUE(v, 31)
+
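These accessors are thin wrappers around GET_BIT_VALUE(). A rough sketch of how a caller might set a bit in command_config and read it back, assuming an ioremapped struct altera_tse_mac (defined further down in this header); the helper name and warning text are illustrative, not from the driver:

/* Illustrative only: enable RX in command_config and confirm the bit took. */
static void example_enable_rx(struct altera_tse_mac __iomem *mac)
{
        u32 cfg = ioread32(&mac->command_config);

        cfg |= MAC_CMDCFG_RX_ENA;
        iowrite32(cfg, &mac->command_config);

        if (!MAC_CMDCFG_RX_ENA_GET(ioread32(&mac->command_config)))
                pr_warn("altera_tse: RX enable did not take effect\n");
}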
+/* MDIO registers within MAC register Space
+ */
+struct altera_tse_mdio {
+       u32 control;    /* PHY device operation control register */
+       u32 status;     /* PHY device operation status register */
+       u32 phy_id1;    /* Bits 31:16 of PHY identifier */
+       u32 phy_id2;    /* Bits 15:0 of PHY identifier */
+       u32 auto_negotiation_advertisement;     /* Auto-negotiation
+                                                        * advertisement
+                                                        * register
+                                                        */
+       u32 remote_partner_base_page_ability;
+
+       u32 reg6;
+       u32 reg7;
+       u32 reg8;
+       u32 reg9;
+       u32 rega;
+       u32 regb;
+       u32 regc;
+       u32 regd;
+       u32 rege;
+       u32 regf;
+       u32 reg10;
+       u32 reg11;
+       u32 reg12;
+       u32 reg13;
+       u32 reg14;
+       u32 reg15;
+       u32 reg16;
+       u32 reg17;
+       u32 reg18;
+       u32 reg19;
+       u32 reg1a;
+       u32 reg1b;
+       u32 reg1c;
+       u32 reg1d;
+       u32 reg1e;
+       u32 reg1f;
+};
+
+/* MAC register Space. Note that some of these registers may or may not be
+ * present depending upon options chosen by the user when the core was
+ * configured and built. Please consult the Altera Triple Speed Ethernet User
+ * Guide for details.
+ */
+struct altera_tse_mac {
+       /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer
+        * specific revision
+        */
+       u32 megacore_revision;
+       /* Provides a memory location for user applications to test the device
+        * memory operation.
+        */
+       u32 scratch_pad;
+       /* The host processor uses this register to control and configure the
+        * MAC block
+        */
+       u32 command_config;
+       /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary
+        * MAC address
+        */
+       u32 mac_addr_0;
+       /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary
+        * MAC address
+        */
+       u32 mac_addr_1;
+       /* 14-bit maximum frame length checked by the MAC receive logic */
+       u32 frm_length;
+       /* The pause quanta is used in each pause frame sent to a remote
+        * Ethernet device, in increments of 512 Ethernet bit times
+        */
+       u32 pause_quanta;
+       /* 12-bit receive FIFO section-empty threshold */
+       u32 rx_section_empty;
+       /* 12-bit receive FIFO section-full threshold */
+       u32 rx_section_full;
+       /* 12-bit transmit FIFO section-empty threshold */
+       u32 tx_section_empty;
+       /* 12-bit transmit FIFO section-full threshold */
+       u32 tx_section_full;
+       /* 12-bit receive FIFO almost-empty threshold */
+       u32 rx_almost_empty;
+       /* 12-bit receive FIFO almost-full threshold */
+       u32 rx_almost_full;
+       /* 12-bit transmit FIFO almost-empty threshold */
+       u32 tx_almost_empty;
+       /* 12-bit transmit FIFO almost-full threshold */
+       u32 tx_almost_full;
+       /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */
+       u32 mdio_phy0_addr;
+       /* MDIO address of PHY Device 1. Bits 0 to 4 hold a 5-bit PHY address */
+       u32 mdio_phy1_addr;
+
+       /* Bit[15:0]—16-bit holdoff quanta */
+       u32 holdoff_quant;
+
+       /* only if 100/1000 BaseX PCS, reserved otherwise */
+       u32 reserved1[5];
+
+       /* Minimum IPG between consecutive transmit frame in terms of bytes */
+       u32 tx_ipg_length;
+
+       /* IEEE 802.3 oEntity Managed Object Support */
+
+       /* The MAC addresses */
+       u32 mac_id_1;
+       u32 mac_id_2;
+
+       /* Number of frames transmitted without error including pause frames */
+       u32 frames_transmitted_ok;
+       /* Number of frames received without error including pause frames */
+       u32 frames_received_ok;
+       /* Number of frames received with a CRC error */
+       u32 frames_check_sequence_errors;
+       /* Frame received with an alignment error */
+       u32 alignment_errors;
+       /* Sum of payload and padding octets of frames transmitted without
+        * error
+        */
+       u32 octets_transmitted_ok;
+       /* Sum of payload and padding octets of frames received without error */
+       u32 octets_received_ok;
+
+       /* IEEE 802.3 oPausedEntity Managed Object Support */
+
+       /* Number of transmitted pause frames */
+       u32 tx_pause_mac_ctrl_frames;
+       /* Number of Received pause frames */
+       u32 rx_pause_mac_ctrl_frames;
+
+       /* IETF MIB (MIB-II) Object Support */
+
+       /* Number of frames received with error */
+       u32 if_in_errors;
+       /* Number of frames transmitted with error */
+       u32 if_out_errors;
+       /* Number of valid received unicast frames */
+       u32 if_in_ucast_pkts;
+       /* Number of valid received multicasts frames (without pause) */
+       u32 if_in_multicast_pkts;
+       /* Number of valid received broadcast frames */
+       u32 if_in_broadcast_pkts;
+       u32 if_out_discards;
+       /* The number of valid unicast frames transmitted */
+       u32 if_out_ucast_pkts;
+       /* The number of valid multicast frames transmitted,
+        * excluding pause frames
+        */
+       u32 if_out_multicast_pkts;
+       u32 if_out_broadcast_pkts;
+
+       /* IETF RMON MIB Object Support */
+
+       /* Counts the number of dropped packets due to internal errors
+        * of the MAC client.
+        */
+       u32 ether_stats_drop_events;
+       /* Total number of bytes received. Good and bad frames. */
+       u32 ether_stats_octets;
+       /* Total number of packets received. Counts good and bad packets. */
+       u32 ether_stats_pkts;
+       /* Number of packets received with less than 64 bytes. */
+       u32 ether_stats_undersize_pkts;
+       /* The number of frames received that are longer than the
+        * value configured in the frm_length register
+        */
+       u32 ether_stats_oversize_pkts;
+       /* Number of received packet with 64 bytes */
+       u32 ether_stats_pkts_64_octets;
+       /* Frames (good and bad) with 65 to 127 bytes */
+       u32 ether_stats_pkts_65to127_octets;
+       /* Frames (good and bad) with 128 to 255 bytes */
+       u32 ether_stats_pkts_128to255_octets;
+       /* Frames (good and bad) with 256 to 511 bytes */
+       u32 ether_stats_pkts_256to511_octets;
+       /* Frames (good and bad) with 512 to 1023 bytes */
+       u32 ether_stats_pkts_512to1023_octets;
+       /* Frames (good and bad) with 1024 to 1518 bytes */
+       u32 ether_stats_pkts_1024to1518_octets;
+
+       /* Any frame length from 1519 to the maximum length configured in the
+        * frm_length register, if it is greater than 1518
+        */
+       u32 ether_stats_pkts_1519tox_octets;
+       /* Too long frames with CRC error */
+       u32 ether_stats_jabbers;
+       /* Too short frames with CRC error */
+       u32 ether_stats_fragments;
+
+       u32 reserved2;
+
+       /* FIFO control register */
+       u32 tx_cmd_stat;
+       u32 rx_cmd_stat;
+
+       /* Extended Statistics Counters */
+       u32 msb_octets_transmitted_ok;
+       u32 msb_octets_received_ok;
+       u32 msb_ether_stats_octets;
+
+       u32 reserved3;
+
+       /* Multicast address resolution table, mapped in the controller address
+        * space
+        */
+       u32 hash_table[64];
+
+       /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY
+        * management interface
+        */
+       struct altera_tse_mdio mdio_phy0;
+       struct altera_tse_mdio mdio_phy1;
+
+       /* 4 Supplemental MAC Addresses */
+       u32 supp_mac_addr_0_0;
+       u32 supp_mac_addr_0_1;
+       u32 supp_mac_addr_1_0;
+       u32 supp_mac_addr_1_1;
+       u32 supp_mac_addr_2_0;
+       u32 supp_mac_addr_2_1;
+       u32 supp_mac_addr_3_0;
+       u32 supp_mac_addr_3_1;
+
+       u32 reserved4[8];
+
+       /* IEEE 1588v2 Feature */
+       u32 tx_period;
+       u32 tx_adjust_fns;
+       u32 tx_adjust_ns;
+       u32 rx_period;
+       u32 rx_adjust_fns;
+       u32 rx_adjust_ns;
+
+       u32 reserved5[42];
+};
+
+/* Transmit and Receive Command Registers Bit Definitions
+ */
+#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC                BIT(17)
+#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16      BIT(18)
+#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16      BIT(25)
+
+/* Wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct tse_buffer {
+       struct list_head lh;
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       u32 len;
+       int mapped_as_page;
+};
+
+struct altera_tse_private;
+
+#define ALTERA_DTYPE_SGDMA 1
+#define ALTERA_DTYPE_MSGDMA 2
+
+/* standard DMA interface for SGDMA and MSGDMA */
+struct altera_dmaops {
+       int altera_dtype;
+       int dmamask;
+       void (*reset_dma)(struct altera_tse_private *);
+       void (*enable_txirq)(struct altera_tse_private *);
+       void (*enable_rxirq)(struct altera_tse_private *);
+       void (*disable_txirq)(struct altera_tse_private *);
+       void (*disable_rxirq)(struct altera_tse_private *);
+       void (*clear_txirq)(struct altera_tse_private *);
+       void (*clear_rxirq)(struct altera_tse_private *);
+       int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
+       u32 (*tx_completions)(struct altera_tse_private *);
+       int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+       u32 (*get_rx_status)(struct altera_tse_private *);
+       int (*init_dma)(struct altera_tse_private *);
+       void (*uninit_dma)(struct altera_tse_private *);
+};
+
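The main driver file (altera_tse_main.c, which begins in the next hunk) is expected to bind one of the two DMA back ends through this table. A hypothetical sketch of the mSGDMA instance, using only the functions declared in altera_msgdma.h; the dmamask value here is an assumption:

/* Hypothetical example of wiring the mSGDMA routines into the ops table;
 * the real table lives in the main driver file, not shown in this hunk.
 */
static struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,                  /* assumed DMA address width */
        .reset_dma = msgdma_reset,
        .enable_txirq = msgdma_enable_txirq,
        .enable_rxirq = msgdma_enable_rxirq,
        .disable_txirq = msgdma_disable_txirq,
        .disable_rxirq = msgdma_disable_rxirq,
        .clear_txirq = msgdma_clear_txirq,
        .clear_rxirq = msgdma_clear_rxirq,
        .tx_buffer = msgdma_tx_buffer,
        .tx_completions = msgdma_tx_completions,
        .add_rx_desc = msgdma_add_rx_desc,
        .get_rx_status = msgdma_rx_status,
        .init_dma = msgdma_initialize,
        .uninit_dma = msgdma_uninitialize,
};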
+/* This structure is private to each device.
+ */
+struct altera_tse_private {
+       struct net_device *dev;
+       struct device *device;
+       struct napi_struct napi;
+
+       /* MAC address space */
+       struct altera_tse_mac __iomem *mac_dev;
+
+       /* TSE Revision */
+       u32     revision;
+
+       /* mSGDMA Rx Dispatcher address space */
+       void __iomem *rx_dma_csr;
+       void __iomem *rx_dma_desc;
+       void __iomem *rx_dma_resp;
+
+       /* mSGDMA Tx Dispatcher address space */
+       void __iomem *tx_dma_csr;
+       void __iomem *tx_dma_desc;
+
+       /* Rx buffers queue */
+       struct tse_buffer *rx_ring;
+       u32 rx_cons;
+       u32 rx_prod;
+       u32 rx_ring_size;
+       u32 rx_dma_buf_sz;
+
+       /* Tx ring buffer */
+       struct tse_buffer *tx_ring;
+       u32 tx_prod;
+       u32 tx_cons;
+       u32 tx_ring_size;
+
+       /* Interrupts */
+       u32 tx_irq;
+       u32 rx_irq;
+
+       /* RX/TX MAC FIFO configs */
+       u32 tx_fifo_depth;
+       u32 rx_fifo_depth;
+       u32 max_mtu;
+
+       /* Hash filter settings */
+       u32 hash_filter;
+       u32 added_unicast;
+
+       /* Descriptor memory info for managing SGDMA */
+       u32 txdescmem;
+       u32 rxdescmem;
+       dma_addr_t rxdescmem_busaddr;
+       dma_addr_t txdescmem_busaddr;
+       u32 txctrlreg;
+       u32 rxctrlreg;
+       dma_addr_t rxdescphys;
+       dma_addr_t txdescphys;
+
+       struct list_head txlisthd;
+       struct list_head rxlisthd;
+
+       /* MAC command_config register protection */
+       spinlock_t mac_cfg_lock;
+       /* Tx path protection */
+       spinlock_t tx_lock;
+       /* Rx DMA & interrupt control protection */
+       spinlock_t rxdma_irq_lock;
+
+       /* PHY */
+       int phy_addr;           /* PHY's MDIO address, -1 for autodetection */
+       phy_interface_t phy_iface;
+       struct mii_bus *mdio;
+       struct phy_device *phydev;
+       int oldspeed;
+       int oldduplex;
+       int oldlink;
+
+       /* ethtool msglvl option */
+       u32 msg_enable;
+
+       struct altera_dmaops *dmaops;
+};
+
+/* Function prototypes
+ */
+void altera_tse_set_ethtool_ops(struct net_device *);
+
+#endif /* __ALTERA_TSE_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
new file mode 100644 (file)
index 0000000..319ca74
--- /dev/null
@@ -0,0 +1,235 @@
+/* Ethtool support for Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ *   Dalon Westergreen
+ *   Thomas Chou
+ *   Ian Abbott
+ *   Yuriy Kozlov
+ *   Tobias Klauser
+ *   Andriy Smolskyy
+ *   Roman Bulgakov
+ *   Dmytro Mytarchuk
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "altera_tse.h"
+
+#define TSE_STATS_LEN  31
+#define TSE_NUM_REGS   128
+
+static char const stat_gstrings[][ETH_GSTRING_LEN] = {
+       "tx_packets",
+       "rx_packets",
+       "rx_crc_errors",
+       "rx_align_errors",
+       "tx_bytes",
+       "rx_bytes",
+       "tx_pause",
+       "rx_pause",
+       "rx_errors",
+       "tx_errors",
+       "rx_unicast",
+       "rx_multicast",
+       "rx_broadcast",
+       "tx_discards",
+       "tx_unicast",
+       "tx_multicast",
+       "tx_broadcast",
+       "ether_drops",
+       "rx_total_bytes",
+       "rx_total_packets",
+       "rx_undersize",
+       "rx_oversize",
+       "rx_64_bytes",
+       "rx_65_127_bytes",
+       "rx_128_255_bytes",
+       "rx_256_511_bytes",
+       "rx_512_1023_bytes",
+       "rx_1024_1518_bytes",
+       "rx_gte_1519_bytes",
+       "rx_jabbers",
+       "rx_runts",
+};
+
+static void tse_get_drvinfo(struct net_device *dev,
+                           struct ethtool_drvinfo *info)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       u32 rev = ioread32(&priv->mac_dev->megacore_revision);
+
+       strcpy(info->driver, "Altera TSE MAC IP Driver");
+       strcpy(info->version, "v8.0");
+       snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
+                rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
+       sprintf(info->bus_info, "platform");
+}
+
+/* Fill in a buffer with the strings which correspond to the
+ * stats
+ */
+static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+       memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN);
+}
+
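The string table above, TSE_STATS_LEN, and the buf[] indices filled in tse_fill_stats() below all have to stay in step. One way to make that coupling explicit, not present in this patch, would be a compile-time check inside tse_gstrings(), roughly:

/* Hypothetical variant of tse_gstrings(): fail the build if the string
 * table and the advertised statistics count ever drift apart.
 */
static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
{
        BUILD_BUG_ON(ARRAY_SIZE(stat_gstrings) != TSE_STATS_LEN);
        memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN);
}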
+static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+                          u64 *buf)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct altera_tse_mac *mac = priv->mac_dev;
+       u64 ext;
+
+       buf[0] = ioread32(&mac->frames_transmitted_ok);
+       buf[1] = ioread32(&mac->frames_received_ok);
+       buf[2] = ioread32(&mac->frames_check_sequence_errors);
+       buf[3] = ioread32(&mac->alignment_errors);
+
+       /* Extended aOctetsTransmittedOK counter */
+       ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
+       ext |= ioread32(&mac->octets_transmitted_ok);
+       buf[4] = ext;
+
+       /* Extended aOctetsReceivedOK counter */
+       ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
+       ext |= ioread32(&mac->octets_received_ok);
+       buf[5] = ext;
+
+       buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
+       buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
+       buf[8] = ioread32(&mac->if_in_errors);
+       buf[9] = ioread32(&mac->if_out_errors);
+       buf[10] = ioread32(&mac->if_in_ucast_pkts);
+       buf[11] = ioread32(&mac->if_in_multicast_pkts);
+       buf[12] = ioread32(&mac->if_in_broadcast_pkts);
+       buf[13] = ioread32(&mac->if_out_discards);
+       buf[14] = ioread32(&mac->if_out_ucast_pkts);
+       buf[15] = ioread32(&mac->if_out_multicast_pkts);
+       buf[16] = ioread32(&mac->if_out_broadcast_pkts);
+       buf[17] = ioread32(&mac->ether_stats_drop_events);
+
+       /* Extended etherStatsOctets counter */
+       ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
+       ext |= ioread32(&mac->ether_stats_octets);
+       buf[18] = ext;
+
+       buf[19] = ioread32(&mac->ether_stats_pkts);
+       buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
+       buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
+       buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
+       buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
+       buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
+       buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
+       buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
+       buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
+       buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
+       buf[29] = ioread32(&mac->ether_stats_jabbers);
+       buf[30] = ioread32(&mac->ether_stats_fragments);
+}
+
+static int tse_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return TSE_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static u32 tse_get_msglevel(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       return priv->msg_enable;
+}
+
+static void tse_set_msglevel(struct net_device *dev, uint32_t data)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       priv->msg_enable = data;
+}
+
+static int tse_reglen(struct net_device *dev)
+{
+       return TSE_NUM_REGS * sizeof(u32);
+}
+
+static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                        void *regbuf)
+{
+       int i;
+       struct altera_tse_private *priv = netdev_priv(dev);
+       u32 *tse_mac_regs = (u32 *)priv->mac_dev;
+       u32 *buf = regbuf;
+
+       /* Set version to a known value, so ethtool knows
+        * how to do any special formatting of this data.
+        * This version number will need to change if and
+        * when this register table is changed.
+        */
+
+       regs->version = 1;
+
+       for (i = 0; i < TSE_NUM_REGS; i++)
+               buf[i] = ioread32(&tse_mac_regs[i]);
+}
+
+static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+
+       if (phydev == NULL)
+               return -ENODEV;
+
+       return phy_ethtool_gset(phydev, cmd);
+}
+
+static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+
+       if (phydev == NULL)
+               return -ENODEV;
+
+       return phy_ethtool_sset(phydev, cmd);
+}
+
+static const struct ethtool_ops tse_ethtool_ops = {
+       .get_drvinfo = tse_get_drvinfo,
+       .get_regs_len = tse_reglen,
+       .get_regs = tse_get_regs,
+       .get_link = ethtool_op_get_link,
+       .get_settings = tse_get_settings,
+       .set_settings = tse_set_settings,
+       .get_strings = tse_gstrings,
+       .get_sset_count = tse_sset_count,
+       .get_ethtool_stats = tse_fill_stats,
+       .get_msglevel = tse_get_msglevel,
+       .set_msglevel = tse_set_msglevel,
+};
+
+void altera_tse_set_ethtool_ops(struct net_device *netdev)
+{
+       SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
new file mode 100644 (file)
index 0000000..c70a29e
--- /dev/null
@@ -0,0 +1,1543 @@
+/* Altera Triple-Speed Ethernet MAC driver
+ * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
+ *
+ * Contributors:
+ *   Dalon Westergreen
+ *   Thomas Chou
+ *   Ian Abbott
+ *   Yuriy Kozlov
+ *   Tobias Klauser
+ *   Andriy Smolskyy
+ *   Roman Bulgakov
+ *   Dmytro Mytarchuk
+ *   Matthew Gerlach
+ *
+ * Original driver contributed by SLS.
+ * Major updates contributed by GlobalLogic
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <asm/cacheflush.h>
+
+#include "altera_utils.h"
+#include "altera_tse.h"
+#include "altera_sgdma.h"
+#include "altera_msgdma.h"
+
+static atomic_t instance_count = ATOMIC_INIT(~0);
+/* Module parameters */
+static int debug = -1;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+                                       NETIF_MSG_LINK | NETIF_MSG_IFUP |
+                                       NETIF_MSG_IFDOWN);
+
+#define RX_DESCRIPTORS 64
+static int dma_rx_num = RX_DESCRIPTORS;
+module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
+
+#define TX_DESCRIPTORS 64
+static int dma_tx_num = TX_DESCRIPTORS;
+module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
+
+#define POLL_PHY (-1)
+
+/* Make sure the DMA buffer size is larger than the maximum frame size
+ * plus some alignment headroom and a VLAN header. With a maximum frame
+ * size of 1518 bytes, a VLAN tag adds 4 bytes and the alignment headroom
+ * adds another 2, for 1524 bytes in total, so 2048 is ample.
+ */
+#define ALTERA_RXDMABUFFER_SIZE        2048
+
+/* Allow network stack to resume queueing packets after we've
+ * finished transmitting at least 1/4 of the packets in the queue.
+ */
+#define TSE_TX_THRESH(x)       (x->tx_ring_size / 4)
+
+#define TXQUEUESTOP_THRESHOLD  2
+
+static struct of_device_id altera_tse_ids[];
+
+static inline u32 tse_tx_avail(struct altera_tse_private *priv)
+{
+       return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
+}
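+
+/* Illustrative example (arbitrary values): with tx_ring_size = 64,
+ * tx_prod = 10 and tx_cons = 4 this evaluates to 4 + 64 - 10 - 1 = 57 free
+ * slots; the "- 1" keeps the producer from catching up with the consumer
+ * when the ring is completely full.
+ */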
+
+/* MDIO specific functions
+ */
+static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
+       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+       u32 data;
+
+       /* set MDIO address */
+       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+
+       /* get the data */
+       data = ioread32(&mdio_regs[regnum]) & 0xffff;
+       return data;
+}
+
+static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+                                u16 value)
+{
+       struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
+       unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
+
+       /* set MDIO address */
+       iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
+
+       /* write the data */
+       iowrite32((u32) value, &mdio_regs[regnum]);
+       return 0;
+}
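+
+/* The MDIO accessors above rely on the TSE MAC mapping the attached PHY's
+ * MII register set as a register window starting at mdio_phy0, so regnum
+ * indexes directly into that window once the PHY address has been latched
+ * into mdio_phy0_addr.
+ */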
+
+static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       int ret;
+       int i;
+       struct device_node *mdio_node = NULL;
+       struct mii_bus *mdio = NULL;
+       struct device_node *child_node = NULL;
+
+       for_each_child_of_node(priv->device->of_node, child_node) {
+               if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
+                       mdio_node = child_node;
+                       break;
+               }
+       }
+
+       if (mdio_node) {
+               netdev_dbg(dev, "FOUND MDIO subnode\n");
+       } else {
+               netdev_dbg(dev, "NO MDIO subnode\n");
+               return 0;
+       }
+
+       mdio = mdiobus_alloc();
+       if (mdio == NULL) {
+               netdev_err(dev, "Error allocating MDIO bus\n");
+               return -ENOMEM;
+       }
+
+       mdio->name = ALTERA_TSE_RESOURCE_NAME;
+       mdio->read = &altera_tse_mdio_read;
+       mdio->write = &altera_tse_mdio_write;
+       snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
+
+       mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+       if (mdio->irq == NULL) {
+               ret = -ENOMEM;
+               goto out_free_mdio;
+       }
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               mdio->irq[i] = PHY_POLL;
+
+       mdio->priv = priv->mac_dev;
+       mdio->parent = priv->device;
+
+       ret = of_mdiobus_register(mdio, mdio_node);
+       if (ret != 0) {
+               netdev_err(dev, "Cannot register MDIO bus %s\n",
+                          mdio->id);
+               goto out_free_mdio_irq;
+       }
+
+       if (netif_msg_drv(priv))
+               netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
+
+       priv->mdio = mdio;
+       return 0;
+out_free_mdio_irq:
+       kfree(mdio->irq);
+out_free_mdio:
+       mdiobus_free(mdio);
+       mdio = NULL;
+       return ret;
+}
+
+static void altera_tse_mdio_destroy(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+
+       if (priv->mdio == NULL)
+               return;
+
+       if (netif_msg_drv(priv))
+               netdev_info(dev, "MDIO bus %s: removed\n",
+                           priv->mdio->id);
+
+       mdiobus_unregister(priv->mdio);
+       kfree(priv->mdio->irq);
+       mdiobus_free(priv->mdio);
+       priv->mdio = NULL;
+}
+
+static int tse_init_rx_buffer(struct altera_tse_private *priv,
+                             struct tse_buffer *rxbuffer, int len)
+{
+       rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
+       if (!rxbuffer->skb)
+               return -ENOMEM;
+
+       rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
+                                               len,
+                                               DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
+               netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(rxbuffer->skb);
+               return -EINVAL;
+       }
+       rxbuffer->len = len;
+       return 0;
+}
+
+static void tse_free_rx_buffer(struct altera_tse_private *priv,
+                              struct tse_buffer *rxbuffer)
+{
+       struct sk_buff *skb = rxbuffer->skb;
+       dma_addr_t dma_addr = rxbuffer->dma_addr;
+
+       if (skb != NULL) {
+               if (dma_addr)
+                       dma_unmap_single(priv->device, dma_addr,
+                                        rxbuffer->len,
+                                        DMA_FROM_DEVICE);
+               dev_kfree_skb_any(skb);
+               rxbuffer->skb = NULL;
+               rxbuffer->dma_addr = 0;
+       }
+}
+
+/* Unmap and free Tx buffer resources
+ */
+static void tse_free_tx_buffer(struct altera_tse_private *priv,
+                              struct tse_buffer *buffer)
+{
+       if (buffer->dma_addr) {
+               if (buffer->mapped_as_page)
+                       dma_unmap_page(priv->device, buffer->dma_addr,
+                                      buffer->len, DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(priv->device, buffer->dma_addr,
+                                        buffer->len, DMA_TO_DEVICE);
+               buffer->dma_addr = 0;
+       }
+       if (buffer->skb) {
+               dev_kfree_skb_any(buffer->skb);
+               buffer->skb = NULL;
+       }
+}
+
+static int alloc_init_skbufs(struct altera_tse_private *priv)
+{
+       unsigned int rx_descs = priv->rx_ring_size;
+       unsigned int tx_descs = priv->tx_ring_size;
+       int ret = -ENOMEM;
+       int i;
+
+       /* Create Rx ring buffer */
+       priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
+                               GFP_KERNEL);
+       if (!priv->rx_ring)
+               goto err_rx_ring;
+
+       /* Create Tx ring buffer */
+       priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
+                               GFP_KERNEL);
+       if (!priv->tx_ring)
+               goto err_tx_ring;
+
+       priv->tx_cons = 0;
+       priv->tx_prod = 0;
+
+       /* Init Rx ring */
+       for (i = 0; i < rx_descs; i++) {
+               ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
+                                        priv->rx_dma_buf_sz);
+               if (ret)
+                       goto err_init_rx_buffers;
+       }
+
+       priv->rx_cons = 0;
+       priv->rx_prod = 0;
+
+       return 0;
+err_init_rx_buffers:
+       while (--i >= 0)
+               tse_free_rx_buffer(priv, &priv->rx_ring[i]);
+       kfree(priv->tx_ring);
+err_tx_ring:
+       kfree(priv->rx_ring);
+err_rx_ring:
+       return ret;
+}
+
+static void free_skbufs(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       unsigned int rx_descs = priv->rx_ring_size;
+       unsigned int tx_descs = priv->tx_ring_size;
+       int i;
+
+       /* Release the DMA TX/RX socket buffers */
+       for (i = 0; i < rx_descs; i++)
+               tse_free_rx_buffer(priv, &priv->rx_ring[i]);
+       for (i = 0; i < tx_descs; i++)
+               tse_free_tx_buffer(priv, &priv->tx_ring[i]);
+
+       kfree(priv->tx_ring);
+       kfree(priv->rx_ring);
+}
+
+/* Reallocate the skb for the reception process
+ */
+static inline void tse_rx_refill(struct altera_tse_private *priv)
+{
+       unsigned int rxsize = priv->rx_ring_size;
+       unsigned int entry;
+       int ret;
+
+       for (; priv->rx_cons - priv->rx_prod > 0;
+                       priv->rx_prod++) {
+               entry = priv->rx_prod % rxsize;
+               if (likely(priv->rx_ring[entry].skb == NULL)) {
+                       ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
+                               priv->rx_dma_buf_sz);
+                       if (unlikely(ret != 0))
+                               break;
+                       priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
+               }
+       }
+}
+
+/* Pull out the VLAN tag and fix up the packet
+ */
+static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+       struct ethhdr *eth_hdr;
+       u16 vid;
+       if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           !__vlan_get_tag(skb, &vid)) {
+               eth_hdr = (struct ethhdr *)skb->data;
+               memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+               skb_pull(skb, VLAN_HLEN);
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+       }
+}
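+
+/* Illustrative layout (example frame only): a tagged frame arrives as
+ * |dst(6)|src(6)|0x8100|TCI(2)|type|payload|; the memmove shifts the two
+ * MAC addresses forward by VLAN_HLEN and skb_pull() drops the stale bytes,
+ * leaving |dst(6)|src(6)|type|payload| with the VID reported out of band
+ * via __vlan_hwaccel_put_tag().
+ */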
+
+/* Receive a packet: retrieve and pass over to upper levels
+ */
+static int tse_rx(struct altera_tse_private *priv, int limit)
+{
+       unsigned int count = 0;
+       unsigned int next_entry;
+       struct sk_buff *skb;
+       unsigned int entry = priv->rx_cons % priv->rx_ring_size;
+       u32 rxstatus;
+       u16 pktlength;
+       u16 pktstatus;
+
+       while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+               pktstatus = rxstatus >> 16;
+               pktlength = rxstatus & 0xffff;
+
+               if ((pktstatus & 0xFF) || (pktlength == 0))
+                       netdev_err(priv->dev,
+                                  "RCV pktstatus %08X pktlength %08X\n",
+                                  pktstatus, pktlength);
+
+               count++;
+               next_entry = (++priv->rx_cons) % priv->rx_ring_size;
+
+               skb = priv->rx_ring[entry].skb;
+               if (unlikely(!skb)) {
+                       netdev_err(priv->dev,
+                                  "%s: Inconsistent Rx descriptor chain\n",
+                                  __func__);
+                       priv->dev->stats.rx_dropped++;
+                       break;
+               }
+               priv->rx_ring[entry].skb = NULL;
+
+               skb_put(skb, pktlength);
+
+               /* make cache consistent with receive packet buffer */
+               dma_sync_single_for_cpu(priv->device,
+                                       priv->rx_ring[entry].dma_addr,
+                                       priv->rx_ring[entry].len,
+                                       DMA_FROM_DEVICE);
+
+               dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
+                                priv->rx_ring[entry].len, DMA_FROM_DEVICE);
+
+               if (netif_msg_pktdata(priv)) {
+                       netdev_info(priv->dev, "frame received %d bytes\n",
+                                   pktlength);
+                       print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
+                                      16, 1, skb->data, pktlength, true);
+               }
+
+               tse_rx_vlan(priv->dev, skb);
+
+               skb->protocol = eth_type_trans(skb, priv->dev);
+               skb_checksum_none_assert(skb);
+
+               napi_gro_receive(&priv->napi, skb);
+
+               priv->dev->stats.rx_packets++;
+               priv->dev->stats.rx_bytes += pktlength;
+
+               entry = next_entry;
+       }
+
+       tse_rx_refill(priv);
+       return count;
+}
+
+/* Reclaim resources after transmission completes
+ */
+static int tse_tx_complete(struct altera_tse_private *priv)
+{
+       unsigned int txsize = priv->tx_ring_size;
+       u32 ready;
+       unsigned int entry;
+       struct tse_buffer *tx_buff;
+       int txcomplete = 0;
+
+       spin_lock(&priv->tx_lock);
+
+       ready = priv->dmaops->tx_completions(priv);
+
+       /* Free sent buffers */
+       while (ready && (priv->tx_cons != priv->tx_prod)) {
+               entry = priv->tx_cons % txsize;
+               tx_buff = &priv->tx_ring[entry];
+
+               if (netif_msg_tx_done(priv))
+                       netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
+                                  __func__, priv->tx_prod, priv->tx_cons);
+
+               if (likely(tx_buff->skb))
+                       priv->dev->stats.tx_packets++;
+
+               tse_free_tx_buffer(priv, tx_buff);
+               priv->tx_cons++;
+
+               txcomplete++;
+               ready--;
+       }
+
+       if (unlikely(netif_queue_stopped(priv->dev) &&
+                    tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
+               netif_tx_lock(priv->dev);
+               if (netif_queue_stopped(priv->dev) &&
+                   tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
+                       if (netif_msg_tx_done(priv))
+                               netdev_dbg(priv->dev, "%s: restart transmit\n",
+                                          __func__);
+                       netif_wake_queue(priv->dev);
+               }
+               netif_tx_unlock(priv->dev);
+       }
+
+       spin_unlock(&priv->tx_lock);
+       return txcomplete;
+}
+
+/* NAPI polling function
+ */
+static int tse_poll(struct napi_struct *napi, int budget)
+{
+       struct altera_tse_private *priv =
+                       container_of(napi, struct altera_tse_private, napi);
+       int rxcomplete = 0;
+       int txcomplete = 0;
+       unsigned long flags;
+
+       txcomplete = tse_tx_complete(priv);
+
+       rxcomplete = tse_rx(priv, budget);
+
+       if (rxcomplete >= budget || txcomplete > 0)
+               return rxcomplete;
+
+       napi_gro_flush(napi, false);
+       __napi_complete(napi);
+
+       netdev_dbg(priv->dev,
+                  "NAPI Complete, did %d packets with budget %d\n",
+                  txcomplete + rxcomplete, budget);
+
+       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+       priv->dmaops->enable_rxirq(priv);
+       priv->dmaops->enable_txirq(priv);
+       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+       return rxcomplete + txcomplete;
+}
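+
+/* Note: Rx and Tx completions share this poll routine; the DMA interrupts
+ * masked in altera_isr() are only re-enabled on the path above where
+ * __napi_complete() runs, under rxdma_irq_lock.
+ */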
+
+/* DMA TX & RX FIFO interrupt routing
+ */
+static irqreturn_t altera_isr(int irq, void *dev_id)
+{
+       struct net_device *dev = dev_id;
+       struct altera_tse_private *priv;
+       unsigned long flags;
+
+       if (unlikely(!dev)) {
+               pr_err("%s: invalid dev pointer\n", __func__);
+               return IRQ_NONE;
+       }
+       priv = netdev_priv(dev);
+
+       /* turn off desc irqs and enable napi rx */
+       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+
+       if (likely(napi_schedule_prep(&priv->napi))) {
+               priv->dmaops->disable_rxirq(priv);
+               priv->dmaops->disable_txirq(priv);
+               __napi_schedule(&priv->napi);
+       }
+
+       /* reset IRQs */
+       priv->dmaops->clear_rxirq(priv);
+       priv->dmaops->clear_txirq(priv);
+
+       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+/* Transmit a packet (called by the kernel). Dispatches to either the
+ * SGDMA or the MSGDMA transmit method. Scatter/gather is not supported,
+ * so the packet is assumed to be a single physically contiguous fragment
+ * starting at skb->data with length skb_headlen(skb).
+ */
+static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       unsigned int txsize = priv->tx_ring_size;
+       unsigned int entry;
+       struct tse_buffer *buffer = NULL;
+       int nfrags = skb_shinfo(skb)->nr_frags;
+       unsigned int nopaged_len = skb_headlen(skb);
+       enum netdev_tx ret = NETDEV_TX_OK;
+       dma_addr_t dma_addr;
+       int txcomplete = 0;
+
+       spin_lock_bh(&priv->tx_lock);
+
+       if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
+               if (!netif_queue_stopped(dev)) {
+                       netif_stop_queue(dev);
+                       /* This is a hard error, log it. */
+                       netdev_err(priv->dev,
+                                  "%s: Tx list full when queue awake\n",
+                                  __func__);
+               }
+               ret = NETDEV_TX_BUSY;
+               goto out;
+       }
+
+       /* Map the first skb fragment */
+       entry = priv->tx_prod % txsize;
+       buffer = &priv->tx_ring[entry];
+
+       dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->device, dma_addr)) {
+               netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(skb);
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
+       buffer->skb = skb;
+       buffer->dma_addr = dma_addr;
+       buffer->len = nopaged_len;
+
+       /* Push data out of the cache hierarchy into main memory */
+       dma_sync_single_for_device(priv->device, buffer->dma_addr,
+                                  buffer->len, DMA_TO_DEVICE);
+
+       txcomplete = priv->dmaops->tx_buffer(priv, buffer);
+
+       skb_tx_timestamp(skb);
+
+       priv->tx_prod++;
+       dev->stats.tx_bytes += skb->len;
+
+       if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHOLD)) {
+               if (netif_msg_hw(priv))
+                       netdev_dbg(priv->dev, "%s: stopping transmit queue\n",
+                                  __func__);
+               netif_stop_queue(dev);
+       }
+
+out:
+       spin_unlock_bh(&priv->tx_lock);
+
+       return ret;
+}
+
+/* Called every time the controller might need to be made
+ * aware of new link state.  The PHY code conveys this
+ * information through variables in the phydev structure, and this
+ * function converts those variables into the appropriate
+ * register values, and can bring down the device if needed.
+ */
+static void altera_tse_adjust_link(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       int new_state = 0;
+
+       /* only change config if there is a link */
+       spin_lock(&priv->mac_cfg_lock);
+       if (phydev->link) {
+               /* Read old config */
+               u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
+
+               /* Check duplex */
+               if (phydev->duplex != priv->oldduplex) {
+                       new_state = 1;
+                       if (!(phydev->duplex))
+                               cfg_reg |= MAC_CMDCFG_HD_ENA;
+                       else
+                               cfg_reg &= ~MAC_CMDCFG_HD_ENA;
+
+                       netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
+                                  dev->name, phydev->duplex);
+
+                       priv->oldduplex = phydev->duplex;
+               }
+
+               /* Check speed */
+               if (phydev->speed != priv->oldspeed) {
+                       new_state = 1;
+                       switch (phydev->speed) {
+                       case 1000:
+                               cfg_reg |= MAC_CMDCFG_ETH_SPEED;
+                               cfg_reg &= ~MAC_CMDCFG_ENA_10;
+                               break;
+                       case 100:
+                               cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
+                               cfg_reg &= ~MAC_CMDCFG_ENA_10;
+                               break;
+                       case 10:
+                               cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
+                               cfg_reg |= MAC_CMDCFG_ENA_10;
+                               break;
+                       default:
+                               if (netif_msg_link(priv))
+                                       netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
+                                                   phydev->speed);
+                               break;
+                       }
+                       priv->oldspeed = phydev->speed;
+               }
+               iowrite32(cfg_reg, &priv->mac_dev->command_config);
+
+               if (!priv->oldlink) {
+                       new_state = 1;
+                       priv->oldlink = 1;
+               }
+       } else if (priv->oldlink) {
+               new_state = 1;
+               priv->oldlink = 0;
+               priv->oldspeed = 0;
+               priv->oldduplex = -1;
+       }
+
+       if (new_state && netif_msg_link(priv))
+               phy_print_status(phydev);
+
+       spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phy_device *connect_local_phy(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct phy_device *phydev = NULL;
+       char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+       int ret;
+
+       if (priv->phy_addr != POLL_PHY) {
+               snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+                        priv->mdio->id, priv->phy_addr);
+
+               netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
+
+               phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
+                                    priv->phy_iface);
+               if (IS_ERR(phydev))
+                       netdev_err(dev, "Could not attach to PHY\n");
+
+       } else {
+               phydev = phy_find_first(priv->mdio);
+               if (phydev == NULL) {
+                       netdev_err(dev, "No PHY found\n");
+                       return phydev;
+               }
+
+               ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
+                               priv->phy_iface);
+               if (ret != 0) {
+                       netdev_err(dev, "Could not attach to PHY\n");
+                       phydev = NULL;
+               }
+       }
+       return phydev;
+}
+
+/* Initialize driver's PHY state, and attach to the PHY
+ */
+static int init_phy(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct phy_device *phydev;
+       struct device_node *phynode;
+
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
+       phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
+
+       if (!phynode) {
+               netdev_dbg(dev, "no phy-handle found\n");
+               if (!priv->mdio) {
+                       netdev_err(dev,
+                                  "No phy-handle nor local mdio specified\n");
+                       return -ENODEV;
+               }
+               phydev = connect_local_phy(dev);
+       } else {
+               netdev_dbg(dev, "phy-handle found\n");
+               phydev = of_phy_connect(dev, phynode,
+                       &altera_tse_adjust_link, 0, priv->phy_iface);
+       }
+
+       if (!phydev) {
+               netdev_err(dev, "Could not find the PHY\n");
+               return -ENODEV;
+       }
+
+       /* Stop Advertising 1000BASE Capability if interface is not GMII
+        * Note: Checkpatch throws CHECKs for the camel case defines below,
+        * it's ok to ignore.
+        */
+       if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
+           (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
+               phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+                                        SUPPORTED_1000baseT_Full);
+
+       /* Broken HW is sometimes missing the pull-up resistor on the
+        * MDIO line, which results in reads to non-existent devices returning
+        * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+        * device as well.
+        * Note: phydev->phy_id is the result of reading the UID PHY registers.
+        */
+       if (phydev->phy_id == 0) {
+               netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
+               phy_disconnect(phydev);
+               return -ENODEV;
+       }
+
+       netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
+                  phydev->addr, phydev->phy_id, phydev->link);
+
+       priv->phydev = phydev;
+       return 0;
+}
+
+static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+{
+       struct altera_tse_mac *mac = priv->mac_dev;
+       u32 msb;
+       u32 lsb;
+
+       msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+       lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
+
+       /* Set primary MAC address */
+       iowrite32(msb, &mac->mac_addr_0);
+       iowrite32(lsb, &mac->mac_addr_1);
+}
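+
+/* Example (arbitrary address): for 00:11:22:33:44:55 this writes
+ * msb = 0x33221100 and lsb = 0x00005544, i.e. the address is packed
+ * little-endian across the mac_addr_0/mac_addr_1 register pair.
+ */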
+
+/* MAC software reset.
+ * When reset is triggered, the MAC function completes the current
+ * transmission or reception, and subsequently disables the transmit and
+ * receive logic, flushes the receive FIFO buffer, and resets the statistics
+ * counters.
+ */
+static int reset_mac(struct altera_tse_private *priv)
+{
+       void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
+       int counter;
+       u32 dat;
+
+       dat = ioread32(cmd_cfg_reg);
+       dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
+       dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
+       iowrite32(dat, cmd_cfg_reg);
+
+       counter = 0;
+       while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+               if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
+                       break;
+               udelay(1);
+       }
+
+       if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
+               dat = ioread32(cmd_cfg_reg);
+               dat &= ~MAC_CMDCFG_SW_RESET;
+               iowrite32(dat, cmd_cfg_reg);
+               return -1;
+       }
+       return 0;
+}
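+
+/* With udelay(1) per iteration, the loop above bounds the busy-wait to
+ * roughly ALTERA_TSE_SW_RESET_WATCHDOG_CNTR microseconds before the reset
+ * attempt is abandoned.
+ */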
+
+/* Initialize MAC core registers
+ */
+static int init_mac(struct altera_tse_private *priv)
+{
+       struct altera_tse_mac *mac = priv->mac_dev;
+       unsigned int cmd = 0;
+       u32 frm_length;
+
+       /* Setup Rx FIFO */
+       iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
+                 &mac->rx_section_empty);
+       iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
+       iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
+       iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
+
+       /* Setup Tx FIFO */
+       iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
+                 &mac->tx_section_empty);
+       iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
+       iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
+       iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
+
+       /* MAC Address Configuration */
+       tse_update_mac_addr(priv, priv->dev->dev_addr);
+
+       /* MAC Function Configuration */
+       frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
+       iowrite32(frm_length, &mac->frm_length);
+       iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
+
+       /* Disable RX/TX shift 16 for alignment of all received frames on a
+        * 16-bit start address
+        */
+       tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+       tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
+                                        ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
+
+       /* Set the MAC options */
+       cmd = ioread32(&mac->command_config);
+       cmd |= MAC_CMDCFG_PAD_EN;       /* Padding Removal on Receive */
+       cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
+       cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
+                                        * with CRC errors
+                                        */
+       cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
+       cmd &= ~MAC_CMDCFG_TX_ENA;
+       cmd &= ~MAC_CMDCFG_RX_ENA;
+       iowrite32(cmd, &mac->command_config);
+
+       if (netif_msg_hw(priv))
+               dev_dbg(priv->device,
+                       "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
+
+       return 0;
+}
+
+/* Start/stop MAC transmission logic
+ */
+static void tse_set_mac(struct altera_tse_private *priv, bool enable)
+{
+       struct altera_tse_mac *mac = priv->mac_dev;
+       u32 value = ioread32(&mac->command_config);
+
+       if (enable)
+               value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
+       else
+               value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
+
+       iowrite32(value, &mac->command_config);
+}
+
+/* Change the MTU
+ */
+static int tse_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       unsigned int max_mtu = priv->max_mtu;
+       unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
+
+       if (netif_running(dev)) {
+               netdev_err(dev, "must be stopped to change its MTU\n");
+               return -EBUSY;
+       }
+
+       if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
+               netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
+               return -EINVAL;
+       }
+
+       dev->mtu = new_mtu;
+       netdev_update_features(dev);
+
+       return 0;
+}
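+
+/* With the standard definitions the lower bound above works out to
+ * ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64, while the upper bound comes from
+ * the "max-frame-size" device tree property read at probe time.
+ */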
+
+static void altera_tse_set_mcfilter(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct altera_tse_mac *mac = priv->mac_dev;
+       int i;
+       struct netdev_hw_addr *ha;
+
+       /* clear the hash filter */
+       for (i = 0; i < 64; i++)
+               iowrite32(0, &(mac->hash_table[i]));
+
+       netdev_for_each_mc_addr(ha, dev) {
+               unsigned int hash = 0;
+               int mac_octet;
+
+               for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
+                       unsigned char xor_bit = 0;
+                       unsigned char octet = ha->addr[mac_octet];
+                       unsigned int bitshift;
+
+                       for (bitshift = 0; bitshift < 8; bitshift++)
+                               xor_bit ^= ((octet >> bitshift) & 0x01);
+
+                       hash = (hash << 1) | xor_bit;
+               }
+               iowrite32(1, &(mac->hash_table[hash]));
+       }
+}
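+
+/* Worked example (illustrative multicast address): each hash bit is the
+ * parity of one address octet, taken from octet 5 down to octet 0. For
+ * 01:00:5e:00:00:01 the parities are 1,0,0,1,0,1, so hash = 0b100101 = 37
+ * and entry 37 of the 64-entry hash table is set.
+ */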
+
+static void altera_tse_set_mcfilterall(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct altera_tse_mac *mac = priv->mac_dev;
+       int i;
+
+       /* set the hash filter */
+       for (i = 0; i < 64; i++)
+               iowrite32(1, &(mac->hash_table[i]));
+}
+
+/* Set or clear the multicast filter for this adaptor
+ */
+static void tse_set_rx_mode_hashfilter(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct altera_tse_mac *mac = priv->mac_dev;
+
+       spin_lock(&priv->mac_cfg_lock);
+
+       if (dev->flags & IFF_PROMISC)
+               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+
+       if (dev->flags & IFF_ALLMULTI)
+               altera_tse_set_mcfilterall(dev);
+       else
+               altera_tse_set_mcfilter(dev);
+
+       spin_unlock(&priv->mac_cfg_lock);
+}
+
+/* Set or clear the multicast filter for this adaptor
+ */
+static void tse_set_rx_mode(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       struct altera_tse_mac *mac = priv->mac_dev;
+
+       spin_lock(&priv->mac_cfg_lock);
+
+       if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
+           !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
+               tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+       else
+               tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
+
+       spin_unlock(&priv->mac_cfg_lock);
+}
+
+/* Open and initialize the interface
+ */
+static int tse_open(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       int ret = 0;
+       int i;
+       unsigned long flags;
+
+       /* Reset and configure TSE MAC and probe associated PHY */
+       ret = priv->dmaops->init_dma(priv);
+       if (ret != 0) {
+               netdev_err(dev, "Cannot initialize DMA\n");
+               goto phy_error;
+       }
+
+       if (netif_msg_ifup(priv))
+               netdev_warn(dev, "device MAC address %pM\n",
+                           dev->dev_addr);
+
+       if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
+               netdev_warn(dev, "TSE revision %x\n", priv->revision);
+
+       spin_lock(&priv->mac_cfg_lock);
+       ret = reset_mac(priv);
+       if (ret)
+               netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+
+       ret = init_mac(priv);
+       spin_unlock(&priv->mac_cfg_lock);
+       if (ret) {
+               netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
+               goto alloc_skbuf_error;
+       }
+
+       priv->dmaops->reset_dma(priv);
+
+       /* Create and initialize the TX/RX descriptors chains. */
+       priv->rx_ring_size = dma_rx_num;
+       priv->tx_ring_size = dma_tx_num;
+       ret = alloc_init_skbufs(priv);
+       if (ret) {
+               netdev_err(dev, "DMA descriptors initialization failed\n");
+               goto alloc_skbuf_error;
+       }
+
+       /* Register RX interrupt */
+       ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
+                         dev->name, dev);
+       if (ret) {
+               netdev_err(dev, "Unable to register RX interrupt %d\n",
+                          priv->rx_irq);
+               goto init_error;
+       }
+
+       /* Register TX interrupt */
+       ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
+                         dev->name, dev);
+       if (ret) {
+               netdev_err(dev, "Unable to register TX interrupt %d\n",
+                          priv->tx_irq);
+               goto tx_request_irq_error;
+       }
+
+       /* Enable DMA interrupts */
+       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+       priv->dmaops->enable_rxirq(priv);
+       priv->dmaops->enable_txirq(priv);
+
+       /* Setup RX descriptor chain */
+       for (i = 0; i < priv->rx_ring_size; i++)
+               priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
+
+       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+       /* Start MAC Rx/Tx */
+       spin_lock(&priv->mac_cfg_lock);
+       tse_set_mac(priv, true);
+       spin_unlock(&priv->mac_cfg_lock);
+
+       if (priv->phydev)
+               phy_start(priv->phydev);
+
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       return 0;
+
+tx_request_irq_error:
+       free_irq(priv->rx_irq, dev);
+init_error:
+       free_skbufs(dev);
+alloc_skbuf_error:
+       if (priv->phydev) {
+               phy_disconnect(priv->phydev);
+               priv->phydev = NULL;
+       }
+phy_error:
+       return ret;
+}
+
+/* Stop TSE MAC interface and put the device in an inactive state
+ */
+static int tse_shutdown(struct net_device *dev)
+{
+       struct altera_tse_private *priv = netdev_priv(dev);
+       int ret;
+       unsigned long flags;
+
+       /* Stop and disconnect the PHY */
+       if (priv->phydev) {
+               phy_stop(priv->phydev);
+               phy_disconnect(priv->phydev);
+               priv->phydev = NULL;
+       }
+
+       netif_stop_queue(dev);
+       napi_disable(&priv->napi);
+
+       /* Disable DMA interrupts */
+       spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+       priv->dmaops->disable_rxirq(priv);
+       priv->dmaops->disable_txirq(priv);
+       spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+
+       /* Free the IRQ lines */
+       free_irq(priv->rx_irq, dev);
+       free_irq(priv->tx_irq, dev);
+
+       /* disable and reset the MAC, empties fifo */
+       spin_lock(&priv->mac_cfg_lock);
+       spin_lock(&priv->tx_lock);
+
+       ret = reset_mac(priv);
+       if (ret)
+               netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
+       priv->dmaops->reset_dma(priv);
+       free_skbufs(dev);
+
+       spin_unlock(&priv->tx_lock);
+       spin_unlock(&priv->mac_cfg_lock);
+
+       priv->dmaops->uninit_dma(priv);
+
+       return 0;
+}
+
+static struct net_device_ops altera_tse_netdev_ops = {
+       .ndo_open               = tse_open,
+       .ndo_stop               = tse_shutdown,
+       .ndo_start_xmit         = tse_start_xmit,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_rx_mode        = tse_set_rx_mode,
+       .ndo_change_mtu         = tse_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+};
+
+static int request_and_map(struct platform_device *pdev, const char *name,
+                          struct resource **res, void __iomem **ptr)
+{
+       struct resource *region;
+       struct device *device = &pdev->dev;
+
+       *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+       if (*res == NULL) {
+               dev_err(device, "resource %s not defined\n", name);
+               return -ENODEV;
+       }
+
+       region = devm_request_mem_region(device, (*res)->start,
+                                        resource_size(*res), dev_name(device));
+       if (region == NULL) {
+               dev_err(device, "unable to request %s\n", name);
+               return -EBUSY;
+       }
+
+       *ptr = devm_ioremap_nocache(device, region->start,
+                                   resource_size(region));
+       if (*ptr == NULL) {
+               dev_err(device, "ioremap_nocache of %s failed!", name);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Probe Altera TSE MAC device
+ */
+static int altera_tse_probe(struct platform_device *pdev)
+{
+       struct net_device *ndev;
+       int ret = -ENODEV;
+       struct resource *control_port;
+       struct resource *dma_res;
+       struct altera_tse_private *priv;
+       const unsigned char *macaddr;
+       struct device_node *np = pdev->dev.of_node;
+       void __iomem *descmap;
+       const struct of_device_id *of_id = NULL;
+
+       ndev = alloc_etherdev(sizeof(struct altera_tse_private));
+       if (!ndev) {
+               dev_err(&pdev->dev, "Could not allocate network device\n");
+               return -ENODEV;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       priv = netdev_priv(ndev);
+       priv->device = &pdev->dev;
+       priv->dev = ndev;
+       priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+       of_id = of_match_device(altera_tse_ids, &pdev->dev);
+
+       if (of_id)
+               priv->dmaops = (struct altera_dmaops *)of_id->data;
+
+       if (priv->dmaops &&
+           priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
+               /* Get the mapped address to the SGDMA descriptor memory */
+               ret = request_and_map(pdev, "s1", &dma_res, &descmap);
+               if (ret)
+                       goto out_free;
+
+               /* Start of that memory is for transmit descriptors */
+               priv->tx_dma_desc = descmap;
+
+               /* First half is for tx descriptors, other half for rx */
+               priv->txdescmem = resource_size(dma_res)/2;
+
+               priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
+
+               priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
+                                                    priv->txdescmem));
+               priv->rxdescmem = resource_size(dma_res)/2;
+               priv->rxdescmem_busaddr = dma_res->start;
+               priv->rxdescmem_busaddr += priv->txdescmem;
+
+               if (upper_32_bits(priv->rxdescmem_busaddr)) {
+                       dev_dbg(priv->device,
+                               "SGDMA bus addresses greater than 32-bits\n");
+                       ret = -EINVAL;
+                       goto out_free;
+               }
+               if (upper_32_bits(priv->txdescmem_busaddr)) {
+                       dev_dbg(priv->device,
+                               "SGDMA bus addresses greater than 32-bits\n");
+                       ret = -EINVAL;
+                       goto out_free;
+               }
+       } else if (priv->dmaops &&
+                  priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
+               ret = request_and_map(pdev, "rx_resp", &dma_res,
+                                     &priv->rx_dma_resp);
+               if (ret)
+                       goto out_free;
+
+               ret = request_and_map(pdev, "tx_desc", &dma_res,
+                                     &priv->tx_dma_desc);
+               if (ret)
+                       goto out_free;
+
+               priv->txdescmem = resource_size(dma_res);
+               priv->txdescmem_busaddr = dma_res->start;
+
+               ret = request_and_map(pdev, "rx_desc", &dma_res,
+                                     &priv->rx_dma_desc);
+               if (ret)
+                       goto out_free;
+
+               priv->rxdescmem = resource_size(dma_res);
+               priv->rxdescmem_busaddr = dma_res->start;
+
+       } else {
+               goto out_free;
+       }
+
+       if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+               dma_set_coherent_mask(priv->device,
+                                     DMA_BIT_MASK(priv->dmaops->dmamask));
+       else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+               dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
+       else
+               goto out_free;
+
+       /* MAC address space */
+       ret = request_and_map(pdev, "control_port", &control_port,
+                             (void __iomem **)&priv->mac_dev);
+       if (ret)
+               goto out_free;
+
+       /* xSGDMA Rx Dispatcher address space */
+       ret = request_and_map(pdev, "rx_csr", &dma_res,
+                             &priv->rx_dma_csr);
+       if (ret)
+               goto out_free;
+
+       /* xSGDMA Tx Dispatcher address space */
+       ret = request_and_map(pdev, "tx_csr", &dma_res,
+                             &priv->tx_dma_csr);
+       if (ret)
+               goto out_free;
+
+       /* Rx IRQ */
+       priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
+       if (priv->rx_irq == -ENXIO) {
+               dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
+               ret = -ENXIO;
+               goto out_free;
+       }
+
+       /* Tx IRQ */
+       priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
+       if (priv->tx_irq == -ENXIO) {
+               dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
+               ret = -ENXIO;
+               goto out_free;
+       }
+
+       /* get FIFO depths from device tree */
+       if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
+                                &priv->rx_fifo_depth)) {
+               dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
+               ret = -ENXIO;
+               goto out_free;
+       }
+
+       if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+                                &priv->tx_fifo_depth)) {
+               dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
+               ret = -ENXIO;
+               goto out_free;
+       }
+
+       /* get hash filter settings for this instance */
+       priv->hash_filter =
+               of_property_read_bool(pdev->dev.of_node,
+                                     "altr,has-hash-multicast-filter");
+
+       /* get supplemental address settings for this instance */
+       priv->added_unicast =
+               of_property_read_bool(pdev->dev.of_node,
+                                     "altr,has-supplementary-unicast");
+
+       /* Max MTU is 1500, ETH_DATA_LEN */
+       priv->max_mtu = ETH_DATA_LEN;
+
+       /* Get the max mtu from the device tree. Note that the
+        * "max-frame-size" parameter is actually max mtu. Definition
+        * in the ePAPR v1.1 spec and usage differ, so go with usage.
+        */
+       of_property_read_u32(pdev->dev.of_node, "max-frame-size",
+                            &priv->max_mtu);
+
+       /* The DMA buffer size already accounts for an alignment bias
+        * to avoid unaligned access exceptions for the NIOS processor.
+        */
+       priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
+
+       /* get default MAC address from device tree */
+       macaddr = of_get_mac_address(pdev->dev.of_node);
+       if (macaddr)
+               ether_addr_copy(ndev->dev_addr, macaddr);
+       else
+               eth_hw_addr_random(ndev);
+
+       priv->phy_iface = of_get_phy_mode(np);
+
+       /* try to get PHY address from device tree, use PHY autodetection if
+        * no valid address is given
+        */
+       if (of_property_read_u32(pdev->dev.of_node, "phy-addr",
+                                &priv->phy_addr)) {
+               priv->phy_addr = POLL_PHY;
+       }
+
+       if (!((priv->phy_addr == POLL_PHY) ||
+             ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
+               dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
+                       priv->phy_addr);
+               ret = -ENODEV;
+               goto out_free;
+       }
+
+       /* Create/attach to MDIO bus */
+       ret = altera_tse_mdio_create(ndev,
+                                    atomic_add_return(1, &instance_count));
+
+       if (ret)
+               goto out_free;
+
+       /* initialize netdev */
+       ether_setup(ndev);
+       ndev->mem_start = control_port->start;
+       ndev->mem_end = control_port->end;
+       ndev->netdev_ops = &altera_tse_netdev_ops;
+       altera_tse_set_ethtool_ops(ndev);
+
+       altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
+
+       if (priv->hash_filter)
+               altera_tse_netdev_ops.ndo_set_rx_mode =
+                       tse_set_rx_mode_hashfilter;
+
+       /* Scatter/gather IO is not supported,
+        * so it is turned off
+        */
+       ndev->hw_features &= ~NETIF_F_SG;
+       ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+
+       /* VLAN offloading of tagging, stripping and filtering is not
+        * supported by hardware, but driver will accommodate the
+        * extra 4-byte VLAN tag for processing by upper layers
+        */
+       ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+       /* setup NAPI interface */
+       netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+
+       spin_lock_init(&priv->mac_cfg_lock);
+       spin_lock_init(&priv->tx_lock);
+       spin_lock_init(&priv->rxdma_irq_lock);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to register TSE net device\n");
+               goto out_free_mdio;
+       }
+
+       platform_set_drvdata(pdev, ndev);
+
+       priv->revision = ioread32(&priv->mac_dev->megacore_revision);
+
+       if (netif_msg_probe(priv))
+               dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
+                        (priv->revision >> 8) & 0xff,
+                        priv->revision & 0xff,
+                        (unsigned long) control_port->start, priv->rx_irq,
+                        priv->tx_irq);
+
+       ret = init_phy(ndev);
+       if (ret != 0) {
+               netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+               goto out_free_mdio;
+       }
+       return 0;
+
+out_free_mdio:
+       altera_tse_mdio_destroy(ndev);
+out_free:
+       free_netdev(ndev);
+       return ret;
+}
+
+/* Remove Altera TSE MAC device
+ */
+static int altera_tse_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+
+       platform_set_drvdata(pdev, NULL);
+       altera_tse_mdio_destroy(ndev);
+       unregister_netdev(ndev);
+       free_netdev(ndev);
+
+       return 0;
+}
+
+struct altera_dmaops altera_dtype_sgdma = {
+       .altera_dtype = ALTERA_DTYPE_SGDMA,
+       .dmamask = 32,
+       .reset_dma = sgdma_reset,
+       .enable_txirq = sgdma_enable_txirq,
+       .enable_rxirq = sgdma_enable_rxirq,
+       .disable_txirq = sgdma_disable_txirq,
+       .disable_rxirq = sgdma_disable_rxirq,
+       .clear_txirq = sgdma_clear_txirq,
+       .clear_rxirq = sgdma_clear_rxirq,
+       .tx_buffer = sgdma_tx_buffer,
+       .tx_completions = sgdma_tx_completions,
+       .add_rx_desc = sgdma_add_rx_desc,
+       .get_rx_status = sgdma_rx_status,
+       .init_dma = sgdma_initialize,
+       .uninit_dma = sgdma_uninitialize,
+};
+
+struct altera_dmaops altera_dtype_msgdma = {
+       .altera_dtype = ALTERA_DTYPE_MSGDMA,
+       .dmamask = 64,
+       .reset_dma = msgdma_reset,
+       .enable_txirq = msgdma_enable_txirq,
+       .enable_rxirq = msgdma_enable_rxirq,
+       .disable_txirq = msgdma_disable_txirq,
+       .disable_rxirq = msgdma_disable_rxirq,
+       .clear_txirq = msgdma_clear_txirq,
+       .clear_rxirq = msgdma_clear_rxirq,
+       .tx_buffer = msgdma_tx_buffer,
+       .tx_completions = msgdma_tx_completions,
+       .add_rx_desc = msgdma_add_rx_desc,
+       .get_rx_status = msgdma_rx_status,
+       .init_dma = msgdma_initialize,
+       .uninit_dma = msgdma_uninitialize,
+};
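+
+/* One of the two ops tables above is selected at probe time via the .data
+ * pointer of the matching of_device_id entry below.
+ */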
+
+static struct of_device_id altera_tse_ids[] = {
+       { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
+       { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
+       { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
+       {},
+};
+MODULE_DEVICE_TABLE(of, altera_tse_ids);
+
+static struct platform_driver altera_tse_driver = {
+       .probe          = altera_tse_probe,
+       .remove         = altera_tse_remove,
+       .suspend        = NULL,
+       .resume         = NULL,
+       .driver         = {
+               .name   = ALTERA_TSE_RESOURCE_NAME,
+               .owner  = THIS_MODULE,
+               .of_match_table = altera_tse_ids,
+       },
+};
+
+module_platform_driver(altera_tse_driver);
+
+MODULE_AUTHOR("Altera Corporation");
+MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
new file mode 100644 (file)
index 0000000..70fa13f
--- /dev/null
@@ -0,0 +1,44 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "altera_tse.h"
+#include "altera_utils.h"
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+       u32 value = ioread32(ioaddr);
+       value |= bit_mask;
+       iowrite32(value, ioaddr);
+}
+
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+       u32 value = ioread32(ioaddr);
+       value &= ~bit_mask;
+       iowrite32(value, ioaddr);
+}
+
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+{
+       u32 value = ioread32(ioaddr);
+       return (value & bit_mask) ? 1 : 0;
+}
+
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+{
+       u32 value = ioread32(ioaddr);
+       return (value & bit_mask) ? 0 : 1;
+}
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
new file mode 100644 (file)
index 0000000..ce1db36
--- /dev/null
@@ -0,0 +1,27 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#ifndef __ALTERA_UTILS_H__
+#define __ALTERA_UTILS_H__
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+
+#endif /* __ALTERA_UTILS_H__*/
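
A short usage sketch for the bit helpers declared above, only to show the intended calling pattern; tse_wait_ready() and ready_bit are illustrative names, and <linux/delay.h> plus <linux/errno.h> are assumed:

static int tse_wait_ready(void __iomem *csr, u32 ready_bit, int tries)
{
	while (tries--) {
		if (tse_bit_is_set(csr, ready_bit))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}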
index 18e542f7853d384c60ae6aec96e2ae1a1a19e3d7..98a10d555b793e029d20b6694ba1fab7cd13a053 100644 (file)
@@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
        outs++;
        /* Kick the lance: transmit now */
        WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        spin_lock_irqsave(&lp->devlock, flags);
        if (TX_BUFFS_AVAIL)
index 9793767996a2a8e836259adde6fa0a342cd0f4d0..87e727b921dc0a20f99a6bdb084d2a6e3b168f9e 100644 (file)
@@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
        if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
                netif_stop_queue(dev);
 
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
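
The dev_kfree_skb() conversions above follow a convention: a buffer handed off successfully is consumed, a buffer thrown away on an error path is dropped, and the *_any() variants are safe from any context (hard IRQ, softirq or process). A hedged sketch of that split; example_tx_complete() is a hypothetical helper, not code from either driver:

static netdev_tx_t example_tx_complete(struct net_device *dev,
				       struct sk_buff *skb, bool sent_ok)
{
	if (!sent_ok) {
		dev_kfree_skb_any(skb);		/* error path: a real drop */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	dev_consume_skb_any(skb);		/* success: not reported as a drop */
	return NETDEV_TX_OK;
}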
index 2061b471fd161b92cd9c4161fa15b955b63a28e2..26efaaa5e73fd292de512fc428e9af84126fcbb9 100644 (file)
@@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
        int rx_pkt_limit = budget;
        unsigned long flags;
 
+       if (rx_pkt_limit <= 0)
+               goto rx_not_empty;
+
        do{
                /* process receive packets until we use the quota*/
                /* If we own the next entry, it's a new packet. Send it up. */
index 9339cccfe05a35977493a0fbaf766b91f49f8acb..e7cc9174e364162c68652e8c083e1d66fd5eefa0 100644 (file)
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
        struct pcnet32_rx_head *new_rx_ring;
        struct sk_buff **new_skb_list;
        int new, overlap;
+       unsigned int entries = 1 << size;
 
        new_rx_ring = pci_alloc_consistent(lp->pci_dev,
                                           sizeof(struct pcnet32_rx_head) *
-                                          (1 << size),
+                                          entries,
                                           &new_ring_dma_addr);
        if (new_rx_ring == NULL) {
                netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
                return;
        }
-       memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+       memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
 
-       new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+       new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
        if (!new_dma_addr_list)
                goto free_new_rx_ring;
 
-       new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
-                              GFP_ATOMIC);
+       new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
        if (!new_skb_list)
                goto free_new_lists;
 
        /* first copy the current receive buffers */
-       overlap = min(size, lp->rx_ring_size);
+       overlap = min(entries, lp->rx_ring_size);
        for (new = 0; new < overlap; new++) {
                new_rx_ring[new] = lp->rx_ring[new];
                new_dma_addr_list[new] = lp->rx_dma_addr[new];
                new_skb_list[new] = lp->rx_skbuff[new];
        }
        /* now allocate any new buffers needed */
-       for (; new < size; new++) {
+       for (; new < entries; new++) {
                struct sk_buff *rx_skbuff;
                new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
                rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
                new_dma_addr_list[new] =
                            pci_map_single(lp->pci_dev, rx_skbuff->data,
                                           PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(lp->pci_dev,
+                                         new_dma_addr_list[new])) {
+                       netif_err(lp, drv, dev, "%s dma mapping failed\n",
+                                 __func__);
+                       dev_kfree_skb(new_skb_list[new]);
+                       goto free_all_new;
+               }
                new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
                new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
                new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
        /* and free any unneeded buffers */
        for (; new < lp->rx_ring_size; new++) {
                if (lp->rx_skbuff[new]) {
-                       pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
-                                        PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       if (!pci_dma_mapping_error(lp->pci_dev,
+                                                  lp->rx_dma_addr[new]))
+                               pci_unmap_single(lp->pci_dev,
+                                                lp->rx_dma_addr[new],
+                                                PKT_BUF_SIZE,
+                                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(lp->rx_skbuff[new]);
                }
        }
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
                            lp->rx_ring_size, lp->rx_ring,
                            lp->rx_ring_dma_addr);
 
-       lp->rx_ring_size = (1 << size);
+       lp->rx_ring_size = entries;
        lp->rx_mod_mask = lp->rx_ring_size - 1;
        lp->rx_len_bits = (size << 4);
        lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 free_all_new:
        while (--new >= lp->rx_ring_size) {
                if (new_skb_list[new]) {
-                       pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
-                                        PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       if (!pci_dma_mapping_error(lp->pci_dev,
+                                                  new_dma_addr_list[new]))
+                               pci_unmap_single(lp->pci_dev,
+                                                new_dma_addr_list[new],
+                                                PKT_BUF_SIZE,
+                                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(new_skb_list[new]);
                }
        }
@@ -634,8 +649,7 @@ free_new_lists:
        kfree(new_dma_addr_list);
 free_new_rx_ring:
        pci_free_consistent(lp->pci_dev,
-                           sizeof(struct pcnet32_rx_head) *
-                           (1 << size),
+                           sizeof(struct pcnet32_rx_head) * entries,
                            new_rx_ring,
                            new_ring_dma_addr);
 }
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
                lp->rx_ring[i].status = 0;      /* CPU owns buffer */
                wmb();          /* Make sure adapter sees owner change */
                if (lp->rx_skbuff[i]) {
-                       pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-                                        PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       if (!pci_dma_mapping_error(lp->pci_dev,
+                                                  lp->rx_dma_addr[i]))
+                               pci_unmap_single(lp->pci_dev,
+                                                lp->rx_dma_addr[i],
+                                                PKT_BUF_SIZE,
+                                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(lp->rx_skbuff[i]);
                }
                lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
                lp->tx_dma_addr[x] =
                        pci_map_single(lp->pci_dev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
+               if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+                       netif_printk(lp, hw, KERN_DEBUG, dev,
+                                    "DMA mapping error at line: %d!\n",
+                                    __LINE__);
+                       goto clean_up;
+               }
                lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
                wmb();  /* Make sure owner changes after all others are visible */
                lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
 
        if (pkt_len > rx_copybreak) {
                struct sk_buff *newskb;
+               dma_addr_t new_dma_addr;
 
                newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+               /*
+                * map the new buffer; if mapping fails, drop the packet and
+                * reuse the old buffer
+                */
                if (newskb) {
                        skb_reserve(newskb, NET_IP_ALIGN);
-                       skb = lp->rx_skbuff[entry];
-                       pci_unmap_single(lp->pci_dev,
-                                        lp->rx_dma_addr[entry],
-                                        PKT_BUF_SIZE,
-                                        PCI_DMA_FROMDEVICE);
-                       skb_put(skb, pkt_len);
-                       lp->rx_skbuff[entry] = newskb;
-                       lp->rx_dma_addr[entry] =
-                                           pci_map_single(lp->pci_dev,
-                                                          newskb->data,
-                                                          PKT_BUF_SIZE,
-                                                          PCI_DMA_FROMDEVICE);
-                       rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
-                       rx_in_place = 1;
+                       new_dma_addr = pci_map_single(lp->pci_dev,
+                                                     newskb->data,
+                                                     PKT_BUF_SIZE,
+                                                     PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+                               netif_err(lp, rx_err, dev,
+                                         "DMA mapping error.\n");
+                               dev_kfree_skb(newskb);
+                               skb = NULL;
+                       } else {
+                               skb = lp->rx_skbuff[entry];
+                               pci_unmap_single(lp->pci_dev,
+                                                lp->rx_dma_addr[entry],
+                                                PKT_BUF_SIZE,
+                                                PCI_DMA_FROMDEVICE);
+                               skb_put(skb, pkt_len);
+                               lp->rx_skbuff[entry] = newskb;
+                               lp->rx_dma_addr[entry] = new_dma_addr;
+                               rxp->base = cpu_to_le32(new_dma_addr);
+                               rx_in_place = 1;
+                       }
                } else
                        skb = NULL;
        } else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
                lp->tx_ring[i].status = 0;      /* CPU owns buffer */
                wmb();          /* Make sure adapter sees owner change */
                if (lp->tx_skbuff[i]) {
-                       pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
-                                        lp->tx_skbuff[i]->len,
-                                        PCI_DMA_TODEVICE);
+                       if (!pci_dma_mapping_error(lp->pci_dev,
+                                                  lp->tx_dma_addr[i]))
+                               pci_unmap_single(lp->pci_dev,
+                                                lp->tx_dma_addr[i],
+                                                lp->tx_skbuff[i]->len,
+                                                PCI_DMA_TODEVICE);
                        dev_kfree_skb_any(lp->tx_skbuff[i]);
                }
                lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
                }
 
                rmb();
-               if (lp->rx_dma_addr[i] == 0)
+               if (lp->rx_dma_addr[i] == 0) {
                        lp->rx_dma_addr[i] =
                            pci_map_single(lp->pci_dev, rx_skbuff->data,
                                           PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(lp->pci_dev,
+                                                 lp->rx_dma_addr[i])) {
+                               /* there is not much we can do at this point */
+                               netif_err(lp, drv, dev,
+                                         "%s pci dma mapping error\n",
+                                         __func__);
+                               return -1;
+                       }
+               }
                lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
                lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
                wmb();          /* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
 
        lp->tx_ring[entry].misc = 0x00000000;
 
-       lp->tx_skbuff[entry] = skb;
        lp->tx_dma_addr[entry] =
            pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+               dev_kfree_skb_any(skb);
+               dev->stats.tx_dropped++;
+               goto drop_packet;
+       }
+       lp->tx_skbuff[entry] = skb;
        lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
        wmb();                  /* Make sure owner changes after all others are visible */
        lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
                lp->tx_full = 1;
                netif_stop_queue(dev);
        }
+drop_packet:
        spin_unlock_irqrestore(&lp->lock, flags);
        return NETDEV_TX_OK;
 }
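
The pcnet32 hunks above add a pci_dma_mapping_error() check after every pci_map_single() so that an unmapped address is never written into a descriptor. A condensed sketch of the pattern, assuming the driver's struct pcnet32_private as used in the hunks; pcnet32_map_and_send() is a made-up wrapper, not part of the patch:

static netdev_tx_t pcnet32_map_and_send(struct pcnet32_private *lp,
					struct net_device *dev,
					struct sk_buff *skb)
{
	dma_addr_t addr = pci_map_single(lp->pci_dev, skb->data, skb->len,
					 PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(lp->pci_dev, addr)) {
		dev_kfree_skb_any(skb);		/* never hand a bad address to the chip */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... write 'addr' into the descriptor ring as the hunks above do ... */
	return NETDEV_TX_OK;
}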
index 380d24922049d97beeedd523e04e9ba9f8033bc5..17bb9ce96260df20eba44a9f215778a62c28373e 100644 (file)
@@ -535,7 +535,7 @@ static int alx_alloc_descriptors(struct alx_priv *alx)
        if (!alx->descmem.virt)
                goto out_free;
 
-       alx->txq.tpd = (void *)alx->descmem.virt;
+       alx->txq.tpd = alx->descmem.virt;
        alx->txq.tpd_dma = alx->descmem.dma;
 
        /* alignment requirement for next block */
@@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
 drop:
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
index 4d3258dd0a88143d77f8e39febf16800ce41ad2d..e11bf18fbbd19880cb5ea1557dbc0d78cbeccfd4 100644 (file)
@@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
 }
 
 static inline void atl1c_clean_buffer(struct pci_dev *pdev,
-                               struct atl1c_buffer *buffer_info, int in_irq)
+                               struct atl1c_buffer *buffer_info)
 {
        u16 pci_driection;
        if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
                        pci_unmap_page(pdev, buffer_info->dma,
                                        buffer_info->length, pci_driection);
        }
-       if (buffer_info->skb) {
-               if (in_irq)
-                       dev_kfree_skb_irq(buffer_info->skb);
-               else
-                       dev_kfree_skb(buffer_info->skb);
-       }
+       if (buffer_info->skb)
+               dev_consume_skb_any(buffer_info->skb);
        buffer_info->dma = 0;
        buffer_info->skb = NULL;
        ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
        ring_count = tpd_ring->count;
        for (index = 0; index < ring_count; index++) {
                buffer_info = &tpd_ring->buffer_info[index];
-               atl1c_clean_buffer(pdev, buffer_info, 0);
+               atl1c_clean_buffer(pdev, buffer_info);
        }
 
        /* Zero out Tx-buffers */
@@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
 
        for (j = 0; j < rfd_ring->count; j++) {
                buffer_info = &rfd_ring->buffer_info[j];
-               atl1c_clean_buffer(pdev, buffer_info, 0);
+               atl1c_clean_buffer(pdev, buffer_info);
        }
        /* zero out the descriptor ring */
        memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
 
        while (next_to_clean != hw_next_to_clean) {
                buffer_info = &tpd_ring->buffer_info[next_to_clean];
-               atl1c_clean_buffer(pdev, buffer_info, 1);
+               atl1c_clean_buffer(pdev, buffer_info);
                if (++next_to_clean == tpd_ring->count)
                        next_to_clean = 0;
                atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1977,17 +1973,17 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
                          enum atl1c_trans_queue type)
 {
        struct pci_dev *pdev = adapter->pdev;
+       unsigned short offload_type;
        u8 hdr_len;
        u32 real_len;
-       unsigned short offload_type;
-       int err;
 
        if (skb_is_gso(skb)) {
-               if (skb_header_cloned(skb)) {
-                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                       if (unlikely(err))
-                               return -1;
-               }
+               int err;
+
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
+                       return err;
+
                offload_type = skb_shinfo(skb)->gso_type;
 
                if (offload_type & SKB_GSO_TCPV4) {
@@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
        while (index != tpd_ring->next_to_use) {
                tpd = ATL1C_TPD_DESC(tpd_ring, index);
                buffer_info = &tpd_ring->buffer_info[index];
-               atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+               atl1c_clean_buffer(adpt->pdev, buffer_info);
                memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
                if (++index == tpd_ring->count)
                        index = 0;
@@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
                /* roll back tpd/buffer */
                atl1c_tx_rollback(adapter, tpd, type);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
        } else {
                atl1c_tx_queue(adapter, skb, tpd, type);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
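
The atl1c change above (mirrored in the atl1e and atl1 hunks below) replaces the open-coded skb_header_cloned()/pskb_expand_head() pair with skb_cow_head(). A minimal sketch of the resulting shape; example_prepare_tso() is a hypothetical name, not driver code:

static int example_prepare_tso(struct sk_buff *skb)
{
	int err;

	if (!skb_is_gso(skb))
		return 0;

	/* skb_cow_head() performs the cloned-header check itself and
	 * only reallocates when the headers are not privately writable.
	 */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;	/* e.g. -ENOMEM; caller drops the packet */

	return 0;
}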
index 422aab27ea1bb7b3e9f52e7d44ba6d157a1c2647..4345332533adb5f9108fd7d9a1fbc7a138109a3c 100644 (file)
@@ -1641,17 +1641,17 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
 static int atl1e_tso_csum(struct atl1e_adapter *adapter,
                       struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
 {
+       unsigned short offload_type;
        u8 hdr_len;
        u32 real_len;
-       unsigned short offload_type;
-       int err;
 
        if (skb_is_gso(skb)) {
-               if (skb_header_cloned(skb)) {
-                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                       if (unlikely(err))
-                               return -1;
-               }
+               int err;
+
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
+                       return err;
+
                offload_type = skb_shinfo(skb)->gso_type;
 
                if (offload_type & SKB_GSO_TCPV4) {
index 287272dd69daefcbf7a0599742e00e03072f3f70..dfd0e91fa726852818b48b2853e9d60533d01dd7 100644 (file)
@@ -2118,18 +2118,17 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
 }
 
 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
-       struct tx_packet_desc *ptpd)
+                   struct tx_packet_desc *ptpd)
 {
        u8 hdr_len, ip_off;
        u32 real_len;
-       int err;
 
        if (skb_shinfo(skb)->gso_size) {
-               if (skb_header_cloned(skb)) {
-                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                       if (unlikely(err))
-                               return -1;
-               }
+               int err;
+
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
+                       return err;
 
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
@@ -2175,7 +2174,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
                        return 3;
                }
        }
-       return false;
+       return 0;
 }
 
 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
index 265ce1b752ed0b169a927f6dad148e694ca4c5a2..78befb522a528268fae32c68649ead0a01263366 100644 (file)
@@ -55,6 +55,7 @@ static const char atl2_driver_name[] = "atl2";
 static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
 static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
 static const char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const struct ethtool_ops atl2_ethtool_ops;
 
 MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
 MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -71,8 +72,6 @@ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
 
-static void atl2_set_ethtool_ops(struct net_device *netdev);
-
 static void atl2_check_options(struct atl2_adapter *adapter);
 
 /**
@@ -1397,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        atl2_setup_pcicmd(pdev);
 
        netdev->netdev_ops = &atl2_netdev_ops;
-       atl2_set_ethtool_ops(netdev);
+       SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
        netdev->watchdog_timeo = 5 * HZ;
        strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
@@ -2105,11 +2104,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
        .set_eeprom             = atl2_set_eeprom,
 };
 
-static void atl2_set_ethtool_ops(struct net_device *netdev)
-{
-       SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
-}
-
 #define LBYTESWAP(a)  ((((a) & 0x00ff00ff) << 8) | \
        (((a) & 0xff00ff00) >> 8))
 #define LONGSWAP(a)   ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
index 3f97d9fd0a71b66ec28c9db4fcc1df63ccf71b29..85dbddd03722b20a861f53cba7fe00b6cb66f3db 100644 (file)
@@ -60,6 +60,17 @@ config BCM63XX_ENET
          This driver supports the ethernet MACs in the Broadcom 63xx
          MIPS chipset family (BCM63XX).
 
+config BCMGENET
+       tristate "Broadcom GENET internal MAC support"
+       depends on OF
+       select MII
+       select PHYLIB
+       select FIXED_PHY if BCMGENET=y
+       select BCM7XXX_PHY
+       help
+         This driver supports the built-in Ethernet MACs found in the
+         Broadcom BCM7xxx Set Top Box chipset family.
+
 config BNX2
        tristate "Broadcom NetXtremeII support"
        depends on PCI
index 68efa1a3fb8820ed71ca1e565cf08c9f243bc55e..fd639a0d4c7d64b2b7db5eb084087502e3c6d63a 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
 obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x/
index 8a7bf7dad89823fadaa7b98f74ae08fe6a56838b..05ba6258901794ab51842ddc8d630f4d1286798d 100644 (file)
@@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
        unsigned int start;
 
        do {
-               start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+               start = u64_stats_fetch_begin_irq(&hwstat->syncp);
 
                /* Convert HW stats into rtnl_link_stats64 stats. */
                nstat->rx_packets = hwstat->rx_pkts;
@@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
                /* Carrier lost counter seems to be broken for some devices */
                nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
 #endif
-       } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+       } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 
        return nstat;
 }
@@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
        do {
                data_src = &hwstat->tx_good_octets;
                data_dst = data;
-               start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+               start = u64_stats_fetch_begin_irq(&hwstat->syncp);
 
                for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
                        *data_dst++ = *data_src++;
 
-       } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+       } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 }
 
 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
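
The b44 statistics hunks above switch to the _irq variants of the u64_stats fetch helpers. A small sketch of the reader-side retry loop, reusing the hwstat fields visible in the hunks; example_read_rx_packets() is illustrative only:

static u64 example_read_rx_packets(struct b44_hw_stats *hwstat)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
		packets = hwstat->rx_pkts;	/* copy under the seqcount */
	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));

	return packets;
}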
index b9a5fb6400d3cf1917511dec27c92486a46f12fc..a7d11f5565d69342ad296471a9a5d44f8d7c51d5 100644 (file)
@@ -1722,9 +1722,6 @@ static const struct net_device_ops bcm_enet_ops = {
        .ndo_set_rx_mode        = bcm_enet_set_multicast_list,
        .ndo_do_ioctl           = bcm_enet_ioctl,
        .ndo_change_mtu         = bcm_enet_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = bcm_enet_netpoll,
-#endif
 };
 
 /*
index 6c9e1c9bdeb8cbe06ae1788bd7ecb628e2ea4564..a8efb18e42fa66a01fd1950977db55b96ab15022 100644 (file)
@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                sw_cons = BNX2_NEXT_TX_BD(sw_cons);
 
                tx_bytes += skb->len;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;
@@ -3133,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;
 
+       if (budget <= 0)
+               return rx_pkt;
+
        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;
@@ -6235,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp)
 static void
 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 {
-       int i, total_vecs, rc;
+       int i, total_vecs;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);
@@ -6258,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 #ifdef BCM_CNIC
        total_vecs++;
 #endif
-       rc = -ENOSPC;
-       while (total_vecs >= BNX2_MIN_MSIX_VEC) {
-               rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
-               if (rc <= 0)
-                       break;
-               if (rc > 0)
-                       total_vecs = rc;
-       }
-
-       if (rc != 0)
+       total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+                                          BNX2_MIN_MSIX_VEC, total_vecs);
+       if (total_vecs < 0)
                return;
 
        msix_vecs = total_vecs;
@@ -6640,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -6733,7 +6729,7 @@ dma_error:
                               PCI_DMA_TODEVICE);
        }
 
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
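
bnx2 above (and bnx2x below) drops the retry loop around pci_enable_msix() in favour of a single pci_enable_msix_range() call, which either allocates a vector count within the requested range or fails outright. A hedged sketch of the idiom; example_enable_msix() is not driver code:

static int example_enable_msix(struct pci_dev *pdev, struct msix_entry *ent,
			       int min_vecs, int want_vecs)
{
	/* returns a count in [min_vecs, want_vecs] or a negative errno */
	int got = pci_enable_msix_range(pdev, ent, min_vecs, want_vecs);

	if (got < 0)
		return got;	/* not even min_vecs vectors were available */

	/* got may be less than want_vecs: size the queue set accordingly */
	return got;
}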
 
index 391f29ef6d2e172849c61140b7c88f99eff365fb..4d8f8aba0ea5d93be5e288a31da7fc12903af71f 100644 (file)
@@ -26,8 +26,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.78.17-0"
-#define DRV_MODULE_RELDATE      "2013/04/11"
+#define DRV_MODULE_VERSION      "1.78.19-0"
+#define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
 #if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_DCB                  0x8000000
 
 /* regular debug print */
+#define DP_INNER(fmt, ...)                                     \
+       pr_notice("[%s:%d(%s)]" fmt,                            \
+                 __func__, __LINE__,                           \
+                 bp->dev ? (bp->dev->name) : "?",              \
+                 ##__VA_ARGS__);
+
 #define DP(__mask, fmt, ...)                                   \
 do {                                                           \
        if (unlikely(bp->msg_enable & (__mask)))                \
-               pr_notice("[%s:%d(%s)]" fmt,                    \
-                         __func__, __LINE__,                   \
-                         bp->dev ? (bp->dev->name) : "?",      \
-                         ##__VA_ARGS__);                       \
+               DP_INNER(fmt, ##__VA_ARGS__);                   \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...)                               \
+do {                                                           \
+       if (unlikely((bp->msg_enable & (__mask)) == __mask))    \
+               DP_INNER(fmt, ##__VA_ARGS__);                   \
 } while (0)
 
 #define DP_CONT(__mask, fmt, ...)                              \
@@ -1146,10 +1155,6 @@ struct bnx2x_port {
                        (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
 
 /* slow path */
-
-/* slow path work-queue */
-extern struct workqueue_struct *bnx2x_wq;
-
 #define BNX2X_MAX_NUM_OF_VFS   64
 #define BNX2X_VF_CID_WND       4 /* log num of queues per VF. HW config. */
 #define BNX2X_CIDS_PER_VF      (1 << BNX2X_VF_CID_WND)
@@ -1261,6 +1266,7 @@ struct bnx2x_slowpath {
        union {
                struct client_init_ramrod_data  init_data;
                struct client_update_ramrod_data update_data;
+               struct tpa_update_ramrod_data tpa_data;
        } q_rdata;
 
        union {
@@ -1392,7 +1398,7 @@ struct bnx2x_fw_stats_data {
 };
 
 /* Public slow path states */
-enum {
+enum sp_rtnl_flag {
        BNX2X_SP_RTNL_SETUP_TC,
        BNX2X_SP_RTNL_TX_TIMEOUT,
        BNX2X_SP_RTNL_FAN_FAILURE,
@@ -1403,6 +1409,12 @@ enum {
        BNX2X_SP_RTNL_RX_MODE,
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
        BNX2X_SP_RTNL_TX_STOP,
+       BNX2X_SP_RTNL_GET_DRV_VERSION,
+};
+
+enum bnx2x_iov_flag {
+       BNX2X_IOV_HANDLE_VF_MSG,
+       BNX2X_IOV_HANDLE_FLR,
 };
 
 struct bnx2x_prev_path_list {
@@ -1603,6 +1615,8 @@ struct bnx2x {
        int                     mrrs;
 
        struct delayed_work     sp_task;
+       struct delayed_work     iov_task;
+
        atomic_t                interrupt_occurred;
        struct delayed_work     sp_rtnl_task;
 
@@ -1693,6 +1707,10 @@ struct bnx2x {
        struct bnx2x_slowpath   *slowpath;
        dma_addr_t              slowpath_mapping;
 
+       /* Mechanism protecting the drv_info_to_mcp */
+       struct mutex            drv_info_mutex;
+       bool                    drv_info_mng_owner;
+
        /* Total number of FW statistics requests */
        u8                      fw_stats_num;
 
@@ -1882,6 +1900,9 @@ struct bnx2x {
        /* operation indication for the sp_rtnl task */
        unsigned long                           sp_rtnl_state;
 
+       /* Indication of the IOV tasks */
+       unsigned long                           iov_task_state;
+
        /* DCBX Negotiation results */
        struct dcbx_features                    dcbx_local_feat;
        u32                                     dcbx_error;
@@ -2525,6 +2546,8 @@ enum {
 
 void bnx2x_set_local_cmng(struct bnx2x *bp);
 
+void bnx2x_update_mng_version(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
        (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
index dbcff509dc3f6d62cf48c729563196de8c4c7904..9261d5313b5be2bd361612640535fbf9c2810438 100644 (file)
@@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
 
 static int bnx2x_calc_num_queues(struct bnx2x *bp)
 {
-       return  bnx2x_num_queues ?
-                min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
-                min_t(int, netif_get_num_default_rss_queues(),
-                      BNX2X_MAX_QUEUES(bp));
+       int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+       /* Reduce memory usage in kdump environment by using only one queue */
+       if (reset_devices)
+               nq = 1;
+
+       nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+       return nq;
 }
 
 /**
@@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
        if (unlikely(bp->panic))
                return 0;
 #endif
+       if (budget <= 0)
+               return rx_pkt;
 
        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
@@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
        DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
           msix_vec);
 
-       rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+       rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+                                  BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
        /*
         * reconfigure number of tx/rx queues according to available
         * MSI-X vectors
         */
-       if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
-               /* how less vectors we will have? */
-               int diff = msix_vec - rc;
-
-               BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
-               rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
-               if (rc) {
-                       BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
-                       goto no_msix;
-               }
-               /*
-                * decrease number of queues by number of unallocated entries
-                */
-               bp->num_ethernet_queues -= diff;
-               bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
-               BNX2X_DEV_INFO("New queue configuration set: %d\n",
-                              bp->num_queues);
-       } else if (rc > 0) {
+       if (rc == -ENOSPC) {
                /* Get by with single vector */
-               rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
-               if (rc) {
+               rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+               if (rc < 0) {
                        BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
                                       rc);
                        goto no_msix;
@@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
                bp->num_ethernet_queues = 1;
                bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
        } else if (rc < 0) {
-               BNX2X_DEV_INFO("MSI-X is not attainable  rc %d\n", rc);
+               BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
                goto no_msix;
+       } else if (rc < msix_vec) {
+               /* how less vectors we will have? */
+               int diff = msix_vec - rc;
+
+               BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+               /*
+                * decrease number of queues by number of unallocated entries
+                */
+               bp->num_ethernet_queues -= diff;
+               bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+               BNX2X_DEV_INFO("New queue configuration set: %d\n",
+                              bp->num_queues);
        }
 
        bp->flags |= USING_MSIX_FLAG;
@@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
                sizeof(struct per_queue_stats) * num_queue_stats +
                sizeof(struct stats_counter);
 
-       BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
-                       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+       bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+                                      bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+       if (!bp->fw_stats)
+               goto alloc_mem_err;
 
        /* Set shortcuts */
        bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -2802,6 +2804,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
        if (CNIC_ENABLED(bp))
                bnx2x_load_cnic(bp);
 
+       if (IS_PF(bp))
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                /* mark driver is loaded in shmem2 */
                u32 val;
@@ -3028,6 +3033,10 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
        bp->state = BNX2X_STATE_CLOSED;
        bp->cnic_loaded = false;
 
+       /* Clear driver version indication in shmem */
+       if (IS_PF(bp))
+               bnx2x_update_mng_version(bp);
+
        /* Check if there are pending parity attentions. If there are - set
         * RECOVERY_IN_PROGRESS.
         */
@@ -4370,14 +4379,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 
        if (!IS_FCOE_IDX(index)) {
                /* status blocks */
-               if (!CHIP_IS_E1x(bp))
-                       BNX2X_PCI_ALLOC(sb->e2_sb,
-                               &bnx2x_fp(bp, index, status_blk_mapping),
-                               sizeof(struct host_hc_status_block_e2));
-               else
-                       BNX2X_PCI_ALLOC(sb->e1x_sb,
-                               &bnx2x_fp(bp, index, status_blk_mapping),
-                           sizeof(struct host_hc_status_block_e1x));
+               if (!CHIP_IS_E1x(bp)) {
+                       sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+                                                   sizeof(struct host_hc_status_block_e2));
+                       if (!sb->e2_sb)
+                               goto alloc_mem_err;
+               } else {
+                       sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+                                                    sizeof(struct host_hc_status_block_e1x));
+                       if (!sb->e1x_sb)
+                               goto alloc_mem_err;
+               }
        }
 
        /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4396,35 +4408,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
                           "allocating tx memory of fp %d cos %d\n",
                           index, cos);
 
-                       BNX2X_ALLOC(txdata->tx_buf_ring,
-                               sizeof(struct sw_tx_bd) * NUM_TX_BD);
-                       BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
-                               &txdata->tx_desc_mapping,
-                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+                       txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+                                                     sizeof(struct sw_tx_bd),
+                                                     GFP_KERNEL);
+                       if (!txdata->tx_buf_ring)
+                               goto alloc_mem_err;
+                       txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+                                                              sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+                       if (!txdata->tx_desc_ring)
+                               goto alloc_mem_err;
                }
        }
 
        /* Rx */
        if (!skip_rx_queue(bp, index)) {
                /* fastpath rx rings: rx_buf rx_desc rx_comp */
-               BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
-                               sizeof(struct sw_rx_bd) * NUM_RX_BD);
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
-                               &bnx2x_fp(bp, index, rx_desc_mapping),
-                               sizeof(struct eth_rx_bd) * NUM_RX_BD);
+               bnx2x_fp(bp, index, rx_buf_ring) =
+                       kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+               if (!bnx2x_fp(bp, index, rx_buf_ring))
+                       goto alloc_mem_err;
+               bnx2x_fp(bp, index, rx_desc_ring) =
+                       BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+                                       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+               if (!bnx2x_fp(bp, index, rx_desc_ring))
+                       goto alloc_mem_err;
 
                /* Seed all CQEs by 1s */
-               BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
-                                &bnx2x_fp(bp, index, rx_comp_mapping),
-                                sizeof(struct eth_fast_path_rx_cqe) *
-                                NUM_RCQ_BD);
+               bnx2x_fp(bp, index, rx_comp_ring) =
+                       BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+                                        sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+               if (!bnx2x_fp(bp, index, rx_comp_ring))
+                       goto alloc_mem_err;
 
                /* SGE ring */
-               BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
-                               sizeof(struct sw_rx_page) * NUM_RX_SGE);
-               BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
-                               &bnx2x_fp(bp, index, rx_sge_mapping),
-                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+               bnx2x_fp(bp, index, rx_page_ring) =
+                       kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+                               GFP_KERNEL);
+               if (!bnx2x_fp(bp, index, rx_page_ring))
+                       goto alloc_mem_err;
+               bnx2x_fp(bp, index, rx_sge_ring) =
+                       BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+                                       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+               if (!bnx2x_fp(bp, index, rx_sge_ring))
+                       goto alloc_mem_err;
                /* RX BD ring */
                bnx2x_set_next_page_rx_bd(fp);
 
@@ -4780,12 +4806,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
                bnx2x_panic();
 #endif
 
-       smp_mb__before_clear_bit();
-       set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
-       smp_mb__after_clear_bit();
-
        /* This allows the netif to be shutdown gracefully before resetting */
-       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 }
 
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4913,3 +4935,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
        disable = disable ? 1 : (usec ? 0 : 1);
        storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 }
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+                           u32 verbose)
+{
+       smp_mb__before_clear_bit();
+       set_bit(flag, &bp->sp_rtnl_state);
+       smp_mb__after_clear_bit();
+       DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+          flag);
+       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
index a89a40f88c25779ded7f3e9357f765f191dbd55f..05f4f5f52635b64668a3397427c6b1483f057eb7 100644 (file)
@@ -47,31 +47,26 @@ extern int bnx2x_num_queues;
                } \
        } while (0)
 
-#define BNX2X_PCI_ALLOC(x, y, size) \
-       do { \
-               x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-               if (x == NULL) \
-                       goto alloc_mem_err; \
-               DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
-                  (unsigned long long)(*y), x); \
-       } while (0)
-
-#define BNX2X_PCI_FALLOC(x, y, size) \
-       do { \
-               x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-               if (x == NULL) \
-                       goto alloc_mem_err; \
-               memset((void *)x, 0xFFFFFFFF, size); \
-               DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
-                  (unsigned long long)(*y), x); \
-       } while (0)
-
-#define BNX2X_ALLOC(x, size) \
-       do { \
-               x = kzalloc(size, GFP_KERNEL); \
-               if (x == NULL) \
-                       goto alloc_mem_err; \
-       } while (0)
+#define BNX2X_PCI_ALLOC(y, size)                                       \
+({                                                                     \
+       void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+       if (x)                                                          \
+               DP(NETIF_MSG_HW,                                        \
+                  "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",        \
+                  (unsigned long long)(*y), x);                        \
+       x;                                                              \
+})
+#define BNX2X_PCI_FALLOC(y, size)                                      \
+({                                                                     \
+       void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+       if (x) {                                                        \
+               memset(x, 0xff, size);                                  \
+               DP(NETIF_MSG_HW,                                        \
+                  "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",       \
+                  (unsigned long long)(*y), x);                        \
+       }                                                               \
+       x;                                                              \
+})
 
 /*********************** Interfaces ****************************
  *  Functions that need to be implemented by each driver version
@@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
 int bnx2x_drain_tx_queues(struct bnx2x *bp);
 void bnx2x_squeeze_objects(struct bnx2x *bp);
 
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+                           u32 verbose);
+
 #endif /* BNX2X_CMN_H */
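
The reworked BNX2X_PCI_ALLOC()/BNX2X_PCI_FALLOC() macros above rely on GNU statement expressions, so the macro itself yields the pointer and the caller keeps the explicit NULL check and goto. A minimal illustration of that form, not taken from the driver; ZALLOC_OR_NULL() is a made-up name:

#define ZALLOC_OR_NULL(size)					\
({								\
	void *__p = kzalloc(size, GFP_KERNEL);			\
	__p;	/* the value of the ({ ... }) expression */	\
})

A caller can then write ptr = ZALLOC_OR_NULL(len); if (!ptr) goto alloc_mem_err;, which keeps the error path visible at the call site instead of hiding a goto inside the macro.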
index fdace204b0549aa2599edd67c1c3112e0c4bf9de..97ea5421dd96f41bc3daf7a899ef9f1ddaab0931 100644 (file)
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
         * as we are handling an attention on a work queue which must be
         * flushed at some rtnl-locked contexts (e.g. if down)
         */
-       if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
 }
 
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                        if (IS_MF(bp))
                                bnx2x_link_sync_notify(bp);
 
-                       set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
-                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+                       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
                        return;
                }
        case BNX2X_DCBX_STATE_TX_PAUSED:
index 38fc794c1655d9d011d425ccce35cfa41ee43889..b6de05e3149b5604d818d5496cbbc23ab7bf64e8 100644 (file)
@@ -2969,8 +2969,9 @@ static void bnx2x_self_test(struct net_device *dev,
 #define IS_PORT_STAT(i) \
        ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
 #define IS_FUNC_STAT(i)                (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
-#define IS_MF_MODE_STAT(bp) \
-                       (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+#define HIDE_PORT_STAT(bp) \
+               ((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
+                IS_VF(bp))
 
 /* ethtool statistics are displayed for all regular ethernet queues and the
  * fcoe L2 queue if not disabled
@@ -2992,7 +2993,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
                                      BNX2X_NUM_Q_STATS;
                } else
                        num_strings = 0;
-               if (IS_MF_MODE_STAT(bp)) {
+               if (HIDE_PORT_STAT(bp)) {
                        for (i = 0; i < BNX2X_NUM_STATS; i++)
                                if (IS_FUNC_STAT(i))
                                        num_strings++;
@@ -3047,7 +3048,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                }
 
                for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-                       if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+                       if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
                                continue;
                        strcpy(buf + (k + j)*ETH_GSTRING_LEN,
                                   bnx2x_stats_arr[i].string);
@@ -3105,7 +3106,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
        hw_stats = (u32 *)&bp->eth_stats;
        for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-               if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+               if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
                        continue;
                if (bnx2x_stats_arr[i].size == 0) {
                        /* skip this counter */
index 84aecdf06f7a7cc807993952eedd01d483e739f7..95dc365435483ed9298389b88f965130d48eb7d0 100644 (file)
@@ -87,7 +87,6 @@
        (IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
        (IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
        (IRO[203].base + ((pfId) * IRO[203].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET        (IRO[102].base)
index cf1df8b62e2c2785c0560b77ac5ed4b5fc8ae8b3..5ba8af50c84f2bb3ebf9300c9ad8765d25a06a8d 100644 (file)
@@ -2003,6 +2003,23 @@ struct shmem_lfa {
        #define SHMEM_LFA_DONT_CLEAR_STAT               (1<<24)
 };
 
+/* Used to support NCSI get OS driver version:
+ * on driver load the version value will be set,
+ * on driver unload a value of 0x0 will be set.
+ */
+struct os_drv_ver {
+#define DRV_VER_NOT_LOADED                     0
+
+       /* personalties order is important */
+#define DRV_PERS_ETHERNET                      0
+#define DRV_PERS_ISCSI                         1
+#define DRV_PERS_FCOE                          2
+
+       /* shmem2 struct is constant can't add more personalties here */
+#define MAX_DRV_PERS                           3
+       u32 versions[MAX_DRV_PERS];
+};
+
 struct ncsi_oem_fcoe_features {
        u32 fcoe_features1;
        #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
@@ -2217,6 +2234,18 @@ struct shmem2_region {
        u32 reserved4;                          /* Offset 0x150 */
        u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
        #define LINK_ATTR_SYNC_KR2_ENABLE       (1<<0)
+
+       u32 reserved5[2];
+       u32 reserved6[PORT_MAX];
+
+       /* driver version for each personality */
+       struct os_drv_ver func_os_drv_ver[E2_FUNC_MAX]; /* Offset 0x16c */
+
+       /* Flag to the driver that PF's drv_info_host_addr buffer was read  */
+       u32 mfw_drv_indication;
+
+       /* We use indication for each PF (0..3) */
+#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
 };
 
 
@@ -2848,7 +2877,7 @@ struct afex_stats {
 
 #define BCM_5710_FW_MAJOR_VERSION                      7
 #define BCM_5710_FW_MINOR_VERSION                      8
-#define BCM_5710_FW_REVISION_VERSION           17
+#define BCM_5710_FW_REVISION_VERSION           19
 #define BCM_5710_FW_ENGINEERING_VERSION                0
 #define BCM_5710_FW_COMPILE_FLAGS                      1
 
index 7d4382286457e6f0bf1b9ba87a981dd758fc874b..a78edaccceee92d8f2439ac40f3b3ba887ec0000 100644 (file)
@@ -120,7 +120,8 @@ static int debug;
 module_param(debug, int, S_IRUGO);
 MODULE_PARM_DESC(debug, " Default debug msglevel");
 
-struct workqueue_struct *bnx2x_wq;
+static struct workqueue_struct *bnx2x_wq;
+struct workqueue_struct *bnx2x_iov_wq;
 
 struct bnx2x_mac_vals {
        u32 xmac_addr;
@@ -918,7 +919,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
        u16 start = 0, end = 0;
        u8 cos;
 #endif
-       if (disable_int)
+       if (IS_PF(bp) && disable_int)
                bnx2x_int_disable(bp);
 
        bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +930,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 
        /* Indices */
        /* Common */
-       BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
-                 bp->def_idx, bp->def_att_idx, bp->attn_state,
-                 bp->spq_prod_idx, bp->stats_counter);
-       BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
-                 bp->def_status_blk->atten_status_block.attn_bits,
-                 bp->def_status_blk->atten_status_block.attn_bits_ack,
-                 bp->def_status_blk->atten_status_block.status_block_id,
-                 bp->def_status_blk->atten_status_block.attn_bits_index);
-       BNX2X_ERR("     def (");
-       for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
-               pr_cont("0x%x%s",
-                       bp->def_status_blk->sp_sb.index_values[i],
-                       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
-
-       for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
-               *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
-                       CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-                       i*sizeof(u32));
-
-       pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
-              sp_sb_data.igu_sb_id,
-              sp_sb_data.igu_seg_id,
-              sp_sb_data.p_func.pf_id,
-              sp_sb_data.p_func.vnic_id,
-              sp_sb_data.p_func.vf_id,
-              sp_sb_data.p_func.vf_valid,
-              sp_sb_data.state);
+       if (IS_PF(bp)) {
+               struct host_sp_status_block *def_sb = bp->def_status_blk;
+               int data_size, cstorm_offset;
+
+               BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+                         bp->def_idx, bp->def_att_idx, bp->attn_state,
+                         bp->spq_prod_idx, bp->stats_counter);
+               BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
+                         def_sb->atten_status_block.attn_bits,
+                         def_sb->atten_status_block.attn_bits_ack,
+                         def_sb->atten_status_block.status_block_id,
+                         def_sb->atten_status_block.attn_bits_index);
+               BNX2X_ERR("     def (");
+               for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+                       pr_cont("0x%x%s",
+                               def_sb->sp_sb.index_values[i],
+                               (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
+
+               data_size = sizeof(struct hc_sp_status_block_data) /
+                           sizeof(u32);
+               cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+               for (i = 0; i < data_size; i++)
+                       *((u32 *)&sp_sb_data + i) =
+                               REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+                                          i * sizeof(u32));
+
+               pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
+                       sp_sb_data.igu_sb_id,
+                       sp_sb_data.igu_seg_id,
+                       sp_sb_data.p_func.pf_id,
+                       sp_sb_data.p_func.vnic_id,
+                       sp_sb_data.p_func.vf_id,
+                       sp_sb_data.p_func.vf_valid,
+                       sp_sb_data.state);
+       }
 
        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1022,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");
+
+               /* VF cannot access the FW reflection of the status block */
+               if (IS_VF(bp))
+                       continue;
+
                /* fw sb data */
                data_size = CHIP_IS_E1x(bp) ?
                        sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1078,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
        }
 
 #ifdef BNX2X_STOP_ON_ERROR
-
-       /* event queue */
-       BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
-       for (i = 0; i < NUM_EQ_DESC; i++) {
-               u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
-               BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
-                         i, bp->eq_ring[i].message.opcode,
-                         bp->eq_ring[i].message.error);
-               BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+       if (IS_PF(bp)) {
+               /* event queue */
+               BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+               for (i = 0; i < NUM_EQ_DESC; i++) {
+                       u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+                       BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+                                 i, bp->eq_ring[i].message.opcode,
+                                 bp->eq_ring[i].message.error);
+                       BNX2X_ERR("data: %x %x %x\n",
+                                 data[0], data[1], data[2]);
+               }
        }
 
        /* Rings */
@@ -1140,8 +1156,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
                }
        }
 #endif
-       bnx2x_fw_dump(bp);
-       bnx2x_mc_assert(bp);
+       if (IS_PF(bp)) {
+               bnx2x_fw_dump(bp);
+               bnx2x_mc_assert(bp);
+       }
        BNX2X_ERR("end crash dump -----------------\n");
 }
 
@@ -1814,6 +1832,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                drv_cmd = BNX2X_Q_CMD_EMPTY;
                break;
 
+       case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+               DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+               drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+               break;
+
        default:
                BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
                          command, fp->index);
@@ -1834,8 +1857,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 #else
                return;
 #endif
-       /* SRIOV: reschedule any 'in_progress' operations */
-       bnx2x_iov_sp_event(bp, cid, true);
 
        smp_mb__before_atomic_inc();
        atomic_inc(&bp->cq_spq_left);
@@ -3460,10 +3481,15 @@ static void bnx2x_handle_eee_event(struct bnx2x *bp)
        bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
 }
 
+#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH       (20)
+#define BNX2X_UPDATE_DRV_INFO_IND_COUNT                (25)
+
 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
 {
        enum drv_info_opcode op_code;
        u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+       bool release = false;
+       int wait;
 
        /* if drv_info version supported by MFW doesn't match - send NACK */
        if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
@@ -3474,6 +3500,9 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
        op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
                  DRV_INFO_CONTROL_OP_CODE_SHIFT;
 
+       /* Must prevent other flows from accessing drv_info_to_mcp */
+       mutex_lock(&bp->drv_info_mutex);
+
        memset(&bp->slowpath->drv_info_to_mcp, 0,
               sizeof(union drv_info_to_mcp));
 
@@ -3490,7 +3519,7 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
        default:
                /* if op code isn't supported - send NACK */
                bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
-               return;
+               goto out;
        }
 
        /* if we got drv_info attn from MFW then these fields are defined in
@@ -3502,6 +3531,106 @@ static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
                U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
 
        bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+
+       /* Since management may want both this and get_driver_version,
+        * we need to wait until it notifies us that it has finished
+        * using the buffer.
+        */
+       if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
+               DP(BNX2X_MSG_MCP, "Management does not support indication\n");
+       } else if (!bp->drv_info_mng_owner) {
+               u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
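+               /* This PF's read-done bit (absolute func id / 2 gives the PF index 0..3) */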
+
+               for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
+                       u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
+
+                       /* Management is done; need to clear indication */
+                       if (indication & bit) {
+                               SHMEM2_WR(bp, mfw_drv_indication,
+                                         indication & ~bit);
+                               release = true;
+                               break;
+                       }
+
+                       msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
+               }
+       }
+       if (!release) {
+               DP(BNX2X_MSG_MCP, "Management did not release indication\n");
+               bp->drv_info_mng_owner = true;
+       }
+
+out:
+       mutex_unlock(&bp->drv_info_mutex);
+}
+
+static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
+{
+       u8 vals[4];
+       int i = 0;
+
+       if (bnx2x_format) {
+               i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
+                          &vals[0], &vals[1], &vals[2], &vals[3]);
+               if (i > 0)
+                       vals[0] -= '0';
+       } else {
+               i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
+                          &vals[0], &vals[1], &vals[2], &vals[3]);
+       }
+
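+       /* Zero any version components that sscanf did not parse */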
+       while (i < 4)
+               vals[i++] = 0;
+
+       return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
+}
+
+void bnx2x_update_mng_version(struct bnx2x *bp)
+{
+       u32 iscsiver = DRV_VER_NOT_LOADED;
+       u32 fcoever = DRV_VER_NOT_LOADED;
+       u32 ethver = DRV_VER_NOT_LOADED;
+       int idx = BP_FW_MB_IDX(bp);
+       u8 *version;
+
+       if (!SHMEM2_HAS(bp, func_os_drv_ver))
+               return;
+
+       mutex_lock(&bp->drv_info_mutex);
+       /* Must not proceed while `bnx2x_handle_drv_info_req' might still be using the buffer */
+       if (bp->drv_info_mng_owner)
+               goto out;
+
+       if (bp->state != BNX2X_STATE_OPEN)
+               goto out;
+
+       /* Parse ethernet driver version */
+       ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+       if (!CNIC_LOADED(bp))
+               goto out;
+
+       /* Try getting storage driver version via cnic */
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+       bnx2x_drv_info_iscsi_stat(bp);
+       version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
+       iscsiver = bnx2x_update_mng_version_utility(version, false);
+
+       memset(&bp->slowpath->drv_info_to_mcp, 0,
+              sizeof(union drv_info_to_mcp));
+       bnx2x_drv_info_fcoe_stat(bp);
+       version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
+       fcoever = bnx2x_update_mng_version_utility(version, false);
+
+out:
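+       /* Versions are written even on early exit; unset personalities stay DRV_VER_NOT_LOADED */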
+       SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
+       SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
+       SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
+
+       mutex_unlock(&bp->drv_info_mutex);
+
+       DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
+          ethver, iscsiver, fcoever);
 }
 
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
@@ -3644,10 +3773,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                        cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
                                    HW_CID(bp, cid));
 
-       type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
-       type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
-                SPE_HDR_FUNCTION_ID);
+       /* In some cases, type may already contain the func-id
+        * (mainly in SRIOV-related use cases), so we add it here only
+        * if it's not already set.
+        */
+       if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+               type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+                       SPE_HDR_CONN_TYPE;
+               type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+                        SPE_HDR_FUNCTION_ID);
+       } else {
+               type = cmd_type;
+       }
 
        spe->hdr.type = cpu_to_le16(type);
 
@@ -3878,10 +4015,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
         * This is due to some boards consuming sufficient power when driver is
         * up to overheat if fan fails.
         */
-       smp_mb__before_clear_bit();
-       set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
-       smp_mb__after_clear_bit();
-       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
 }
 
 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4025,7 +4159,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
                                bnx2x_handle_drv_info_req(bp);
 
                        if (val & DRV_STATUS_VF_DISABLED)
-                               bnx2x_vf_handle_flr_event(bp);
+                               bnx2x_schedule_iov_task(bp,
+                                                       BNX2X_IOV_HANDLE_FLR);
 
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);
@@ -5216,14 +5351,14 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                /* handle eq element */
                switch (opcode) {
                case EVENT_RING_OPCODE_VF_PF_CHANNEL:
-                       DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
-                       bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+                       bnx2x_vf_mbx_schedule(bp,
+                                             &elem->message.data.vf_pf_event);
                        continue;
 
                case EVENT_RING_OPCODE_STAT_QUERY:
-                       DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
-                          "got statistics comp event %d\n",
-                          bp->stats_comp++);
+                       DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+                              "got statistics comp event %d\n",
+                              bp->stats_comp++);
                        /* nothing to do with stats comp */
                        goto next_spqe;
 
@@ -5273,6 +5408,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                                        break;
 
                        } else {
+                               int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
                                DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
                                   "AFEX: ramrod completed FUNCTION_UPDATE\n");
                                f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5419,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                                 * sp_rtnl task as all Queue SP operations
                                 * should run under rtnl_lock.
                                 */
-                               smp_mb__before_clear_bit();
-                               set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
-                                       &bp->sp_rtnl_state);
-                               smp_mb__after_clear_bit();
-
-                               schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                               bnx2x_schedule_sp_rtnl(bp, cmd, 0);
                        }
 
                        goto next_spqe;
@@ -5435,13 +5567,6 @@ static void bnx2x_sp_task(struct work_struct *work)
                             le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
        }
 
-       /* must be called after the EQ processing (since eq leads to sriov
-        * ramrod completion flows).
-        * This flow may have been scheduled by the arrival of a ramrod
-        * completion, or by the sriov code rescheduling itself.
-        */
-       bnx2x_iov_sp_task(bp);
-
        /* afex - poll to check if VIFSET_ACK should be sent to MFW */
        if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
                               &bp->sp_state)) {
@@ -6005,18 +6130,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
        int i;
 
-       if (IS_MF_SI(bp))
-               /*
-                * In switch independent mode, the TSTORM needs to accept
-                * packets that failed classification, since approximate match
-                * mac addresses aren't written to NIG LLH
-                */
-               REG_WR8(bp, BAR_TSTRORM_INTMEM +
-                           TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
-       else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
-               REG_WR8(bp, BAR_TSTRORM_INTMEM +
-                           TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
        /* Zero this manually as its initialization is
           currently missing in the initTool */
        for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -7989,19 +8102,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
 
 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
 {
-       if (!CHIP_IS_E1x(bp))
+       if (!CHIP_IS_E1x(bp)) {
                /* size = the status block + ramrod buffers */
-               BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
-                               sizeof(struct host_hc_status_block_e2));
-       else
-               BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
-                               &bp->cnic_sb_mapping,
-                               sizeof(struct
-                                      host_hc_status_block_e1x));
+               bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+                                                   sizeof(struct host_hc_status_block_e2));
+               if (!bp->cnic_sb.e2_sb)
+                       goto alloc_mem_err;
+       } else {
+               bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+                                                    sizeof(struct host_hc_status_block_e1x));
+               if (!bp->cnic_sb.e1x_sb)
+                       goto alloc_mem_err;
+       }
 
-       if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
+       if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
                /* allocate searcher T2 table, as it wasn't allocated before */
-               BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+               bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+               if (!bp->t2)
+                       goto alloc_mem_err;
+       }
 
        /* write address to which L5 should insert its values */
        bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -8022,15 +8141,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
 {
        int i, allocated, context_size;
 
-       if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
+       if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
                /* allocate searcher T2 table */
-               BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+               bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+               if (!bp->t2)
+                       goto alloc_mem_err;
+       }
 
-       BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
-                       sizeof(struct host_sp_status_block));
+       bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+                                            sizeof(struct host_sp_status_block));
+       if (!bp->def_status_blk)
+               goto alloc_mem_err;
 
-       BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
-                       sizeof(struct bnx2x_slowpath));
+       bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+                                      sizeof(struct bnx2x_slowpath));
+       if (!bp->slowpath)
+               goto alloc_mem_err;
 
        /* Allocate memory for CDU context:
         * This memory is allocated separately and not in the generic ILT
@@ -8050,12 +8176,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
        for (i = 0, allocated = 0; allocated < context_size; i++) {
                bp->context[i].size = min(CDU_ILT_PAGE_SZ,
                                          (context_size - allocated));
-               BNX2X_PCI_ALLOC(bp->context[i].vcxt,
-                               &bp->context[i].cxt_mapping,
-                               bp->context[i].size);
+               bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+                                                     bp->context[i].size);
+               if (!bp->context[i].vcxt)
+                       goto alloc_mem_err;
                allocated += bp->context[i].size;
        }
-       BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+       bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+                                GFP_KERNEL);
+       if (!bp->ilt->lines)
+               goto alloc_mem_err;
 
        if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
                goto alloc_mem_err;
@@ -8064,11 +8194,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
                goto alloc_mem_err;
 
        /* Slow path ring */
-       BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+       bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+       if (!bp->spq)
+               goto alloc_mem_err;
 
        /* EQ */
-       BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
-                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
+       bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+                                     BCM_PAGE_SIZE * NUM_EQ_PAGES);
+       if (!bp->eq_ring)
+               goto alloc_mem_err;
 
        return 0;
 
@@ -8849,6 +8983,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp)
                synchronize_irq(bp->pdev->irq);
 
        flush_workqueue(bnx2x_wq);
+       flush_workqueue(bnx2x_iov_wq);
 
        while (bnx2x_func_get_state(bp, &bp->func_obj) !=
                                BNX2X_F_STATE_STARTED && tout--)
@@ -9774,6 +9909,10 @@ sp_rtnl_not_reset:
                bnx2x_dcbx_resume_hw_tx(bp);
        }
 
+       if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
+                              &bp->sp_rtnl_state))
+               bnx2x_update_mng_version(bp);
+
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -11724,12 +11863,15 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
+       mutex_init(&bp->drv_info_mutex);
+       bp->drv_info_mng_owner = false;
        spin_lock_init(&bp->stats_lock);
        sema_init(&bp->stats_sema, 1);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
        INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+       INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
        if (IS_PF(bp)) {
                rc = bnx2x_get_hwinfo(bp);
                if (rc)
@@ -11771,6 +11913,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 
        bp->disable_tpa = disable_tpa;
        bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+       /* Reduce memory usage in kdump environment by disabling TPA */
+       bp->disable_tpa |= reset_devices;
 
        /* Set TPA flags */
        if (bp->disable_tpa) {
@@ -11942,7 +12086,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
 {
        int mc_count = netdev_mc_count(bp->dev);
        struct bnx2x_mcast_list_elem *mc_mac =
-               kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+               kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
        struct netdev_hw_addr *ha;
 
        if (!mc_mac)
@@ -12064,11 +12208,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
                return;
        } else {
                /* Schedule an SP task to handle rest of change */
-               DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+                                      NETIF_MSG_IFUP);
        }
 }
 
@@ -12101,11 +12242,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
                        /* configuring mcast to a vf involves sleeping (when we
                         * wait for the pf's response).
                         */
-                       smp_mb__before_clear_bit();
-                       set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
-                               &bp->sp_rtnl_state);
-                       smp_mb__after_clear_bit();
-                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                       bnx2x_schedule_sp_rtnl(bp,
+                                              BNX2X_SP_RTNL_VFPF_MCAST, 0);
                }
        }
 
@@ -13356,11 +13494,18 @@ static int __init bnx2x_init(void)
                pr_err("Cannot create workqueue\n");
                return -ENOMEM;
        }
+       bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
+       if (!bnx2x_iov_wq) {
+               pr_err("Cannot create iov workqueue\n");
+               destroy_workqueue(bnx2x_wq);
+               return -ENOMEM;
+       }
 
        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                pr_err("Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
+               destroy_workqueue(bnx2x_iov_wq);
        }
        return ret;
 }
@@ -13372,6 +13517,7 @@ static void __exit bnx2x_cleanup(void)
        pci_unregister_driver(&bnx2x_pci_driver);
 
        destroy_workqueue(bnx2x_wq);
+       destroy_workqueue(bnx2x_iov_wq);
 
        /* Free globally allocated resources */
        list_for_each_safe(pos, q, &bnx2x_prev_list) {
@@ -13765,6 +13911,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                                REG_WR(bp, scratch_offset + i,
                                       *(host_addr + i/4));
                }
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
                break;
        }
 
@@ -13782,6 +13929,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
                                cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
                        SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
                }
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
                break;
        }
 
@@ -13887,6 +14035,9 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
 
        rcu_assign_pointer(bp->cnic_ops, ops);
 
+       /* Schedule driver to read CNIC driver versions */
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
+
        return 0;
 }
 
index 0fb6ff2ac8e3738372bcb95f4278ef600c2bd76b..31297266b743e27fa527da4636bc893b1c64cc56 100644 (file)
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
                         data->header.rule_cnt, p->rx_accept_flags,
                         p->tx_accept_flags);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
 
        /* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
                raw->clear_pending(raw);
                return 0;
        } else {
-               /* No need for an explicit memory barrier here as long we would
-                * need to ensure the ordering of writing to the SPQ element
+               /* No need for an explicit memory barrier here as long as we
+                * ensure the ordering of writing to the SPQ element
                 * and updating of the SPQ producer which involves a memory
-                * read and we will have to put a full memory barrier there
-                * (inside bnx2x_sp_post()).
+                * read. If the memory read is removed we will have to put a
+                * full memory barrier there (inside bnx2x_sp_post()).
                 */
 
                /* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
                raw->clear_pending(raw);
                return 0;
        } else {
-               /* No need for an explicit memory barrier here as long we would
-                * need to ensure the ordering of writing to the SPQ element
+               /* No need for an explicit memory barrier here as long as we
+                * ensure the ordering of writing to the SPQ element
                 * and updating of the SPQ producer which involves a memory
-                * read and we will have to put a full memory barrier there
-                * (inside bnx2x_sp_post()).
+                * read. If the memory read is removed we will have to put a
+                * full memory barrier there (inside bnx2x_sp_post()).
                 */
 
                /* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
                data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
        }
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
 
        /* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
        rss_obj->config_rss = bnx2x_setup_rss;
 }
 
-int validate_vlan_mac(struct bnx2x *bp,
-                     struct bnx2x_vlan_mac_obj *vlan_mac)
-{
-       if (!vlan_mac->get_n_elements) {
-               BNX2X_ERR("vlan mac object was not intialized\n");
-               return -EINVAL;
-       }
-       return 0;
-}
-
 /********************** Queue state object ***********************************/
 
 /**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
        /* Fill the ramrod data */
        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
        bnx2x_q_fill_setup_data_e2(bp, params, rdata);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
                         o->cids[cid_index], rdata->general.client_id,
                         rdata->general.sp_client_id, rdata->general.cos);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
        /* Fill the ramrod data */
        bnx2x_q_fill_update_data(bp, o, update_params, rdata);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
                             o->cids[cid_index], U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
        return bnx2x_q_send_update(bp, params);
 }
 
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+                               struct bnx2x_queue_sp_obj *obj,
+                               struct bnx2x_queue_update_tpa_params *params,
+                               struct tpa_update_ramrod_data *data)
+{
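+       /* Copy TPA parameters into the ramrod data (multi-byte fields in little-endian) */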
+       data->client_id = obj->cl_id;
+       data->complete_on_both_clients = params->complete_on_both_clients;
+       data->dont_verify_rings_pause_thr_flg =
+               params->dont_verify_thr;
+       data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+       data->max_sges_for_packet = params->max_sges_pkt;
+       data->max_tpa_queues = params->max_tpa_queues;
+       data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+       data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+       data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+       data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+       data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+       data->tpa_mode = params->tpa_mode;
+       data->update_ipv4 = params->update_ipv4;
+       data->update_ipv6 = params->update_ipv6;
+}
+
 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
 {
-       /* TODO: Not implemented yet. */
-       return -1;
+       struct bnx2x_queue_sp_obj *o = params->q_obj;
+       struct tpa_update_ramrod_data *rdata =
+               (struct tpa_update_ramrod_data *)o->rdata;
+       dma_addr_t data_mapping = o->rdata_mapping;
+       struct bnx2x_queue_update_tpa_params *update_tpa_params =
+               &params->params.update_tpa;
+       u16 type;
+
+       /* Clear the ramrod data */
+       memset(rdata, 0, sizeof(*rdata));
+
+       /* Fill the ramrod data */
+       bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+       /* Add the function id inside the type, so that the sp post function
+        * doesn't automatically add the PF func-id; this is required
+        * for operations done by PFs on behalf of their VFs
+        */
+       type = ETH_CONNECTION_TYPE |
+               ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
+        */
+       return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+                            o->cids[BNX2X_PRIMARY_CID_INDEX],
+                            U64_HI(data_mapping),
+                            U64_LO(data_mapping), type);
 }
 
 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
        rdata->tx_switch_suspend = switch_update_params->suspend;
        rdata->echo = SWITCH_UPDATE;
 
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
+        */
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
        rdata->allowed_priorities = afex_update_params->allowed_priorities;
        rdata->echo = AFEX_UPDATE;
 
-       /*  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
        DP(BNX2X_MSG_SP,
           "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
                rdata->traffic_type_to_priority_cos[i] =
                        tx_start_params->traffic_type_to_priority_cos[i];
 
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
+        */
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
index 00d7f214a40a2caf2619fac0343ad3798b4bdaed..80f6c790ed88097ed17b3c3f259179e86451eff2 100644 (file)
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
        u8              cid_index;
 };
 
+struct bnx2x_queue_update_tpa_params {
+       dma_addr_t sge_map;
+       u8 update_ipv4;
+       u8 update_ipv6;
+       u8 max_tpa_queues;
+       u8 max_sges_pkt;
+       u8 complete_on_both_clients;
+       u8 dont_verify_thr;
+       u8 tpa_mode;
+       u8 _pad;
+
+       u16 sge_buff_sz;
+       u16 max_agg_sz;
+
+       u16 sge_pause_thr_low;
+       u16 sge_pause_thr_high;
+};
+
 struct rxq_pause_params {
        u16             bd_th_lo;
        u16             bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
        /* Params according to the current command */
        union {
                struct bnx2x_queue_update_params        update;
+               struct bnx2x_queue_update_tpa_params    update_tpa;
                struct bnx2x_queue_setup_params         setup;
                struct bnx2x_queue_init_params          init;
                struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
                             u8 *ind_table);
 
-int validate_vlan_mac(struct bnx2x *bp,
-                     struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
index e42f48df6e943e9ab21a34a5e3a7a49c43a726c8..5c523b32db70126720dbf0b2914dcbb1a3391a2b 100644 (file)
@@ -102,82 +102,22 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
        mmiowb();
        barrier();
 }
-/* VFOP - VF slow-path operation support */
 
-#define BNX2X_VFOP_FILTER_ADD_CNT_MAX          0x10000
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf,
+                                      bool print_err)
+{
+       if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+               if (print_err)
+                       BNX2X_ERR("Slowpath objects not yet initialized!\n");
+               else
+                       DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+               return false;
+       }
+       return true;
+}
 
 /* VFOP operations states */
-enum bnx2x_vfop_qctor_state {
-          BNX2X_VFOP_QCTOR_INIT,
-          BNX2X_VFOP_QCTOR_SETUP,
-          BNX2X_VFOP_QCTOR_INT_EN
-};
-
-enum bnx2x_vfop_qdtor_state {
-          BNX2X_VFOP_QDTOR_HALT,
-          BNX2X_VFOP_QDTOR_TERMINATE,
-          BNX2X_VFOP_QDTOR_CFCDEL,
-          BNX2X_VFOP_QDTOR_DONE
-};
-
-enum bnx2x_vfop_vlan_mac_state {
-          BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
-          BNX2X_VFOP_VLAN_MAC_CLEAR,
-          BNX2X_VFOP_VLAN_MAC_CHK_DONE,
-          BNX2X_VFOP_MAC_CONFIG_LIST,
-          BNX2X_VFOP_VLAN_CONFIG_LIST,
-          BNX2X_VFOP_VLAN_CONFIG_LIST_0
-};
-
-enum bnx2x_vfop_qsetup_state {
-          BNX2X_VFOP_QSETUP_CTOR,
-          BNX2X_VFOP_QSETUP_VLAN0,
-          BNX2X_VFOP_QSETUP_DONE
-};
-
-enum bnx2x_vfop_mcast_state {
-          BNX2X_VFOP_MCAST_DEL,
-          BNX2X_VFOP_MCAST_ADD,
-          BNX2X_VFOP_MCAST_CHK_DONE
-};
-enum bnx2x_vfop_qflr_state {
-          BNX2X_VFOP_QFLR_CLR_VLAN,
-          BNX2X_VFOP_QFLR_CLR_MAC,
-          BNX2X_VFOP_QFLR_TERMINATE,
-          BNX2X_VFOP_QFLR_DONE
-};
-
-enum bnx2x_vfop_flr_state {
-          BNX2X_VFOP_FLR_QUEUES,
-          BNX2X_VFOP_FLR_HW
-};
-
-enum bnx2x_vfop_close_state {
-          BNX2X_VFOP_CLOSE_QUEUES,
-          BNX2X_VFOP_CLOSE_HW
-};
-
-enum bnx2x_vfop_rxmode_state {
-          BNX2X_VFOP_RXMODE_CONFIG,
-          BNX2X_VFOP_RXMODE_DONE
-};
-
-enum bnx2x_vfop_qteardown_state {
-          BNX2X_VFOP_QTEARDOWN_RXMODE,
-          BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
-          BNX2X_VFOP_QTEARDOWN_CLR_MAC,
-          BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
-          BNX2X_VFOP_QTEARDOWN_QDTOR,
-          BNX2X_VFOP_QTEARDOWN_DONE
-};
-
-enum bnx2x_vfop_rss_state {
-          BNX2X_VFOP_RSS_CONFIG,
-          BNX2X_VFOP_RSS_DONE
-};
-
-#define bnx2x_vfop_reset_wq(vf)        atomic_set(&vf->op_in_progress, 0)
-
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_queue_init_params *init_params,
                              struct bnx2x_queue_setup_params *setup_params,
@@ -221,7 +161,7 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
                           struct bnx2x_virtf *vf,
                           struct bnx2x_vf_queue *q,
-                          struct bnx2x_vfop_qctor_params *p,
+                          struct bnx2x_vf_queue_construct_params *p,
                           unsigned long q_type)
 {
        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
@@ -290,191 +230,85 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
        }
 }
 
-/* VFOP queue construction */
-static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_create(struct bnx2x *bp,
+                                struct bnx2x_virtf *vf, int qid,
+                                struct bnx2x_vf_queue_construct_params *qctor)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
-       struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
-       enum bnx2x_vfop_qctor_state state = vfop->state;
-
-       bnx2x_vfop_reset_wq(vf);
-
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       switch (state) {
-       case BNX2X_VFOP_QCTOR_INIT:
-
-               /* has this queue already been opened? */
-               if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
-                   BNX2X_Q_LOGICAL_STATE_ACTIVE) {
-                       DP(BNX2X_MSG_IOV,
-                          "Entered qctor but queue was already up. Aborting gracefully\n");
-                       goto op_done;
-               }
-
-               /* next state */
-               vfop->state = BNX2X_VFOP_QCTOR_SETUP;
-
-               q_params->cmd = BNX2X_Q_CMD_INIT;
-               vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-       case BNX2X_VFOP_QCTOR_SETUP:
-               /* next state */
-               vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
-
-               /* copy pre-prepared setup params to the queue-state params */
-               vfop->op_p->qctor.qstate.params.setup =
-                       vfop->op_p->qctor.prep_qsetup;
-
-               q_params->cmd = BNX2X_Q_CMD_SETUP;
-               vfop->rc = bnx2x_queue_state_change(bp, q_params);
+       struct bnx2x_queue_state_params *q_params;
+       int rc = 0;
 
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+       DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 
-       case BNX2X_VFOP_QCTOR_INT_EN:
+       /* Prepare ramrod information */
+       q_params = &qctor->qstate;
+       q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+       set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
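+       /* RAMROD_COMP_WAIT: each queue state change below waits for its ramrod to complete */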
 
-               /* enable interrupts */
-               bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
-                                   USTORM_ID, 0, IGU_INT_ENABLE, 0);
-               goto op_done;
-       default:
-               bnx2x_vfop_default(state);
+       if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+           BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+               DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
+               goto out;
        }
-op_err:
-       BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
-                 vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
-op_done:
-       bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-       return;
-}
 
-static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
-                               struct bnx2x_virtf *vf,
-                               struct bnx2x_vfop_cmd *cmd,
-                               int qid)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-       if (vfop) {
-               vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+       /* Run Queue 'construction' ramrods */
+       q_params->cmd = BNX2X_Q_CMD_INIT;
+       rc = bnx2x_queue_state_change(bp, q_params);
+       if (rc)
+               goto out;
 
-               vfop->args.qctor.qid = qid;
-               vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+       memcpy(&q_params->params.setup, &qctor->prep_qsetup,
+              sizeof(struct bnx2x_queue_setup_params));
+       q_params->cmd = BNX2X_Q_CMD_SETUP;
+       rc = bnx2x_queue_state_change(bp, q_params);
+       if (rc)
+               goto out;
 
-               bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
-                                bnx2x_vfop_qctor, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
-                                            cmd->block);
-       }
-       return -ENOMEM;
+       /* enable interrupts */
+       bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
+                           USTORM_ID, 0, IGU_INT_ENABLE, 0);
+out:
+       return rc;
 }
 
-/* VFOP queue destruction */
-static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                 int qid)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
-       struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
-       enum bnx2x_vfop_qdtor_state state = vfop->state;
-
-       bnx2x_vfop_reset_wq(vf);
-
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       switch (state) {
-       case BNX2X_VFOP_QDTOR_HALT:
-
-               /* has this queue already been stopped? */
-               if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
-                   BNX2X_Q_LOGICAL_STATE_STOPPED) {
-                       DP(BNX2X_MSG_IOV,
-                          "Entered qdtor but queue was already stopped. Aborting gracefully\n");
-
-                       /* next state */
-                       vfop->state = BNX2X_VFOP_QDTOR_DONE;
-
-                       bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-               }
-
-               /* next state */
-               vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
-
-               q_params->cmd = BNX2X_Q_CMD_HALT;
-               vfop->rc = bnx2x_queue_state_change(bp, q_params);
-
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-       case BNX2X_VFOP_QDTOR_TERMINATE:
-               /* next state */
-               vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
-
-               q_params->cmd = BNX2X_Q_CMD_TERMINATE;
-               vfop->rc = bnx2x_queue_state_change(bp, q_params);
+       enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
+                                      BNX2X_Q_CMD_TERMINATE,
+                                      BNX2X_Q_CMD_CFC_DEL};
+       struct bnx2x_queue_state_params q_params;
+       int rc, i;
 
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-       case BNX2X_VFOP_QDTOR_CFCDEL:
-               /* next state */
-               vfop->state = BNX2X_VFOP_QDTOR_DONE;
+       /* Prepare ramrod information */
+       memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
+       q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+       set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 
-               q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
-               vfop->rc = bnx2x_queue_state_change(bp, q_params);
+       if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
+           BNX2X_Q_LOGICAL_STATE_STOPPED) {
+               DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
+               goto out;
+       }
 
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-       BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
-                 vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
-op_done:
-       case BNX2X_VFOP_QDTOR_DONE:
-               /* invalidate the context */
-               if (qdtor->cxt) {
-                       qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
-                       qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+       /* Run Queue 'destruction' ramrods */
+       for (i = 0; i < ARRAY_SIZE(cmds); i++) {
+               q_params.cmd = cmds[i];
+               rc = bnx2x_queue_state_change(bp, &q_params);
+               if (rc) {
+                       BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
+                       return rc;
                }
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-       default:
-               bnx2x_vfop_default(state);
        }
-op_pending:
-       return;
-}
-
-static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
-                               struct bnx2x_virtf *vf,
-                               struct bnx2x_vfop_cmd *cmd,
-                               int qid)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-       if (vfop) {
-               struct bnx2x_queue_state_params *qstate =
-                       &vf->op_params.qctor.qstate;
-
-               memset(qstate, 0, sizeof(*qstate));
-               qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
-
-               vfop->args.qdtor.qid = qid;
-               vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
-
-               bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
-                                bnx2x_vfop_qdtor, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
-                                            cmd->block);
-       } else {
-               BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
-               return -ENOMEM;
+out:
+       /* Clean Context */
+       if (bnx2x_vfq(vf, qid, cxt)) {
+               bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
+               bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
        }
+
+       return 0;
 }
 
 static void
@@ -496,751 +330,293 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
        BP_VFDB(bp)->vf_sbs_pool++;
 }
 
-/* VFOP MAC/VLAN helpers */
-static inline void bnx2x_vfop_credit(struct bnx2x *bp,
-                                    struct bnx2x_vfop *vfop,
-                                    struct bnx2x_vlan_mac_obj *obj)
+static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
+                                       struct bnx2x_vlan_mac_obj *obj,
+                                       atomic_t *counter)
 {
-       struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
-
-       /* update credit only if there is no error
-        * and a valid credit counter
-        */
-       if (!vfop->rc && args->credit) {
-               struct list_head *pos;
-               int read_lock;
-               int cnt = 0;
-
-               read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
-               if (read_lock)
-                       DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
+       struct list_head *pos;
+       int read_lock;
+       int cnt = 0;
 
-               list_for_each(pos, &obj->head)
-                       cnt++;
+       read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+       if (read_lock)
+               DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 
-               if (!read_lock)
-                       bnx2x_vlan_mac_h_read_unlock(bp, obj);
+       list_for_each(pos, &obj->head)
+               cnt++;
 
-               atomic_set(args->credit, cnt);
-       }
-}
-
-static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
-                                   struct bnx2x_vfop_filter *pos,
-                                   struct bnx2x_vlan_mac_data *user_req)
-{
-       user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
-               BNX2X_VLAN_MAC_DEL;
+       if (!read_lock)
+               bnx2x_vlan_mac_h_read_unlock(bp, obj);
 
-       switch (pos->type) {
-       case BNX2X_VFOP_FILTER_MAC:
-               memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
-               break;
-       case BNX2X_VFOP_FILTER_VLAN:
-               user_req->u.vlan.vlan = pos->vid;
-               break;
-       default:
-               BNX2X_ERR("Invalid filter type, skipping\n");
-               return 1;
-       }
-       return 0;
+       atomic_set(counter, cnt);
 }
 
-static int bnx2x_vfop_config_list(struct bnx2x *bp,
-                                 struct bnx2x_vfop_filters *filters,
-                                 struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
+static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                  int qid, bool drv_only, bool mac)
 {
-       struct bnx2x_vfop_filter *pos, *tmp;
-       struct list_head rollback_list, *filters_list = &filters->head;
-       struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
-       int rc = 0, cnt = 0;
-
-       INIT_LIST_HEAD(&rollback_list);
-
-       list_for_each_entry_safe(pos, tmp, filters_list, link) {
-               if (bnx2x_vfop_set_user_req(bp, pos, user_req))
-                       continue;
+       struct bnx2x_vlan_mac_ramrod_params ramrod;
+       int rc;
 
-               rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-               if (rc >= 0) {
-                       cnt += pos->add ? 1 : -1;
-                       list_move(&pos->link, &rollback_list);
-                       rc = 0;
-               } else if (rc == -EEXIST) {
-                       rc = 0;
-               } else {
-                       BNX2X_ERR("Failed to add a new vlan_mac command\n");
-                       break;
-               }
-       }
+       DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
+          mac ? "MACs" : "VLANs");
 
-       /* rollback if error or too many rules added */
-       if (rc || cnt > filters->add_cnt) {
-               BNX2X_ERR("error or too many rules added. Performing rollback\n");
-               list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
-                       pos->add = !pos->add;   /* reverse op */
-                       bnx2x_vfop_set_user_req(bp, pos, user_req);
-                       bnx2x_config_vlan_mac(bp, vlan_mac);
-                       list_del(&pos->link);
-               }
-               cnt = 0;
-               if (!rc)
-                       rc = -EINVAL;
+       /* Prepare ramrod params */
+       memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+       if (mac) {
+               set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+       } else {
+               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+                       &ramrod.user_req.vlan_mac_flags);
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
        }
-       filters->add_cnt = cnt;
-       return rc;
-}
-
-/* VFOP set VLAN/MAC */
-static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
-       struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
-       struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
-
-       enum bnx2x_vfop_vlan_mac_state state = vfop->state;
-
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       bnx2x_vfop_reset_wq(vf);
-
-       switch (state) {
-       case BNX2X_VFOP_VLAN_MAC_CLEAR:
-               /* next state */
-               vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-               /* do delete */
-               vfop->rc = obj->delete_all(bp, obj,
-                                          &vlan_mac->user_req.vlan_mac_flags,
-                                          &vlan_mac->ramrod_flags);
-
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-       case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
-               /* next state */
-               vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-               /* do config */
-               vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-               if (vfop->rc == -EEXIST)
-                       vfop->rc = 0;
+       ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-       case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
-               vfop->rc = !!obj->raw.check_pending(&obj->raw);
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
-       case BNX2X_VFOP_MAC_CONFIG_LIST:
-               /* next state */
-               vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-               /* do list config */
-               vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
-               if (vfop->rc)
-                       goto op_err;
-
-               set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
-               vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-       case BNX2X_VFOP_VLAN_CONFIG_LIST:
-               /* next state */
-               vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-               /* do list config */
-               vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
-               if (!vfop->rc) {
-                       set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
-                       vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-               }
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+       set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+       if (drv_only)
+               set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+       else
+               set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 
-       default:
-               bnx2x_vfop_default(state);
+       /* Start deleting */
+       rc = ramrod.vlan_mac_obj->delete_all(bp,
+                                            ramrod.vlan_mac_obj,
+                                            &ramrod.user_req.vlan_mac_flags,
+                                            &ramrod.ramrod_flags);
+       if (rc) {
+               BNX2X_ERR("Failed to delete all %s\n",
+                         mac ? "MACs" : "VLANs");
+               return rc;
        }
-op_err:
-       BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
-op_done:
-       kfree(filters);
-       bnx2x_vfop_credit(bp, vfop, obj);
-       bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-       return;
-}
-
-struct bnx2x_vfop_vlan_mac_flags {
-       bool drv_only;
-       bool dont_consume;
-       bool single_cmd;
-       bool add;
-};
-
-static void
-bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
-                               struct bnx2x_vfop_vlan_mac_flags *flags)
-{
-       struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
-
-       memset(ramrod, 0, sizeof(*ramrod));
 
-       /* ramrod flags */
-       if (flags->drv_only)
-               set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
-       if (flags->single_cmd)
-               set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+       /* Clear the vlan counters */
+       if (!mac)
+               atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
 
-       /* mac_vlan flags */
-       if (flags->dont_consume)
-               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
-
-       /* cmd */
-       ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
-}
-
-static inline void
-bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
-                          struct bnx2x_vfop_vlan_mac_flags *flags)
-{
-       bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
-       set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+       return 0;
 }
 
-static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
-                                    struct bnx2x_virtf *vf,
-                                    struct bnx2x_vfop_cmd *cmd,
-                                    int qid, bool drv_only)
+static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
+                                   struct bnx2x_virtf *vf, int qid,
+                                   struct bnx2x_vf_mac_vlan_filter *filter,
+                                   bool drv_only)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+       struct bnx2x_vlan_mac_ramrod_params ramrod;
        int rc;
 
-       if (vfop) {
-               struct bnx2x_vfop_args_filters filters = {
-                       .multi_filter = NULL,   /* single */
-                       .credit = NULL,         /* consume credit */
-               };
-               struct bnx2x_vfop_vlan_mac_flags flags = {
-                       .drv_only = drv_only,
-                       .dont_consume = (filters.credit != NULL),
-                       .single_cmd = true,
-                       .add = false /* don't care */,
-               };
-               struct bnx2x_vlan_mac_ramrod_params *ramrod =
-                       &vf->op_params.vlan_mac;
-
-               /* set ramrod params */
-               bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
-               /* set object */
-               rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
-               if (rc)
-                       return rc;
-               ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
-
-               /* set extra args */
-               vfop->args.filters = filters;
-
-               bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
-                                bnx2x_vfop_vlan_mac, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-                                            cmd->block);
+       DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
+          vf->abs_vfid, filter->add ? "Adding" : "Deleting",
+          filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+
+       /* Prepare ramrod params */
+       memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
+       if (filter->type == BNX2X_VF_FILTER_VLAN) {
+               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+                       &ramrod.user_req.vlan_mac_flags);
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+               ramrod.user_req.u.vlan.vlan = filter->vid;
+       } else {
+               set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+               memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+       }
+       ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
+                                           BNX2X_VLAN_MAC_DEL;
+
+       /* Verify there are available vlan credits */
+       if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
+           (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
+            vf_vlan_rules_cnt(vf))) {
+               BNX2X_ERR("No credits for vlan\n");
+               return -ENOMEM;
        }
-       return -ENOMEM;
-}
-
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
-                           struct bnx2x_virtf *vf,
-                           struct bnx2x_vfop_cmd *cmd,
-                           struct bnx2x_vfop_filters *macs,
-                           int qid, bool drv_only)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       int rc;
 
-       if (vfop) {
-               struct bnx2x_vfop_args_filters filters = {
-                       .multi_filter = macs,
-                       .credit = NULL,         /* consume credit */
-               };
-               struct bnx2x_vfop_vlan_mac_flags flags = {
-                       .drv_only = drv_only,
-                       .dont_consume = (filters.credit != NULL),
-                       .single_cmd = false,
-                       .add = false, /* don't care since only the items in the
-                                      * filters list affect the sp operation,
-                                      * not the list itself
-                                      */
-               };
-               struct bnx2x_vlan_mac_ramrod_params *ramrod =
-                       &vf->op_params.vlan_mac;
-
-               /* set ramrod params */
-               bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
-
-               /* set object */
-               rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
-               if (rc)
-                       return rc;
-               ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+       set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
+       if (drv_only)
+               set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
+       else
+               set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
+
+       /* Add/Remove the filter */
+       rc = bnx2x_config_vlan_mac(bp, &ramrod);
+       if (rc && rc != -EEXIST) {
+               BNX2X_ERR("Failed to %s %s\n",
+                         filter->add ? "add" : "delete",
+                         filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
+                                                               "VLAN");
+               return rc;
+       }
 
-               /* set extra args */
-               filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
-               vfop->args.filters = filters;
+       /* Update the vlan counters */
+       if (filter->type == BNX2X_VF_FILTER_VLAN)
+               bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
+                                    &bnx2x_vfq(vf, qid, vlan_count));
 
-               bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
-                                bnx2x_vfop_vlan_mac, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-                                            cmd->block);
-       }
-       return -ENOMEM;
+       return 0;
 }
 
-static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
-                                  struct bnx2x_virtf *vf,
-                                  struct bnx2x_vfop_cmd *cmd,
-                                  int qid, u16 vid, bool add)
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                 struct bnx2x_vf_mac_vlan_filters *filters,
+                                 int qid, bool drv_only)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       int rc;
+       int rc = 0, i;
 
-       if (vfop) {
-               struct bnx2x_vfop_args_filters filters = {
-                       .multi_filter = NULL, /* single command */
-                       .credit = &bnx2x_vfq(vf, qid, vlan_count),
-               };
-               struct bnx2x_vfop_vlan_mac_flags flags = {
-                       .drv_only = false,
-                       .dont_consume = (filters.credit != NULL),
-                       .single_cmd = true,
-                       .add = add,
-               };
-               struct bnx2x_vlan_mac_ramrod_params *ramrod =
-                       &vf->op_params.vlan_mac;
-
-               /* set ramrod params */
-               bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-               ramrod->user_req.u.vlan.vlan = vid;
-
-               /* set object */
-               rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-               if (rc)
-                       return rc;
-               ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-               /* set extra args */
-               vfop->args.filters = filters;
+       if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+               return -EINVAL;
 
-               bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
-                                bnx2x_vfop_vlan_mac, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-                                            cmd->block);
+       /* Configure the filters */
+       for (i = 0; i < filters->count; i++) {
+               rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
+                                             &filters->filters[i], drv_only);
+               if (rc)
+                       break;
        }
-       return -ENOMEM;
-}
-
-static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
-                              struct bnx2x_virtf *vf,
-                              struct bnx2x_vfop_cmd *cmd,
-                              int qid, bool drv_only)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       int rc;
 
-       if (vfop) {
-               struct bnx2x_vfop_args_filters filters = {
-                       .multi_filter = NULL, /* single command */
-                       .credit = &bnx2x_vfq(vf, qid, vlan_count),
-               };
-               struct bnx2x_vfop_vlan_mac_flags flags = {
-                       .drv_only = drv_only,
-                       .dont_consume = (filters.credit != NULL),
-                       .single_cmd = true,
-                       .add = false, /* don't care */
-               };
-               struct bnx2x_vlan_mac_ramrod_params *ramrod =
-                       &vf->op_params.vlan_mac;
-
-               /* set ramrod params */
-               bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
-               /* set object */
-               rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-               if (rc)
-                       return rc;
-               ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+       /* Rollback if needed */
+       if (i != filters->count) {
+               BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
+                         i, filters->count);
+               while (--i >= 0) {
+                       filters->filters[i].add = !filters->filters[i].add;
+                       bnx2x_vf_mac_vlan_config(bp, vf, qid,
+                                                &filters->filters[i],
+                                                drv_only);
+               }
+       }
 
-               /* set extra args */
-               vfop->args.filters = filters;
+       /* It's our responsibility to free the filters */
+       kfree(filters);
 
-               bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
-                                bnx2x_vfop_vlan_mac, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-                                            cmd->block);
-       }
-       return -ENOMEM;
+       return rc;
 }
 
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
-                            struct bnx2x_virtf *vf,
-                            struct bnx2x_vfop_cmd *cmd,
-                            struct bnx2x_vfop_filters *vlans,
-                            int qid, bool drv_only)
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+                        struct bnx2x_vf_queue_construct_params *qctor)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
        int rc;
 
-       if (vfop) {
-               struct bnx2x_vfop_args_filters filters = {
-                       .multi_filter = vlans,
-                       .credit = &bnx2x_vfq(vf, qid, vlan_count),
-               };
-               struct bnx2x_vfop_vlan_mac_flags flags = {
-                       .drv_only = drv_only,
-                       .dont_consume = (filters.credit != NULL),
-                       .single_cmd = false,
-                       .add = false, /* don't care */
-               };
-               struct bnx2x_vlan_mac_ramrod_params *ramrod =
-                       &vf->op_params.vlan_mac;
-
-               /* set ramrod params */
-               bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
-
-               /* set object */
-               rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-               if (rc)
-                       return rc;
-               ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
-
-               /* set extra args */
-               filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
-                       atomic_read(filters.credit);
-
-               vfop->args.filters = filters;
+       DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 
-               bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
-                                bnx2x_vfop_vlan_mac, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
-                                            cmd->block);
-       }
-       return -ENOMEM;
-}
-
-/* VFOP queue setup (queue constructor + set vlan 0) */
-static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       int qid = vfop->args.qctor.qid;
-       enum bnx2x_vfop_qsetup_state state = vfop->state;
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vfop_qsetup,
-               .block = false,
-       };
-
-       if (vfop->rc < 0)
+       rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
+       if (rc)
                goto op_err;
 
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+       /* Configure vlan0 for leading queue */
+       if (!qid) {
+               struct bnx2x_vf_mac_vlan_filter filter;
 
-       switch (state) {
-       case BNX2X_VFOP_QSETUP_CTOR:
-               /* init the queue ctor command */
-               vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
-               vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
-               if (vfop->rc)
+               memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
+               filter.type = BNX2X_VF_FILTER_VLAN;
+               filter.add = true;
+               filter.vid = 0;
+               rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
+               if (rc)
                        goto op_err;
-               return;
-
-       case BNX2X_VFOP_QSETUP_VLAN0:
-               /* skip if non-leading or FPGA/EMU*/
-               if (qid)
-                       goto op_done;
+       }
 
-               /* init the queue set-vlan command (for vlan 0) */
-               vfop->state = BNX2X_VFOP_QSETUP_DONE;
-               vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
-               if (vfop->rc)
-                       goto op_err;
-               return;
+       /* Schedule the configuration of any pending vlan filters */
+       vf->cfg_flags |= VF_CFG_VLAN;
+       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+                              BNX2X_MSG_IOV);
+       return 0;
 op_err:
-       BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
-op_done:
-       case BNX2X_VFOP_QSETUP_DONE:
-               vf->cfg_flags |= VF_CFG_VLAN;
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
-                       &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-       default:
-               bnx2x_vfop_default(state);
-       }
+       BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+       return rc;
 }
 
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
-                         struct bnx2x_virtf *vf,
-                         struct bnx2x_vfop_cmd *cmd,
-                         int qid)
+static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                              int qid)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+       int rc;
 
-       if (vfop) {
-               vfop->args.qctor.qid = qid;
+       DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 
-               bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
-                                bnx2x_vfop_qsetup, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
-                                            cmd->block);
+       /* If needed, clean the filtering database */
+       if ((qid == LEADING_IDX) &&
+           bnx2x_validate_vf_sp_objs(bp, vf, false)) {
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+               if (rc)
+                       goto op_err;
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+               if (rc)
+                       goto op_err;
        }
-       return -ENOMEM;
-}
-
-/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
-static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       int qid = vfop->args.qx.qid;
-       enum bnx2x_vfop_qflr_state state = vfop->state;
-       struct bnx2x_queue_state_params *qstate;
-       struct bnx2x_vfop_cmd cmd;
-
-       bnx2x_vfop_reset_wq(vf);
-
-       if (vfop->rc < 0)
-               goto op_err;
 
-       DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+       /* Terminate queue */
+       if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
+               struct bnx2x_queue_state_params qstate;
 
-       cmd.done = bnx2x_vfop_qflr;
-       cmd.block = false;
-
-       switch (state) {
-       case BNX2X_VFOP_QFLR_CLR_VLAN:
-               /* vlan-clear-all: driver-only, don't consume credit */
-               vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
-
-               if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
-                       /* the vlan_mac vfop will re-schedule us */
-                       vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
-                                                             qid, true);
-                       if (vfop->rc)
-                               goto op_err;
-                       return;
-
-               } else {
-                       /* need to reschedule ourselves */
-                       bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-               }
-
-       case BNX2X_VFOP_QFLR_CLR_MAC:
-               /* mac-clear-all: driver only consume credit */
-               vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-               if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
-                       /* the vlan_mac vfop will re-schedule us */
-                       vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
-                                                            qid, true);
-                       if (vfop->rc)
-                               goto op_err;
-                       return;
-
-               } else {
-                       /* need to reschedule ourselves */
-                       bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-               }
-
-       case BNX2X_VFOP_QFLR_TERMINATE:
-               qstate = &vfop->op_p->qctor.qstate;
-               memset(qstate , 0, sizeof(*qstate));
-               qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
-               vfop->state = BNX2X_VFOP_QFLR_DONE;
-
-               DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
-                  vf->abs_vfid, qstate->q_obj->state);
-
-               if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
-                       qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
-                       qstate->cmd = BNX2X_Q_CMD_TERMINATE;
-                       vfop->rc = bnx2x_queue_state_change(bp, qstate);
-                       bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
-               } else {
-                       goto op_done;
-               }
-
-op_err:
-       BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
-                 vf->abs_vfid, qid, vfop->rc);
-op_done:
-       case BNX2X_VFOP_QFLR_DONE:
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-       default:
-               bnx2x_vfop_default(state);
+               memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+               qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+               qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
+               qstate.cmd = BNX2X_Q_CMD_TERMINATE;
+               set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
+               rc = bnx2x_queue_state_change(bp, &qstate);
+               if (rc)
+                       goto op_err;
        }
-op_pending:
-       return;
-}
-
-static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
-                              struct bnx2x_virtf *vf,
-                              struct bnx2x_vfop_cmd *cmd,
-                              int qid)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
 
-       if (vfop) {
-               vfop->args.qx.qid = qid;
-               bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
-                                bnx2x_vfop_qflr, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
-                                            cmd->block);
-       }
-       return -ENOMEM;
+       return 0;
+op_err:
+       BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
+       return rc;
 }
 
-/* VFOP multi-casts */
-static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                  bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
-       struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
-       struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
-       enum bnx2x_vfop_mcast_state state = vfop->state;
-       int i;
-
-       bnx2x_vfop_reset_wq(vf);
-
-       if (vfop->rc < 0)
-               goto op_err;
+       struct bnx2x_mcast_list_elem *mc = NULL;
+       struct bnx2x_mcast_ramrod_params mcast;
+       int rc, i;
 
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       switch (state) {
-       case BNX2X_VFOP_MCAST_DEL:
-               /* clear existing mcasts */
-               vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
-                                            : BNX2X_VFOP_MCAST_CHK_DONE;
-               mcast->mcast_list_len = vf->mcast_list_len;
-               vf->mcast_list_len = args->mc_num;
-               vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-       case BNX2X_VFOP_MCAST_ADD:
-               if (raw->check_pending(raw))
-                       goto op_pending;
-
-               /* update mcast list on the ramrod params */
-               INIT_LIST_HEAD(&mcast->mcast_list);
-               for (i = 0; i < args->mc_num; i++)
-                       list_add_tail(&(args->mc[i].link),
-                                     &mcast->mcast_list);
-               mcast->mcast_list_len = args->mc_num;
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-               /* add new mcasts */
-               vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
-               vfop->rc = bnx2x_config_mcast(bp, mcast,
-                                             BNX2X_MCAST_CMD_ADD);
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-
-       case BNX2X_VFOP_MCAST_CHK_DONE:
-               vfop->rc = raw->check_pending(raw) ? 1 : 0;
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-       default:
-               bnx2x_vfop_default(state);
+       /* Prepare Multicast command */
+       memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
+       mcast.mcast_obj = &vf->mcast_obj;
+       if (drv_only)
+               set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
+       else
+               set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
+       if (mc_num) {
+               mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+                            GFP_KERNEL);
+               if (!mc) {
+                       BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
+                       return -ENOMEM;
+               }
        }
-op_err:
-       BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
-op_done:
-       kfree(args->mc);
-       bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-       return;
-}
 
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
-                        struct bnx2x_virtf *vf,
-                        struct bnx2x_vfop_cmd *cmd,
-                        bnx2x_mac_addr_t *mcasts,
-                        int mcast_num, bool drv_only)
-{
-       struct bnx2x_vfop *vfop = NULL;
-       size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
-       struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
-                                          NULL;
-
-       if (!mc_sz || mc) {
-               vfop = bnx2x_vfop_add(bp, vf);
-               if (vfop) {
-                       int i;
-                       struct bnx2x_mcast_ramrod_params *ramrod =
-                               &vf->op_params.mcast;
-
-                       /* set ramrod params */
-                       memset(ramrod, 0, sizeof(*ramrod));
-                       ramrod->mcast_obj = &vf->mcast_obj;
-                       if (drv_only)
-                               set_bit(RAMROD_DRV_CLR_ONLY,
-                                       &ramrod->ramrod_flags);
-
-                       /* copy mcasts pointers */
-                       vfop->args.mc_list.mc_num = mcast_num;
-                       vfop->args.mc_list.mc = mc;
-                       for (i = 0; i < mcast_num; i++)
-                               mc[i].mac = mcasts[i];
-
-                       bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
-                                        bnx2x_vfop_mcast, cmd->done);
-                       return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
-                                                    cmd->block);
-               } else {
+       /* clear existing mcasts; the delete is issued against the previously
+        * configured list (hence the old length), and the new length is
+        * stored for the next invocation
+        */
+       mcast.mcast_list_len = vf->mcast_list_len;
+       vf->mcast_list_len = mc_num;
+       rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
+       if (rc) {
+               BNX2X_ERR("Failed to remove multicasts\n");
+               if (mc)
                        kfree(mc);
-               }
+               return rc;
        }
-       return -ENOMEM;
-}
-
-/* VFOP rx-mode */
-static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
-       enum bnx2x_vfop_rxmode_state state = vfop->state;
-
-       bnx2x_vfop_reset_wq(vf);
-
-       if (vfop->rc < 0)
-               goto op_err;
 
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       switch (state) {
-       case BNX2X_VFOP_RXMODE_CONFIG:
-               /* next state */
-               vfop->state = BNX2X_VFOP_RXMODE_DONE;
+       /* update mcast list on the ramrod params */
+       if (mc_num) {
+               INIT_LIST_HEAD(&mcast.mcast_list);
+               for (i = 0; i < mc_num; i++) {
+                       mc[i].mac = mcasts[i];
+                       list_add_tail(&mc[i].link,
+                                     &mcast.mcast_list);
+               }
 
-               /* record the accept flags in vfdb so hypervisor can modify them
-                * if necessary
-                */
-               bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
-                       ramrod->rx_accept_flags;
-               vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-               BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
-op_done:
-       case BNX2X_VFOP_RXMODE_DONE:
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-       default:
-               bnx2x_vfop_default(state);
+               /* add new mcasts */
+               rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
+               if (rc)
+                       BNX2X_ERR("Failed to add multicasts\n");
+               kfree(mc);
        }
-op_pending:
-       return;
+
+       return rc;
 }
 
 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
@@ -1268,118 +644,56 @@ static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
        ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 }
 
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
-                         struct bnx2x_virtf *vf,
-                         struct bnx2x_vfop_cmd *cmd,
-                         int qid, unsigned long accept_flags)
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                   int qid, unsigned long accept_flags)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-       if (vfop) {
-               struct bnx2x_rx_mode_ramrod_params *ramrod =
-                       &vf->op_params.rx_mode;
+       struct bnx2x_rx_mode_ramrod_params ramrod;
 
-               bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-               bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
-                                bnx2x_vfop_rxmode, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
-                                            cmd->block);
-       }
-       return -ENOMEM;
+       bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
+       set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
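+       /* record the accept flags in vfdb so hypervisor can modify them
+        * if necessary
+        */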
+       vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
+       return bnx2x_config_rx_mode(bp, &ramrod);
 }
 
-/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
- * queue destructor)
- */
-static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       int qid = vfop->args.qx.qid;
-       enum bnx2x_vfop_qteardown_state state = vfop->state;
-       struct bnx2x_vfop_cmd cmd;
-
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       cmd.done = bnx2x_vfop_qdown;
-       cmd.block = false;
-
-       switch (state) {
-       case BNX2X_VFOP_QTEARDOWN_RXMODE:
-               /* Drop all */
-               vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
-               vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
-               if (vfop->rc)
-                       goto op_err;
-               return;
-
-       case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
-               /* vlan-clear-all: don't consume credit */
-               vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
-               vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
-               if (vfop->rc)
-                       goto op_err;
-               return;
-
-       case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
-               /* mac-clear-all: consume credit */
-               vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
-               vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
-               if (vfop->rc)
-                       goto op_err;
-               return;
+       int rc;
 
-       case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
-               vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
-               vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
-               if (vfop->rc)
-                       goto op_err;
-               return;
+       DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 
-       case BNX2X_VFOP_QTEARDOWN_QDTOR:
-               /* run the queue destruction flow */
-               DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
-               vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
-               DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
-               vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
-               DP(BNX2X_MSG_IOV, "returned from cmd\n");
-               if (vfop->rc)
+       /* Remove all classification configuration for leading queue */
+       if (qid == LEADING_IDX) {
+               rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
+               if (rc)
                        goto op_err;
-               return;
-op_err:
-       BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
-                 vf->abs_vfid, qid, vfop->rc);
-
-       case BNX2X_VFOP_QTEARDOWN_DONE:
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-       default:
-               bnx2x_vfop_default(state);
-       }
-}
 
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
-                        struct bnx2x_virtf *vf,
-                        struct bnx2x_vfop_cmd *cmd,
-                        int qid)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-
-       /* for non leading queues skip directly to qdown sate */
-       if (vfop) {
-               vfop->args.qx.qid = qid;
-               bnx2x_vfop_opset(qid == LEADING_IDX ?
-                                BNX2X_VFOP_QTEARDOWN_RXMODE :
-                                BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
-                                cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
-                                            cmd->block);
+               /* Remove filtering if feasible */
+               if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
+                       rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+                                                    false, false);
+                       if (rc)
+                               goto op_err;
+                       rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+                                                    false, true);
+                       if (rc)
+                               goto op_err;
+                       rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
+                       if (rc)
+                               goto op_err;
+               }
        }
 
-       return -ENOMEM;
+       /* Destroy queue */
+       rc = bnx2x_vf_queue_destroy(bp, vf, qid);
+       if (rc)
+               goto op_err;
+       return rc;
+op_err:
+       BNX2X_ERR("vf[%d:%d] error: rc %d\n",
+                 vf->abs_vfid, qid, rc);
+       return rc;
 }
 
 /* VF enable primitives
@@ -1579,120 +893,63 @@ static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
        bnx2x_tx_hw_flushed(bp, poll_cnt);
 }
 
-static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
-       enum bnx2x_vfop_flr_state state = vfop->state;
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vfop_flr,
-               .block = false,
-       };
-
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+       int rc, i;
 
-       switch (state) {
-       case BNX2X_VFOP_FLR_QUEUES:
-               /* the cleanup operations are valid if and only if the VF
-                * was first acquired.
-                */
-               if (++(qx->qid) < vf_rxq_count(vf)) {
-                       vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
-                                                      qx->qid);
-                       if (vfop->rc)
-                               goto op_err;
-                       return;
-               }
-               /* remove multicasts */
-               vfop->state = BNX2X_VFOP_FLR_HW;
-               vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
-                                               0, true);
-               if (vfop->rc)
-                       goto op_err;
-               return;
-       case BNX2X_VFOP_FLR_HW:
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-               /* dispatch final cleanup and wait for HW queues to flush */
-               bnx2x_vf_flr_clnup_hw(bp, vf);
+       /* the cleanup operations are valid if and only if the VF
+        * was first acquired.
+        */
+       for (i = 0; i < vf_rxq_count(vf); i++) {
+               rc = bnx2x_vf_queue_flr(bp, vf, i);
+               if (rc)
+                       goto out;
+       }
 
-               /* release VF resources */
-               bnx2x_vf_free_resc(bp, vf);
+       /* remove multicasts */
+       bnx2x_vf_mcast(bp, vf, NULL, 0, true);
 
-               /* re-open the mailbox */
-               bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+       /* dispatch final cleanup and wait for HW queues to flush */
+       bnx2x_vf_flr_clnup_hw(bp, vf);
 
-               goto op_done;
-       default:
-               bnx2x_vfop_default(state);
-       }
-op_err:
-       BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
-       vf->flr_clnup_stage = VF_FLR_ACK;
-       bnx2x_vfop_end(bp, vf, vfop);
-       bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
-}
+       /* release VF resources */
+       bnx2x_vf_free_resc(bp, vf);
 
-static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
-                             struct bnx2x_virtf *vf,
-                             vfop_handler_t done)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       if (vfop) {
-               vfop->args.qx.qid = -1; /* loop */
-               bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
-                                bnx2x_vfop_flr, done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
-       }
-       return -ENOMEM;
+       /* re-open the mailbox */
+       bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+       return;
+out:
+       BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
+                 vf->abs_vfid, i, rc);
 }
 
-static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
 {
-       int i = prev_vf ? prev_vf->index + 1 : 0;
        struct bnx2x_virtf *vf;
+       int i;
 
-       /* find next VF to cleanup */
-next_vf_to_clean:
-       for (;
-            i < BNX2X_NR_VIRTFN(bp) &&
-            (bnx2x_vf(bp, i, state) != VF_RESET ||
-             bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
-            i++)
-               ;
+       for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
+               /* VF should be RESET & in FLR cleanup states */
+               if (bnx2x_vf(bp, i, state) != VF_RESET ||
+                   !bnx2x_vf(bp, i, flr_clnup_stage))
+                       continue;
 
-       DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
-          BNX2X_NR_VIRTFN(bp));
+               DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
+                  i, BNX2X_NR_VIRTFN(bp));
 
-       if (i < BNX2X_NR_VIRTFN(bp)) {
                vf = BP_VF(bp, i);
 
                /* lock the vf pf channel */
                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 
                /* invoke the VF FLR SM */
-               if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
-                       BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
-                                 vf->abs_vfid);
+               bnx2x_vf_flr(bp, vf);
 
-                       /* mark the VF to be ACKED and continue */
-                       vf->flr_clnup_stage = VF_FLR_ACK;
-                       goto next_vf_to_clean;
-               }
-               return;
-       }
-
-       /* we are done, update vf records */
-       for_each_vf(bp, i) {
-               vf = BP_VF(bp, i);
-
-               if (vf->flr_clnup_stage != VF_FLR_ACK)
-                       continue;
-
-               vf->flr_clnup_stage = VF_FLR_EPILOG;
+               /* mark the VF to be ACKED and continue */
+               vf->flr_clnup_stage = false;
+               bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
        }
 
        /* Acknowledge the handled VFs.
@@ -1742,7 +999,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
                if (reset) {
                        /* set as reset and ready for cleanup */
                        vf->state = VF_RESET;
-                       vf->flr_clnup_stage = VF_FLR_CLN;
+                       vf->flr_clnup_stage = true;
 
                        DP(BNX2X_MSG_IOV,
                           "Initiating Final cleanup for VF %d\n",
@@ -1751,7 +1008,7 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
        }
 
        /* do the FLR cleanup for all marked VFs*/
-       bnx2x_vf_flr_clnup(bp, NULL);
+       bnx2x_vf_flr_clnup(bp);
 }
 
 /* IOV global initialization routines  */
@@ -2018,7 +1275,6 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
                bnx2x_vf(bp, i, index) = i;
                bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
                bnx2x_vf(bp, i, state) = VF_FREE;
-               INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
                mutex_init(&bnx2x_vf(bp, i, op_mutex));
                bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
        }
@@ -2039,6 +1295,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
                goto failed;
        }
 
+       /* Prepare the VFs event synchronization mechanism */
+       mutex_init(&bp->vfdb->event_mutex);
+
        return 0;
 failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -2117,7 +1376,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
                cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
 
                if (cxt->size) {
-                       BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+                       cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+                       if (!cxt->addr)
+                               goto alloc_mem_err;
                } else {
                        cxt->addr = NULL;
                        cxt->mapping = 0;
@@ -2127,20 +1388,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
 
        /* allocate vfs ramrods dma memory - client_init and set_mac */
        tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
-       BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
-                       tot_size);
+       BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+                                                  tot_size);
+       if (!BP_VFDB(bp)->sp_dma.addr)
+               goto alloc_mem_err;
        BP_VFDB(bp)->sp_dma.size = tot_size;
 
        /* allocate mailboxes */
        tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
-       BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
-                       tot_size);
+       BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+                                                 tot_size);
+       if (!BP_VF_MBX_DMA(bp)->addr)
+               goto alloc_mem_err;
+
        BP_VF_MBX_DMA(bp)->size = tot_size;
 
        /* allocate local bulletin boards */
        tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
-       BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
-                       &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+       BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+                                                      tot_size);
+       if (!BP_VF_BULLETIN_DMA(bp)->addr)
+               goto alloc_mem_err;
+
        BP_VF_BULLETIN_DMA(bp)->size = tot_size;
 
        return 0;
@@ -2166,6 +1435,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                             bnx2x_vf_sp_map(bp, vf, q_data),
                             q_type);
 
+       /* sp indication is set only when vlan/mac/etc. are initialized */
+       q->sp_initialized = false;
+
        DP(BNX2X_MSG_IOV,
           "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
           vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2269,7 +1541,7 @@ int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
 
        /* release all the VFs */
        for_each_vf(bp, i)
-               bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+               bnx2x_vf_release(bp, BP_VF(bp, i));
 
        return 0;
 }
@@ -2359,6 +1631,12 @@ void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
        smp_mb__after_clear_bit();
 }
 
+static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
+                                          struct bnx2x_virtf *vf)
+{
+       vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
+}
+
 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
 {
        struct bnx2x_virtf *vf;
@@ -2383,6 +1661,7 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
        case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
        case EVENT_RING_OPCODE_MULTICAST_RULES:
        case EVENT_RING_OPCODE_FILTERS_RULES:
+       case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
                cid = (elem->message.data.eth_event.echo &
                       BNX2X_SWCID_MASK);
                DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
@@ -2447,13 +1726,15 @@ get_vf:
                   vf->abs_vfid, qidx);
                bnx2x_vf_handle_filters_eqe(bp, vf);
                break;
+       case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
+               DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
+                  vf->abs_vfid, qidx);
+               bnx2x_vf_handle_rss_update_eqe(bp, vf);
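+               /* fall through */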
        case EVENT_RING_OPCODE_VF_FLR:
        case EVENT_RING_OPCODE_MALICIOUS_VF:
                /* Do nothing for now */
                return 0;
        }
-       /* SRIOV: reschedule any 'in_progress' operations */
-       bnx2x_iov_sp_event(bp, cid, false);
 
        return 0;
 }
@@ -2490,23 +1771,6 @@ void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
        }
 }
 
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
-{
-       struct bnx2x_virtf *vf;
-
-       /* check if the cid is the VF range */
-       if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
-               return;
-
-       vf = bnx2x_vf_by_cid(bp, vf_cid);
-       if (vf) {
-               /* set in_progress flag */
-               atomic_set(&vf->op_in_progress, 1);
-               if (queue_work)
-                       queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
-       }
-}
-
 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
 {
        int i;
@@ -2527,10 +1791,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
        first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
                (is_fcoe ? 0 : 1);
 
-       DP(BNX2X_MSG_IOV,
-          "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
-          BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
-          first_queue_query_index + num_queues_req);
+       DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+              "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+              BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+              first_queue_query_index + num_queues_req);
 
        cur_data_offset = bp->fw_stats_data_mapping +
                offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +1808,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
                struct bnx2x_virtf *vf = BP_VF(bp, i);
 
                if (vf->state != VF_ENABLED) {
-                       DP(BNX2X_MSG_IOV,
-                          "vf %d not enabled so no stats for it\n",
-                          vf->abs_vfid);
+                       DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+                              "vf %d not enabled so no stats for it\n",
+                              vf->abs_vfid);
                        continue;
                }
 
@@ -2588,32 +1852,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
        bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
 }
 
-void bnx2x_iov_sp_task(struct bnx2x *bp)
-{
-       int i;
-
-       if (!IS_SRIOV(bp))
-               return;
-       /* Iterate over all VFs and invoke state transition for VFs with
-        * 'in-progress' slow-path operations
-        */
-       DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
-       for_each_vf(bp, i) {
-               struct bnx2x_virtf *vf = BP_VF(bp, i);
-
-               if (!vf) {
-                       BNX2X_ERR("VF was null! skipping...\n");
-                       continue;
-               }
-
-               if (!list_empty(&vf->op_list_head) &&
-                   atomic_read(&vf->op_in_progress)) {
-                       DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
-                       bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
-               }
-       }
-}
-
 static inline
 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
 {
@@ -2849,52 +2087,26 @@ static void bnx2x_set_vf_state(void *cookie)
        p->vf->state = p->state;
 }
 
-/* VFOP close (teardown the queues, delete mcasts and close HW) */
-static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
-       enum bnx2x_vfop_close_state state = vfop->state;
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vfop_close,
-               .block = false,
-       };
+       int rc = 0, i;
 
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       switch (state) {
-       case BNX2X_VFOP_CLOSE_QUEUES:
-
-               if (++(qx->qid) < vf_rxq_count(vf)) {
-                       vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
-                       if (vfop->rc)
-                               goto op_err;
-                       return;
-               }
-               vfop->state = BNX2X_VFOP_CLOSE_HW;
-               vfop->rc = 0;
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 
-       case BNX2X_VFOP_CLOSE_HW:
-
-               /* disable the interrupts */
-               DP(BNX2X_MSG_IOV, "disabling igu\n");
-               bnx2x_vf_igu_disable(bp, vf);
+       /* Close all queues */
+       for (i = 0; i < vf_rxq_count(vf); i++) {
+               rc = bnx2x_vf_queue_teardown(bp, vf, i);
+               if (rc)
+                       goto op_err;
+       }
 
-               /* disable the VF */
-               DP(BNX2X_MSG_IOV, "clearing qtbl\n");
-               bnx2x_vf_clr_qtbl(bp, vf);
+       /* disable the interrupts */
+       DP(BNX2X_MSG_IOV, "disabling igu\n");
+       bnx2x_vf_igu_disable(bp, vf);
 
-               goto op_done;
-       default:
-               bnx2x_vfop_default(state);
-       }
-op_err:
-       BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
+       /* disable the VF */
+       DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+       bnx2x_vf_clr_qtbl(bp, vf);
 
        /* need to make sure there are no outstanding stats ramrods which may
         * cause the device to access the VF's stats buffer which it will free
@@ -2909,43 +2121,20 @@ op_done:
        }
 
        DP(BNX2X_MSG_IOV, "set state to acquired\n");
-       bnx2x_vfop_end(bp, vf, vfop);
-op_pending:
-       /* Not supported at the moment; Exists for macros only */
-       return;
-}
 
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
-                        struct bnx2x_virtf *vf,
-                        struct bnx2x_vfop_cmd *cmd)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       if (vfop) {
-               vfop->args.qx.qid = -1; /* loop */
-               bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
-                                bnx2x_vfop_close, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
-                                            cmd->block);
-       }
-       return -ENOMEM;
+       return 0;
+op_err:
+       BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
+       return rc;
 }
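
The hunk above folds the old VFOP close state machine into a single synchronous bnx2x_vf_close() that walks the queues in order and jumps to one error label. A minimal, self-contained user-space sketch of that control-flow shape, using hypothetical step functions rather than driver APIs:

#include <stdio.h>

/* Hypothetical step functions standing in for queue teardown, IGU
 * disable and queue-table clearing; each returns 0 on success. */
static int teardown_queue(int qid)  { (void)qid; return 0; }
static int disable_interrupts(void) { return 0; }
static int clear_queue_table(void)  { return 0; }

/* Straight-line close with a single error label, mirroring the
 * goto-op_err shape of the converted function above. */
static int vf_close_sketch(int nr_queues)
{
        int rc, i;

        for (i = 0; i < nr_queues; i++) {
                rc = teardown_queue(i);
                if (rc)
                        goto op_err;
        }

        rc = disable_interrupts();
        if (rc)
                goto op_err;

        rc = clear_queue_table();
        if (rc)
                goto op_err;

        return 0;

op_err:
        fprintf(stderr, "close error: rc %d\n", rc);
        return rc;
}

int main(void)
{
        return vf_close_sketch(4);
}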
 
 /* VF release can be called either: 1. The VF was acquired but
  * not enabled 2. the vf was enabled or in the process of being
  * enabled
  */
-static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vfop_release,
-               .block = false,
-       };
-
-       DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-
-       if (vfop->rc < 0)
-               goto op_err;
+       int rc;
 
        DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
           vf->state == VF_FREE ? "Free" :
@@ -2956,116 +2145,87 @@ static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
        switch (vf->state) {
        case VF_ENABLED:
-               vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
-               if (vfop->rc)
+               rc = bnx2x_vf_close(bp, vf);
+               if (rc)
                        goto op_err;
-               return;
-
+               /* Fallthrough to release resources */
        case VF_ACQUIRED:
                DP(BNX2X_MSG_IOV, "about to free resources\n");
                bnx2x_vf_free_resc(bp, vf);
-               DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
-               goto op_done;
+               break;
 
        case VF_FREE:
        case VF_RESET:
-               /* do nothing */
-               goto op_done;
        default:
-               bnx2x_vfop_default(vf->state);
+               break;
        }
+       return 0;
 op_err:
-       BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
-op_done:
-       bnx2x_vfop_end(bp, vf, vfop);
+       BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
+       return rc;
 }
 
-static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                       struct bnx2x_config_rss_params *rss)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       enum bnx2x_vfop_rss_state state;
-
-       if (!vfop) {
-               BNX2X_ERR("vfop was null\n");
-               return;
-       }
-
-       state = vfop->state;
-       bnx2x_vfop_reset_wq(vf);
-
-       if (vfop->rc < 0)
-               goto op_err;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
-
-       switch (state) {
-       case BNX2X_VFOP_RSS_CONFIG:
-               /* next state */
-               vfop->state = BNX2X_VFOP_RSS_DONE;
-               bnx2x_config_rss(bp, &vfop->op_p->rss);
-               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
-op_err:
-               BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
-op_done:
-       case BNX2X_VFOP_RSS_DONE:
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-       default:
-               bnx2x_vfop_default(state);
-       }
-op_pending:
-       return;
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+       set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
+       return bnx2x_config_rss(bp, rss);
 }
 
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
-                          struct bnx2x_virtf *vf,
-                          struct bnx2x_vfop_cmd *cmd)
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                       struct vfpf_tpa_tlv *tlv,
+                       struct bnx2x_queue_update_tpa_params *params)
 {
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       if (vfop) {
-               bnx2x_vfop_opset(-1, /* use vf->state */
-                                bnx2x_vfop_release, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
-                                            cmd->block);
-       }
-       return -ENOMEM;
-}
+       aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
+       struct bnx2x_queue_state_params qstate;
+       int qid, rc = 0;
 
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
-                      struct bnx2x_virtf *vf,
-                      struct bnx2x_vfop_cmd *cmd)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+       DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
+
+       /* Set ramrod params */
+       memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
+       memcpy(&qstate.params.update_tpa, params,
+              sizeof(struct bnx2x_queue_update_tpa_params));
+       qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
+       set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
 
-       if (vfop) {
-               bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
-                                cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
-                                            cmd->block);
+       for (qid = 0; qid < vf_rxq_count(vf); qid++) {
+               qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+               qstate.params.update_tpa.sge_map = sge_addr[qid];
+               DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
+                  vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
+                  U64_LO(sge_addr[qid]));
+               rc = bnx2x_queue_state_change(bp, &qstate);
+               if (rc) {
+                       BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
+                                 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
+                                 vf->abs_vfid, qid);
+                       return rc;
+               }
        }
-       return -ENOMEM;
+
+       return rc;
 }
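
bnx2x_vf_tpa_update() above programs one 64-bit SGE address per RX queue and logs it as two 32-bit halves via U64_HI/U64_LO. A self-contained sketch of that hi:lo split, with stand-in macros and sample addresses rather than the driver's helpers:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's U64_HI/U64_LO helpers: split a 64-bit
 * DMA address into the two 32-bit words shown in the log above. */
#define ADDR_HI(x) ((unsigned)(((uint64_t)(x)) >> 32))
#define ADDR_LO(x) ((unsigned)((uint64_t)(x) & 0xffffffffu))

int main(void)
{
        /* Stand-in for the per-queue sge_addr[] array from the TLV. */
        uint64_t sge_addr[] = { 0x12345678abcd0000ULL, 0xdeadbeefULL };
        int qid;

        for (qid = 0; qid < 2; qid++)
                printf("sge_addr[%d] %08x:%08x\n", qid,
                       ADDR_HI(sge_addr[qid]), ADDR_LO(sge_addr[qid]));
        return 0;
}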
 
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
  */
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-       struct bnx2x_vfop_cmd cmd = {
-               .done = NULL,
-               .block = block,
-       };
        int rc;
 
        DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
 
-       rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+       rc = bnx2x_vf_free(bp, vf);
        if (rc)
                WARN(rc,
                     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
                     vf->abs_vfid, rc);
+       bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+       return rc;
 }
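
The release path above now takes the VF-PF channel lock, performs the free, and, with the unlock added in this hunk, always drops the lock before returning the result. A simplified user-space sketch of that lock/do/unlock shape, using a pthread mutex as a stand-in for the channel lock and a hypothetical work function:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t channel_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for the actual resource-free work. */
static int do_release_work(int vf_id)
{
        printf("releasing vf %d\n", vf_id);
        return 0;
}

/* Take the channel lock, do the work, and always unlock before
 * returning the result to the caller. */
static int vf_release_sketch(int vf_id)
{
        int rc;

        pthread_mutex_lock(&channel_lock);
        rc = do_release_work(vf_id);
        pthread_mutex_unlock(&channel_lock);
        return rc;
}

int main(void)
{
        return vf_release_sketch(0);
}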
 
 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
@@ -3074,16 +2234,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
        *sbdf = vf->devfn | (vf->bus << 8);
 }
 
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
-                      struct bnx2x_vf_bar_info *bar_info)
-{
-       int n;
-
-       bar_info->nr_bars = bp->vfdb->sriov.nres;
-       for (n = 0; n < bar_info->nr_bars; n++)
-               bar_info->bars[n] = vf->bars[n];
-}
-
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              enum channel_tlvs tlv)
 {
@@ -3405,13 +2555,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
        ivi->spoofchk = 1; /*always enabled */
        if (vf->state == VF_ENABLED) {
                /* mac and vlan are in vlan_mac objects */
-               if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+               if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
                        mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
                                                0, ETH_ALEN);
-               if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
                        vlan_obj->get_n_elements(bp, vlan_obj, 1,
                                                 (u8 *)&ivi->vlan, 0,
                                                 VLAN_HLEN);
+               }
        } else {
                /* mac */
                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +2635,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
            q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
                /* configure the mac in device on this vf's queue */
                unsigned long ramrod_flags = 0;
-               struct bnx2x_vlan_mac_obj *mac_obj =
-                       &bnx2x_leading_vfq(vf, mac_obj);
+               struct bnx2x_vlan_mac_obj *mac_obj;
 
-               rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-               if (rc)
-                       return rc;
+               /* User should be able to see failure reason in system logs */
+               if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+                       return -EINVAL;
 
                /* must lock vfpf channel to protect against vf flows */
                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 
                /* remove existing eth macs */
+               mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +2719,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
            BNX2X_Q_LOGICAL_STATE_ACTIVE)
                return rc;
 
-       /* configure the vlan in device on this vf's queue */
-       vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
-       rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-       if (rc)
-               return rc;
+       /* User should be able to see error in system logs */
+       if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+               return -EINVAL;
 
        /* must lock vfpf channel to protect against vf flows */
        bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
        /* remove existing vlans */
        __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
        rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
                                  &ramrod_flags);
        if (rc) {
@@ -3736,13 +2885,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
        bnx2x_sample_bulletin(bp);
 
        /* if channel is down we need to self destruct */
-       if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
-                       &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
-       }
+       if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+                                      BNX2X_MSG_IOV);
 }
 
 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3756,12 +2901,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
        mutex_init(&bp->vf2pf_mutex);
 
        /* allocate vf2pf mailbox for vf to pf channel */
-       BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
-                       sizeof(struct bnx2x_vf_mbx_msg));
+       bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+                                        sizeof(struct bnx2x_vf_mbx_msg));
+       if (!bp->vf2pf_mbox)
+               goto alloc_mem_err;
 
        /* allocate pf 2 vf bulletin board */
-       BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
-                       sizeof(union pf_vf_bulletin));
+       bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+                                            sizeof(union pf_vf_bulletin));
+       if (!bp->pf2vf_bulletin)
+               goto alloc_mem_err;
 
        return 0;
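
With the new BNX2X_PCI_ALLOC usage above, each allocation returns a pointer that the caller NULL-checks before jumping to a shared error label. A hedged user-space sketch of that caller pattern, with hypothetical types and plain calloc() standing in for the DMA-coherent allocation it does not model:

#include <stdlib.h>

/* Hypothetical message structures. */
struct mbox     { char buf[256]; };
struct bulletin { char buf[128]; };

/* Each allocation returns a pointer; the caller NULL-checks it and
 * jumps to a shared cleanup label on failure. */
static int alloc_channel_sketch(struct mbox **mbx, struct bulletin **bul)
{
        *mbx = calloc(1, sizeof(**mbx));
        if (!*mbx)
                goto alloc_mem_err;

        *bul = calloc(1, sizeof(**bul));
        if (!*bul)
                goto alloc_mem_err;

        return 0;

alloc_mem_err:
        free(*mbx);
        *mbx = NULL;
        return -1;
}

int main(void)
{
        struct mbox *m = NULL;
        struct bulletin *b = NULL;
        int rc = alloc_channel_sketch(&m, &b);

        free(b);
        free(m);
        return rc ? 1 : 0;
}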
 
@@ -3792,3 +2941,28 @@ void bnx2x_iov_channel_down(struct bnx2x *bp)
                bnx2x_post_vf_bulletin(bp, vf_idx);
        }
 }
+
+void bnx2x_iov_task(struct work_struct *work)
+{
+       struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
+
+       if (!netif_running(bp->dev))
+               return;
+
+       if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
+                              &bp->iov_task_state))
+               bnx2x_vf_handle_flr_event(bp);
+
+       if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
+                              &bp->iov_task_state))
+               bnx2x_vf_mbx(bp);
+}
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
+{
+       smp_mb__before_clear_bit();
+       set_bit(flag, &bp->iov_task_state);
+       smp_mb__after_clear_bit();
+       DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+       queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
+}
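
bnx2x_schedule_iov_task() above marks a pending event with an atomic bit and queues delayed work; bnx2x_iov_task() later test-and-clears each bit and dispatches the matching handler. A minimal C11 sketch of that flag/dispatch pattern (the workqueue itself is only simulated by a direct call from main):

#include <stdatomic.h>
#include <stdio.h>

enum iov_flag { HANDLE_FLR, HANDLE_VF_MSG };

static atomic_ulong task_state;

/* Mark an event as pending; in the driver this is followed by
 * queue_delayed_work(), replaced here by a direct worker call. */
static void schedule_iov_task_sketch(enum iov_flag flag)
{
        atomic_fetch_or(&task_state, 1UL << flag);
}

/* Atomically clear one flag and report whether it was set,
 * mirroring test_and_clear_bit(). */
static int test_and_clear_flag(enum iov_flag flag)
{
        unsigned long mask = 1UL << flag;

        return (atomic_fetch_and(&task_state, ~mask) & mask) != 0;
}

/* Worker body: dispatch each pending event exactly once. */
static void iov_task_sketch(void)
{
        if (test_and_clear_flag(HANDLE_FLR))
                printf("handle FLR event\n");
        if (test_and_clear_flag(HANDLE_VF_MSG))
                printf("handle VF mailbox message\n");
}

int main(void)
{
        schedule_iov_task_sketch(HANDLE_VF_MSG);
        iov_task_sketch();
        return 0;
}
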
index d9fcca1b5a9db2365773c48c4d9a3ad28462d797..8bf764570eef773eafa87ffd0fca26592e4d64ef 100644 (file)
@@ -30,6 +30,8 @@ enum sample_bulletin_result {
 
 #ifdef CONFIG_BNX2X_SRIOV
 
+extern struct workqueue_struct *bnx2x_iov_wq;
+
 /* The bnx2x device structure holds vfdb structure described below.
  * The VF array is indexed by the relative vfid.
  */
@@ -83,108 +85,35 @@ struct bnx2x_vf_queue {
        u16 index;
        u16 sb_idx;
        bool is_leading;
+       bool sp_initialized;
 };
 
-/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
- * q-init, q-setup and SB index
+/* struct bnx2x_vf_queue_construct_params - prepare queue construction
+ * parameters: q-init, q-setup and SB index
  */
-struct bnx2x_vfop_qctor_params {
+struct bnx2x_vf_queue_construct_params {
        struct bnx2x_queue_state_params         qstate;
        struct bnx2x_queue_setup_params         prep_qsetup;
 };
 
-/* VFOP parameters (one copy per VF) */
-union bnx2x_vfop_params {
-       struct bnx2x_vlan_mac_ramrod_params     vlan_mac;
-       struct bnx2x_rx_mode_ramrod_params      rx_mode;
-       struct bnx2x_mcast_ramrod_params        mcast;
-       struct bnx2x_config_rss_params          rss;
-       struct bnx2x_vfop_qctor_params          qctor;
-};
-
 /* forward */
 struct bnx2x_virtf;
 
 /* VFOP definitions */
-typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
-
-struct bnx2x_vfop_cmd {
-       vfop_handler_t done;
-       bool block;
-};
 
-/* VFOP queue filters command additional arguments */
-struct bnx2x_vfop_filter {
-       struct list_head link;
+struct bnx2x_vf_mac_vlan_filter {
        int type;
-#define BNX2X_VFOP_FILTER_MAC  1
-#define BNX2X_VFOP_FILTER_VLAN 2
+#define BNX2X_VF_FILTER_MAC    1
+#define BNX2X_VF_FILTER_VLAN   2
 
        bool add;
        u8 *mac;
        u16 vid;
 };
 
-struct bnx2x_vfop_filters {
-       int add_cnt;
-       struct list_head head;
-       struct bnx2x_vfop_filter filters[];
-};
-
-/* transient list allocated, built and saved until its
- * passed to the SP-VERBs layer.
- */
-struct bnx2x_vfop_args_mcast {
-       int mc_num;
-       struct bnx2x_mcast_list_elem *mc;
-};
-
-struct bnx2x_vfop_args_qctor {
-       int     qid;
-       u16     sb_idx;
-};
-
-struct bnx2x_vfop_args_qdtor {
-       int     qid;
-       struct eth_context *cxt;
-};
-
-struct bnx2x_vfop_args_defvlan {
-       int     qid;
-       bool    enable;
-       u16     vid;
-       u8      prio;
-};
-
-struct bnx2x_vfop_args_qx {
-       int     qid;
-       bool    en_add;
-};
-
-struct bnx2x_vfop_args_filters {
-       struct bnx2x_vfop_filters *multi_filter;
-       atomic_t *credit;       /* non NULL means 'don't consume credit' */
-};
-
-union bnx2x_vfop_args {
-       struct bnx2x_vfop_args_mcast    mc_list;
-       struct bnx2x_vfop_args_qctor    qctor;
-       struct bnx2x_vfop_args_qdtor    qdtor;
-       struct bnx2x_vfop_args_defvlan  defvlan;
-       struct bnx2x_vfop_args_qx       qx;
-       struct bnx2x_vfop_args_filters  filters;
-};
-
-struct bnx2x_vfop {
-       struct list_head link;
-       int                     rc;             /* return code */
-       int                     state;          /* next state */
-       union bnx2x_vfop_args   args;           /* extra arguments */
-       union bnx2x_vfop_params *op_p;          /* ramrod params */
-
-       /* state machine callbacks */
-       vfop_handler_t transition;
-       vfop_handler_t done;
+struct bnx2x_vf_mac_vlan_filters {
+       int count;
+       struct bnx2x_vf_mac_vlan_filter filters[];
 };
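
The new filter list above is a count plus a C99 flexible array member, replacing the old linked-list bookkeeping. A small sketch of how such a structure is sized, allocated and filled, with made-up filter fields rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical filter entry; the driver's carries type/add/mac/vid. */
struct filter { int type; unsigned short vid; };

/* Count plus a C99 flexible array member, like the struct above. */
struct filter_list {
        int count;
        struct filter filters[];
};

/* One sized allocation for up to 'n' entries; 'count' then tracks
 * how many were actually filled in. */
static struct filter_list *alloc_filters(int n)
{
        return calloc(1, sizeof(struct filter_list) +
                         n * sizeof(struct filter));
}

int main(void)
{
        struct filter_list *fl = alloc_filters(4);

        if (!fl)
                return 1;
        fl->filters[fl->count].type = 1;        /* e.g. a VLAN filter */
        fl->filters[fl->count].vid = 100;
        fl->count++;
        printf("filters in use: %d\n", fl->count);
        free(fl);
        return 0;
}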
 
 /* vf context */
@@ -204,15 +133,7 @@ struct bnx2x_virtf {
 #define VF_ENABLED     2       /* VF Enabled */
 #define VF_RESET       3       /* VF FLR'd, pending cleanup */
 
-       /* non 0 during flr cleanup */
-       u8 flr_clnup_stage;
-#define VF_FLR_CLN     1       /* reclaim resources and do 'final cleanup'
-                                * sans the end-wait
-                                */
-#define VF_FLR_ACK     2       /* ACK flr notification */
-#define VF_FLR_EPILOG  3       /* wait for VF remnants to dissipate in the HW
-                                * ~ final cleanup' end wait
-                                */
+       bool flr_clnup_stage;   /* true during flr cleanup */
 
        /* dma */
        dma_addr_t fw_stat_map;         /* valid iff VF_CFG_STATS */
@@ -276,11 +197,6 @@ struct bnx2x_virtf {
        struct bnx2x_rss_config_obj     rss_conf_obj;
 
        /* slow-path operations */
-       atomic_t                        op_in_progress;
-       int                             op_rc;
-       bool                            op_wait_blocking;
-       struct list_head                op_list_head;
-       union bnx2x_vfop_params         op_params;
        struct mutex                    op_mutex; /* one vfop at a time mutex */
        enum channel_tlvs               op_current;
 };
@@ -338,11 +254,6 @@ struct bnx2x_vf_mbx {
        u32 vf_addr_hi;
 
        struct vfpf_first_tlv first_tlv;        /* saved VF request header */
-
-       u8 flags;
-#define VF_MSG_INPROCESS       0x1     /* failsafe - the FW should prevent
-                                        * more then one pending msg
-                                        */
 };
 
 struct bnx2x_vf_sp {
@@ -419,6 +330,10 @@ struct bnx2x_vfdb {
        /* the number of msix vectors belonging to this PF designated for VFs */
        u16 vf_sbs_pool;
        u16 first_vf_igu_entry;
+
+       /* sp_rtnl synchronization */
+       struct mutex                    event_mutex;
+       u64                             event_occur;
 };
 
 /* queue access */
@@ -468,13 +383,13 @@ void bnx2x_iov_init_dq(struct bnx2x *bp);
 void bnx2x_iov_init_dmae(struct bnx2x *bp);
 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
                                struct bnx2x_queue_sp_obj **q_obj);
-void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
 void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
-void bnx2x_iov_sp_task(struct bnx2x *bp);
 /* global vf mailbox routines */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_mbx(struct bnx2x *bp);
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+                          struct vf_pf_event_data *vfpf_event);
 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
 
 /* CORE VF API */
@@ -487,162 +402,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                  dma_addr_t *sb_map);
 
-/* VFOP generic helpers */
-#define bnx2x_vfop_default(state) do {                         \
-               BNX2X_ERR("Bad state %d\n", (state));           \
-               vfop->rc = -EINVAL;                             \
-               goto op_err;                                    \
-       } while (0)
-
-enum {
-       VFOP_DONE,
-       VFOP_CONT,
-       VFOP_VERIFY_PEND,
-};
-
-#define bnx2x_vfop_finalize(vf, rc, next) do {                         \
-               if ((rc) < 0)                                           \
-                       goto op_err;                                    \
-               else if ((rc) > 0)                                      \
-                       goto op_pending;                                \
-               else if ((next) == VFOP_DONE)                           \
-                       goto op_done;                                   \
-               else if ((next) == VFOP_VERIFY_PEND)                    \
-                       BNX2X_ERR("expected pending\n");                \
-               else {                                                  \
-                       DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n");   \
-                       atomic_set(&vf->op_in_progress, 1);             \
-                       queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);  \
-                       return;                                         \
-               }                                                       \
-       } while (0)
-
-#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr)         \
-       do {                                                            \
-               vfop->state = first_state;                              \
-               vfop->op_p = &vf->op_params;                            \
-               vfop->transition = trans_hndlr;                         \
-               vfop->done = done_hndlr;                                \
-       } while (0)
-
-static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
-                                               struct bnx2x_virtf *vf)
-{
-       WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
-       WARN_ON(list_empty(&vf->op_list_head));
-       return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
-}
-
-static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
-                                               struct bnx2x_virtf *vf)
-{
-       struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
-
-       WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
-       if (vfop) {
-               INIT_LIST_HEAD(&vfop->link);
-               list_add(&vfop->link, &vf->op_list_head);
-       }
-       return vfop;
-}
-
-static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
-                                 struct bnx2x_vfop *vfop)
-{
-       /* rc < 0 - error, otherwise set to 0 */
-       DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
-       if (vfop->rc >= 0)
-               vfop->rc = 0;
-       DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
-
-       /* unlink the current op context and propagate error code
-        * must be done before invoking the 'done()' handler
-        */
-       WARN(!mutex_is_locked(&vf->op_mutex),
-            "about to access vf op linked list but mutex was not locked!");
-       list_del(&vfop->link);
-
-       if (list_empty(&vf->op_list_head)) {
-               DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
-               vf->op_rc = vfop->rc;
-               DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d,  vfop->rc %d\n",
-                  vf->op_rc, vfop->rc);
-       } else {
-               struct bnx2x_vfop *cur_vfop;
-
-               DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
-               cur_vfop = bnx2x_vfop_cur(bp, vf);
-               cur_vfop->rc = vfop->rc;
-               DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
-                  vf->op_rc, vfop->rc);
-       }
-
-       /* invoke done handler */
-       if (vfop->done) {
-               DP(BNX2X_MSG_IOV, "calling done handler\n");
-               vfop->done(bp, vf);
-       } else {
-               /* there is no done handler for the operation to unlock
-                * the mutex. Must have gotten here from PF initiated VF RELEASE
-                */
-               bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
-       }
-
-       DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
-          vf->op_rc, vfop->rc);
-
-       /* if this is the last nested op reset the wait_blocking flag
-        * to release any blocking wrappers, only after 'done()' is invoked
-        */
-       if (list_empty(&vf->op_list_head)) {
-               DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
-               vf->op_wait_blocking = false;
-       }
-
-       kfree(vfop);
-}
-
-static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
-                                          struct bnx2x_virtf *vf)
-{
-       /* can take a while if any port is running */
-       int cnt = 5000;
-
-       might_sleep();
-       while (cnt--) {
-               if (vf->op_wait_blocking == false) {
-#ifdef BNX2X_STOP_ON_ERROR
-                       DP(BNX2X_MSG_IOV, "exit  (cnt %d)\n", 5000 - cnt);
-#endif
-                       return 0;
-               }
-               usleep_range(1000, 2000);
-
-               if (bp->panic)
-                       return -EIO;
-       }
-
-       /* timeout! */
-#ifdef BNX2X_STOP_ON_ERROR
-       bnx2x_panic();
-#endif
-
-       return -EBUSY;
-}
-
-static inline int bnx2x_vfop_transition(struct bnx2x *bp,
-                                       struct bnx2x_virtf *vf,
-                                       vfop_handler_t transition,
-                                       bool block)
-{
-       if (block)
-               vf->op_wait_blocking = true;
-       transition(bp, vf);
-       if (block)
-               return bnx2x_vfop_wait_blocking(bp, vf);
-       return 0;
-}
-
 /* VFOP queue construction helpers */
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                            struct bnx2x_queue_init_params *init_params,
@@ -657,59 +416,41 @@ void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
                           struct bnx2x_virtf *vf,
                           struct bnx2x_vf_queue *q,
-                          struct bnx2x_vfop_qctor_params *p,
+                          struct bnx2x_vf_queue_construct_params *p,
                           unsigned long q_type);
-int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
-                           struct bnx2x_virtf *vf,
-                           struct bnx2x_vfop_cmd *cmd,
-                           struct bnx2x_vfop_filters *macs,
-                           int qid, bool drv_only);
-
-int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
-                            struct bnx2x_virtf *vf,
-                            struct bnx2x_vfop_cmd *cmd,
-                            struct bnx2x_vfop_filters *vlans,
-                            int qid, bool drv_only);
-
-int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
-                         struct bnx2x_virtf *vf,
-                         struct bnx2x_vfop_cmd *cmd,
-                         int qid);
-
-int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
-                        struct bnx2x_virtf *vf,
-                        struct bnx2x_vfop_cmd *cmd,
-                        int qid);
-
-int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
-                        struct bnx2x_virtf *vf,
-                        struct bnx2x_vfop_cmd *cmd,
-                        bnx2x_mac_addr_t *mcasts,
-                        int mcast_num, bool drv_only);
-
-int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
-                         struct bnx2x_virtf *vf,
-                         struct bnx2x_vfop_cmd *cmd,
-                         int qid, unsigned long accept_flags);
-
-int bnx2x_vfop_close_cmd(struct bnx2x *bp,
-                        struct bnx2x_virtf *vf,
-                        struct bnx2x_vfop_cmd *cmd);
-
-int bnx2x_vfop_release_cmd(struct bnx2x *bp,
-                          struct bnx2x_virtf *vf,
-                          struct bnx2x_vfop_cmd *cmd);
 
-int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
-                      struct bnx2x_virtf *vf,
-                      struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                 struct bnx2x_vf_mac_vlan_filters *filters,
+                                 int qid, bool drv_only);
+
+int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
+                        struct bnx2x_vf_queue_construct_params *qctor);
+
+int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid);
+
+int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                  bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only);
+
+int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                   int qid, unsigned long accept_flags);
+
+int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                       struct bnx2x_config_rss_params *rss);
+
+int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                       struct vfpf_tpa_tlv *tlv,
+                       struct bnx2x_queue_update_tpa_params *params);
 
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
  */
-void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf);
 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
 
@@ -772,18 +513,20 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
 void bnx2x_iov_channel_down(struct bnx2x *bp);
 
+void bnx2x_iov_task(struct work_struct *work);
+
+void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
+
 #else /* CONFIG_BNX2X_SRIOV */
 
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
                                struct bnx2x_queue_sp_obj **q_obj) {}
-static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
-                                     bool queue_work) {}
 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
 static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
                                        union event_ring_elem *elem) {return 1; }
-static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
-static inline void bnx2x_vf_mbx(struct bnx2x *bp,
-                               struct vf_pf_event_data *vfpf_event) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+                                        struct vf_pf_event_data *vfpf_event) {}
 static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
 static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
 static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
@@ -830,5 +573,8 @@ static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
 
+static inline void bnx2x_iov_task(struct work_struct *work) {}
+static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
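
The #else branch above supplies static inline no-op stubs so callers build unchanged when CONFIG_BNX2X_SRIOV is off. A tiny sketch of that compile-out pattern using HAVE_SRIOV as a hypothetical stand-in flag; it builds as shown (without the flag), whereas defining it would also require real definitions to link:

#include <stdio.h>

#ifdef HAVE_SRIOV                       /* hypothetical config flag */
void iov_task(void);                    /* real versions live in a .c */
void schedule_iov_task(int flag);
#else
/* Compiled-out variant: static inline no-ops keep every call site
 * valid without #ifdefs in the callers. */
static inline void iov_task(void) { }
static inline void schedule_iov_task(int flag) { (void)flag; }
#endif

int main(void)
{
        schedule_iov_task(1);           /* no-op in this build */
        iov_task();
        printf("done\n");
        return 0;
}
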
index 3fa6c2a2a5a9f46ba82c19103c114649af4da238..0622884596b2f478ec4a2789c17fdd0938544995 100644 (file)
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 
        vf->leading_rss = cl_id;
        q->is_leading = true;
+       q->sp_initialized = true;
 }
 
 /* ask the pf to open a queue for the vf */
@@ -672,6 +673,7 @@ static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
 
 out:
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
        return rc;
 }
 
@@ -894,29 +896,16 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 
        DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
 
-       switch (mode) {
-       case BNX2X_RX_MODE_NONE: /* no Rx */
+       /* Ignore everything except MODE_NONE */
+       if (mode == BNX2X_RX_MODE_NONE) {
                req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
-               break;
-       case BNX2X_RX_MODE_NORMAL:
+       } else {
+               /* Current PF driver will not look at the specific flags,
+                * but they are required when working with older drivers on the hypervisor.
+                */
                req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
-               break;
-       case BNX2X_RX_MODE_ALLMULTI:
-               req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
-               req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
-               req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
-               break;
-       case BNX2X_RX_MODE_PROMISC:
-               req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
-               req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
-               req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
-               break;
-       default:
-               BNX2X_ERR("BAD rx mode (%d)\n", mode);
-               rc = -EINVAL;
-               goto out;
        }
 
        req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -937,7 +926,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
                BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
                rc = -EINVAL;
        }
-out:
+
        bnx2x_vfpf_finalize(bp, &req->first_tlv);
 
        return rc;
@@ -1047,7 +1036,8 @@ static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
 }
 
 static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
-                                      struct bnx2x_virtf *vf)
+                                      struct bnx2x_virtf *vf,
+                                      int vf_rc)
 {
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
        struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
@@ -1059,7 +1049,7 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
 
-       resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+       resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);
 
        /* send response */
        vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
@@ -1088,9 +1078,6 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
        mmiowb();
 
-       /* initiate dmae to send the response */
-       mbx->flags &= ~VF_MSG_INPROCESS;
-
        /* copy the response header including status-done field,
         * must be last dmae, must be after FW is acked
         */
@@ -1110,14 +1097,15 @@ static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
        return;
 
 mbx_error:
-       bnx2x_vf_release(bp, vf, false); /* non blocking */
+       bnx2x_vf_release(bp, vf);
 }
 
 static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
-                                      struct bnx2x_virtf *vf)
+                             struct bnx2x_virtf *vf,
+                             int rc)
 {
        bnx2x_vf_mbx_resp_single_tlv(bp, vf);
-       bnx2x_vf_mbx_resp_send_msg(bp, vf);
+       bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
 }
 
 static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
@@ -1159,7 +1147,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        resp->pfdev_info.db_size = bp->db_size;
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
-                                  /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+                                  PFVF_CAP_TPA |
+                                  PFVF_CAP_TPA_UPDATE);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));
 
@@ -1240,8 +1229,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                      sizeof(struct channel_list_end_tlv));
 
        /* send the response */
-       vf->op_rc = vfop_status;
-       bnx2x_vf_mbx_resp_send_msg(bp, vf);
+       bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
 }
 
 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1273,19 +1261,20 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_vf_mbx *mbx)
 {
        struct vfpf_init_tlv *init = &mbx->msg->req.init;
+       int rc;
 
        /* record ghost addresses from vf message */
        vf->spq_map = init->spq_addr;
        vf->fw_stat_map = init->stats_addr;
        vf->stats_stride = init->stats_stride;
-       vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+       rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
 
        /* set VF multiqueue statistics collection mode */
        if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
                vf->cfg_flags |= VF_CFG_STATS_COALESCE;
 
        /* response */
-       bnx2x_vf_mbx_resp(bp, vf);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
 /* convert MBX queue-flags to standard SP queue-flags */
@@ -1320,16 +1309,14 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
 {
        struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vf_mbx_resp,
-               .block = false,
-       };
+       struct bnx2x_vf_queue_construct_params qctor;
+       int rc = 0;
 
        /* verify vf_qid */
        if (setup_q->vf_qid >= vf_rxq_count(vf)) {
                BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
                          setup_q->vf_qid, vf_rxq_count(vf));
-               vf->op_rc = -EINVAL;
+               rc = -EINVAL;
                goto response;
        }
 
@@ -1347,9 +1334,10 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        bnx2x_leading_vfq_init(bp, vf, q);
 
                /* re-init the VF operation context */
-               memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
-               setup_p = &vf->op_params.qctor.prep_qsetup;
-               init_p =  &vf->op_params.qctor.qstate.params.init;
+               memset(&qctor, 0,
+                      sizeof(struct bnx2x_vf_queue_construct_params));
+               setup_p = &qctor.prep_qsetup;
+               init_p =  &qctor.qstate.params.init;
 
                /* activate immediately */
                __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
@@ -1435,44 +1423,34 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                                 q->index, q->sb_idx);
                }
                /* complete the preparations */
-               bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+               bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
 
-               vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
-               if (vf->op_rc)
+               rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
+               if (rc)
                        goto response;
-               return;
        }
 response:
-       bnx2x_vf_mbx_resp(bp, vf);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
-enum bnx2x_vfop_filters_state {
-          BNX2X_VFOP_MBX_Q_FILTERS_MACS,
-          BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
-          BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
-          BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
-          BNX2X_VFOP_MBX_Q_FILTERS_DONE
-};
-
 static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
                                     struct bnx2x_virtf *vf,
                                     struct vfpf_set_q_filters_tlv *tlv,
-                                    struct bnx2x_vfop_filters **pfl,
+                                    struct bnx2x_vf_mac_vlan_filters **pfl,
                                     u32 type_flag)
 {
        int i, j;
-       struct bnx2x_vfop_filters *fl = NULL;
+       struct bnx2x_vf_mac_vlan_filters *fl = NULL;
        size_t fsz;
 
-       fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
-               sizeof(struct bnx2x_vfop_filters);
+       fsz = tlv->n_mac_vlan_filters *
+             sizeof(struct bnx2x_vf_mac_vlan_filter) +
+             sizeof(struct bnx2x_vf_mac_vlan_filters);
 
        fl = kzalloc(fsz, GFP_KERNEL);
        if (!fl)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&fl->head);
-
        for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
                struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
 
@@ -1480,17 +1458,17 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
                        continue;
                if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
                        fl->filters[j].mac = msg_filter->mac;
-                       fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+                       fl->filters[j].type = BNX2X_VF_FILTER_MAC;
                } else {
                        fl->filters[j].vid = msg_filter->vlan_tag;
-                       fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+                       fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
                }
                fl->filters[j].add =
                        (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
                        true : false;
-               list_add_tail(&fl->filters[j++].link, &fl->head);
+               fl->count++;
        }
-       if (list_empty(&fl->head))
+       if (!fl->count)
                kfree(fl);
        else
                *pfl = fl;
@@ -1530,180 +1508,96 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
 #define VFPF_MAC_FILTER                VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER       VFPF_Q_FILTER_VLAN_TAG_VALID
 
-static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-       int rc;
+       int rc = 0;
 
        struct vfpf_set_q_filters_tlv *msg =
                &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
 
-       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
-       enum bnx2x_vfop_filters_state state = vfop->state;
-
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vfop_mbx_qfilters,
-               .block = false,
-       };
-
-       DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
-
-       if (vfop->rc < 0)
-               goto op_err;
+       /* check for any mac/vlan changes */
+       if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+               /* build mac list */
+               struct bnx2x_vf_mac_vlan_filters *fl = NULL;
 
-       switch (state) {
-       case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
-               /* next state */
-               vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+               rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+                                              VFPF_MAC_FILTER);
+               if (rc)
+                       goto op_err;
 
-               /* check for any vlan/mac changes */
-               if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-                       /* build mac list */
-                       struct bnx2x_vfop_filters *fl = NULL;
+               if (fl) {
 
-                       vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-                                                            VFPF_MAC_FILTER);
-                       if (vfop->rc)
+                       /* set mac list */
+                       rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+                                                          msg->vf_qid,
+                                                          false);
+                       if (rc)
                                goto op_err;
-
-                       if (fl) {
-                               /* set mac list */
-                               rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
-                                                            msg->vf_qid,
-                                                            false);
-                               if (rc) {
-                                       vfop->rc = rc;
-                                       goto op_err;
-                               }
-                               return;
-                       }
                }
-               /* fall through */
 
-       case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
-               /* next state */
-               vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
+               /* build vlan list */
+               fl = NULL;
 
-               /* check for any vlan/mac changes */
-               if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-                       /* build vlan list */
-                       struct bnx2x_vfop_filters *fl = NULL;
-
-                       vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-                                                            VFPF_VLAN_FILTER);
-                       if (vfop->rc)
+               rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+                                              VFPF_VLAN_FILTER);
+               if (rc)
+                       goto op_err;
+
+               if (fl) {
+                       /* set vlan list */
+                       rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+                                                          msg->vf_qid,
+                                                          false);
+                       if (rc)
                                goto op_err;
-
-                       if (fl) {
-                               /* set vlan list */
-                               rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
-                                                             msg->vf_qid,
-                                                             false);
-                               if (rc) {
-                                       vfop->rc = rc;
-                                       goto op_err;
-                               }
-                               return;
-                       }
                }
-               /* fall through */
-
-       case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
-               /* next state */
-               vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
-
-               if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
-                       unsigned long accept = 0;
-                       struct pf_vf_bulletin_content *bulletin =
-                               BP_VF_BULLETIN(bp, vf->index);
-
-                       /* covert VF-PF if mask to bnx2x accept flags */
-                       if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
-                               __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
-
-                       if (msg->rx_mask &
-                                       VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
-                               __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
-
-                       if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
-                               __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
-
-                       if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
-                               __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+       }
 
-                       if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
-                               __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+       if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+               unsigned long accept = 0;
+               struct pf_vf_bulletin_content *bulletin =
+                                       BP_VF_BULLETIN(bp, vf->index);
 
-                       /* A packet arriving the vf's mac should be accepted
-                        * with any vlan, unless a vlan has already been
-                        * configured.
-                        */
-                       if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
-                               __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
-
-                       /* set rx-mode */
-                       rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
-                                                  msg->vf_qid, accept);
-                       if (rc) {
-                               vfop->rc = rc;
-                               goto op_err;
-                       }
-                       return;
+               /* Ignore VF requested mode; instead set a regular mode */
+               if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
+                       __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+                       __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+                       __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
                }
-               /* fall through */
-
-       case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
-               /* next state */
-               vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
-
-               if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
-                       /* set mcasts */
-                       rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
-                                                 msg->n_multicast, false);
-                       if (rc) {
-                               vfop->rc = rc;
-                               goto op_err;
-                       }
-                       return;
-               }
-               /* fall through */
-op_done:
-       case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
-               bnx2x_vfop_end(bp, vf, vfop);
-               return;
-op_err:
-       BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
-                 vf->abs_vfid, msg->vf_qid, vfop->rc);
-       goto op_done;
 
-       default:
-               bnx2x_vfop_default(state);
+               /* A packet arriving at the VF's MAC should be accepted
+                * with any VLAN, unless a VLAN has already been
+                * configured.
+                */
+               if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+               /* set rx-mode */
+               rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
+               if (rc)
+                       goto op_err;
        }
-}
 
-static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
-                                       struct bnx2x_virtf *vf,
-                                       struct bnx2x_vfop_cmd *cmd)
-{
-       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-       if (vfop) {
-               bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
-                                bnx2x_vfop_mbx_qfilters, cmd->done);
-               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
-                                            cmd->block);
+       if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+               /* set mcasts */
+               rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
+                                   msg->n_multicast, false);
+               if (rc)
+                       goto op_err;
        }
-       return -ENOMEM;
+op_err:
+       if (rc)
+               BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+                         vf->abs_vfid, msg->vf_qid, rc);
+       return rc;
 }
 
-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
-                                      struct bnx2x_virtf *vf,
-                                      struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+                                     struct bnx2x_virtf *vf,
+                                     struct vfpf_set_q_filters_tlv *filters)
 {
-       struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
        struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vf_mbx_resp,
-               .block = false,
-       };
+       int rc = 0;
 
        /* if a mac was already set for this VF via the set vf mac ndo, we only
         * accept mac configurations of that mac. Why accept them at all?
@@ -1715,7 +1609,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                if (filters->n_mac_vlan_filters > 1) {
                        BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
                                  vf->abs_vfid);
-                       vf->op_rc = -EPERM;
+                       rc = -EPERM;
                        goto response;
                }
 
@@ -1725,10 +1619,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                        BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
                                  vf->abs_vfid);
 
-                       vf->op_rc = -EPERM;
+                       rc = -EPERM;
                        goto response;
                }
        }
+
+response:
+       return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf,
+                                      struct vfpf_set_q_filters_tlv *filters)
+{
+       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+       int rc = 0;
+
        /* if vlan was set by hypervisor we don't allow guest to config vlan */
        if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
                int i;
@@ -1739,14 +1645,35 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                            VFPF_Q_FILTER_VLAN_TAG_VALID) {
                                BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
                                          vf->abs_vfid);
-                               vf->op_rc = -EPERM;
+                               rc = -EPERM;
                                goto response;
                        }
                }
        }
 
        /* verify vf_qid */
-       if (filters->vf_qid > vf_rxq_count(vf))
+       if (filters->vf_qid > vf_rxq_count(vf)) {
+               rc = -EPERM;
+               goto response;
+       }
+
+response:
+       return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf,
+                                      struct bnx2x_vf_mbx *mbx)
+{
+       struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+       int rc;
+
+       rc = bnx2x_filters_validate_mac(bp, vf, filters);
+       if (rc)
+               goto response;
+
+       rc = bnx2x_filters_validate_vlan(bp, vf, filters);
+       if (rc)
                goto response;
 
        DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1756,125 +1683,169 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
        /* print q_filter message */
        bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
 
-       vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
-       if (vf->op_rc)
-               goto response;
-       return;
-
+       rc = bnx2x_vf_mbx_qfilters(bp, vf);
 response:
-       bnx2x_vf_mbx_resp(bp, vf);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
 static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
 {
        int qid = mbx->msg->req.q_op.vf_qid;
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vf_mbx_resp,
-               .block = false,
-       };
+       int rc;
 
        DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
           vf->abs_vfid, qid);
 
-       vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
-       if (vf->op_rc)
-               bnx2x_vf_mbx_resp(bp, vf);
+       rc = bnx2x_vf_queue_teardown(bp, vf, qid);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
 static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mbx *mbx)
 {
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vf_mbx_resp,
-               .block = false,
-       };
+       int rc;
 
        DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
 
-       vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
-       if (vf->op_rc)
-               bnx2x_vf_mbx_resp(bp, vf);
+       rc = bnx2x_vf_close(bp, vf);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
 static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
 {
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vf_mbx_resp,
-               .block = false,
-       };
+       int rc;
 
        DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
 
-       vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
-       if (vf->op_rc)
-               bnx2x_vf_mbx_resp(bp, vf);
+       rc = bnx2x_vf_free(bp, vf);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
 static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                    struct bnx2x_vf_mbx *mbx)
 {
-       struct bnx2x_vfop_cmd cmd = {
-               .done = bnx2x_vf_mbx_resp,
-               .block = false,
-       };
-       struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+       struct bnx2x_config_rss_params rss;
        struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+       int rc = 0;
 
        if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
            rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
                BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
                          vf->index);
-               vf->op_rc = -EINVAL;
+               rc = -EINVAL;
                goto mbx_resp;
        }
 
+       memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));
+
        /* set vfop params according to rss tlv */
-       memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+       memcpy(rss.ind_table, rss_tlv->ind_table,
               T_ETH_INDIRECTION_TABLE_SIZE);
-       memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
-              sizeof(rss_tlv->rss_key));
-       vf_op_params->rss_obj = &vf->rss_conf_obj;
-       vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+       memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
+       rss.rss_obj = &vf->rss_conf_obj;
+       rss.rss_result_mask = rss_tlv->rss_result_mask;
 
        /* flags handled individually for backward/forward compatibility */
-       vf_op_params->rss_flags = 0;
-       vf_op_params->ramrod_flags = 0;
+       rss.rss_flags = 0;
+       rss.ramrod_flags = 0;
 
        if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
-               __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
-               __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
-               __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
-               __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
-               __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
-               __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
-               __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
-               __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
        if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
-               __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+               __set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);
 
        if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
             rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
            (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
             rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
                BNX2X_ERR("about to hit a FW assert. aborting...\n");
-               vf->op_rc = -EINVAL;
+               rc = -EINVAL;
                goto mbx_resp;
        }
 
-       vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+       rc = bnx2x_vf_rss_update(bp, vf, &rss);
+mbx_resp:
+       bnx2x_vf_mbx_resp(bp, vf, rc);
+}
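
Editor's note: the update_rss handler above copies each VFPF_RSS_* request flag into the corresponding BNX2X_RSS_* bit one test at a time, so flags unknown to either side are simply ignored (the backward/forward compatibility mentioned in the comment). The following is a minimal standalone sketch of that flag-translation idea written as a lookup table instead of an if-chain; all constant names and values here are invented stand-ins, not the driver's real definitions.

    #include <stdio.h>

    /* Hypothetical wire-format flags (stand-ins for VFPF_RSS_*). */
    #define WIRE_RSS_IPV4      0x01
    #define WIRE_RSS_IPV4_TCP  0x02
    #define WIRE_RSS_IPV6      0x04
    #define WIRE_RSS_IPV6_TCP  0x08

    /* Hypothetical driver-side bit numbers (stand-ins for BNX2X_RSS_*). */
    enum { DRV_RSS_IPV4, DRV_RSS_IPV4_TCP, DRV_RSS_IPV6, DRV_RSS_IPV6_TCP };

    static const struct { unsigned int wire; int drv_bit; } rss_map[] = {
        { WIRE_RSS_IPV4,     DRV_RSS_IPV4 },
        { WIRE_RSS_IPV4_TCP, DRV_RSS_IPV4_TCP },
        { WIRE_RSS_IPV6,     DRV_RSS_IPV6 },
        { WIRE_RSS_IPV6_TCP, DRV_RSS_IPV6_TCP },
    };

    int main(void)
    {
        unsigned int wire_flags = WIRE_RSS_IPV4 | WIRE_RSS_IPV4_TCP;
        unsigned long drv_flags = 0;
        size_t i;

        /* Only flags both sides know about are copied; the rest are ignored. */
        for (i = 0; i < sizeof(rss_map) / sizeof(rss_map[0]); i++)
            if (wire_flags & rss_map[i].wire)
                drv_flags |= 1UL << rss_map[i].drv_bit;

        printf("driver rss_flags = 0x%lx\n", drv_flags);
        return 0;
    }

The driver itself keeps the explicit per-flag tests shown in the diff; the table form above is only an illustration of the same mapping.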
+
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+                                      struct vfpf_tpa_tlv *tpa_tlv)
+{
+       int rc = 0;
+
+       if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+           U_ETH_MAX_SGES_FOR_PACKET) {
+               rc = -EINVAL;
+               BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+                         tpa_tlv->tpa_client_info.max_sges_for_packet,
+                         U_ETH_MAX_SGES_FOR_PACKET);
+       }
+
+       if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+               rc = -EINVAL;
+               BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+                         tpa_tlv->tpa_client_info.max_tpa_queues,
+                         MAX_AGG_QS(bp));
+       }
+
+       return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                   struct bnx2x_vf_mbx *mbx)
+{
+       struct bnx2x_queue_update_tpa_params vf_op_params;
+       struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+       int rc = 0;
+
+       memset(&vf_op_params, 0, sizeof(vf_op_params));
+
+       if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+               goto mbx_resp;
+
+       vf_op_params.complete_on_both_clients =
+               tpa_tlv->tpa_client_info.complete_on_both_clients;
+       vf_op_params.dont_verify_thr =
+               tpa_tlv->tpa_client_info.dont_verify_thr;
+       vf_op_params.max_agg_sz =
+               tpa_tlv->tpa_client_info.max_agg_size;
+       vf_op_params.max_sges_pkt =
+               tpa_tlv->tpa_client_info.max_sges_for_packet;
+       vf_op_params.max_tpa_queues =
+               tpa_tlv->tpa_client_info.max_tpa_queues;
+       vf_op_params.sge_buff_sz =
+               tpa_tlv->tpa_client_info.sge_buff_size;
+       vf_op_params.sge_pause_thr_high =
+               tpa_tlv->tpa_client_info.sge_pause_thr_high;
+       vf_op_params.sge_pause_thr_low =
+               tpa_tlv->tpa_client_info.sge_pause_thr_low;
+       vf_op_params.tpa_mode =
+               tpa_tlv->tpa_client_info.tpa_mode;
+       vf_op_params.update_ipv4 =
+               tpa_tlv->tpa_client_info.update_ipv4;
+       vf_op_params.update_ipv6 =
+               tpa_tlv->tpa_client_info.update_ipv6;
+
+       rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);
 
 mbx_resp:
-       if (vf->op_rc)
-               bnx2x_vf_mbx_resp(bp, vf);
+       bnx2x_vf_mbx_resp(bp, vf, rc);
 }
 
 /* dispatch request */
@@ -1916,6 +1887,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                case CHANNEL_TLV_UPDATE_RSS:
                        bnx2x_vf_mbx_update_rss(bp, vf, mbx);
                        return;
+               case CHANNEL_TLV_UPDATE_TPA:
+                       bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+                       return;
                }
 
        } else {
@@ -1935,11 +1909,8 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 
        /* can we respond to VF (do we have an address for it?) */
        if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
-               /* mbx_resp uses the op_rc of the VF */
-               vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
-
                /* notify the VF that we do not support this request */
-               bnx2x_vf_mbx_resp(bp, vf);
+               bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
        } else {
                /* can't send a response since this VF is unknown to us
                 * just ack the FW to release the mailbox and unlock
@@ -1952,13 +1923,10 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
        }
 }
 
-/* handle new vf-pf message */
-void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
+                          struct vf_pf_event_data *vfpf_event)
 {
-       struct bnx2x_virtf *vf;
-       struct bnx2x_vf_mbx *mbx;
        u8 vf_idx;
-       int rc;
 
        DP(BNX2X_MSG_IOV,
           "vf pf event received: vfid %d, address_hi %x, address lo %x",
@@ -1970,50 +1938,73 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
            BNX2X_NR_VIRTFN(bp)) {
                BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
                          vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
-               goto mbx_done;
+               return;
        }
+
        vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
-       mbx = BP_VF_MBX(bp, vf_idx);
 
-       /* verify an event is not currently being processed -
-        * debug failsafe only
-        */
-       if (mbx->flags & VF_MSG_INPROCESS) {
-               BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
-                         vfpf_event->vf_id);
-               goto mbx_done;
-       }
-       vf = BP_VF(bp, vf_idx);
+       /* Update VFDB with current message and schedule its handling */
+       mutex_lock(&BP_VFDB(bp)->event_mutex);
+       BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
+       BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
+       BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
+       mutex_unlock(&BP_VFDB(bp)->event_mutex);
 
-       /* save the VF message address */
-       mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
-       mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
-       DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
-          mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+       bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
+}
 
-       /* dmae to get the VF request */
-       rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
-                                 mbx->vf_addr_hi, mbx->vf_addr_lo,
-                                 sizeof(union vfpf_tlvs)/4);
-       if (rc) {
-               BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
-               goto mbx_error;
-       }
+/* handle new vf-pf messages */
+void bnx2x_vf_mbx(struct bnx2x *bp)
+{
+       struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
+       u64 events;
+       u8 vf_idx;
+       int rc;
 
-       /* process the VF message header */
-       mbx->first_tlv = mbx->msg->req.first_tlv;
+       if (!vfdb)
+               return;
 
-       /* Clean response buffer to refrain from falsely seeing chains */
-       memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+       mutex_lock(&vfdb->event_mutex);
+       events = vfdb->event_occur;
+       vfdb->event_occur = 0;
+       mutex_unlock(&vfdb->event_mutex);
 
-       /* dispatch the request (will prepare the response) */
-       bnx2x_vf_mbx_request(bp, vf, mbx);
-       goto mbx_done;
+       for_each_vf(bp, vf_idx) {
+               struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
+               struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
 
-mbx_error:
-       bnx2x_vf_release(bp, vf, false); /* non blocking */
-mbx_done:
-       return;
+               /* Handle VFs which have pending events */
+               if (!(events & (1ULL << vf_idx)))
+                       continue;
+
+               DP(BNX2X_MSG_IOV,
+                  "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
+                  vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
+                  mbx->first_tlv.resp_msg_offset);
+
+               /* dmae to get the VF request */
+               rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
+                                         vf->abs_vfid, mbx->vf_addr_hi,
+                                         mbx->vf_addr_lo,
+                                         sizeof(union vfpf_tlvs)/4);
+               if (rc) {
+                       BNX2X_ERR("Failed to copy request VF %d\n",
+                                 vf->abs_vfid);
+                       bnx2x_vf_release(bp, vf);
+                       return;
+               }
+
+               /* process the VF message header */
+               mbx->first_tlv = mbx->msg->req.first_tlv;
+
+               /* Clean response buffer to refrain from falsely
+                * seeing chains.
+                */
+               memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
+               /* dispatch the request (will prepare the response) */
+               bnx2x_vf_mbx_request(bp, vf, mbx);
+       }
 }
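
Editor's note: the schedule/handle split above stores the VF message address and sets a per-VF bit in a 64-bit pending mask under event_mutex, then kicks a worker; the worker later snapshots and clears the mask under the same mutex and services each set bit outside the lock. Below is a user-space sketch of that snapshot-and-clear pattern using a pthread mutex; the names and the direct call sequence are illustrative only, not the driver's API.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the VF database's pending-event state. */
    static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t event_occur;

    /* Producer side: mark VF vf_idx as having a pending message. */
    static void schedule_event(unsigned int vf_idx)
    {
        pthread_mutex_lock(&event_mutex);
        event_occur |= 1ULL << vf_idx;
        pthread_mutex_unlock(&event_mutex);
        /* ...then kick a worker (the driver schedules an IOV task here) */
    }

    /* Consumer side: take a snapshot, clear it, then work outside the lock. */
    static void handle_events(void)
    {
        uint64_t events;
        unsigned int vf_idx;

        pthread_mutex_lock(&event_mutex);
        events = event_occur;
        event_occur = 0;
        pthread_mutex_unlock(&event_mutex);

        for (vf_idx = 0; vf_idx < 64; vf_idx++)
            if (events & (1ULL << vf_idx))
                printf("servicing VF %u\n", vf_idx);
    }

    int main(void)
    {
        schedule_event(3);
        schedule_event(17);
        handle_events();
        return 0;
    }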
 
 /* propagate local bulletin board to vf */
index 208568bc7a71d72d8e19aea70f02485e17c0e8ee..c922b81170e5bc20c4ff69d34d16d40f853c9d8a 100644 (file)
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
 #define PFVF_CAP_RSS           0x00000001
 #define PFVF_CAP_DHC           0x00000002
 #define PFVF_CAP_TPA           0x00000004
+#define PFVF_CAP_TPA_UPDATE    0x00000008
                char fw_ver[32];
                u16 db_size;
                u8  indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
        u32 rx_mask;    /* see mask constants at the top of the file */
 };
 
+struct vfpf_tpa_tlv {
+       struct vfpf_first_tlv   first_tlv;
+
+       struct vf_pf_tpa_client_info {
+               aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+               u8 update_ipv4;
+               u8 update_ipv6;
+               u8 max_tpa_queues;
+               u8 max_sges_for_packet;
+               u8 complete_on_both_clients;
+               u8 dont_verify_thr;
+               u8 tpa_mode;
+               u16 sge_buff_size;
+               u16 max_agg_size;
+               u16 sge_pause_thr_low;
+               u16 sge_pause_thr_high;
+       } tpa_client_info;
+};
+
 /* close VF (disable VF) */
 struct vfpf_close_tlv {
        struct vfpf_first_tlv   first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
        struct vfpf_set_q_filters_tlv   set_q_filters;
        struct vfpf_release_tlv         release;
        struct vfpf_rss_tlv             update_rss;
+       struct vfpf_tpa_tlv             update_tpa;
        struct channel_list_end_tlv     list_end;
        struct tlv_buffer_size          tlv_buf_size;
 };
@@ -405,6 +426,7 @@ enum channel_tlvs {
        CHANNEL_TLV_PF_SET_VLAN,
        CHANNEL_TLV_UPDATE_RSS,
        CHANNEL_TLV_PHYS_PORT_ID,
+       CHANNEL_TLV_UPDATE_TPA,
        CHANNEL_TLV_MAX
 };
 
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644 (file)
index 0000000..31f55a9
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMGENET) += genet.o
+genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644 (file)
index 0000000..adf8acb
--- /dev/null
@@ -0,0 +1,2584 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt)                            "bcmgenet: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include <asm/unaligned.h>
+
+#include "bcmgenet.h"
+
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT       4
+
+/* Default highest priority queue for multi queue support */
+#define GENET_Q0_PRIORITY      0
+
+#define GENET_DEFAULT_BD_CNT   \
+       (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+
+#define RX_BUF_LENGTH          2048
+#define SKB_ALIGNMENT          32
+
+/* Tx/Rx DMA register offset, skip 256 descriptors */
+#define WORDS_PER_BD(p)                (p->hw_params->words_per_bd)
+#define DMA_DESC_SIZE          (WORDS_PER_BD(priv) * sizeof(u32))
+
+#define GENET_TDMA_REG_OFF     (priv->hw_params->tdma_offset + \
+                               TOTAL_DESC * DMA_DESC_SIZE)
+
+#define GENET_RDMA_REG_OFF     (priv->hw_params->rdma_offset + \
+                               TOTAL_DESC * DMA_DESC_SIZE)
+
+static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+                                               void __iomem *d, u32 value)
+{
+       __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
+                                               void __iomem *d)
+{
+       return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
+                                   void __iomem *d,
+                                   dma_addr_t addr)
+{
+       __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+
+       /* Register writes to the GISB bus can take a couple hundred
+        * nanoseconds and are done for each packet; save these expensive
+        * writes unless the platform is explicitly configured for 64-bit/LPAE.
+        */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       if (priv->hw_params->flags & GENET_HAS_40BITS)
+               __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+#endif
+}
+
+/* Combined address + length/status setter */
+static inline void dmadesc_set(struct bcmgenet_priv *priv,
+                               void __iomem *d, dma_addr_t addr, u32 val)
+{
+       dmadesc_set_length_status(priv, d, val);
+       dmadesc_set_addr(priv, d, addr);
+}
+
+static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
+                                         void __iomem *d)
+{
+       dma_addr_t addr;
+
+       addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+
+       /* Register writes to the GISB bus can take a couple hundred
+        * nanoseconds and are done for each packet; save these expensive
+        * writes unless the platform is explicitly configured for 64-bit/LPAE.
+        */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       if (priv->hw_params->flags & GENET_HAS_40BITS)
+               addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+#endif
+       return addr;
+}
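
Editor's note: the descriptor accessors above only touch the HI address word when the hardware has the 40-bit capability and the kernel is built for wide physical addresses, since each extra GISB write is costly. The sketch below shows the split/reassemble arithmetic on its own; the lo32/hi32 helpers are defined locally here and merely mirror the semantics of the kernel's lower_32_bits/upper_32_bits.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lo32(uint64_t addr) { return (uint32_t)addr; }
    static uint32_t hi32(uint64_t addr) { return (uint32_t)(addr >> 32); }

    int main(void)
    {
        uint64_t dma_addr = 0x12345678ABULL;   /* example 40-bit DMA address */
        uint32_t desc_lo, desc_hi;
        uint64_t readback;

        /* Write side: LO word always, HI word only when wide addressing is on. */
        desc_lo = lo32(dma_addr);
        desc_hi = hi32(dma_addr);

        /* Read side: reassemble, OR-ing the HI word back above bit 31. */
        readback = (uint64_t)desc_hi << 32 | desc_lo;

        printf("0x%llx -> lo 0x%08x hi 0x%02x -> 0x%llx\n",
               (unsigned long long)dma_addr, desc_lo, desc_hi,
               (unsigned long long)readback);
        return 0;
    }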
+
+#define GENET_VER_FMT  "%1d.%1d EPHY: 0x%04x"
+
+#define GENET_MSG_DEFAULT      (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+                               NETIF_MSG_LINK)
+
+static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+       if (GENET_IS_V1(priv))
+               return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
+       else
+               return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
+}
+
+static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+       if (GENET_IS_V1(priv))
+               bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
+       else
+               bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
+}
+
+/* These macros are defined to deal with the register map change
+ * between GENET1.1 and GENET2. Only those currently used by the
+ * driver are defined.
+ */
+static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+       if (GENET_IS_V1(priv))
+               return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
+       else
+               return __raw_readl(priv->base +
+                               priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+       if (GENET_IS_V1(priv))
+               bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
+       else
+               __raw_writel(val, priv->base +
+                               priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
+{
+       if (GENET_IS_V1(priv))
+               return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
+       else
+               return __raw_readl(priv->base +
+                               priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
+{
+       if (GENET_IS_V1(priv))
+               bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
+       else
+               __raw_writel(val, priv->base +
+                               priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+/* RX/TX DMA register accessors */
+enum dma_reg {
+       DMA_RING_CFG = 0,
+       DMA_CTRL,
+       DMA_STATUS,
+       DMA_SCB_BURST_SIZE,
+       DMA_ARB_CTRL,
+       DMA_PRIORITY,
+       DMA_RING_PRIORITY,
+};
+
+static const u8 bcmgenet_dma_regs_v3plus[] = {
+       [DMA_RING_CFG]          = 0x00,
+       [DMA_CTRL]              = 0x04,
+       [DMA_STATUS]            = 0x08,
+       [DMA_SCB_BURST_SIZE]    = 0x0C,
+       [DMA_ARB_CTRL]          = 0x2C,
+       [DMA_PRIORITY]          = 0x30,
+       [DMA_RING_PRIORITY]     = 0x38,
+};
+
+static const u8 bcmgenet_dma_regs_v2[] = {
+       [DMA_RING_CFG]          = 0x00,
+       [DMA_CTRL]              = 0x04,
+       [DMA_STATUS]            = 0x08,
+       [DMA_SCB_BURST_SIZE]    = 0x0C,
+       [DMA_ARB_CTRL]          = 0x30,
+       [DMA_PRIORITY]          = 0x34,
+       [DMA_RING_PRIORITY]     = 0x3C,
+};
+
+static const u8 bcmgenet_dma_regs_v1[] = {
+       [DMA_CTRL]              = 0x00,
+       [DMA_STATUS]            = 0x04,
+       [DMA_SCB_BURST_SIZE]    = 0x0C,
+       [DMA_ARB_CTRL]          = 0x30,
+       [DMA_PRIORITY]          = 0x34,
+       [DMA_RING_PRIORITY]     = 0x3C,
+};
+
+/* Set at runtime once bcmgenet version is known */
+static const u8 *bcmgenet_dma_regs;
+
+static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
+{
+       return netdev_priv(dev_get_drvdata(dev));
+}
+
+static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
+                                       enum dma_reg r)
+{
+       return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+                       DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
+                                       u32 val, enum dma_reg r)
+{
+       __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+                       DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
+                                       enum dma_reg r)
+{
+       return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+                       DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
+                                       u32 val, enum dma_reg r)
+{
+       __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+                       DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
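
Editor's note: register offsets differ between GENET generations, so the diff above indexes a per-version u8 offset table (bcmgenet_dma_regs_v1/v2/v3plus) through a single pointer chosen once at probe time, keeping the readl/writel accessors version-agnostic. A compilable sketch of that pattern follows; the offsets and the two-version selection are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    enum dma_reg { DMA_CTRL = 0, DMA_STATUS, DMA_REG_MAX };

    /* Per-generation offset tables; the values here are illustrative only. */
    static const uint8_t regs_v1[DMA_REG_MAX] = {
        [DMA_CTRL] = 0x00, [DMA_STATUS] = 0x04,
    };
    static const uint8_t regs_v2[DMA_REG_MAX] = {
        [DMA_CTRL] = 0x04, [DMA_STATUS] = 0x08,
    };

    /* Selected once at probe time, used by every accessor afterwards. */
    static const uint8_t *dma_regs;

    static unsigned int reg_offset(enum dma_reg r)
    {
        return dma_regs[r];
    }

    int main(void)
    {
        int is_v1 = 0;

        dma_regs = is_v1 ? regs_v1 : regs_v2;
        printf("DMA_STATUS lives at offset 0x%02x\n", reg_offset(DMA_STATUS));
        return 0;
    }
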
+/* RDMA/TDMA ring registers and accessors
+ * we merge the common fields and just prefix with T/D the registers
+ * having different meaning depending on the direction
+ */
+enum dma_ring_reg {
+       TDMA_READ_PTR = 0,
+       RDMA_WRITE_PTR = TDMA_READ_PTR,
+       TDMA_READ_PTR_HI,
+       RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
+       TDMA_CONS_INDEX,
+       RDMA_PROD_INDEX = TDMA_CONS_INDEX,
+       TDMA_PROD_INDEX,
+       RDMA_CONS_INDEX = TDMA_PROD_INDEX,
+       DMA_RING_BUF_SIZE,
+       DMA_START_ADDR,
+       DMA_START_ADDR_HI,
+       DMA_END_ADDR,
+       DMA_END_ADDR_HI,
+       DMA_MBUF_DONE_THRESH,
+       TDMA_FLOW_PERIOD,
+       RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
+       TDMA_WRITE_PTR,
+       RDMA_READ_PTR = TDMA_WRITE_PTR,
+       TDMA_WRITE_PTR_HI,
+       RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
+};
+
+/* GENET v4 supports 40-bit pointer addressing; for obvious reasons
+ * the LO and HI word parts are contiguous, but this offsets the
+ * other registers.
+ */
+static const u8 genet_dma_ring_regs_v4[] = {
+       [TDMA_READ_PTR]                 = 0x00,
+       [TDMA_READ_PTR_HI]              = 0x04,
+       [TDMA_CONS_INDEX]               = 0x08,
+       [TDMA_PROD_INDEX]               = 0x0C,
+       [DMA_RING_BUF_SIZE]             = 0x10,
+       [DMA_START_ADDR]                = 0x14,
+       [DMA_START_ADDR_HI]             = 0x18,
+       [DMA_END_ADDR]                  = 0x1C,
+       [DMA_END_ADDR_HI]               = 0x20,
+       [DMA_MBUF_DONE_THRESH]          = 0x24,
+       [TDMA_FLOW_PERIOD]              = 0x28,
+       [TDMA_WRITE_PTR]                = 0x2C,
+       [TDMA_WRITE_PTR_HI]             = 0x30,
+};
+
+static const u8 genet_dma_ring_regs_v123[] = {
+       [TDMA_READ_PTR]                 = 0x00,
+       [TDMA_CONS_INDEX]               = 0x04,
+       [TDMA_PROD_INDEX]               = 0x08,
+       [DMA_RING_BUF_SIZE]             = 0x0C,
+       [DMA_START_ADDR]                = 0x10,
+       [DMA_END_ADDR]                  = 0x14,
+       [DMA_MBUF_DONE_THRESH]          = 0x18,
+       [TDMA_FLOW_PERIOD]              = 0x1C,
+       [TDMA_WRITE_PTR]                = 0x20,
+};
+
+/* Set at runtime once GENET version is known */
+static const u8 *genet_dma_ring_regs;
+
+static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
+                                               unsigned int ring,
+                                               enum dma_ring_reg r)
+{
+       return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+                       (DMA_RING_SIZE * ring) +
+                       genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
+                                               unsigned int ring,
+                                               u32 val,
+                                               enum dma_ring_reg r)
+{
+       __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+                       (DMA_RING_SIZE * ring) +
+                       genet_dma_ring_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
+                                               unsigned int ring,
+                                               enum dma_ring_reg r)
+{
+       return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+                       (DMA_RING_SIZE * ring) +
+                       genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
+                                               unsigned int ring,
+                                               u32 val,
+                                               enum dma_ring_reg r)
+{
+       __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+                       (DMA_RING_SIZE * ring) +
+                       genet_dma_ring_regs[r]);
+}
+
+static int bcmgenet_get_settings(struct net_device *dev,
+               struct ethtool_cmd *cmd)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       if (!priv->phydev)
+               return -ENODEV;
+
+       return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_settings(struct net_device *dev,
+               struct ethtool_cmd *cmd)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       if (!priv->phydev)
+               return -ENODEV;
+
+       return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_rx_csum(struct net_device *dev,
+                               netdev_features_t wanted)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 rbuf_chk_ctrl;
+       bool rx_csum_en;
+
+       rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+
+       rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+
+       /* enable rx checksumming */
+       if (rx_csum_en)
+               rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+       else
+               rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
+       priv->desc_rxchk_en = rx_csum_en;
+
+       /* If UniMAC forwards CRC, we need to skip over it to get
+        * a valid CHK bit to be set in the per-packet status word
+        */
+       if (rx_csum_en && priv->crc_fwd_en)
+               rbuf_chk_ctrl |= RBUF_SKIP_FCS;
+       else
+               rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
+
+       bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
+
+       return 0;
+}
+
+static int bcmgenet_set_tx_csum(struct net_device *dev,
+                               netdev_features_t wanted)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       bool desc_64b_en;
+       u32 tbuf_ctrl, rbuf_ctrl;
+
+       tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
+       rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+
+       desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+
+       /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
+       if (desc_64b_en) {
+               tbuf_ctrl |= RBUF_64B_EN;
+               rbuf_ctrl |= RBUF_64B_EN;
+       } else {
+               tbuf_ctrl &= ~RBUF_64B_EN;
+               rbuf_ctrl &= ~RBUF_64B_EN;
+       }
+       priv->desc_64b_en = desc_64b_en;
+
+       bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
+       bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
+
+       return 0;
+}
+
+static int bcmgenet_set_features(struct net_device *dev,
+               netdev_features_t features)
+{
+       netdev_features_t changed = features ^ dev->features;
+       netdev_features_t wanted = dev->wanted_features;
+       int ret = 0;
+
+       if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+               ret = bcmgenet_set_tx_csum(dev, wanted);
+       if (changed & (NETIF_F_RXCSUM))
+               ret = bcmgenet_set_rx_csum(dev, wanted);
+
+       return ret;
+}
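
Editor's note: set_features only reprograms the blocks whose feature bits actually toggled, which it detects by XOR-ing the requested feature set against the device's current one. A tiny sketch of that XOR-diff idiom, with stand-in feature bits rather than the real NETIF_F_* values:

    #include <stdint.h>
    #include <stdio.h>

    #define F_TX_CSUM (1u << 0)   /* stand-in for NETIF_F_IP_CSUM/IPV6_CSUM */
    #define F_RX_CSUM (1u << 1)   /* stand-in for NETIF_F_RXCSUM */

    int main(void)
    {
        uint32_t current = F_TX_CSUM;              /* what hardware does now */
        uint32_t wanted  = F_TX_CSUM | F_RX_CSUM;  /* what the stack asks for */
        uint32_t changed = current ^ wanted;       /* bits that flipped */

        if (changed & F_TX_CSUM)
            printf("reprogram TX checksum block\n");
        if (changed & F_RX_CSUM)
            printf("reprogram RX checksum block\n");  /* only this prints */
        return 0;
    }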
+
+static u32 bcmgenet_get_msglevel(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       return priv->msg_enable;
+}
+
+static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       priv->msg_enable = level;
+}
+
+/* standard ethtool support functions. */
+enum bcmgenet_stat_type {
+       BCMGENET_STAT_NETDEV = -1,
+       BCMGENET_STAT_MIB_RX,
+       BCMGENET_STAT_MIB_TX,
+       BCMGENET_STAT_RUNT,
+       BCMGENET_STAT_MISC,
+};
+
+struct bcmgenet_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int stat_sizeof;
+       int stat_offset;
+       enum bcmgenet_stat_type type;
+       /* reg offset from UMAC base for misc counters */
+       u16 reg_offset;
+};
+
+#define STAT_NETDEV(m) { \
+       .stat_string = __stringify(m), \
+       .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+       .stat_offset = offsetof(struct net_device_stats, m), \
+       .type = BCMGENET_STAT_NETDEV, \
+}
+
+#define STAT_GENET_MIB(str, m, _type) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcmgenet_priv, m), \
+       .type = _type, \
+}
+
+#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+
+#define STAT_GENET_MISC(str, m, offset) { \
+       .stat_string = str, \
+       .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+       .stat_offset = offsetof(struct bcmgenet_priv, m), \
+       .type = BCMGENET_STAT_MISC, \
+       .reg_offset = offset, \
+}
+
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT
+ */
+#define BCMGENET_STAT_OFFSET   0xc
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+       /* general stats */
+       STAT_NETDEV(rx_packets),
+       STAT_NETDEV(tx_packets),
+       STAT_NETDEV(rx_bytes),
+       STAT_NETDEV(tx_bytes),
+       STAT_NETDEV(rx_errors),
+       STAT_NETDEV(tx_errors),
+       STAT_NETDEV(rx_dropped),
+       STAT_NETDEV(tx_dropped),
+       STAT_NETDEV(multicast),
+       /* UniMAC RSV counters */
+       STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+       STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+       STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+       STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+       STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+       STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+       STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+       STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+       STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+       STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+       STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
+       STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
+       STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
+       STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
+       STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
+       STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
+       STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
+       STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
+       STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
+       STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
+       STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
+       STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
+       STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
+       STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
+       STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
+       STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
+       STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
+       STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
+       STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
+       /* UniMAC TSV counters */
+       STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+       STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+       STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+       STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+       STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+       STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+       STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+       STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+       STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+       STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+       STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
+       STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
+       STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
+       STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
+       STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
+       STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
+       STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
+       STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
+       STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
+       STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
+       STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
+       STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
+       STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
+       STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
+       STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
+       STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
+       STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
+       STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
+       STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
+       /* UniMAC RUNT counters */
+       STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+       STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+       STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+       STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+       /* Misc UniMAC counters */
+       STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+                       UMAC_RBUF_OVFL_CNT),
+       STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+       STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+};
+
+#define BCMGENET_STATS_LEN     ARRAY_SIZE(bcmgenet_gstrings_stats)
+
+static void bcmgenet_get_drvinfo(struct net_device *dev,
+               struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+       strlcpy(info->version, "v2.0", sizeof(info->version));
+       info->n_stats = BCMGENET_STATS_LEN;
+}
+
+static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
+{
+       switch (string_set) {
+       case ETH_SS_STATS:
+               return BCMGENET_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void bcmgenet_get_strings(struct net_device *dev,
+                               u32 stringset, u8 *data)
+{
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+                       memcpy(data + i * ETH_GSTRING_LEN,
+                               bcmgenet_gstrings_stats[i].stat_string,
+                               ETH_GSTRING_LEN);
+               }
+               break;
+       }
+}
+
+static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+{
+       int i, j = 0;
+
+       for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+               const struct bcmgenet_stats *s;
+               u8 offset = 0;
+               u32 val = 0;
+               char *p;
+
+               s = &bcmgenet_gstrings_stats[i];
+               switch (s->type) {
+               case BCMGENET_STAT_NETDEV:
+                       continue;
+               case BCMGENET_STAT_MIB_RX:
+               case BCMGENET_STAT_MIB_TX:
+               case BCMGENET_STAT_RUNT:
+                       if (s->type != BCMGENET_STAT_MIB_RX)
+                               offset = BCMGENET_STAT_OFFSET;
+                       val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
+                                                               j + offset);
+                       break;
+               case BCMGENET_STAT_MISC:
+                       val = bcmgenet_umac_readl(priv, s->reg_offset);
+                       /* clear if overflowed */
+                       if (val == ~0)
+                               bcmgenet_umac_writel(priv, 0, s->reg_offset);
+                       break;
+               }
+
+               j += s->stat_sizeof;
+               p = (char *)priv + s->stat_offset;
+               *(u32 *)p = val;
+       }
+}
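
Editor's note: the MIB walk above depends on the stat table being declared in the hardware's register order; it accumulates a running register offset (j += stat_sizeof), inserts the 0xC BCMGENET_STAT_OFFSET gap when crossing out of the RX block, and stores each value back into bcmgenet_priv through the recorded offsetof(). The standalone sketch below shows the offsetof-driven stat-table technique on its own; the struct and field names are invented for the example.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stats { uint32_t rx_pkts; uint32_t tx_pkts; };

    struct stat_desc {
        const char *name;
        size_t offset;            /* where to store the value in struct stats */
    };

    static const struct stat_desc table[] = {
        { "rx_pkts", offsetof(struct stats, rx_pkts) },
        { "tx_pkts", offsetof(struct stats, tx_pkts) },
    };

    int main(void)
    {
        struct stats s = { 0, 0 };
        uint32_t fake_hw[] = { 1234, 99 };   /* pretend register readbacks */
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            /* Store through the recorded offset, like the driver does. */
            *(uint32_t *)((char *)&s + table[i].offset) = fake_hw[i];
            printf("%s = %u\n", table[i].name, (unsigned)fake_hw[i]);
        }
        return 0;
    }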
+
+static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+                                       struct ethtool_stats *stats,
+                                       u64 *data)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int i;
+
+       if (netif_running(dev))
+               bcmgenet_update_mib_counters(priv);
+
+       for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+               const struct bcmgenet_stats *s;
+               char *p;
+
+               s = &bcmgenet_gstrings_stats[i];
+               if (s->type == BCMGENET_STAT_NETDEV)
+                       p = (char *)&dev->stats;
+               else
+                       p = (char *)priv;
+               p += s->stat_offset;
+               data[i] = *(u32 *)p;
+       }
+}
+
+/* standard ethtool support functions. */
+static struct ethtool_ops bcmgenet_ethtool_ops = {
+       .get_strings            = bcmgenet_get_strings,
+       .get_sset_count         = bcmgenet_get_sset_count,
+       .get_ethtool_stats      = bcmgenet_get_ethtool_stats,
+       .get_settings           = bcmgenet_get_settings,
+       .set_settings           = bcmgenet_set_settings,
+       .get_drvinfo            = bcmgenet_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_msglevel           = bcmgenet_get_msglevel,
+       .set_msglevel           = bcmgenet_set_msglevel,
+};
+
+/* Power down the unimac, based on mode. */
+static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+                               enum bcmgenet_power_mode mode)
+{
+       u32 reg;
+
+       switch (mode) {
+       case GENET_POWER_CABLE_SENSE:
+               phy_detach(priv->phydev);
+               break;
+
+       case GENET_POWER_PASSIVE:
+               /* Power down LED */
+               bcmgenet_mii_reset(priv->dev);
+               if (priv->hw_params->flags & GENET_HAS_EXT) {
+                       reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+                       reg |= (EXT_PWR_DOWN_PHY |
+                               EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+                       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+                               enum bcmgenet_power_mode mode)
+{
+       u32 reg;
+
+       if (!(priv->hw_params->flags & GENET_HAS_EXT))
+               return;
+
+       reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+
+       switch (mode) {
+       case GENET_POWER_PASSIVE:
+               reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+                               EXT_PWR_DOWN_BIAS);
+               /* fallthrough */
+       case GENET_POWER_CABLE_SENSE:
+               /* enable APD */
+               reg |= EXT_PWR_DN_EN_LD;
+               break;
+       default:
+               break;
+       }
+
+       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       bcmgenet_mii_reset(priv->dev);
+}
+
+/* ioctl handler for special commands that are not present in ethtool */
+static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int val = 0;
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       switch (cmd) {
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               if (!priv->phydev)
+                       val = -ENODEV;
+               else
+                       val = phy_mii_ioctl(priv->phydev, rq, cmd);
+               break;
+
+       default:
+               val = -EINVAL;
+               break;
+       }
+
+       return val;
+}
+
+static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+                                        struct bcmgenet_tx_ring *ring)
+{
+       struct enet_cb *tx_cb_ptr;
+
+       tx_cb_ptr = ring->cbs;
+       tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+       tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+       /* Advancing local write pointer */
+       if (ring->write_ptr == ring->end_ptr)
+               ring->write_ptr = ring->cb_ptr;
+       else
+               ring->write_ptr++;
+
+       return tx_cb_ptr;
+}
+
+/* Simple helper to free a control block's resources */
+static void bcmgenet_free_cb(struct enet_cb *cb)
+{
+       dev_kfree_skb_any(cb->skb);
+       cb->skb = NULL;
+       dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
+                                                 struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_0_writel(priv,
+                       UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+                       INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
+                                                struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_0_writel(priv,
+                       UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+                       INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
+                                               struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_1_writel(priv,
+                       (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+       priv->int1_mask &= ~(1 << ring->index);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+                                               struct bcmgenet_tx_ring *ring)
+{
+       bcmgenet_intrl2_1_writel(priv,
+                       (1 << ring->index), INTRL2_CPU_MASK_SET);
+       priv->int1_mask |= (1 << ring->index);
+}
+
+/* Unlocked version of the reclaim routine */
+static void __bcmgenet_tx_reclaim(struct net_device *dev,
+                               struct bcmgenet_tx_ring *ring)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int last_tx_cn, last_c_index, num_tx_bds;
+       struct enet_cb *tx_cb_ptr;
+       struct netdev_queue *txq;
+       unsigned int c_index;
+
+       /* Compute how many buffers have been transmitted since the last xmit call */
+       c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+       txq = netdev_get_tx_queue(dev, ring->queue);
+
+       last_c_index = ring->c_index;
+       num_tx_bds = ring->size;
+
+       c_index &= (num_tx_bds - 1);
+
+       if (c_index >= last_c_index)
+               last_tx_cn = c_index - last_c_index;
+       else
+               last_tx_cn = num_tx_bds - last_c_index + c_index;
+
+       netif_dbg(priv, tx_done, dev,
+                       "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+                       __func__, ring->index,
+                       c_index, last_tx_cn, last_c_index);
+
+       /* Reclaim transmitted buffers */
+       while (last_tx_cn-- > 0) {
+               tx_cb_ptr = ring->cbs + last_c_index;
+               if (tx_cb_ptr->skb) {
+                       dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+                       dma_unmap_single(&dev->dev,
+                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
+                                       tx_cb_ptr->skb->len,
+                                       DMA_TO_DEVICE);
+                       bcmgenet_free_cb(tx_cb_ptr);
+               } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+                       dev->stats.tx_bytes +=
+                               dma_unmap_len(tx_cb_ptr, dma_len);
+                       dma_unmap_page(&dev->dev,
+                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
+                                       dma_unmap_len(tx_cb_ptr, dma_len),
+                                       DMA_TO_DEVICE);
+                       dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+               }
+               dev->stats.tx_packets++;
+               ring->free_bds += 1;
+
+               last_c_index++;
+               last_c_index &= (num_tx_bds - 1);
+       }
+
+       if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+               ring->int_disable(priv, ring);
+
+       if (netif_tx_queue_stopped(txq))
+               netif_tx_wake_queue(txq);
+
+       ring->c_index = c_index;
+}
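
Editor's note: the reclaim routine above works from a free-running hardware consumer index, masking it down to the power-of-two ring size and taking the difference against the last seen index, with an explicit branch for wrap-around. A small worked sketch of that arithmetic; the ring size and index values are made up for the example.

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_tx_bds = 256;              /* power-of-two ring size */
        unsigned int last_c_index = 250;            /* consumer index last time */
        unsigned int hw_c_index = 260;              /* free-running HW counter */
        unsigned int c_index, done;

        c_index = hw_c_index & (num_tx_bds - 1);    /* 260 & 255 = 4 */

        if (c_index >= last_c_index)
            done = c_index - last_c_index;
        else
            done = num_tx_bds - last_c_index + c_index; /* wrapped: 256-250+4 */

        printf("%u descriptors completed since last reclaim\n", done); /* 10 */
        return 0;
    }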
+
+static void bcmgenet_tx_reclaim(struct net_device *dev,
+               struct bcmgenet_tx_ring *ring)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ring->lock, flags);
+       __bcmgenet_tx_reclaim(dev, ring);
+       spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int i;
+
+       if (netif_is_multiqueue(dev)) {
+               for (i = 0; i < priv->hw_params->tx_queues; i++)
+                       bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
+       }
+
+       bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+}
+
+/* Transmits a single SKB (either the head of a fragment list or a
+ * standalone SKB); the caller must hold priv->lock
+ */
+static int bcmgenet_xmit_single(struct net_device *dev,
+                               struct sk_buff *skb,
+                               u16 dma_desc_flags,
+                               struct bcmgenet_tx_ring *ring)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       struct enet_cb *tx_cb_ptr;
+       unsigned int skb_len;
+       dma_addr_t mapping;
+       u32 length_status;
+       int ret;
+
+       tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+       if (unlikely(!tx_cb_ptr))
+               BUG();
+
+       tx_cb_ptr->skb = skb;
+
+       skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+
+       mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+       ret = dma_mapping_error(kdev, mapping);
+       if (ret) {
+               netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+               dev_kfree_skb(skb);
+               return ret;
+       }
+
+       dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+       dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+       length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+                       (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+                       DMA_TX_APPEND_CRC;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               length_status |= DMA_TX_DO_CSUM;
+
+       dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
+
+       /* Decrement total BD count and advance our write pointer */
+       ring->free_bds -= 1;
+       ring->prod_index += 1;
+       ring->prod_index &= DMA_P_INDEX_MASK;
+
+       return 0;
+}
+
+/* Transmit an SKB fragment */
+static int bcmgenet_xmit_frag(struct net_device *dev,
+                               skb_frag_t *frag,
+                               u16 dma_desc_flags,
+                               struct bcmgenet_tx_ring *ring)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       struct enet_cb *tx_cb_ptr;
+       dma_addr_t mapping;
+       int ret;
+
+       tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+       if (unlikely(!tx_cb_ptr))
+               BUG();
+       tx_cb_ptr->skb = NULL;
+
+       mapping = skb_frag_dma_map(kdev, frag, 0,
+               skb_frag_size(frag), DMA_TO_DEVICE);
+       ret = dma_mapping_error(kdev, mapping);
+       if (ret) {
+               netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
+                               __func__);
+               return ret;
+       }
+
+       dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+       dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+
+       dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
+                       (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+                       (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+
+
+       ring->free_bds -= 1;
+       ring->prod_index += 1;
+       ring->prod_index &= DMA_P_INDEX_MASK;
+
+       return 0;
+}
+
+/* Reallocate the SKB to put enough headroom in front of it and insert
+ * the transmit checksum offsets in the descriptors
+ */
+static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+{
+       struct status_64 *status = NULL;
+       struct sk_buff *new_skb;
+       u16 offset;
+       u8 ip_proto;
+       u16 ip_ver;
+       u32 tx_csum_info;
+
+       if (unlikely(skb_headroom(skb) < sizeof(*status))) {
+               /* If 64 byte status block enabled, must make sure skb has
+                * enough headroom for us to insert 64B status block.
+                */
+               new_skb = skb_realloc_headroom(skb, sizeof(*status));
+               dev_kfree_skb(skb);
+               if (!new_skb) {
+                       dev->stats.tx_errors++;
+                       dev->stats.tx_dropped++;
+                       return -ENOMEM;
+               }
+               skb = new_skb;
+       }
+
+       skb_push(skb, sizeof(*status));
+       status = (struct status_64 *)skb->data;
+
+       if (skb->ip_summed  == CHECKSUM_PARTIAL) {
+               ip_ver = htons(skb->protocol);
+               switch (ip_ver) {
+               case ETH_P_IP:
+                       ip_proto = ip_hdr(skb)->protocol;
+                       break;
+               case ETH_P_IPV6:
+                       ip_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       return 0;
+               }
+
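+               /* The checksum start/offset handed to the hardware are
+                * relative to the packet data, i.e. they exclude the 64B
+                * status block pushed above.
+                */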
+               offset = skb_checksum_start_offset(skb) - sizeof(*status);
+               tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
+                               (offset + skb->csum_offset);
+
+               /* Set the length valid bit for TCP and UDP, plus the special
+                * UDP flag for UDP over IPv4; for any other protocol clear
+                * tx_csum_info.
+                */
+               if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+                       tx_csum_info |= STATUS_TX_CSUM_LV;
+                       if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+                               tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
+               } else
+                       tx_csum_info = 0;
+
+               status->tx_csum_info = tx_csum_info;
+       }
+
+       return 0;
+}
+
+static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct bcmgenet_tx_ring *ring = NULL;
+       struct netdev_queue *txq;
+       unsigned long flags = 0;
+       int nr_frags, index;
+       u16 dma_desc_flags;
+       int ret;
+       int i;
+
+       index = skb_get_queue_mapping(skb);
+       /* Mapping strategy:
+        * queue_mapping = 0, unclassified, packet transmitted through ring 16
+        * queue_mapping = 1, goes to ring 0 (highest priority queue).
+        * queue_mapping = 2, goes to ring 1.
+        * queue_mapping = 3, goes to ring 2.
+        * queue_mapping = 4, goes to ring 3.
+        */
+       if (index == 0)
+               index = DESC_INDEX;
+       else
+               index -= 1;
+
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       ring = &priv->tx_rings[index];
+       txq = netdev_get_tx_queue(dev, ring->queue);
+
+       spin_lock_irqsave(&ring->lock, flags);
+       if (ring->free_bds <= nr_frags + 1) {
+               netif_tx_stop_queue(txq);
+               netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
+                               __func__, index, ring->queue);
+               ret = NETDEV_TX_BUSY;
+               goto out;
+       }
+
+       /* set the SKB transmit checksum */
+       if (priv->desc_64b_en) {
+               ret = bcmgenet_put_tx_csum(dev, skb);
+               if (ret) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       dma_desc_flags = DMA_SOP;
+       if (nr_frags == 0)
+               dma_desc_flags |= DMA_EOP;
+
+       /* Transmit single SKB or head of fragment list */
+       ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
+       if (ret) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
+       /* xmit fragment */
+       for (i = 0; i < nr_frags; i++) {
+               ret = bcmgenet_xmit_frag(dev,
+                               &skb_shinfo(skb)->frags[i],
+                               (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+               if (ret) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       skb_tx_timestamp(skb);
+
+       /* we kept a software copy of how much we should advance the TDMA
+        * producer index, now write it down to the hardware
+        */
+       bcmgenet_tdma_ring_writel(priv, ring->index,
+                       ring->prod_index, TDMA_PROD_INDEX);
+
+       if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+               netif_tx_stop_queue(txq);
+               ring->int_enable(priv, ring);
+       }
+
+out:
+       spin_unlock_irqrestore(&ring->lock, flags);
+
+       return ret;
+}
+
+
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+                               struct enet_cb *cb)
+{
+       struct device *kdev = &priv->pdev->dev;
+       struct sk_buff *skb;
+       dma_addr_t mapping;
+       int ret;
+
+       skb = netdev_alloc_skb(priv->dev,
+                               priv->rx_buf_len + SKB_ALIGNMENT);
+       if (!skb)
+               return -ENOMEM;
+
+       /* a caller did not release this control block */
+       WARN_ON(cb->skb != NULL);
+       cb->skb = skb;
+       mapping = dma_map_single(kdev, skb->data,
+                       priv->rx_buf_len, DMA_FROM_DEVICE);
+       ret = dma_mapping_error(kdev, mapping);
+       if (ret) {
+               bcmgenet_free_cb(cb);
+               netif_err(priv, rx_err, priv->dev,
+                               "%s DMA map failed\n", __func__);
+               return ret;
+       }
+
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       /* assign packet, prepare descriptor, and advance pointer */
+
+       dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+       /* turn on the newly assigned BD for DMA to use */
+       priv->rx_bd_assign_index++;
+       priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+
+       priv->rx_bd_assign_ptr = priv->rx_bds +
+               (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+
+       return 0;
+}
+
+/* bcmgenet_desc_rx - descriptor based rx processing.
+ * This can be called from the bottom half or from the NAPI polling method.
+ */
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+                                    unsigned int budget)
+{
+       struct net_device *dev = priv->dev;
+       struct enet_cb *cb;
+       struct sk_buff *skb;
+       u32 dma_length_status;
+       unsigned long dma_flag;
+       int len, err;
+       unsigned int rxpktprocessed = 0, rxpkttoprocess;
+       unsigned int p_index;
+       unsigned int chksum_ok = 0;
+
+       p_index = bcmgenet_rdma_ring_readl(priv,
+                       DESC_INDEX, RDMA_PROD_INDEX);
+       p_index &= DMA_P_INDEX_MASK;
+
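+       /* Account for producer index wrap-around when computing how many
+        * packets are pending relative to our consumer index.
+        */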
+       if (p_index < priv->rx_c_index)
+               rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
+                       priv->rx_c_index + p_index;
+       else
+               rxpkttoprocess = p_index - priv->rx_c_index;
+
+       netif_dbg(priv, rx_status, dev,
+               "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+
+       while ((rxpktprocessed < rxpkttoprocess) &&
+                       (rxpktprocessed < budget)) {
+
+               /* Unmap the packet contents so that we can use the
+                * RSV from the 64 byte descriptor when enabled and save
+                * a 32-bit register read
+                */
+               cb = &priv->rx_cbs[priv->rx_read_ptr];
+               skb = cb->skb;
+               dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
+                               priv->rx_buf_len, DMA_FROM_DEVICE);
+
+               if (!priv->desc_64b_en) {
+                       dma_length_status = dmadesc_get_length_status(priv,
+                                                       priv->rx_bds +
+                                                       (priv->rx_read_ptr *
+                                                        DMA_DESC_SIZE));
+               } else {
+                       struct status_64 *status;
+                       status = (struct status_64 *)skb->data;
+                       dma_length_status = status->length_status;
+               }
+
+               /* DMA flags and length are still valid no matter how
+                * we got the Receive Status Vector (64B RSB or register)
+                */
+               dma_flag = dma_length_status & 0xffff;
+               len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
+
+               netif_dbg(priv, rx_status, dev,
+                       "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+                       __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
+                       dma_length_status);
+
+               rxpktprocessed++;
+
+               priv->rx_read_ptr++;
+               priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+               /* out of memory, just drop packets at the hardware level */
+               if (unlikely(!skb)) {
+                       dev->stats.rx_dropped++;
+                       dev->stats.rx_errors++;
+                       goto refill;
+               }
+
+               if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+                       netif_err(priv, rx_status, dev,
+                                       "Dropping fragmented packet!\n");
+                       dev->stats.rx_dropped++;
+                       dev->stats.rx_errors++;
+                       dev_kfree_skb_any(cb->skb);
+                       cb->skb = NULL;
+                       goto refill;
+               }
+               /* report errors */
+               if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+                                               DMA_RX_OV |
+                                               DMA_RX_NO |
+                                               DMA_RX_LG |
+                                               DMA_RX_RXER))) {
+                       netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+                                               (unsigned int)dma_flag);
+                       if (dma_flag & DMA_RX_CRC_ERROR)
+                               dev->stats.rx_crc_errors++;
+                       if (dma_flag & DMA_RX_OV)
+                               dev->stats.rx_over_errors++;
+                       if (dma_flag & DMA_RX_NO)
+                               dev->stats.rx_frame_errors++;
+                       if (dma_flag & DMA_RX_LG)
+                               dev->stats.rx_length_errors++;
+                       dev->stats.rx_dropped++;
+                       dev->stats.rx_errors++;
+
+                       /* discard the packet and advance consumer index.*/
+                       dev_kfree_skb_any(cb->skb);
+                       cb->skb = NULL;
+                       goto refill;
+               } /* error packet */
+
+               chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+                               priv->desc_rxchk_en;
+
+               skb_put(skb, len);
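+               /* Strip the 64B hardware receive status block when enabled */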
+               if (priv->desc_64b_en) {
+                       skb_pull(skb, 64);
+                       len -= 64;
+               }
+
+               if (likely(chksum_ok))
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+               /* remove the 2 bytes the hardware added for IP alignment */
+               skb_pull(skb, 2);
+               len -= 2;
+
+               if (priv->crc_fwd_en) {
+                       skb_trim(skb, len - ETH_FCS_LEN);
+                       len -= ETH_FCS_LEN;
+               }
+
+               /* Finish setting up the received SKB and send it to the kernel */
+               skb->protocol = eth_type_trans(skb, priv->dev);
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
+               if (dma_flag & DMA_RX_MULT)
+                       dev->stats.multicast++;
+
+               /* Notify kernel */
+               napi_gro_receive(&priv->napi, skb);
+               cb->skb = NULL;
+               netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+               /* refill RX path on the current control block */
+refill:
+               err = bcmgenet_rx_refill(priv, cb);
+               if (err)
+                       netif_err(priv, rx_err, dev, "Rx refill failed\n");
+       }
+
+       return rxpktprocessed;
+}
+
+/* Assign skb to RX DMA descriptor. */
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+{
+       struct enet_cb *cb;
+       int ret = 0;
+       int i;
+
+       netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+
+       /* loop here for each buffer needing assign */
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+               if (cb->skb)
+                       continue;
+
+               /* set the DMA descriptor length once and for all
+                * it will only change if we support dynamically sizing
+                * priv->rx_buf_len, but we do not
+                */
+               dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
+                               priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
+
+               ret = bcmgenet_rx_refill(priv, cb);
+               if (ret)
+                       break;
+
+       }
+
+       return ret;
+}
+
+static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+{
+       struct enet_cb *cb;
+       int i;
+
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = &priv->rx_cbs[i];
+
+               if (dma_unmap_addr(cb, dma_addr)) {
+                       dma_unmap_single(&priv->dev->dev,
+                                       dma_unmap_addr(cb, dma_addr),
+                                       priv->rx_buf_len, DMA_FROM_DEVICE);
+                       dma_unmap_addr_set(cb, dma_addr, 0);
+               }
+
+               if (cb->skb)
+                       bcmgenet_free_cb(cb);
+       }
+}
+
+static int reset_umac(struct bcmgenet_priv *priv)
+{
+       struct device *kdev = &priv->pdev->dev;
+       unsigned int timeout = 0;
+       u32 reg;
+
+       /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
+       bcmgenet_rbuf_ctrl_set(priv, 0);
+       udelay(10);
+
+       /* disable MAC while updating its registers */
+       bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+
+       /* issue soft reset, wait for it to complete */
+       bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+       while (timeout++ < 1000) {
+               reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+               if (!(reg & CMD_SW_RESET))
+                       return 0;
+
+               udelay(1);
+       }
+
+       if (timeout > 1000) {
+               dev_err(kdev,
+                       "timeout waiting for MAC to come out of reset\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int init_umac(struct bcmgenet_priv *priv)
+{
+       struct device *kdev = &priv->pdev->dev;
+       int ret;
+       u32 reg, cpu_mask_clear;
+
+       dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+       ret = reset_umac(priv);
+       if (ret)
+               return ret;
+
+       bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+       /* clear tx/rx counter */
+       bcmgenet_umac_writel(priv,
+               MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+       bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
+
+       bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+       /* init rx registers, enable ip header optimization */
+       reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+       reg |= RBUF_ALIGN_2B;
+       bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+       if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
+               bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
+
+       /* Mask all interrupts.*/
+       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+       cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+
+       dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+
+       /* Monitor cable plug/unplug events for the internal PHY */
+       if (phy_is_internal(priv->phydev))
+               cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+       else if (priv->ext_phy)
+               cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+       else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+               reg = bcmgenet_bp_mc_get(priv);
+               reg |= BIT(priv->hw_params->bp_in_en_shift);
+
+               /* bp_mask: back pressure mask */
+               if (netif_is_multiqueue(priv->dev))
+                       reg |= priv->hw_params->bp_in_mask;
+               else
+                       reg &= ~priv->hw_params->bp_in_mask;
+               bcmgenet_bp_mc_set(priv, reg);
+       }
+
+       /* Enable MDIO interrupts on GENET v3+ */
+       if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
+               cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
+               INTRL2_CPU_MASK_CLEAR);
+
+       /* Enable rx/tx engine.*/
+       dev_dbg(kdev, "done init umac\n");
+
+       return 0;
+}
+
+/* Initialize all house-keeping variables for a TX ring, along
+ * with corresponding hardware registers
+ */
+static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+                                 unsigned int index, unsigned int size,
+                                 unsigned int write_ptr, unsigned int end_ptr)
+{
+       struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+       u32 words_per_bd = WORDS_PER_BD(priv);
+       u32 flow_period_val = 0;
+       unsigned int first_bd;
+
+       spin_lock_init(&ring->lock);
+       ring->index = index;
+       if (index == DESC_INDEX) {
+               ring->queue = 0;
+               ring->int_enable = bcmgenet_tx_ring16_int_enable;
+               ring->int_disable = bcmgenet_tx_ring16_int_disable;
+       } else {
+               ring->queue = index + 1;
+               ring->int_enable = bcmgenet_tx_ring_int_enable;
+               ring->int_disable = bcmgenet_tx_ring_int_disable;
+       }
+       ring->cbs = priv->tx_cbs + write_ptr;
+       ring->size = size;
+       ring->c_index = 0;
+       ring->free_bds = size;
+       ring->write_ptr = write_ptr;
+       ring->cb_ptr = write_ptr;
+       ring->end_ptr = end_ptr - 1;
+       ring->prod_index = 0;
+
+       /* Set flow period for ring != 16 */
+       if (index != DESC_INDEX)
+               flow_period_val = ENET_MAX_MTU_SIZE << 16;
+
+       bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
+       bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
+       bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+       /* Disable rate control for now */
+       bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
+                       TDMA_FLOW_PERIOD);
+       /* Unclassified traffic goes to ring 16 */
+       bcmgenet_tdma_ring_writel(priv, index,
+                       ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+                       DMA_RING_BUF_SIZE);
+
+       first_bd = write_ptr;
+
+       /* Set start and end address, read and write pointers */
+       bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+                       DMA_START_ADDR);
+       bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+                       TDMA_READ_PTR);
+       bcmgenet_tdma_ring_writel(priv, index, first_bd,
+                       TDMA_WRITE_PTR);
+       bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+                       DMA_END_ADDR);
+}
+
+/* Initialize a RDMA ring */
+static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
+                                 unsigned int index, unsigned int size)
+{
+       u32 words_per_bd = WORDS_PER_BD(priv);
+       int ret;
+
+       priv->num_rx_bds = TOTAL_DESC;
+       priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+       priv->rx_bd_assign_ptr = priv->rx_bds;
+       priv->rx_bd_assign_index = 0;
+       priv->rx_c_index = 0;
+       priv->rx_read_ptr = 0;
+       priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
+                               GFP_KERNEL);
+       if (!priv->rx_cbs)
+               return -ENOMEM;
+
+       ret = bcmgenet_alloc_rx_buffers(priv);
+       if (ret) {
+               kfree(priv->rx_cbs);
+               return ret;
+       }
+
+       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
+       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+       bcmgenet_rdma_ring_writel(priv, index,
+               ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+               DMA_RING_BUF_SIZE);
+       bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
+       bcmgenet_rdma_ring_writel(priv, index,
+               words_per_bd * size - 1, DMA_END_ADDR);
+       bcmgenet_rdma_ring_writel(priv, index,
+                       (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
+                       DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+       bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+       return ret;
+}
+
+/* init multi xmit queues, only available for GENET2+
+ * the queue is partitioned as follows:
+ *
+ * queue 0 - 3 is priority based, each one has 32 descriptors,
+ * with queue 0 being the highest priority queue.
+ *
+ * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
+ * descriptors: 256 - (number of tx queues * bds per queue) = 128
+ * descriptors.
+ *
+ * The transmit control block pool is then partitioned as following:
+ * - tx_cbs[0...127] are for queue 16
+ * - tx_ring_cbs[0] points to tx_cbs[128..159]
+ * - tx_ring_cbs[1] points to tx_cbs[160..191]
+ * - tx_ring_cbs[2] points to tx_cbs[192..223]
+ * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ */
+static void bcmgenet_init_multiq(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       unsigned int i, dma_enable;
+       u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
+
+       if (!netif_is_multiqueue(dev)) {
+               netdev_warn(dev, "called with non multi queue aware HW\n");
+               return;
+       }
+
+       dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       dma_enable = dma_ctrl & DMA_EN;
+       dma_ctrl &= ~DMA_EN;
+       bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+       /* Enable strict priority arbiter mode */
+       bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+
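+       /* For each priority ring: enable it in ring_cfg and dma_ctrl, and
+        * pack its priority into dma_priority; priority fields are spaced
+        * (GENET_MAX_MQ_CNT + 1) bits apart.
+        */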
+       for (i = 0; i < priv->hw_params->tx_queues; i++) {
+               /* first 64 tx_cbs are reserved for default tx queue
+                * (ring 16)
+                */
+               bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
+                                       i * priv->hw_params->bds_cnt,
+                                       (i + 1) * priv->hw_params->bds_cnt);
+
+               /* Configure ring as descriptor ring and set up its priority */
+               ring_cfg |= 1 << i;
+               dma_priority |= ((GENET_Q0_PRIORITY + i) <<
+                               (GENET_MAX_MQ_CNT + 1) * i);
+               dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
+       }
+
+       /* Enable rings */
+       reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
+       reg |= ring_cfg;
+       bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+
+       /* Use configured rings priority and set ring #16 priority */
+       reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
+       reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
+       reg |= dma_priority;
+       bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
+
+       /* Configure ring as descriptor ring and re-enable DMA if enabled */
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg |= dma_ctrl;
+       if (dma_enable)
+               reg |= DMA_EN;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+       int i;
+
+       /* disable DMA */
+       bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
+       bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+
+       for (i = 0; i < priv->num_tx_bds; i++) {
+               if (priv->tx_cbs[i].skb != NULL) {
+                       dev_kfree_skb(priv->tx_cbs[i].skb);
+                       priv->tx_cbs[i].skb = NULL;
+               }
+       }
+
+       bcmgenet_free_rx_buffers(priv);
+       kfree(priv->rx_cbs);
+       kfree(priv->tx_cbs);
+}
+
+/* init_edma: Initialize DMA control register */
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+{
+       int ret;
+
+       netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+
+       /* by default, enable ring 16 (descriptor based) */
+       ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
+       if (ret) {
+               netdev_err(priv->dev, "failed to initialize RX ring\n");
+               return ret;
+       }
+
+       /* init rDma */
+       bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+       /* Init tDma */
+       bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+       /* Initialize common TX ring structures */
+       priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
+       priv->num_tx_bds = TOTAL_DESC;
+       priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
+                               GFP_KERNEL);
+       if (!priv->tx_cbs) {
+               bcmgenet_fini_dma(priv);
+               return -ENOMEM;
+       }
+
+       /* initialize multi xmit queue */
+       bcmgenet_init_multiq(priv->dev);
+
+       /* initialize special ring 16 */
+       bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
+                       priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
+                       TOTAL_DESC);
+
+       return 0;
+}
+
+/* NAPI polling method*/
+static int bcmgenet_poll(struct napi_struct *napi, int budget)
+{
+       struct bcmgenet_priv *priv = container_of(napi,
+                       struct bcmgenet_priv, napi);
+       unsigned int work_done;
+
+       /* tx reclaim */
+       bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+
+       work_done = bcmgenet_desc_rx(priv, budget);
+
+       /* Advance our consumer index */
+       priv->rx_c_index += work_done;
+       priv->rx_c_index &= DMA_C_INDEX_MASK;
+       bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+                               priv->rx_c_index, RDMA_CONS_INDEX);
+       if (work_done < budget) {
+               napi_complete(napi);
+               bcmgenet_intrl2_0_writel(priv,
+                       UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+       }
+
+       return work_done;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+       struct bcmgenet_priv *priv = container_of(
+                       work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+       netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+       /* Link UP/DOWN event */
+       if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+               (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+               phy_mac_interrupt(priv->phydev,
+                       priv->irq0_stat & UMAC_IRQ_LINK_UP);
+               priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+       }
+}
+
+/* bcmgenet_isr1: interrupt handler for ring buffer. */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+       struct bcmgenet_priv *priv = dev_id;
+       unsigned int index;
+
+       /* Save irq status for bottom-half processing. */
+       priv->irq1_stat =
+               bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+               ~priv->int1_mask;
+       /* clear interrupts */
+       bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+       netif_dbg(priv, intr, priv->dev,
+               "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+       /* Check the per-ring MBDONE interrupts:
+        * a packet is done, reclaim the descriptors.
+        */
+       if (priv->irq1_stat & 0x0000ffff) {
+               for (index = 0; index < 16; index++) {
+                       if (priv->irq1_stat & (1 << index))
+                               bcmgenet_tx_reclaim(priv->dev,
+                                               &priv->tx_rings[index]);
+               }
+       }
+       return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: Handle various interrupts. */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+       struct bcmgenet_priv *priv = dev_id;
+
+       /* Save irq status for bottom-half processing. */
+       priv->irq0_stat =
+               bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+               ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+       /* clear interrupts */
+       bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+       netif_dbg(priv, intr, priv->dev,
+               "IRQ=0x%x\n", priv->irq0_stat);
+
+       if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
+               /* We use NAPI (software interrupt throttling) if
+                * Rx Descriptor throttling is not used.
+                * Disable the interrupt; it will be re-enabled in the
+                * poll method.
+                */
+               if (likely(napi_schedule_prep(&priv->napi))) {
+                       bcmgenet_intrl2_0_writel(priv,
+                               UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+                       __napi_schedule(&priv->napi);
+               }
+       }
+       if (priv->irq0_stat &
+                       (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+               /* Tx reclaim */
+               bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+       }
+       if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+                               UMAC_IRQ_PHY_DET_F |
+                               UMAC_IRQ_LINK_UP |
+                               UMAC_IRQ_LINK_DOWN |
+                               UMAC_IRQ_HFB_SM |
+                               UMAC_IRQ_HFB_MM |
+                               UMAC_IRQ_MPD_R)) {
+               /* all other interested interrupts handled in bottom half */
+               schedule_work(&priv->bcmgenet_irq_work);
+       }
+
+       if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+               priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+               priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+               wake_up(&priv->wq);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
+{
+       u32 reg;
+
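+       /* Pulse BIT(1) of the RBUF control register, with a short settling
+        * delay, to reset the UniMAC block.
+        */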
+       reg = bcmgenet_rbuf_ctrl_get(priv);
+       reg |= BIT(1);
+       bcmgenet_rbuf_ctrl_set(priv, reg);
+       udelay(10);
+
+       reg &= ~BIT(1);
+       bcmgenet_rbuf_ctrl_set(priv, reg);
+       udelay(10);
+}
+
+static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+                                 unsigned char *addr)
+{
+       bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+                       (addr[2] << 8) | addr[3], UMAC_MAC0);
+       bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
+{
+       int ret;
+
+       /* From WOL-enabled suspend, switch to regular clock */
+       clk_disable(priv->clk_wol);
+       /* init umac registers to synchronize s/w with h/w */
+       ret = init_umac(priv);
+       if (ret)
+               return ret;
+
+       phy_init_hw(priv->phydev);
+       /* Speed settings must be restored */
+       bcmgenet_mii_config(priv->dev);
+
+       return 0;
+}
+
+/* Returns a reusable dma control register value */
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+{
+       u32 reg;
+       u32 dma_ctrl;
+
+       /* disable DMA */
+       dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg &= ~dma_ctrl;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg &= ~dma_ctrl;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+       udelay(10);
+       bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+       return dma_ctrl;
+}
+
+static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
+{
+       u32 reg;
+
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg |= dma_ctrl;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg |= dma_ctrl;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static int bcmgenet_open(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       unsigned long dma_ctrl;
+       u32 reg;
+       int ret;
+
+       netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
+
+       /* Turn on the clock */
+       if (!IS_ERR(priv->clk))
+               clk_prepare_enable(priv->clk);
+
+       /* take MAC out of reset */
+       bcmgenet_umac_reset(priv);
+
+       ret = init_umac(priv);
+       if (ret)
+               goto err_clk_disable;
+
+       /* disable ethernet MAC while updating its registers */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~(CMD_TX_EN | CMD_RX_EN);
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+       if (priv->wol_enabled) {
+               ret = bcmgenet_wol_resume(priv);
+               if (ret)
+                       return ret;
+       }
+
+       if (phy_is_internal(priv->phydev)) {
+               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+               reg |= EXT_ENERGY_DET_MASK;
+               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       }
+
+       /* Disable RX/TX DMA and flush TX queues */
+       dma_ctrl = bcmgenet_dma_disable(priv);
+
+       /* Reinitialize TDMA and RDMA and SW housekeeping */
+       ret = bcmgenet_init_dma(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize DMA\n");
+               goto err_fini_dma;
+       }
+
+       /* Always enable ring 16 - descriptor ring */
+       bcmgenet_enable_dma(priv, dma_ctrl);
+
+       ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
+                       dev->name, priv);
+       if (ret < 0) {
+               netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
+               goto err_fini_dma;
+       }
+
+       ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
+                               dev->name, priv);
+       if (ret < 0) {
+               netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
+               goto err_irq0;
+       }
+
+       /* Start the network engine */
+       napi_enable(&priv->napi);
+
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg |= (CMD_TX_EN | CMD_RX_EN);
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       /* Make sure we reflect the value of CRC_CMD_FWD */
+       priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+       device_set_wakeup_capable(&dev->dev, 1);
+
+       if (phy_is_internal(priv->phydev))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+       netif_tx_start_all_queues(dev);
+
+       phy_start(priv->phydev);
+
+       return 0;
+
+err_irq0:
+       free_irq(priv->irq0, dev);
+err_fini_dma:
+       bcmgenet_fini_dma(priv);
+err_clk_disable:
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+       return ret;
+}
+
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+       int ret = 0;
+       int timeout = 0;
+       u32 reg;
+
+       /* Disable TDMA to stop adding more frames to the TX DMA */
+       reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+       /* Check TDMA status register to confirm TDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout > DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev,
+                       "Timed out while disabling TX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       /* Wait 10ms for packet drain in both tx and rx dma */
+       usleep_range(10000, 20000);
+
+       /* Disable RDMA */
+       reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+       reg &= ~DMA_EN;
+       bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+       timeout = 0;
+       /* Check RDMA status register to confirm RDMA is disabled */
+       while (timeout++ < DMA_TIMEOUT_VAL) {
+               reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+               if (reg & DMA_DISABLED)
+                       break;
+
+               udelay(1);
+       }
+
+       if (timeout > DMA_TIMEOUT_VAL) {
+               netdev_warn(priv->dev,
+                       "Timed out while disabling RX DMA\n");
+               ret = -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
+static int bcmgenet_close(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int ret;
+       u32 reg;
+
+       netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+       phy_stop(priv->phydev);
+
+       /* Disable MAC receive */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       netif_tx_stop_all_queues(dev);
+
+       ret = bcmgenet_dma_teardown(priv);
+       if (ret)
+               return ret;
+
+       /* Disable MAC transmit. TX DMA must be disabled before this. */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_TX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       napi_disable(&priv->napi);
+
+       /* tx reclaim */
+       bcmgenet_tx_reclaim_all(dev);
+       bcmgenet_fini_dma(priv);
+
+       free_irq(priv->irq0, priv);
+       free_irq(priv->irq1, priv);
+
+       /* Wait for pending work items to complete - we are stopping
+        * the clock now. Since interrupts are disabled, no new work
+        * will be scheduled.
+        */
+       cancel_work_sync(&priv->bcmgenet_irq_work);
+
+       if (phy_is_internal(priv->phydev))
+               bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+       if (priv->wol_enabled)
+               clk_enable(priv->clk_wol);
+
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+
+       return 0;
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+       dev->trans_start = jiffies;
+
+       dev->stats.tx_errors++;
+
+       netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT   16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+                                        unsigned char *addr,
+                                        int *i,
+                                        int *mc)
+{
+       u32 reg;
+
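+       /* Each filter entry uses two MDF address registers (upper 16 bits
+        * and lower 32 bits of the MAC address) plus one enable bit in
+        * UMAC_MDF_CTRL.
+        */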
+       bcmgenet_umac_writel(priv,
+                       addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
+       bcmgenet_umac_writel(priv,
+                       addr[2] << 24 | addr[3] << 16 |
+                       addr[4] << 8 | addr[5],
+                       UMAC_MDF_ADDR + ((*i + 1) * 4));
+       reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+       reg |= (1 << (MAX_MC_COUNT - *mc));
+       bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+       *i += 2;
+       (*mc)++;
+}
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct netdev_hw_addr *ha;
+       int i, mc;
+       u32 reg;
+
+       netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+       /* Promiscuous mode */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (dev->flags & IFF_PROMISC) {
+               reg |= CMD_PROMISC;
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+               bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+               return;
+       } else {
+               reg &= ~CMD_PROMISC;
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       }
+
+       /* UniMac doesn't support ALLMULTI */
+       if (dev->flags & IFF_ALLMULTI) {
+               netdev_warn(dev, "ALLMULTI is not supported\n");
+               return;
+       }
+
+       /* update MDF filter */
+       i = 0;
+       mc = 0;
+       /* Broadcast */
+       bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+       /* my own address.*/
+       bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+       /* Unicast list*/
+       if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+               return;
+
+       if (!netdev_uc_empty(dev))
+               netdev_for_each_uc_addr(ha, dev)
+                       bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+       /* Multicast */
+       if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+               return;
+
+       netdev_for_each_mc_addr(ha, dev)
+               bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+       struct sockaddr *addr = p;
+
+       /* Setting the MAC address at the hardware level is not possible
+        * without disabling the UniMAC RX/TX enable bits.
+        */
+       if (netif_running(dev))
+               return -EBUSY;
+
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       return 0;
+}
+
+static const struct net_device_ops bcmgenet_netdev_ops = {
+       .ndo_open               = bcmgenet_open,
+       .ndo_stop               = bcmgenet_close,
+       .ndo_start_xmit         = bcmgenet_xmit,
+       .ndo_tx_timeout         = bcmgenet_timeout,
+       .ndo_set_rx_mode        = bcmgenet_set_rx_mode,
+       .ndo_set_mac_address    = bcmgenet_set_mac_addr,
+       .ndo_do_ioctl           = bcmgenet_ioctl,
+       .ndo_set_features       = bcmgenet_set_features,
+};
+
+/* Array of GENET hardware parameters/characteristics */
+static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
+       [GENET_V1] = {
+               .tx_queues = 0,
+               .rx_queues = 0,
+               .bds_cnt = 0,
+               .bp_in_en_shift = 16,
+               .bp_in_mask = 0xffff,
+               .hfb_filter_cnt = 16,
+               .qtag_mask = 0x1F,
+               .hfb_offset = 0x1000,
+               .rdma_offset = 0x2000,
+               .tdma_offset = 0x3000,
+               .words_per_bd = 2,
+       },
+       [GENET_V2] = {
+               .tx_queues = 4,
+               .rx_queues = 4,
+               .bds_cnt = 32,
+               .bp_in_en_shift = 16,
+               .bp_in_mask = 0xffff,
+               .hfb_filter_cnt = 16,
+               .qtag_mask = 0x1F,
+               .tbuf_offset = 0x0600,
+               .hfb_offset = 0x1000,
+               .hfb_reg_offset = 0x2000,
+               .rdma_offset = 0x3000,
+               .tdma_offset = 0x4000,
+               .words_per_bd = 2,
+               .flags = GENET_HAS_EXT,
+       },
+       [GENET_V3] = {
+               .tx_queues = 4,
+               .rx_queues = 4,
+               .bds_cnt = 32,
+               .bp_in_en_shift = 17,
+               .bp_in_mask = 0x1ffff,
+               .hfb_filter_cnt = 48,
+               .qtag_mask = 0x3F,
+               .tbuf_offset = 0x0600,
+               .hfb_offset = 0x8000,
+               .hfb_reg_offset = 0xfc00,
+               .rdma_offset = 0x10000,
+               .tdma_offset = 0x11000,
+               .words_per_bd = 2,
+               .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+       },
+       [GENET_V4] = {
+               .tx_queues = 4,
+               .rx_queues = 4,
+               .bds_cnt = 32,
+               .bp_in_en_shift = 17,
+               .bp_in_mask = 0x1ffff,
+               .hfb_filter_cnt = 48,
+               .qtag_mask = 0x3F,
+               .tbuf_offset = 0x0600,
+               .hfb_offset = 0x8000,
+               .hfb_reg_offset = 0xfc00,
+               .rdma_offset = 0x2000,
+               .tdma_offset = 0x4000,
+               .words_per_bd = 3,
+               .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+       },
+};
+
+/* Infer hardware parameters from the detected GENET version */
+static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+{
+       struct bcmgenet_hw_params *params;
+       u32 reg;
+       u8 major;
+
+       if (GENET_IS_V4(priv)) {
+               bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+               genet_dma_ring_regs = genet_dma_ring_regs_v4;
+               priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+               priv->version = GENET_V4;
+       } else if (GENET_IS_V3(priv)) {
+               bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+               genet_dma_ring_regs = genet_dma_ring_regs_v123;
+               priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+               priv->version = GENET_V3;
+       } else if (GENET_IS_V2(priv)) {
+               bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
+               genet_dma_ring_regs = genet_dma_ring_regs_v123;
+               priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+               priv->version = GENET_V2;
+       } else if (GENET_IS_V1(priv)) {
+               bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
+               genet_dma_ring_regs = genet_dma_ring_regs_v123;
+               priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+               priv->version = GENET_V1;
+       }
+
+       /* enum genet_version starts at 1 */
+       priv->hw_params = &bcmgenet_hw_params[priv->version];
+       params = priv->hw_params;
+
+       /* Read GENET HW version */
+       reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
+       major = (reg >> 24 & 0x0f);
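+       /* The hardware major revision encodes GENET_V4 as 5 and GENET_V1
+        * as 0; remap it to match enum genet_version before the check below.
+        */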
+       if (major == 5)
+               major = 4;
+       else if (major == 0)
+               major = 1;
+       if (major != priv->version) {
+               dev_err(&priv->pdev->dev,
+                       "GENET version mismatch, got: %d, configured for: %d\n",
+                       major, priv->version);
+       }
+
+       /* Print the GENET core version */
+       dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
+               major, (reg >> 16) & 0x0f, reg & 0xffff);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       if (!(params->flags & GENET_HAS_40BITS))
+               pr_warn("GENET does not support 40-bit PA\n");
+#endif
+
+       pr_debug("Configuration for version: %d\n"
+               "TXq: %1d, RXq: %1d, BDs: %1d\n"
+               "BP << en: %2d, BP msk: 0x%05x\n"
+               "HFB count: %2d, QTAG msk: 0x%05x\n"
+               "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
+               "RDMA: 0x%05x, TDMA: 0x%05x\n"
+               "Words/BD: %d\n",
+               priv->version,
+               params->tx_queues, params->rx_queues, params->bds_cnt,
+               params->bp_in_en_shift, params->bp_in_mask,
+               params->hfb_filter_cnt, params->qtag_mask,
+               params->tbuf_offset, params->hfb_offset,
+               params->hfb_reg_offset,
+               params->rdma_offset, params->tdma_offset,
+               params->words_per_bd);
+}
+
+static const struct of_device_id bcmgenet_match[] = {
+       { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
+       { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
+       { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
+       { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+       { },
+};
+
+static int bcmgenet_probe(struct platform_device *pdev)
+{
+       struct device_node *dn = pdev->dev.of_node;
+       const struct of_device_id *of_id;
+       struct bcmgenet_priv *priv;
+       struct net_device *dev;
+       const void *macaddr;
+       struct resource *r;
+       int err = -EIO;
+
+       /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
+       dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+       if (!dev) {
+               dev_err(&pdev->dev, "can't allocate net device\n");
+               return -ENOMEM;
+       }
+
+       of_id = of_match_node(bcmgenet_match, dn);
+       if (!of_id)
+               return -EINVAL;
+
+       priv = netdev_priv(dev);
+       priv->irq0 = platform_get_irq(pdev, 0);
+       priv->irq1 = platform_get_irq(pdev, 1);
+       if (!priv->irq0 || !priv->irq1) {
+               dev_err(&pdev->dev, "can't find IRQs\n");
+               err = -EINVAL;
+               goto err;
+       }
+
+       macaddr = of_get_mac_address(dn);
+       if (!macaddr) {
+               dev_err(&pdev->dev, "can't find MAC address\n");
+               err = -EINVAL;
+               goto err;
+       }
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(priv->base)) {
+               err = PTR_ERR(priv->base);
+               goto err;
+       }
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       dev_set_drvdata(&pdev->dev, dev);
+       ether_addr_copy(dev->dev_addr, macaddr);
+       dev->watchdog_timeo = 2 * HZ;
+       SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+       dev->netdev_ops = &bcmgenet_netdev_ops;
+       netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
+
+       priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
+
+       /* Set hardware features */
+       dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+               NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+       /* Set the needed headroom to account for any possible
+        * features enabling/disabling at runtime
+        */
+       dev->needed_headroom += 64;
+
+       netdev_boot_setup_check(dev);
+
+       priv->dev = dev;
+       priv->pdev = pdev;
+       priv->version = (enum bcmgenet_version)of_id->data;
+
+       bcmgenet_set_hw_params(priv);
+
+       /* Mii wait queue */
+       init_waitqueue_head(&priv->wq);
+       /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
+       priv->rx_buf_len = RX_BUF_LENGTH;
+       INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
+
+       priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+       if (IS_ERR(priv->clk))
+               dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+
+       priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+       if (IS_ERR(priv->clk_wol))
+               dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+
+       if (!IS_ERR(priv->clk))
+               clk_prepare_enable(priv->clk);
+
+       err = reset_umac(priv);
+       if (err)
+               goto err_clk_disable;
+
+       err = bcmgenet_mii_init(dev);
+       if (err)
+               goto err_clk_disable;
+
+       /* set up the number of real queues + 1 (GENET_V1 has 0 hardware
+        * queues, just the ring 16 descriptor-based TX queue)
+        */
+       netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
+       netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+
+       err = register_netdev(dev);
+       if (err)
+               goto err_clk_disable;
+
+       /* Turn off the main clock, WOL clock is handled separately */
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+
+       return err;
+
+err_clk_disable:
+       if (!IS_ERR(priv->clk))
+               clk_disable_unprepare(priv->clk);
+err:
+       free_netdev(dev);
+       return err;
+}
+
+static int bcmgenet_remove(struct platform_device *pdev)
+{
+       struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
+
+       dev_set_drvdata(&pdev->dev, NULL);
+       unregister_netdev(priv->dev);
+       bcmgenet_mii_exit(priv->dev);
+       free_netdev(priv->dev);
+
+       return 0;
+}
+
+
+static struct platform_driver bcmgenet_driver = {
+       .probe  = bcmgenet_probe,
+       .remove = bcmgenet_remove,
+       .driver = {
+               .name   = "bcmgenet",
+               .owner  = THIS_MODULE,
+               .of_match_table = bcmgenet_match,
+       },
+};
+module_platform_driver(bcmgenet_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+MODULE_ALIAS("platform:bcmgenet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644 (file)
index 0000000..0f11710
--- /dev/null
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+*/
+#ifndef __BCMGENET_H__
+#define __BCMGENET_H__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+/* total number of Buffer Descriptors, same for Rx/Tx */
+#define TOTAL_DESC                             256
+
+/* which ring is descriptor based */
+#define DESC_INDEX                             16
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
+ * adding ENET_PAD(8) gives 1536, a multiple of 256 bytes.
+ */
+#define ENET_BRCM_TAG_LEN      6
+#define ENET_PAD               8
+#define ENET_MAX_MTU_SIZE      (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+                                ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+#define DMA_MAX_BURST_LENGTH    0x10
+
+/* misc. configuration */
+#define CLEAR_ALL_HFB                  0xFF
+#define DMA_FC_THRESH_HI               (TOTAL_DESC >> 4)
+#define DMA_FC_THRESH_LO               5
+
+/* 64B receive/transmit status block */
+struct status_64 {
+       u32     length_status;          /* length and peripheral status */
+       u32     ext_status;             /* Extended status*/
+       u32     rx_csum;                /* partial rx checksum */
+       u32     unused1[9];             /* unused */
+       u32     tx_csum_info;           /* Tx checksum info. */
+       u32     unused2[3];             /* unused */
+};
+
+/* Rx status bits */
+#define STATUS_RX_EXT_MASK             0x1FFFFF
+#define STATUS_RX_CSUM_MASK            0xFFFF
+#define STATUS_RX_CSUM_OK              0x10000
+#define STATUS_RX_CSUM_FR              0x20000
+#define STATUS_RX_PROTO_TCP            0
+#define STATUS_RX_PROTO_UDP            1
+#define STATUS_RX_PROTO_ICMP           2
+#define STATUS_RX_PROTO_OTHER          3
+#define STATUS_RX_PROTO_MASK           3
+#define STATUS_RX_PROTO_SHIFT          18
+#define STATUS_FILTER_INDEX_MASK       0xFFFF
+/* Tx status bits */
+#define STATUS_TX_CSUM_START_MASK      0X7FFF
+#define STATUS_TX_CSUM_START_SHIFT     16
+#define STATUS_TX_CSUM_PROTO_UDP       0x8000
+#define STATUS_TX_CSUM_OFFSET_MASK     0x7FFF
+#define STATUS_TX_CSUM_LV              0x80000000
+
+/* DMA Descriptor */
+#define DMA_DESC_LENGTH_STATUS 0x00    /* in bytes of data in buffer */
+#define DMA_DESC_ADDRESS_LO    0x04    /* lower bits of PA */
+#define DMA_DESC_ADDRESS_HI    0x08    /* upper 32 bits of PA, GENETv4+ */
+
+/* Rx/Tx common counter group */
+struct bcmgenet_pkt_counters {
+       u32     cnt_64;         /* RO Received/Transmitted 64 bytes packet */
+       u32     cnt_127;        /* RO Rx/Tx 65-127 bytes packet */
+       u32     cnt_255;        /* RO Rx/Tx 128-255 bytes packet */
+       u32     cnt_511;        /* RO Rx/Tx 256-511 bytes packet */
+       u32     cnt_1023;       /* RO Rx/Tx 512-1023 bytes packet */
+       u32     cnt_1518;       /* RO Rx/Tx 1024-1518 bytes packet */
+       u32     cnt_mgv;        /* RO Rx/Tx 1519-1522 good VLAN packet */
+       u32     cnt_2047;       /* RO Rx/Tx 1522-2047 bytes packet*/
+       u32     cnt_4095;       /* RO Rx/Tx 2048-4095 bytes packet*/
+       u32     cnt_9216;       /* RO Rx/Tx 4096-9216 bytes packet*/
+};
+
+/* RSV, Receive Status Vector */
+struct bcmgenet_rx_counters {
+       struct  bcmgenet_pkt_counters pkt_cnt;
+       u32     pkt;            /* RO (0x428) Received pkt count*/
+       u32     bytes;          /* RO Received byte count */
+       u32     mca;            /* RO # of Received multicast pkt */
+       u32     bca;            /* RO # of Receive broadcast pkt */
+       u32     fcs;            /* RO # of Received FCS error  */
+       u32     cf;             /* RO # of Received control frame pkt*/
+       u32     pf;             /* RO # of Received pause frame pkt */
+       u32     uo;             /* RO # of unknown op code pkt */
+       u32     aln;            /* RO # of alignment error count */
+       u32     flr;            /* RO # of frame length out of range count */
+       u32     cde;            /* RO # of code error pkt */
+       u32     fcr;            /* RO # of carrier sense error pkt */
+       u32     ovr;            /* RO # of oversize pkt*/
+       u32     jbr;            /* RO # of jabber count */
+       u32     mtue;           /* RO # of MTU error pkt*/
+       u32     pok;            /* RO # of Received good pkt */
+       u32     uc;             /* RO # of unicast pkt */
+       u32     ppp;            /* RO # of PPP pkt */
+       u32     rcrc;           /* RO (0x470),# of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcmgenet_tx_counters {
+       struct bcmgenet_pkt_counters pkt_cnt;
+       u32     pkts;           /* RO (0x4a8) Transmitted pkt */
+       u32     mca;            /* RO # of xmited multicast pkt */
+       u32     bca;            /* RO # of xmited broadcast pkt */
+       u32     pf;             /* RO # of xmited pause frame count */
+       u32     cf;             /* RO # of xmited control frame count */
+       u32     fcs;            /* RO # of xmited FCS error count */
+       u32     ovr;            /* RO # of xmited oversize pkt */
+       u32     drf;            /* RO # of xmited deferral pkt */
+       u32     edf;            /* RO # of xmited Excessive deferral pkt*/
+       u32     scl;            /* RO # of xmited single collision pkt */
+       u32     mcl;            /* RO # of xmited multiple collision pkt*/
+       u32     lcl;            /* RO # of xmited late collision pkt */
+       u32     ecl;            /* RO # of xmited excessive collision pkt*/
+       u32     frg;            /* RO # of xmited fragments pkt*/
+       u32     ncl;            /* RO # of xmited total collision count */
+       u32     jbr;            /* RO # of xmited jabber count*/
+       u32     bytes;          /* RO # of xmited byte count */
+       u32     pok;            /* RO # of xmited good pkt */
+       u32     uc;             /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcmgenet_mib_counters {
+       struct bcmgenet_rx_counters rx;
+       struct bcmgenet_tx_counters tx;
+       u32     rx_runt_cnt;
+       u32     rx_runt_fcs;
+       u32     rx_runt_fcs_align;
+       u32     rx_runt_bytes;
+       u32     rbuf_ovflow_cnt;
+       u32     rbuf_err_cnt;
+       u32     mdf_err_cnt;
+};
+
+#define UMAC_HD_BKP_CTRL               0x004
+#define         HD_FC_EN                       (1 << 0)
+#define  HD_FC_BKOFF_OK                        (1 << 1)
+#define  IPG_CONFIG_RX_SHIFT           2
+#define  IPG_CONFIG_RX_MASK            0x1F
+
+#define UMAC_CMD                       0x008
+#define  CMD_TX_EN                     (1 << 0)
+#define  CMD_RX_EN                     (1 << 1)
+#define  UMAC_SPEED_10                 0
+#define  UMAC_SPEED_100                        1
+#define  UMAC_SPEED_1000               2
+#define  UMAC_SPEED_2500               3
+#define  CMD_SPEED_SHIFT               2
+#define  CMD_SPEED_MASK                        3
+#define  CMD_PROMISC                   (1 << 4)
+#define  CMD_PAD_EN                    (1 << 5)
+#define  CMD_CRC_FWD                   (1 << 6)
+#define  CMD_PAUSE_FWD                 (1 << 7)
+#define  CMD_RX_PAUSE_IGNORE           (1 << 8)
+#define  CMD_TX_ADDR_INS               (1 << 9)
+#define  CMD_HD_EN                     (1 << 10)
+#define  CMD_SW_RESET                  (1 << 13)
+#define  CMD_LCL_LOOP_EN               (1 << 15)
+#define  CMD_AUTO_CONFIG               (1 << 22)
+#define  CMD_CNTL_FRM_EN               (1 << 23)
+#define  CMD_NO_LEN_CHK                        (1 << 24)
+#define  CMD_RMT_LOOP_EN               (1 << 25)
+#define  CMD_PRBL_EN                   (1 << 27)
+#define  CMD_TX_PAUSE_IGNORE           (1 << 28)
+#define  CMD_TX_RX_EN                  (1 << 29)
+#define  CMD_RUNT_FILTER_DIS           (1 << 30)
+
+#define UMAC_MAC0                      0x00C
+#define UMAC_MAC1                      0x010
+#define UMAC_MAX_FRAME_LEN             0x014
+
+#define UMAC_TX_FLUSH                  0x334
+
+#define UMAC_MIB_START                 0x400
+
+#define UMAC_MDIO_CMD                  0x614
+#define  MDIO_START_BUSY               (1 << 29)
+#define  MDIO_READ_FAIL                        (1 << 28)
+#define  MDIO_RD                       (2 << 26)
+#define  MDIO_WR                       (1 << 26)
+#define  MDIO_PMD_SHIFT                        21
+#define  MDIO_PMD_MASK                 0x1F
+#define  MDIO_REG_SHIFT                        16
+#define  MDIO_REG_MASK                 0x1F
+
+#define UMAC_RBUF_OVFL_CNT             0x61C
+
+#define UMAC_MPD_CTRL                  0x620
+#define  MPD_EN                                (1 << 0)
+#define  MPD_PW_EN                     (1 << 27)
+#define  MPD_MSEQ_LEN_SHIFT            16
+#define  MPD_MSEQ_LEN_MASK             0xFF
+
+#define UMAC_MPD_PW_MS                 0x624
+#define UMAC_MPD_PW_LS                 0x628
+#define UMAC_RBUF_ERR_CNT              0x634
+#define UMAC_MDF_ERR_CNT               0x638
+#define UMAC_MDF_CTRL                  0x650
+#define UMAC_MDF_ADDR                  0x654
+#define UMAC_MIB_CTRL                  0x580
+#define  MIB_RESET_RX                  (1 << 0)
+#define  MIB_RESET_RUNT                        (1 << 1)
+#define  MIB_RESET_TX                  (1 << 2)
+
+#define RBUF_CTRL                      0x00
+#define  RBUF_64B_EN                   (1 << 0)
+#define  RBUF_ALIGN_2B                 (1 << 1)
+#define  RBUF_BAD_DIS                  (1 << 2)
+
+#define RBUF_STATUS                    0x0C
+#define  RBUF_STATUS_WOL               (1 << 0)
+#define  RBUF_STATUS_MPD_INTR_ACTIVE   (1 << 1)
+#define  RBUF_STATUS_ACPI_INTR_ACTIVE  (1 << 2)
+
+#define RBUF_CHK_CTRL                  0x14
+#define  RBUF_RXCHK_EN                 (1 << 0)
+#define  RBUF_SKIP_FCS                 (1 << 4)
+
+#define RBUF_TBUF_SIZE_CTRL            0xb4
+
+#define RBUF_HFB_CTRL_V1               0x38
+#define  RBUF_HFB_FILTER_EN_SHIFT      16
+#define  RBUF_HFB_FILTER_EN_MASK       0xffff0000
+#define  RBUF_HFB_EN                   (1 << 0)
+#define  RBUF_HFB_256B                 (1 << 1)
+#define  RBUF_ACPI_EN                  (1 << 2)
+
+#define RBUF_HFB_LEN_V1                        0x3C
+#define  RBUF_FLTR_LEN_MASK            0xFF
+#define  RBUF_FLTR_LEN_SHIFT           8
+
+#define TBUF_CTRL                      0x00
+#define TBUF_BP_MC                     0x0C
+
+#define TBUF_CTRL_V1                   0x80
+#define TBUF_BP_MC_V1                  0xA0
+
+#define HFB_CTRL                       0x00
+#define HFB_FLT_ENABLE_V3PLUS          0x04
+#define HFB_FLT_LEN_V2                 0x04
+#define HFB_FLT_LEN_V3PLUS             0x1C
+
+/* uniMac intrl2 registers */
+#define INTRL2_CPU_STAT                        0x00
+#define INTRL2_CPU_SET                 0x04
+#define INTRL2_CPU_CLEAR               0x08
+#define INTRL2_CPU_MASK_STATUS         0x0C
+#define INTRL2_CPU_MASK_SET            0x10
+#define INTRL2_CPU_MASK_CLEAR          0x14
+
+/* INTRL2 instance 0 definitions */
+#define UMAC_IRQ_SCB                   (1 << 0)
+#define UMAC_IRQ_EPHY                  (1 << 1)
+#define UMAC_IRQ_PHY_DET_R             (1 << 2)
+#define UMAC_IRQ_PHY_DET_F             (1 << 3)
+#define UMAC_IRQ_LINK_UP               (1 << 4)
+#define UMAC_IRQ_LINK_DOWN             (1 << 5)
+#define UMAC_IRQ_UMAC                  (1 << 6)
+#define UMAC_IRQ_UMAC_TSV              (1 << 7)
+#define UMAC_IRQ_TBUF_UNDERRUN         (1 << 8)
+#define UMAC_IRQ_RBUF_OVERFLOW         (1 << 9)
+#define UMAC_IRQ_HFB_SM                        (1 << 10)
+#define UMAC_IRQ_HFB_MM                        (1 << 11)
+#define UMAC_IRQ_MPD_R                 (1 << 12)
+#define UMAC_IRQ_RXDMA_MBDONE          (1 << 13)
+#define UMAC_IRQ_RXDMA_PDONE           (1 << 14)
+#define UMAC_IRQ_RXDMA_BDONE           (1 << 15)
+#define UMAC_IRQ_TXDMA_MBDONE          (1 << 16)
+#define UMAC_IRQ_TXDMA_PDONE           (1 << 17)
+#define UMAC_IRQ_TXDMA_BDONE           (1 << 18)
+/* Only valid for GENETv3+ */
+#define UMAC_IRQ_MDIO_DONE             (1 << 23)
+#define UMAC_IRQ_MDIO_ERROR            (1 << 24)
+
+/* Register block offsets */
+#define GENET_SYS_OFF                  0x0000
+#define GENET_GR_BRIDGE_OFF            0x0040
+#define GENET_EXT_OFF                  0x0080
+#define GENET_INTRL2_0_OFF             0x0200
+#define GENET_INTRL2_1_OFF             0x0240
+#define GENET_RBUF_OFF                 0x0300
+#define GENET_UMAC_OFF                 0x0800
+
+/* SYS block offsets and register definitions */
+#define SYS_REV_CTRL                   0x00
+#define SYS_PORT_CTRL                  0x04
+#define  PORT_MODE_INT_EPHY            0
+#define  PORT_MODE_INT_GPHY            1
+#define  PORT_MODE_EXT_EPHY            2
+#define  PORT_MODE_EXT_GPHY            3
+#define  PORT_MODE_EXT_RVMII_25                (4 | BIT(4))
+#define  PORT_MODE_EXT_RVMII_50                4
+#define  LED_ACT_SOURCE_MAC            (1 << 9)
+
+#define SYS_RBUF_FLUSH_CTRL            0x08
+#define SYS_TBUF_FLUSH_CTRL            0x0C
+#define RBUF_FLUSH_CTRL_V1             0x04
+
+/* Ext block register offsets and definitions */
+#define EXT_EXT_PWR_MGMT               0x00
+#define  EXT_PWR_DOWN_BIAS             (1 << 0)
+#define  EXT_PWR_DOWN_DLL              (1 << 1)
+#define  EXT_PWR_DOWN_PHY              (1 << 2)
+#define  EXT_PWR_DN_EN_LD              (1 << 3)
+#define  EXT_ENERGY_DET                        (1 << 4)
+#define  EXT_IDDQ_FROM_PHY             (1 << 5)
+#define  EXT_PHY_RESET                 (1 << 8)
+#define  EXT_ENERGY_DET_MASK           (1 << 12)
+
+#define EXT_RGMII_OOB_CTRL             0x0C
+#define  RGMII_MODE_EN                 (1 << 0)
+#define  RGMII_LINK                    (1 << 4)
+#define  OOB_DISABLE                   (1 << 5)
+#define  ID_MODE_DIS                   (1 << 16)
+
+#define EXT_GPHY_CTRL                  0x1C
+#define  EXT_CFG_IDDQ_BIAS             (1 << 0)
+#define  EXT_CFG_PWR_DOWN              (1 << 1)
+#define  EXT_GPHY_RESET                        (1 << 5)
+
+/* DMA rings size */
+#define DMA_RING_SIZE                  (0x40)
+#define DMA_RINGS_SIZE                 (DMA_RING_SIZE * (DESC_INDEX + 1))
+
+/* DMA registers common definitions */
+#define DMA_RW_POINTER_MASK            0x1FF
+#define DMA_P_INDEX_DISCARD_CNT_MASK   0xFFFF
+#define DMA_P_INDEX_DISCARD_CNT_SHIFT  16
+#define DMA_BUFFER_DONE_CNT_MASK       0xFFFF
+#define DMA_BUFFER_DONE_CNT_SHIFT      16
+#define DMA_P_INDEX_MASK               0xFFFF
+#define DMA_C_INDEX_MASK               0xFFFF
+
+/* DMA ring size register */
+#define DMA_RING_SIZE_MASK             0xFFFF
+#define DMA_RING_SIZE_SHIFT            16
+#define DMA_RING_BUFFER_SIZE_MASK      0xFFFF
+
+/* DMA interrupt threshold register */
+#define DMA_INTR_THRESHOLD_MASK                0x00FF
+
+/* DMA XON/XOFF register */
+#define DMA_XON_THREHOLD_MASK          0xFFFF
+#define DMA_XOFF_THRESHOLD_MASK                0xFFFF
+#define DMA_XOFF_THRESHOLD_SHIFT       16
+
+/* DMA flow period register */
+#define DMA_FLOW_PERIOD_MASK           0xFFFF
+#define DMA_MAX_PKT_SIZE_MASK          0xFFFF
+#define DMA_MAX_PKT_SIZE_SHIFT         16
+
+
+/* DMA control register */
+#define DMA_EN                         (1 << 0)
+#define DMA_RING_BUF_EN_SHIFT          0x01
+#define DMA_RING_BUF_EN_MASK           0xFFFF
+#define DMA_TSB_SWAP_EN                        (1 << 20)
+
+/* DMA status register */
+#define DMA_DISABLED                   (1 << 0)
+#define DMA_DESC_RAM_INIT_BUSY         (1 << 1)
+
+/* DMA SCB burst size register */
+#define DMA_SCB_BURST_SIZE_MASK                0x1F
+
+/* DMA activity vector register */
+#define DMA_ACTIVITY_VECTOR_MASK       0x1FFFF
+
+/* DMA backpressure mask register */
+#define DMA_BACKPRESSURE_MASK          0x1FFFF
+#define DMA_PFC_ENABLE                 (1 << 31)
+
+/* DMA backpressure status register */
+#define DMA_BACKPRESSURE_STATUS_MASK   0x1FFFF
+
+/* DMA override register */
+#define DMA_LITTLE_ENDIAN_MODE         (1 << 0)
+#define DMA_REGISTER_MODE              (1 << 1)
+
+/* DMA timeout register */
+#define DMA_TIMEOUT_MASK               0xFFFF
+#define DMA_TIMEOUT_VAL                        5000    /* microseconds */
+
+/* TDMA rate limiting control register */
+#define DMA_RATE_LIMIT_EN_MASK         0xFFFF
+
+/* TDMA arbitration control register */
+#define DMA_ARBITER_MODE_MASK          0x03
+#define DMA_RING_BUF_PRIORITY_MASK     0x1F
+#define DMA_RING_BUF_PRIORITY_SHIFT    5
+#define DMA_RATE_ADJ_MASK              0xFF
+
+/* Tx/Rx Dma Descriptor common bits*/
+#define DMA_BUFLENGTH_MASK             0x0fff
+#define DMA_BUFLENGTH_SHIFT            16
+#define DMA_OWN                                0x8000
+#define DMA_EOP                                0x4000
+#define DMA_SOP                                0x2000
+#define DMA_WRAP                       0x1000
+/* Tx specific Dma descriptor bits */
+#define DMA_TX_UNDERRUN                        0x0200
+#define DMA_TX_APPEND_CRC              0x0040
+#define DMA_TX_OW_CRC                  0x0020
+#define DMA_TX_DO_CSUM                 0x0010
+#define DMA_TX_QTAG_SHIFT              7
+
+/* Rx Specific Dma descriptor bits */
+#define DMA_RX_CHK_V3PLUS              0x8000
+#define DMA_RX_CHK_V12                 0x1000
+#define DMA_RX_BRDCAST                 0x0040
+#define DMA_RX_MULT                    0x0020
+#define DMA_RX_LG                      0x0010
+#define DMA_RX_NO                      0x0008
+#define DMA_RX_RXER                    0x0004
+#define DMA_RX_CRC_ERROR               0x0002
+#define DMA_RX_OV                      0x0001
+#define DMA_RX_FI_MASK                 0x001F
+#define DMA_RX_FI_SHIFT                        0x0007
+#define DMA_DESC_ALLOC_MASK            0x00FF
+
+#define DMA_ARBITER_RR                 0x00
+#define DMA_ARBITER_WRR                        0x01
+#define DMA_ARBITER_SP                 0x02
+
+struct enet_cb {
+       struct sk_buff      *skb;
+       void __iomem *bd_addr;
+       DEFINE_DMA_UNMAP_ADDR(dma_addr);
+       DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* power management mode */
+enum bcmgenet_power_mode {
+       GENET_POWER_CABLE_SENSE = 0,
+       GENET_POWER_PASSIVE,
+};
+
+struct bcmgenet_priv;
+
+/* We support both runtime GENET detection and compile-time selection
+ * of the version to optimize code paths for a given hardware
+ */
+enum bcmgenet_version {
+       GENET_V1 = 1,
+       GENET_V2,
+       GENET_V3,
+       GENET_V4
+};
+
+#define GENET_IS_V1(p) ((p)->version == GENET_V1)
+#define GENET_IS_V2(p) ((p)->version == GENET_V2)
+#define GENET_IS_V3(p) ((p)->version == GENET_V3)
+#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+
+/* Hardware flags */
+#define GENET_HAS_40BITS       (1 << 0)
+#define GENET_HAS_EXT          (1 << 1)
+#define GENET_HAS_MDIO_INTR    (1 << 2)
+
+/* BCMGENET hardware parameters, keep this structure nicely aligned
+ * since it is going to be used in hot paths
+ */
+struct bcmgenet_hw_params {
+       u8              tx_queues;
+       u8              rx_queues;
+       u8              bds_cnt;
+       u8              bp_in_en_shift;
+       u32             bp_in_mask;
+       u8              hfb_filter_cnt;
+       u8              qtag_mask;
+       u16             tbuf_offset;
+       u32             hfb_offset;
+       u32             hfb_reg_offset;
+       u32             rdma_offset;
+       u32             tdma_offset;
+       u32             words_per_bd;
+       u32             flags;
+};
+
+struct bcmgenet_tx_ring {
+       spinlock_t      lock;           /* ring lock */
+       unsigned int    index;          /* ring index */
+       unsigned int    queue;          /* queue index */
+       struct enet_cb  *cbs;           /* tx ring buffer control block*/
+       unsigned int    size;           /* size of each tx ring */
+       unsigned int    c_index;        /* last consumer index of each ring*/
+       unsigned int    free_bds;       /* # of free bds for each ring */
+       unsigned int    write_ptr;      /* Tx ring write pointer SW copy */
+       unsigned int    prod_index;     /* Tx ring producer index SW copy */
+       unsigned int    cb_ptr;         /* Tx ring initial CB ptr */
+       unsigned int    end_ptr;        /* Tx ring end CB ptr */
+       void (*int_enable)(struct bcmgenet_priv *priv,
+                               struct bcmgenet_tx_ring *);
+       void (*int_disable)(struct bcmgenet_priv *priv,
+                               struct bcmgenet_tx_ring *);
+};
+
+/* device context */
+struct bcmgenet_priv {
+       void __iomem *base;
+       enum bcmgenet_version version;
+       struct net_device *dev;
+       u32 int0_mask;
+       u32 int1_mask;
+
+       /* NAPI for descriptor based rx */
+       struct napi_struct napi ____cacheline_aligned;
+
+       /* transmit variables */
+       void __iomem *tx_bds;
+       struct enet_cb *tx_cbs;
+       unsigned int num_tx_bds;
+
+       struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+
+       /* receive variables */
+       void __iomem *rx_bds;
+       void __iomem *rx_bd_assign_ptr;
+       int rx_bd_assign_index;
+       struct enet_cb *rx_cbs;
+       unsigned int num_rx_bds;
+       unsigned int rx_buf_len;
+       unsigned int rx_read_ptr;
+       unsigned int rx_c_index;
+
+       /* other misc variables */
+       struct bcmgenet_hw_params *hw_params;
+
+       /* MDIO bus variables */
+       wait_queue_head_t wq;
+       struct phy_device *phydev;
+       struct device_node *phy_dn;
+       struct mii_bus *mii_bus;
+
+       /* PHY device variables */
+       int old_duplex;
+       int old_link;
+       int old_pause;
+       phy_interface_t phy_interface;
+       int phy_addr;
+       int ext_phy;
+
+       /* Interrupt variables */
+       struct work_struct bcmgenet_irq_work;
+       int irq0;
+       int irq1;
+       unsigned int irq0_stat;
+       unsigned int irq1_stat;
+
+       /* HW descriptors/checksum variables */
+       bool desc_64b_en;
+       bool desc_rxchk_en;
+       bool crc_fwd_en;
+
+       unsigned int dma_rx_chk_bit;
+
+       u32 msg_enable;
+
+       struct clk *clk;
+       struct platform_device *pdev;
+
+       /* WOL */
+       unsigned long wol_enabled;
+       struct clk *clk_wol;
+       u32 wolopts;
+
+       struct bcmgenet_mib_counters mib;
+};
+
+#define GENET_IO_MACRO(name, offset)                                   \
+static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv,  \
+                                       u32 off)                        \
+{                                                                      \
+       return __raw_readl(priv->base + offset + off);                  \
+}                                                                      \
+static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv,        \
+                                       u32 val, u32 off)               \
+{                                                                      \
+       __raw_writel(val, priv->base + offset + off);                   \
+}
+
+GENET_IO_MACRO(ext, GENET_EXT_OFF);
+GENET_IO_MACRO(umac, GENET_UMAC_OFF);
+GENET_IO_MACRO(sys, GENET_SYS_OFF);
+
+/* interrupt l2 registers accessors */
+GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
+GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
+
+/* HFB register accessors  */
+GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
+
+/* GENET v2+ HFB control and filter len helpers */
+GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
+
+/* RBUF register accessors */
+GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
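
As a minimal usage sketch (illustrative only; the helper name below is hypothetical and not part of the patch), each GENET_IO_MACRO() instance above expands to a readl/writel pair that takes an offset relative to its register block, so a read-modify-write of a UniMAC register looks like this:

static void example_umac_enable(struct bcmgenet_priv *priv)
{
	u32 reg;

	/* set both the TX and RX enable bits in the UniMAC command register */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	reg |= CMD_TX_EN | CMD_RX_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
}
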
+
+/* MDIO routines */
+int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev);
+void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
+
+#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644 (file)
index 0000000..4608673
--- /dev/null
@@ -0,0 +1,464 @@
+/*
+ * Broadcom GENET MDIO routines
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/brcmphy.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+#include "bcmgenet.h"
+
+/* read a value from the MII */
+static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+       int ret;
+       struct net_device *dev = bus->priv;
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
+                       (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+       /* Start MDIO transaction*/
+       reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+       reg |= MDIO_START_BUSY;
+       bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+       wait_event_timeout(priv->wq,
+                       !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+                               & MDIO_START_BUSY),
+                       HZ / 100);
+       ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+
+       if (ret & MDIO_READ_FAIL)
+               return -EIO;
+
+       return ret & 0xffff;
+}
+
+/* write a value to the MII */
+static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
+                       int location, u16 val)
+{
+       struct net_device *dev = bus->priv;
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
+                       (location << MDIO_REG_SHIFT) | (0xffff & val)),
+                       UMAC_MDIO_CMD);
+       reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+       reg |= MDIO_START_BUSY;
+       bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+       wait_event_timeout(priv->wq,
+                       !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+                               MDIO_START_BUSY),
+                       HZ / 100);
+
+       return 0;
+}
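
Neither callback is invoked directly by this driver; phylib calls them through the registered mii_bus. A hedged sketch of what a raw access looks like from that side (mdiobus_read() and the MII_BMSR/BMSR_LSTATUS definitions come from the generic MDIO/MII headers; the call site itself is illustrative):

	int bmsr;

	/* polls the PHY's basic status register via bcmgenet_mii_read() */
	bmsr = mdiobus_read(priv->mii_bus, priv->phydev->addr, MII_BMSR);
	if (bmsr >= 0 && (bmsr & BMSR_LSTATUS))
		netdev_info(priv->dev, "PHY reports link up\n");
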
+
+/* Set up the netdev link state when the PHY link status changes, and
+ * update the UMAC and RGMII blocks when the link comes up
+ */
+static void bcmgenet_mii_setup(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       u32 reg, cmd_bits = 0;
+       unsigned int status_changed = 0;
+
+       if (priv->old_link != phydev->link) {
+               status_changed = 1;
+               priv->old_link = phydev->link;
+       }
+
+       if (phydev->link) {
+               /* Program the UMAC and RGMII blocks based on the established
+                * link speed, duplex, and pause. The speed set in umac->cmd
+                * tells the RGMII block which clock (25MHz for 100Mbps,
+                * 125MHz for 1Gbps) to use for transmit; the receive clock
+                * is provided by the PHY.
+                */
+               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+               reg &= ~OOB_DISABLE;
+               reg |= RGMII_LINK;
+               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+               /* speed */
+               if (phydev->speed == SPEED_1000)
+                       cmd_bits = UMAC_SPEED_1000;
+               else if (phydev->speed == SPEED_100)
+                       cmd_bits = UMAC_SPEED_100;
+               else
+                       cmd_bits = UMAC_SPEED_10;
+               cmd_bits <<= CMD_SPEED_SHIFT;
+
+               if (priv->old_duplex != phydev->duplex) {
+                       status_changed = 1;
+                       priv->old_duplex = phydev->duplex;
+               }
+
+               /* duplex */
+               if (phydev->duplex != DUPLEX_FULL)
+                       cmd_bits |= CMD_HD_EN;
+
+               if (priv->old_pause != phydev->pause) {
+                       status_changed = 1;
+                       priv->old_pause = phydev->pause;
+               }
+
+               /* pause capability */
+               if (!phydev->pause)
+                       cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+               reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+               reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+                              CMD_HD_EN |
+                              CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+               reg |= cmd_bits;
+               bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       }
+
+       if (status_changed)
+               phy_print_status(phydev);
+}
+
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       if (priv->phydev) {
+               phy_init_hw(priv->phydev);
+               phy_start_aneg(priv->phydev);
+       }
+}
+
+static void bcmgenet_ephy_power_up(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 reg = 0;
+
+       /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
+       if (!GENET_IS_V4(priv))
+               return;
+
+       reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+       reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+       reg |= EXT_GPHY_RESET;
+       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+       mdelay(2);
+
+       reg &= ~EXT_GPHY_RESET;
+       bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+       udelay(20);
+}
+
+static void bcmgenet_internal_phy_setup(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       /* Power up EPHY */
+       bcmgenet_ephy_power_up(dev);
+       /* enable APD */
+       reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+       reg |= EXT_PWR_DN_EN_LD;
+       bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       bcmgenet_mii_reset(dev);
+}
+
+static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+{
+       u32 reg;
+
+       /* Speed settings are set in bcmgenet_mii_setup() */
+       reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+       reg |= LED_ACT_SOURCE_MAC;
+       bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+}
+
+int bcmgenet_mii_config(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       struct device *kdev = &priv->pdev->dev;
+       const char *phy_name = NULL;
+       u32 id_mode_dis = 0;
+       u32 port_ctrl;
+       u32 reg;
+
+       priv->ext_phy = !phy_is_internal(priv->phydev) &&
+                       (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
+       if (phy_is_internal(priv->phydev))
+               priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
+       switch (priv->phy_interface) {
+       case PHY_INTERFACE_MODE_NA:
+       case PHY_INTERFACE_MODE_MOCA:
+               /* Irrespective of the actually configured PHY speed (100 or
+                * 1000), GENETv4 only has an internal GPHY, so we will just
+                * end up masking the Gigabit features from what we support,
+                * not switching to the EPHY
+                */
+               if (GENET_IS_V4(priv))
+                       port_ctrl = PORT_MODE_INT_GPHY;
+               else
+                       port_ctrl = PORT_MODE_INT_EPHY;
+
+               bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+               if (phy_is_internal(priv->phydev)) {
+                       phy_name = "internal PHY";
+                       bcmgenet_internal_phy_setup(dev);
+               } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+                       phy_name = "MoCA";
+                       bcmgenet_moca_phy_setup(priv);
+               }
+               break;
+
+       case PHY_INTERFACE_MODE_MII:
+               phy_name = "external MII";
+               phydev->supported &= PHY_BASIC_FEATURES;
+               bcmgenet_sys_writel(priv,
+                               PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+               break;
+
+       case PHY_INTERFACE_MODE_REVMII:
+               phy_name = "external RvMII";
+               /* of_mdiobus_register took care of reading the 'max-speed'
+                * PHY property for us, effectively limiting the PHY's
+                * supported capabilities; use that knowledge to also
+                * configure the Reverse MII interface correctly.
+                */
+               if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+                               PHY_BASIC_FEATURES)
+                       port_ctrl = PORT_MODE_EXT_RVMII_25;
+               else
+                       port_ctrl = PORT_MODE_EXT_RVMII_50;
+               bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII:
+               /* RGMII_NO_ID: TXC transitions at the same time as TXD
+                *              (requires PCB or receiver-side delay)
+                * RGMII:       Add 2ns delay on TXC (90 degree shift)
+                *
+                * ID is implicitly disabled for 100Mbps (RG)MII operation.
+                */
+               id_mode_dis = BIT(16);
+               /* fall through */
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               if (id_mode_dis)
+                       phy_name = "external RGMII (no delay)";
+               else
+                       phy_name = "external RGMII (TX delay)";
+               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+               reg |= RGMII_MODE_EN | id_mode_dis;
+               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+               bcmgenet_sys_writel(priv,
+                               PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+               break;
+       default:
+               dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
+               return -EINVAL;
+       }
+
+       dev_info(kdev, "configuring instance for %s\n", phy_name);
+
+       return 0;
+}
+
+static int bcmgenet_mii_probe(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct phy_device *phydev;
+       unsigned int phy_flags;
+       int ret;
+
+       if (priv->phydev) {
+               pr_info("PHY already attached\n");
+               return 0;
+       }
+
+       if (priv->phy_dn)
+               phydev = of_phy_connect(dev, priv->phy_dn,
+                                       bcmgenet_mii_setup, 0,
+                                       priv->phy_interface);
+       else
+               phydev = of_phy_connect_fixed_link(dev,
+                                       bcmgenet_mii_setup,
+                                       priv->phy_interface);
+
+       if (!phydev) {
+               pr_err("could not attach to PHY\n");
+               return -ENODEV;
+       }
+
+       priv->old_link = -1;
+       priv->old_duplex = -1;
+       priv->old_pause = -1;
+       priv->phydev = phydev;
+
+       /* Configure the port multiplexer based on the probed PHY device,
+        * since reading the 'max-speed' property determines the maximum
+        * supported PHY speed, which is needed for bcmgenet_mii_config()
+        * to configure things appropriately.
+        */
+       ret = bcmgenet_mii_config(dev);
+       if (ret) {
+               phy_disconnect(priv->phydev);
+               return ret;
+       }
+
+       phy_flags = PHY_BRCM_100MBPS_WAR;
+
+       /* workarounds are only needed for 100Mbps PHYs, and
+        * never on GENET V1 hardware
+        */
+       if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
+               phy_flags = 0;
+
+       phydev->dev_flags |= phy_flags;
+       phydev->advertising = phydev->supported;
+
+       /* The internal PHY has its link interrupts routed to the
+        * Ethernet MAC ISRs
+        */
+       if (phy_is_internal(priv->phydev))
+               priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
+       else
+               priv->mii_bus->irq[phydev->addr] = PHY_POLL;
+
+       pr_info("attached PHY at address %d [%s]\n",
+                       phydev->addr, phydev->drv->name);
+
+       return 0;
+}
+
+static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+{
+       struct mii_bus *bus;
+
+       if (priv->mii_bus)
+               return 0;
+
+       priv->mii_bus = mdiobus_alloc();
+       if (!priv->mii_bus) {
+               pr_err("failed to allocate\n");
+               return -ENOMEM;
+       }
+
+       bus = priv->mii_bus;
+       bus->priv = priv->dev;
+       bus->name = "bcmgenet MII bus";
+       bus->parent = &priv->pdev->dev;
+       bus->read = bcmgenet_mii_read;
+       bus->write = bcmgenet_mii_write;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
+                       priv->pdev->name, priv->pdev->id);
+
+       bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       if (!bus->irq) {
+               mdiobus_free(priv->mii_bus);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
+{
+       struct device_node *dn = priv->pdev->dev.of_node;
+       struct device *kdev = &priv->pdev->dev;
+       struct device_node *mdio_dn;
+       char *compat;
+       int ret;
+
+       compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+       if (!compat)
+               return -ENOMEM;
+
+       mdio_dn = of_find_compatible_node(dn, NULL, compat);
+       kfree(compat);
+       if (!mdio_dn) {
+               dev_err(kdev, "unable to find MDIO bus node\n");
+               return -ENODEV;
+       }
+
+       ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+       if (ret) {
+               dev_err(kdev, "failed to register MDIO bus\n");
+               return ret;
+       }
+
+       /* Fetch the PHY phandle */
+       priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+       /* Get the link mode */
+       priv->phy_interface = of_get_phy_mode(dn);
+
+       return 0;
+}
+
+int bcmgenet_mii_init(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int ret;
+
+       ret = bcmgenet_mii_alloc(priv);
+       if (ret)
+               return ret;
+
+       ret = bcmgenet_mii_of_init(priv);
+       if (ret)
+               goto out_free;
+
+       ret = bcmgenet_mii_probe(dev);
+       if (ret)
+               goto out;
+
+       return 0;
+
+out:
+       mdiobus_unregister(priv->mii_bus);
+out_free:
+       kfree(priv->mii_bus->irq);
+       mdiobus_free(priv->mii_bus);
+       return ret;
+}
+
+void bcmgenet_mii_exit(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       mdiobus_unregister(priv->mii_bus);
+       kfree(priv->mii_bus->irq);
+       mdiobus_free(priv->mii_bus);
+}
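
For orientation, a sketch of the expected call order from the core driver (the real call sites are in bcmgenet.c, which is not part of this hunk; the error label below is a placeholder):

	/* probe path: allocate the MII bus, parse the DT node, attach the PHY */
	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_free_netdev;

	/* ... later, on remove: unregister and free the MDIO bus */
	bcmgenet_mii_exit(dev);
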
index 70a225c8df5c846a4d7045f2e7649925a5d8c50c..b9f7022f4e81e13a38c31a4f53653f2c77a0f4db 100644 (file)
@@ -1401,11 +1401,6 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
        return ret;
 }
 
-static int tg3_mdio_reset(struct mii_bus *bp)
-{
-       return 0;
-}
-
 static void tg3_mdio_config_5785(struct tg3 *tp)
 {
        u32 val;
@@ -1542,7 +1537,6 @@ static int tg3_mdio_init(struct tg3 *tp)
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
-       tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
@@ -6322,6 +6316,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 1,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = tg3_ptp_adjfreq,
        .adjtime        = tg3_ptp_adjtime,
@@ -6593,7 +6588,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
                pkts_compl++;
                bytes_compl += skb->len;
 
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
 
                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
@@ -6924,7 +6919,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 
                if (len > (tp->dev->mtu + ETH_HLEN) &&
                    skb->protocol != htons(ETH_P_8021Q)) {
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        goto drop_it_no_recycle;
                }
 
@@ -7807,7 +7802,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
-                       dev_kfree_skb(new_skb);
+                       dev_kfree_skb_any(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;
@@ -7822,13 +7817,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
-                               dev_kfree_skb(new_skb);
+                               dev_kfree_skb_any(new_skb);
                                ret = -1;
                        }
                }
        }
 
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        *pskb = new_skb;
        return ret;
 }
@@ -7871,7 +7866,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
        } while (segs);
 
 tg3_tso_bug_end:
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
@@ -7923,8 +7918,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;
 
-               if (skb_header_cloned(skb) &&
-                   pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+               if (skb_cow_head(skb, 0))
                        goto drop;
 
                iph = ip_hdr(skb);
@@ -8093,7 +8087,7 @@ dma_error:
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 drop:
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
 drop_nofree:
        tp->tx_dropped++;
        return NETDEV_TX_OK;
@@ -11361,12 +11355,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
                msix_ent[i].vector = 0;
        }
 
-       rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+       rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
        if (rc < 0) {
                return false;
-       } else if (rc != 0) {
-               if (pci_enable_msix(tp->pdev, msix_ent, rc))
-                       return false;
+       } else if (rc < tp->irq_cnt) {
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
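
This conversion, and the similar ones in the hunks that follow, rely on the pci_enable_msix_range() contract: it either enables between minvec and maxvec vectors and returns how many were actually enabled, or it returns a negative errno, which removes the old retry loops. A generic sketch (variable names are illustrative):

	int nvec;

	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* could not get even minvec vectors */

	/* here minvec <= nvec <= maxvec vectors are enabled */
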
index 4ad1187e82fb463e9642fd3e73b2850cf1137a01..675550fe8ee90dfe7f2c704787d7b900107f06d4 100644 (file)
@@ -2496,12 +2496,10 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
 {
        int err;
 
-       if (skb_header_cloned(skb)) {
-               err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (err) {
-                       BNAD_UPDATE_CTR(bnad, tso_err);
-                       return err;
-               }
+       err = skb_cow_head(skb, 0);
+       if (err < 0) {
+               BNAD_UPDATE_CTR(bnad, tso_err);
+               return err;
        }
 
        /*
@@ -2669,9 +2667,11 @@ bnad_enable_msix(struct bnad *bnad)
        for (i = 0; i < bnad->msix_num; i++)
                bnad->msix_table[i].entry = i;
 
-       ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
-       if (ret > 0) {
-               /* Not enough MSI-X vectors. */
+       ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+                                   1, bnad->msix_num);
+       if (ret < 0) {
+               goto intx_mode;
+       } else if (ret < bnad->msix_num) {
                pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
                        ret, bnad->msix_num);
 
@@ -2684,18 +2684,11 @@ bnad_enable_msix(struct bnad *bnad)
                bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
                         BNAD_MAILBOX_MSIX_VECTORS;
 
-               if (bnad->msix_num > ret)
+               if (bnad->msix_num > ret) {
+                       pci_disable_msix(bnad->pcidev);
                        goto intx_mode;
-
-               /* Try once more with adjusted numbers */
-               /* If this fails, fall back to INTx */
-               ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
-                                     bnad->msix_num);
-               if (ret)
-                       goto intx_mode;
-
-       } else if (ret < 0)
-               goto intx_mode;
+               }
+       }
 
        pci_intx(bnad->pcidev, 0);
 
@@ -2850,13 +2843,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
                }
                if (unlikely((gso_size + skb_transport_offset(skb) +
                              tcp_hdrlen(skb)) >= skb->len)) {
-                       txqent->hdr.wi.opcode =
-                               __constant_htons(BNA_TXQ_WI_SEND);
+                       txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
                        txqent->hdr.wi.lso_mss = 0;
                        BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
                } else {
-                       txqent->hdr.wi.opcode =
-                               __constant_htons(BNA_TXQ_WI_SEND_LSO);
+                       txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
                        txqent->hdr.wi.lso_mss = htons(gso_size);
                }
 
@@ -2870,7 +2861,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
                        htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
                        tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
        } else  {
-               txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
+               txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
                txqent->hdr.wi.lso_mss = 0;
 
                if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
@@ -2881,11 +2872,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        u8 proto = 0;
 
-                       if (skb->protocol == __constant_htons(ETH_P_IP))
+                       if (skb->protocol == htons(ETH_P_IP))
                                proto = ip_hdr(skb)->protocol;
 #ifdef NETIF_F_IPV6_CSUM
-                       else if (skb->protocol ==
-                                __constant_htons(ETH_P_IPV6)) {
+                       else if (skb->protocol == htons(ETH_P_IPV6)) {
                                /* nexthdr may not be TCP immediately. */
                                proto = ipv6_hdr(skb)->nexthdr;
                        }
@@ -2954,17 +2944,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        /* Sanity checks for the skb */
 
        if (unlikely(skb->len <= ETH_HLEN)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
                return NETDEV_TX_OK;
        }
        if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
                return NETDEV_TX_OK;
        }
        if (unlikely(len == 0)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
                return NETDEV_TX_OK;
        }
@@ -2976,7 +2966,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
         * and the netif_tx_stop_all_queues() call.
         */
        if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
                return NETDEV_TX_OK;
        }
@@ -2989,7 +2979,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
 
        if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
                return NETDEV_TX_OK;
        }
@@ -3029,7 +3019,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        /* Program the opcode, flags, frame_len, num_vectors in WI */
        if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        txqent->hdr.wi.reserved = 0;
@@ -3055,7 +3045,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                        /* Undo the changes starting at tcb->producer_index */
                        bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
                                tcb->producer_index);
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
                        return NETDEV_TX_OK;
                }
@@ -3067,8 +3057,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                        vect_id = 0;
                        BNA_QE_INDX_INC(prod, q_depth);
                        txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
-                       txqent->hdr.wi_ext.opcode =
-                               __constant_htons(BNA_TXQ_WI_EXTENSION);
+                       txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
                        unmap = &unmap_q[prod];
                }
 
@@ -3085,7 +3074,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        if (unlikely(len != skb->len)) {
                /* Undo the changes starting at tcb->producer_index */
                bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
                return NETDEV_TX_OK;
        }
index d0c38e01e99fdc70e802b33dc977e081d1a72f5c..ca97005e24b41217849beaf4b9a578fbaf1f2027 100644 (file)
@@ -199,11 +199,6 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        return 0;
 }
 
-static int macb_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 /**
  * macb_set_tx_clk() - Set a clock to a new frequency
  * @clk                Pointer to the clock to change
@@ -375,7 +370,6 @@ int macb_mii_init(struct macb *bp)
        bp->mii_bus->name = "MACB_mii_bus";
        bp->mii_bus->read = &macb_mdio_read;
        bp->mii_bus->write = &macb_mdio_write;
-       bp->mii_bus->reset = &macb_mdio_reset;
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
@@ -1045,7 +1039,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        mapping = dma_map_single(&bp->pdev->dev, skb->data,
                                 len, DMA_TO_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                goto unlock;
        }
 
index d2a183c3a6cedeb4db48812c900afe8de49f05b4..521dfea44b837d57bc7a7297ac973cd4d8098d3e 100644 (file)
@@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
                /* Check tx error on the last segment */
                if (desc_get_tx_ls(p)) {
                        desc_get_tx_status(priv, p);
-                       dev_kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                }
 
                priv->tx_skbuff[entry] = NULL;
@@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
        len = skb_headlen(skb);
        paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(priv->device, paddr)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        priv->tx_skbuff[entry] = skb;
@@ -1169,7 +1169,7 @@ dma_err:
        desc = first;
        dma_unmap_single(priv->device, desc_get_buf_addr(desc),
                         desc_get_buf_len(desc), DMA_TO_DEVICE);
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
index 45d77334d7d9a6c1a053be81599edcbf8b74c3b7..07bbb711b7e5a716aba3e8d2e3e958e2ce8fa506 100644 (file)
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
 {
        struct msix_entry entries[SGE_QSETS + 1];
        int vectors;
-       int i, err;
+       int i;
 
        vectors = ARRAY_SIZE(entries);
        for (i = 0; i < vectors; ++i)
                entries[i].entry = i;
 
-       while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
-               vectors = err;
-
-       if (err < 0)
-               pci_disable_msix(adap->pdev);
-
-       if (!err && vectors < (adap->params.nports + 1)) {
-               pci_disable_msix(adap->pdev);
-               err = -1;
-       }
+       vectors = pci_enable_msix_range(adap->pdev, entries,
+                                       adap->params.nports + 1, vectors);
+       if (vectors < 0)
+               return vectors;
 
-       if (!err) {
-               for (i = 0; i < vectors; ++i)
-                       adap->msix_info[i].vec = entries[i].vector;
-               adap->msix_nvectors = vectors;
-       }
+       for (i = 0; i < vectors; ++i)
+               adap->msix_info[i].vec = entries[i].vector;
+       adap->msix_nvectors = vectors;
 
-       return err;
+       return 0;
 }
 
 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
index 632b318eb38a5852a77e94cb86f19e3a094ae774..8b069f96e920e4dec0e2c28cb197747ea56438c5 100644 (file)
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
                        if (need_unmap)
                                unmap_skb(d->skb, q, cidx, pdev);
                        if (d->eop) {
-                               kfree_skb(d->skb);
+                               dev_consume_skb_any(d->skb);
                                d->skb = NULL;
                        }
                }
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
                        cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
                                              V_WR_TID(q->token));
                        wr_gen2(d, gen);
-                       kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                        return;
                }
 
@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         * anything shorter than an Ethernet header.
         */
        if (unlikely(skb->len < ETH_HLEN)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index 1f4b9b30b9ed0bbaa969014e4bf6c1162d439d98..32db37709263bc14e596056bc7a9df80cc65bb62 100644 (file)
@@ -66,6 +66,7 @@ enum {
        SERNUM_LEN = 24,    /* Serial # length */
        EC_LEN     = 16,    /* E/C length */
        ID_LEN     = 16,    /* ID length */
+       PN_LEN     = 16,    /* Part Number length */
 };
 
 enum {
@@ -254,6 +255,7 @@ struct vpd_params {
        u8 ec[EC_LEN + 1];
        u8 sn[SERNUM_LEN + 1];
        u8 id[ID_LEN + 1];
+       u8 pn[PN_LEN + 1];
 };
 
 struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
        unsigned char bypass;
 
        unsigned int ofldq_wr_cred;
+       bool ulptx_memwrite_dsgl;          /* use of T5 DSGL allowed */
 };
 
 #include "t4fw_api.h"
@@ -497,6 +500,7 @@ struct sge_txq {
        spinlock_t db_lock;
        int db_disabled;
        unsigned short db_pidx;
+       unsigned short db_pidx_inc;
        u64 udb;
 };
 
@@ -553,8 +557,13 @@ struct sge {
        u32 pktshift;               /* padding between CPL & packet data */
        u32 fl_align;               /* response queue message alignment */
        u32 fl_starve_thres;        /* Free List starvation threshold */
-       unsigned int starve_thres;
-       u8 idma_state[2];
+
+       /* State variables for detecting an SGE Ingress DMA hang */
+       unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
+       unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
+       unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
+       unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
+
        unsigned int egr_start;
        unsigned int ingr_start;
        void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
@@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
               u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
                u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
@@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter);
 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
 #endif /* __CXGB4_H__ */
index 34e2488767d94d0c6eeaf677ffbfca493d871e54..6fe58913403ab24f61ddf7f7e8d02714e040d250 100644 (file)
@@ -254,6 +254,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
+       CH_DEVICE(0x5014, 4),
+       CH_DEVICE(0x5015, 4),
+       CH_DEVICE(0x5080, 4),
+       CH_DEVICE(0x5081, 4),
+       CH_DEVICE(0x5082, 4),
+       CH_DEVICE(0x5083, 4),
+       CH_DEVICE(0x5084, 4),
+       CH_DEVICE(0x5085, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
@@ -273,6 +281,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
+       CH_DEVICE(0x5414, 4),
+       CH_DEVICE(0x5415, 4),
+       CH_DEVICE(0x5480, 4),
+       CH_DEVICE(0x5481, 4),
+       CH_DEVICE(0x5482, 4),
+       CH_DEVICE(0x5483, 4),
+       CH_DEVICE(0x5484, 4),
+       CH_DEVICE(0x5485, 4),
        { 0, }
 };
 
@@ -423,15 +439,18 @@ static void link_report(struct net_device *dev)
                const struct port_info *p = netdev_priv(dev);
 
                switch (p->link_cfg.speed) {
-               case SPEED_10000:
+               case 10000:
                        s = "10Gbps";
                        break;
-               case SPEED_1000:
+               case 1000:
                        s = "1000Mbps";
                        break;
-               case SPEED_100:
+               case 100:
                        s = "100Mbps";
                        break;
+               case 40000:
+                       s = "40Gbps";
+                       break;
                }
 
                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2080,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
                0x403f8, 0x403fc,
-               0x41300, 0x413c4,
+               0x41304, 0x413c4,
                0x41400, 0x4141c,
                0x41480, 0x414d0,
                0x44000, 0x44078,
@@ -2089,7 +2108,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
                0x483f8, 0x483fc,
-               0x49300, 0x493c4,
+               0x49304, 0x493c4,
                0x49400, 0x4941c,
                0x49480, 0x494d0,
                0x4c000, 0x4c078,
@@ -2199,6 +2218,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;
+       else if (type == FW_PORT_TYPE_BP40_BA)
+               v |= SUPPORTED_40000baseSR4_Full;
 
        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
@@ -2215,6 +2236,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
+       if (caps & ADVERTISED_40000baseSR4_Full)
+               v |= FW_PORT_CAP_SPEED_40G;
        return v;
 }
 
@@ -2263,12 +2286,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 static unsigned int speed_to_caps(int speed)
 {
-       if (speed == SPEED_100)
+       if (speed == 100)
                return FW_PORT_CAP_SPEED_100M;
-       if (speed == SPEED_1000)
+       if (speed == 1000)
                return FW_PORT_CAP_SPEED_1G;
-       if (speed == SPEED_10000)
+       if (speed == 10000)
                return FW_PORT_CAP_SPEED_10G;
+       if (speed == 40000)
+               return FW_PORT_CAP_SPEED_40G;
        return 0;
 }
 
@@ -2296,8 +2321,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(speed);
 
-               if (!(lc->supported & cap) || (speed == SPEED_1000) ||
-                   (speed == SPEED_10000))
+               if (!(lc->supported & cap) ||
+                   (speed == 1000) ||
+                   (speed == 10000) ||
+                   (speed == 40000))
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
@@ -3205,8 +3232,8 @@ static int cxgb4_clip_get(const struct net_device *dev,
        c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
                        FW_CMD_REQUEST | FW_CMD_WRITE);
        c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
-       *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
-       *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+       c.ip_hi = *(__be64 *)(lip->s6_addr);
+       c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 }
 
@@ -3221,8 +3248,8 @@ static int cxgb4_clip_release(const struct net_device *dev,
        c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
                        FW_CMD_REQUEST | FW_CMD_READ);
        c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
-       *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
-       *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+       c.ip_hi = *(__be64 *)(lip->s6_addr);
+       c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 }
 
@@ -3563,14 +3590,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
 
 static void disable_txq_db(struct sge_txq *q)
 {
-       spin_lock_irq(&q->db_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->db_lock, flags);
        q->db_disabled = 1;
-       spin_unlock_irq(&q->db_lock);
+       spin_unlock_irqrestore(&q->db_lock, flags);
 }
 
-static void enable_txq_db(struct sge_txq *q)
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
 {
        spin_lock_irq(&q->db_lock);
+       if (q->db_pidx_inc) {
+               /* Make sure that all writes to the TX descriptors
+                * are committed before we tell HW about them.
+                */
+               wmb();
+               t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+                            QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+               q->db_pidx_inc = 0;
+       }
        q->db_disabled = 0;
        spin_unlock_irq(&q->db_lock);
 }
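
The enable/disable doorbell changes above pair with the new db_pidx_inc field added to struct sge_txq: while doorbells are disabled, producer-index updates are accumulated, and when doorbells come back they are replayed as a single doorbell write behind a write barrier. Below is a minimal user-space sketch of that accumulate-and-flush idea; txq_model, ring_doorbell and enable_doorbell are made-up names, not the real cxgb4 structures or register accessors.

#include <stdint.h>
#include <stdio.h>

/* Toy model of a TX queue doorbell: captures only the accumulate-and-
 * flush behaviour, not the real cxgb4 locking or register layout. */
struct txq_model {
        int      db_disabled;   /* doorbells currently blocked */
        uint32_t db_pidx_inc;   /* increments deferred while blocked */
        uint32_t hw_pidx;       /* what the "hardware" has been told */
};

static void hw_doorbell_write(struct txq_model *q, uint32_t n)
{
        /* In the driver this is a t4_write_reg() preceded by wmb(). */
        q->hw_pidx += n;
        printf("doorbell: +%u (hw_pidx=%u)\n", (unsigned)n, (unsigned)q->hw_pidx);
}

static void ring_doorbell(struct txq_model *q, uint32_t n)
{
        if (q->db_disabled)
                q->db_pidx_inc += n;        /* remember for later */
        else
                hw_doorbell_write(q, n);
}

static void enable_doorbell(struct txq_model *q)
{
        if (q->db_pidx_inc) {               /* flush deferred work first */
                hw_doorbell_write(q, q->db_pidx_inc);
                q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
}

int main(void)
{
        struct txq_model q = { .db_disabled = 1 };

        ring_doorbell(&q, 4);               /* deferred */
        ring_doorbell(&q, 2);               /* deferred */
        enable_doorbell(&q);                /* one combined doorbell of 6 */
        ring_doorbell(&q, 1);               /* immediate */
        return 0;
}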
@@ -3592,11 +3630,32 @@ static void enable_dbs(struct adapter *adap)
        int i;
 
        for_each_ethrxq(&adap->sge, i)
-               enable_txq_db(&adap->sge.ethtxq[i].q);
+               enable_txq_db(adap, &adap->sge.ethtxq[i].q);
        for_each_ofldrxq(&adap->sge, i)
-               enable_txq_db(&adap->sge.ofldtxq[i].q);
+               enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
        for_each_port(adap, i)
-               enable_txq_db(&adap->sge.ctrlq[i].q);
+               enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+       if (adap->uld_handle[CXGB4_ULD_RDMA])
+               ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+                               cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+       struct adapter *adap;
+
+       adap = container_of(work, struct adapter, db_full_task);
+
+       drain_db_fifo(adap, dbfifo_drain_delay);
+       enable_dbs(adap);
+       notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+       t4_set_reg_field(adap, SGE_INT_ENABLE3,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT,
+                        DBFIFO_HP_INT | DBFIFO_LP_INT);
 }
 
 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3604,7 +3663,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
        u16 hw_pidx, hw_cidx;
        int ret;
 
-       spin_lock_bh(&q->db_lock);
+       spin_lock_irq(&q->db_lock);
        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;
@@ -3621,7 +3680,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
        }
 out:
        q->db_disabled = 0;
-       spin_unlock_bh(&q->db_lock);
+       q->db_pidx_inc = 0;
+       spin_unlock_irq(&q->db_lock);
        if (ret)
                CH_WARN(adap, "DB drop recovery failed.\n");
 }
@@ -3637,29 +3697,6 @@ static void recover_all_queues(struct adapter *adap)
                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
 }
 
-static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
-{
-       mutex_lock(&uld_mutex);
-       if (adap->uld_handle[CXGB4_ULD_RDMA])
-               ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
-                               cmd);
-       mutex_unlock(&uld_mutex);
-}
-
-static void process_db_full(struct work_struct *work)
-{
-       struct adapter *adap;
-
-       adap = container_of(work, struct adapter, db_full_task);
-
-       notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-       drain_db_fifo(adap, dbfifo_drain_delay);
-       t4_set_reg_field(adap, SGE_INT_ENABLE3,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT,
-                        DBFIFO_HP_INT | DBFIFO_LP_INT);
-       notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-}
-
 static void process_db_drop(struct work_struct *work)
 {
        struct adapter *adap;
@@ -3667,11 +3704,13 @@ static void process_db_drop(struct work_struct *work)
        adap = container_of(work, struct adapter, db_drop_task);
 
        if (is_t4(adap->params.chip)) {
-               disable_dbs(adap);
+               drain_db_fifo(adap, dbfifo_drain_delay);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
-               drain_db_fifo(adap, 1);
+               drain_db_fifo(adap, dbfifo_drain_delay);
                recover_all_queues(adap);
+               drain_db_fifo(adap, dbfifo_drain_delay);
                enable_dbs(adap);
+               notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
        } else {
                u32 dropped_db = t4_read_reg(adap, 0x010ac);
                u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3712,6 +3751,8 @@ static void process_db_drop(struct work_struct *work)
 void t4_db_full(struct adapter *adap)
 {
        if (is_t4(adap->params.chip)) {
+               disable_dbs(adap);
+               notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
                t4_set_reg_field(adap, SGE_INT_ENABLE3,
                                 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
                queue_work(workq, &adap->db_full_task);
@@ -3720,8 +3761,11 @@ void t4_db_full(struct adapter *adap)
 
 void t4_db_dropped(struct adapter *adap)
 {
-       if (is_t4(adap->params.chip))
-               queue_work(workq, &adap->db_drop_task);
+       if (is_t4(adap->params.chip)) {
+               disable_dbs(adap);
+               notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+       }
+       queue_work(workq, &adap->db_drop_task);
 }
 
 static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3765,6 +3809,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.dbfifo_int_thresh = dbfifo_int_thresh;
        lli.sge_pktshift = adap->sge.pktshift;
        lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+       lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
 
        handle = ulds[uld].add(&lli);
        if (IS_ERR(handle)) {
@@ -5369,6 +5414,21 @@ static int adap_init0(struct adapter *adap)
        val[0] = 1;
        (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
 
+       /*
+        * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+        * capability.  Earlier versions of the firmware didn't have the
+        * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+        * permission to use ULPTX MEMWRITE DSGL.
+        */
+       if (is_t4(adap->params.chip)) {
+               adap->params.ulptx_memwrite_dsgl = false;
+       } else {
+               params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+               ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+                                     1, params, val);
+               adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+       }
+
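
The block above is a capability-probe pattern: ask the firmware for an optional parameter and treat any query failure the same as "not granted", so older firmware that predates the parameter quietly falls back. A hedged, self-contained sketch of that pattern follows; query_fw_param and probe_optional_cap are invented stand-ins, and 0x17 simply mirrors the FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL value introduced later in this patch.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a firmware parameter query: returns 0 and
 * fills *val on success, or a negative error if the (older) firmware
 * does not recognise the parameter at all. */
static int query_fw_param(unsigned int param, unsigned int *val)
{
        (void)param;
        *val = 0;
        return -1;              /* pretend we are talking to old firmware */
}

/* Probe an optional capability: a failed query is treated the same as
 * "capability not granted", never as a fatal error. */
static bool probe_optional_cap(unsigned int param)
{
        unsigned int val;
        int ret = query_fw_param(param, &val);

        return ret == 0 && val != 0;
}

int main(void)
{
        bool dsgl_ok = probe_optional_cap(0x17);

        printf("memwrite DSGL allowed: %s\n", dsgl_ok ? "yes" : "no");
        return 0;
}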
        /*
         * Get device capabilities so we can determine what resources we need
         * to manage.
@@ -5603,9 +5663,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
        .resume         = eeh_resume,
 };
 
-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
 {
-       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+              (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
 }
 
 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5690,7 @@ static void cfg_queues(struct adapter *adap)
        int i, q10g = 0, n10g = 0, qidx = 0;
 
        for_each_port(adap, i)
-               n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+               n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5705,7 @@ static void cfg_queues(struct adapter *adap)
                struct port_info *pi = adap2pinfo(adap, i);
 
                pi->first_qset = qidx;
-               pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
                qidx += pi->nqsets;
        }
 
@@ -5737,7 +5798,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
 static int enable_msix(struct adapter *adap)
 {
        int ofld_need = 0;
-       int i, err, want, need;
+       int i, want, need;
        struct sge *s = &adap->sge;
        unsigned int nchan = adap->params.nports;
        struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5814,30 @@ static int enable_msix(struct adapter *adap)
        }
        need = adap->params.nports + EXTRA_VECS + ofld_need;
 
-       while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
-               want = err;
+       want = pci_enable_msix_range(adap->pdev, entries, need, want);
+       if (want < 0)
+               return want;
 
-       if (!err) {
-               /*
-                * Distribute available vectors to the various queue groups.
-                * Every group gets its minimum requirement and NIC gets top
-                * priority for leftovers.
-                */
-               i = want - EXTRA_VECS - ofld_need;
-               if (i < s->max_ethqsets) {
-                       s->max_ethqsets = i;
-                       if (i < s->ethqsets)
-                               reduce_ethqs(adap, i);
-               }
-               if (is_offload(adap)) {
-                       i = want - EXTRA_VECS - s->max_ethqsets;
-                       i -= ofld_need - nchan;
-                       s->ofldqsets = (i / nchan) * nchan;  /* round down */
-               }
-               for (i = 0; i < want; ++i)
-                       adap->msix_info[i].vec = entries[i].vector;
-       } else if (err > 0)
-               dev_info(adap->pdev_dev,
-                        "only %d MSI-X vectors left, not using MSI-X\n", err);
-       return err;
+       /*
+        * Distribute available vectors to the various queue groups.
+        * Every group gets its minimum requirement and NIC gets top
+        * priority for leftovers.
+        */
+       i = want - EXTRA_VECS - ofld_need;
+       if (i < s->max_ethqsets) {
+               s->max_ethqsets = i;
+               if (i < s->ethqsets)
+                       reduce_ethqs(adap, i);
+       }
+       if (is_offload(adap)) {
+               i = want - EXTRA_VECS - s->max_ethqsets;
+               i -= ofld_need - nchan;
+               s->ofldqsets = (i / nchan) * nchan;  /* round down */
+       }
+       for (i = 0; i < want; ++i)
+               adap->msix_info[i].vec = entries[i].vector;
+
+       return 0;
 }
 
 #undef EXTRA_VECS
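
The enable_msix() rewrite above drops the old loop that retried pci_enable_msix() with ever smaller counts; pci_enable_msix_range(pdev, entries, need, want) either grants some number of vectors in [need, want] or returns a negative error, and the driver then distributes whatever it got. The stand-alone model below shows only that min/max request shape; fake_enable_msix_range and the vector counts are invented for illustration.

#include <stdio.h>

/* Stand-in for pci_enable_msix_range(): allocate between 'min' and
 * 'max' vectors, returning the count actually granted or a negative
 * error if even 'min' cannot be met.  This fake "hardware" has 6. */
static int fake_enable_msix_range(int min, int max)
{
        int avail = 6;

        if (avail < min)
                return -28;     /* stand-in for an errno such as -ENOSPC */
        return max < avail ? max : avail;
}

int main(void)
{
        int need = 3, want = 10;
        int got = fake_enable_msix_range(need, want);

        if (got < 0) {
                printf("MSI-X unavailable (%d)\n", got);
                return 1;
        }
        /* Distribute what was granted: reserve the extras, then let the
         * queue sets share the remainder (2 is a made-up EXTRA_VECS). */
        printf("granted %d vectors, %d left for queue sets\n", got, got - 2);
        return 0;
}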
@@ -5801,11 +5860,6 @@ static int init_rss(struct adapter *adap)
 
 static void print_port_info(const struct net_device *dev)
 {
-       static const char *base[] = {
-               "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
-               "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
-       };
-
        char buf[80];
        char *bufp = buf;
        const char *spd = "";
@@ -5823,9 +5877,11 @@ static void print_port_info(const struct net_device *dev)
                bufp += sprintf(bufp, "1000/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
+       if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+               bufp += sprintf(bufp, "40G/");
        if (bufp != buf)
                --bufp;
-       sprintf(bufp, "BASE-%s", base[pi->port_type]);
+       sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
 
        netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
                    adap->params.vpd.id,
@@ -5833,8 +5889,8 @@ static void print_port_info(const struct net_device *dev)
                    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
                    (adap->flags & USING_MSIX) ? " MSI-X" :
                    (adap->flags & USING_MSI) ? " MSI" : "");
-       netdev_info(dev, "S/N: %s, E/C: %s\n",
-                   adap->params.vpd.sn, adap->params.vpd.ec);
+       netdev_info(dev, "S/N: %s, P/N: %s\n",
+                   adap->params.vpd.sn, adap->params.vpd.pn);
 }
 
 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
index 4dd0a82533e442f8b330c5ede554359ea450a087..e274a047528fca6ed11c9773db2d0a188e16da14 100644 (file)
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
                                             /* packet data */
        bool enable_fw_ofld_conn;            /* Enable connection through fw */
                                             /* WR */
+       bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
 };
 
 struct cxgb4_uld_info {
index 47ffa64fcf19e89a495191252006cb21eb40d993..ca95cf2954eb33f62719130a8b0432fbb324c2b6 100644 (file)
  */
 #define TX_QCHECK_PERIOD (HZ / 2)
 
+/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
+ * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
+ * State Machines in the same state for this amount of time (in HZ) then we'll
+ * issue a warning about a potential hang.  We'll repeat the warning as the
+ * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
+ * the situation clears.  If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
 /*
  * Max number of Tx descriptors to be reclaimed by the Tx timer.
  */
@@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
                if (d->skb) {                       /* an SGL is present */
                        if (unmap)
                                unmap_sgl(dev, d->skb, d->sgl, q);
-                       kfree_skb(d->skb);
+                       dev_consume_skb_any(d->skb);
                        d->skb = NULL;
                }
                ++d;
@@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
  *     @skb: the packet
  *
  *     Returns whether an Ethernet packet is small enough to fit as
- *     immediate data.
+ *     immediate data. Return value corresponds to headroom required.
  */
 static inline int is_eth_imm(const struct sk_buff *skb)
 {
-       return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+       int hdrlen = skb_shinfo(skb)->gso_size ?
+                       sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+       hdrlen += sizeof(struct cpl_tx_pkt);
+       if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+               return hdrlen;
+       return 0;
 }
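
With this change is_eth_imm() no longer answers a plain yes/no: it returns the CPL (plus, for GSO packets, LSO) header room an immediate-data work request would need, or 0 if the packet will not fit, and calc_tx_flits() below sizes the request as DIV_ROUND_UP(payload + headroom, 8). A rough user-space model of that contract; the header lengths and the 256-byte limit are illustrative placeholders rather than the driver's real constants.

#include <stdio.h>

#define MAX_IMM_PKT_LEN 256u                 /* illustrative limit */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Model of the new contract: return the header room needed if the
 * packet can travel as immediate data, otherwise return 0. */
static unsigned int imm_hdrlen(unsigned int pkt_len, int is_gso,
                               unsigned int cpl_len, unsigned int lso_len)
{
        unsigned int hdrlen = cpl_len + (is_gso ? lso_len : 0);

        return pkt_len <= MAX_IMM_PKT_LEN - hdrlen ? hdrlen : 0;
}

int main(void)
{
        unsigned int hdrlen = imm_hdrlen(100, 0, 16, 24);

        if (hdrlen)     /* immediate: payload plus headers, in 8-byte flits */
                printf("flits = %u\n", DIV_ROUND_UP(100 + hdrlen, 8));
        else
                printf("too big for immediate data\n");
        return 0;
}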
 
 /**
@@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 {
        unsigned int flits;
+       int hdrlen = is_eth_imm(skb);
 
-       if (is_eth_imm(skb))
-               return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+       if (hdrlen)
+               return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
 
        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
        if (skb_shinfo(skb)->gso_size)
@@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 {
        unsigned int *wr, index;
+       unsigned long flags;
 
        wmb();            /* write descriptors before telling HW */
-       spin_lock(&q->db_lock);
+       spin_lock_irqsave(&q->db_lock, flags);
        if (!q->db_disabled) {
                if (is_t4(adap->params.chip)) {
                        t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
                                writel(n,  adap->bar2 + q->udb + 8);
                        wmb();
                }
-       }
+       } else
+               q->db_pidx_inc += n;
        q->db_pidx = q->pidx;
-       spin_unlock(&q->db_lock);
+       spin_unlock_irqrestore(&q->db_lock, flags);
 }
 
 /**
@@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       int len;
        u32 wr_mid;
        u64 cntrl, *end;
        int qidx, credits;
@@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        struct cpl_tx_pkt_core *cpl;
        const struct skb_shared_info *ssi;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
+       bool immediate = false;
 
        /*
         * The chip min packet length is 10 octets but play safe and reject
         * anything shorter than an Ethernet header.
         */
        if (unlikely(skb->len < ETH_HLEN)) {
-out_free:      dev_kfree_skb(skb);
+out_free:      dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -1011,7 +1032,10 @@ out_free:        dev_kfree_skb(skb);
                return NETDEV_TX_BUSY;
        }
 
-       if (!is_eth_imm(skb) &&
+       if (is_eth_imm(skb))
+               immediate = true;
+
+       if (!immediate &&
            unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
                q->mapping_err++;
                goto out_free;
@@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb);
        wr->r3 = cpu_to_be64(0);
        end = (u64 *)wr + flits;
 
+       len = immediate ? skb->len : 0;
        ssi = skb_shinfo(skb);
        if (ssi->gso_size) {
                struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb);
                int l3hdr_len = skb_network_header_len(skb);
                int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
 
+               len += sizeof(*lso);
                wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
-                                      FW_WR_IMMDLEN(sizeof(*lso)));
+                                      FW_WR_IMMDLEN(len));
                lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
                                        LSO_FIRST_SLICE | LSO_LAST_SLICE |
                                        LSO_IPV6(v6) |
@@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb);
                q->tso++;
                q->tx_cso += ssi->gso_segs;
        } else {
-               int len;
-
-               len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+               len += sizeof(*cpl);
                wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
                                       FW_WR_IMMDLEN(len));
                cpl = (void *)(wr + 1);
@@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);
 
-       if (is_eth_imm(skb)) {
+       if (immediate) {
                inline_tx_skb(skb, &q->q, cpl + 1);
-               dev_kfree_skb(skb);
+               dev_consume_skb_any(skb);
        } else {
                int last_desc;
 
@@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
 {
        unsigned int idx = skb_txq(skb);
 
-       if (unlikely(is_ctrl_pkt(skb)))
+       if (unlikely(is_ctrl_pkt(skb))) {
+               /* Single ctrl queue is a requirement for LE workaround path */
+               if (adap->tids.nsftids)
+                       idx = 0;
                return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+       }
        return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
 }
 
@@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
 static void sge_rx_timer_cb(unsigned long data)
 {
        unsigned long m;
-       unsigned int i, cnt[2];
+       unsigned int i, idma_same_state_cnt[2];
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;
 
@@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data)
                }
 
        t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
-       cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
-       cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
-
-       for (i = 0; i < 2; i++)
-               if (cnt[i] >= s->starve_thres) {
-                       if (s->idma_state[i] || cnt[i] == 0xffffffff)
-                               continue;
-                       s->idma_state[i] = 1;
-                       t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
-                       m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
-                       dev_warn(adap->pdev_dev,
-                                "SGE idma%u starvation detected for "
-                                "queue %lu\n", i, m & 0xffff);
-               } else if (s->idma_state[i])
-                       s->idma_state[i] = 0;
+       idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+       idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+       for (i = 0; i < 2; i++) {
+               u32 debug0, debug11;
+
+               /* If the Ingress DMA Same State Counter ("timer") is less
+                * than 1s, then we can reset our synthesized Stall Timer and
+                * continue.  If we have previously emitted warnings about a
+                * potential stalled Ingress Queue, issue a note indicating
+                * that the Ingress Queue has resumed forward progress.
+                */
+               if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
+                       if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
+                               CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n",
+                                       i, s->idma_qid[i],
+                                       s->idma_stalled[i]/HZ);
+                       s->idma_stalled[i] = 0;
+                       continue;
+               }
+
+               /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+                * domain.  The first time we get here it'll be because we
+                * passed the 1s Threshold; each additional time it'll be
+                * because the RX Timer Callback is being fired on its regular
+                * schedule.
+                *
+                * If the stall is below our Potential Hung Ingress Queue
+                * Warning Threshold, continue.
+                */
+               if (s->idma_stalled[i] == 0)
+                       s->idma_stalled[i] = HZ;
+               else
+                       s->idma_stalled[i] += RX_QCHECK_PERIOD;
+
+               if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
+                       continue;
+
+               /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
+               if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
+                       continue;
+
+               /* Read and save the SGE IDMA State and Queue ID information.
+                * We do this every time in case it changes across time ...
+                */
+               t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
+               debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+               s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+               t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+               debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+               s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+               CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
+                       i, s->idma_qid[i], s->idma_state[i],
+                       s->idma_stalled[i]/HZ, debug0, debug11);
+               t4_sge_decode_idma_state(adap, s->idma_state[i]);
+       }
 
        mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
 }
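
The rewritten timer callback above synthesizes a stall timer out of a periodic check: once the hardware's same-state counter crosses the one-second threshold, stalled time is accumulated in HZ units, a warning is emitted and then repeated at a fixed rate, and a "resumed" note is printed when forward progress returns. The compact user-space model below follows the same state machine for a single channel, with invented numbers and printf() standing in for CH_WARN().

#include <stdio.h>

#define HZ 100                          /* ticks per second in this model */
#define QCHECK_PERIOD (HZ / 2)          /* how often the check runs */
#define WARN_THRESH   (1 * HZ)          /* warn after ~1 s without progress */
#define WARN_REPEAT   (20 * QCHECK_PERIOD)

static unsigned int stalled;            /* synthesized stall timer, in ticks */

/* Called every QCHECK_PERIOD ticks with the hardware's same-state age. */
static void check_channel(unsigned int same_state_age, unsigned int thresh)
{
        if (same_state_age < thresh) {          /* progress was made */
                if (stalled >= WARN_THRESH)
                        printf("channel resumed after %u s\n", stalled / HZ);
                stalled = 0;
                return;
        }
        stalled = stalled ? stalled + QCHECK_PERIOD : HZ;
        if (stalled < WARN_THRESH)
                return;
        if (((stalled - HZ) % WARN_REPEAT) == 0) /* rate-limit the warning */
                printf("channel may be stuck, %u s so far\n", stalled / HZ);
}

int main(void)
{
        unsigned int t;

        for (t = 0; t < 30; t++)        /* 15 model "seconds" of checks */
                check_channel(t < 24 ? 5 * HZ : 0, HZ);
        return 0;
}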
@@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap)
        fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
        fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
 
+       /* We only bother using the Large Page logic if the Large Page Buffer
+        * is larger than our Page Size Buffer.
+        */
+       if (fl_large_pg <= fl_small_pg)
+               fl_large_pg = 0;
+
        #undef READ_FL_BUF
 
+       /* The Page Size Buffer must be exactly equal to our Page Size and the
+        * Large Page Size Buffer should be 0 (per above) or a power of 2.
+        */
        if (fl_small_pg != PAGE_SIZE ||
-           (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
-                                 (fl_large_pg & (fl_large_pg-1)) != 0))) {
+           (fl_large_pg & (fl_large_pg-1)) != 0) {
                dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
                        fl_small_pg, fl_large_pg);
                return -EINVAL;
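
The free-list buffer check above reduces to three rules: ignore the large buffer when it is not actually larger than the page-size buffer, insist that the page-size buffer equals PAGE_SIZE, and insist that any remaining large buffer is a power of two. A tiny stand-alone version of that validation, with MODEL_PAGE_SIZE standing in for the kernel's PAGE_SIZE:

#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u           /* stand-in for the kernel PAGE_SIZE */

/* Returns true if (small, large) is an acceptable pair of free-list
 * buffer sizes under the rules described above. */
static bool fl_sizes_ok(unsigned int small, unsigned int large)
{
        if (large <= small)
                large = 0;              /* large buffer unused: always fine */
        return small == MODEL_PAGE_SIZE && (large & (large - 1)) == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               fl_sizes_ok(4096, 65536),    /* ok: power-of-two large buffer */
               fl_sizes_ok(4096, 2048),     /* ok: large buffer ignored */
               fl_sizes_ok(4096, 40000));   /* bad: not a power of two */
        return 0;
}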
@@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
-       u32 sge_control;
-       int ret;
+       u32 sge_control, sge_conm_ctrl;
+       int ret, egress_threshold;
 
        /*
         * Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap)
         * SGE's Egress Congestion Threshold.  If it isn't, then we can get
         * stuck waiting for new packets while the SGE is waiting for us to
         * give it more Free List entries.  (Note that the SGE's Egress
-        * Congestion Threshold is in units of 2 Free List pointers.)
+        * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+        * there was only a single field to control this.  For T5 there's the
+        * original field which now only applies to Unpacked Mode Free List
+        * buffers and a new field which only applies to Packed Mode Free List
+        * buffers.
         */
-       s->fl_starve_thres
-               = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+       sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
+       if (is_t4(adap->params.chip))
+               egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
+       else
+               egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
+       s->fl_starve_thres = 2*egress_threshold + 1;
 
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
-       s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
-       s->idma_state[0] = s->idma_state[1] = 0;
+       s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
+       s->idma_stalled[0] = 0;
+       s->idma_stalled[1] = 0;
        spin_lock_init(&s->intrq_lock);
 
        return 0;
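
The t4_sge_init() hunk above keeps the free list ahead of the SGE's egress congestion threshold, which now lives in a different SGE_CONM_CTRL field on T4 (unpacked mode) than on T5 (packed mode) and is counted in units of 2 free-list pointers. The sketch below shows only that per-chip field extraction; the T4-style shift is a placeholder, while the T5-style shift and mask echo the EGRTHRESHOLDPACKING definitions added to t4_regs.h further down in this patch.

#include <stdint.h>
#include <stdio.h>

#define THRESH_MASK        0x3fu
#define THRESH_SHIFT_GEN4  8            /* placeholder position */
#define THRESH_SHIFT_GEN5  14           /* echoes EGRTHRESHOLDPACKING_SHIFT */

static unsigned int fl_starve_thres(uint32_t conm_ctrl, int is_gen4)
{
        unsigned int shift = is_gen4 ? THRESH_SHIFT_GEN4 : THRESH_SHIFT_GEN5;
        unsigned int egress_threshold = (conm_ctrl >> shift) & THRESH_MASK;

        /* The threshold is in units of 2 free-list pointers, so keep at
         * least 2*threshold + 1 entries posted to avoid a stall. */
        return 2 * egress_threshold + 1;
}

int main(void)
{
        uint32_t reg = 0x0002a500;      /* arbitrary register snapshot */

        printf("gen4: %u  gen5: %u\n",
               fl_starve_thres(reg, 1), fl_starve_thres(reg, 0));
        return 0;
}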
index 2c109343d57083a651bcaf9ef08772dd8029ec1f..fb2fe65903c2b7675701e1911a53b6e484566a73 100644 (file)
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
        u32 cclk_param, cclk_val;
        int i, ret, addr;
-       int ec, sn;
+       int ec, sn, pn;
        u8 *vpd, csum;
        unsigned int vpdr_len, kw_offset, id_len;
 
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 
        FIND_VPD_KW(ec, "EC");
        FIND_VPD_KW(sn, "SN");
+       FIND_VPD_KW(pn, "PN");
 #undef FIND_VPD_KW
 
        memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
        strim(p->sn);
+       memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+       strim(p->pn);
 
        /*
         * Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
 }
 
 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+                    FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+                    FW_PORT_CAP_ANEG)
 
 /**
  *     t4_link_start - apply link configuration to MAC/PHY
@@ -2246,6 +2250,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
        return 1 << idx;
 }
 
+/**
+ *      t4_get_port_type_description - return Port Type string description
+ *      @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+       static const char *const port_type_description[] = {
+               "R XFI",
+               "R XAUI",
+               "T SGMII",
+               "T XFI",
+               "T XAUI",
+               "KX4",
+               "CX4",
+               "KX",
+               "KR",
+               "R SFP+",
+               "KR/KX",
+               "KR/KX/KX4",
+               "R QSFP_10G",
+               "",
+               "R QSFP",
+               "R BP40_BA",
+       };
+
+       if (port_type < ARRAY_SIZE(port_type_description))
+               return port_type_description[port_type];
+       return "UNKNOWN";
+}
+
 /**
  *     t4_get_port_stats - collect port statistics
  *     @adap: the adapter
@@ -2562,6 +2596,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_sge_decode_idma_state - decode the idma state
+ *     @adapter: the adapter
+ *     @state: the state idma is stuck in
+ */
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
+{
+       static const char * const t4_decode[] = {
+               "IDMA_IDLE",
+               "IDMA_PUSH_MORE_CPL_FIFO",
+               "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+               "Not used",
+               "IDMA_PHYSADDR_SEND_PCIEHDR",
+               "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+               "IDMA_PHYSADDR_SEND_PAYLOAD",
+               "IDMA_SEND_FIFO_TO_IMSG",
+               "IDMA_FL_REQ_DATA_FL_PREP",
+               "IDMA_FL_REQ_DATA_FL",
+               "IDMA_FL_DROP",
+               "IDMA_FL_H_REQ_HEADER_FL",
+               "IDMA_FL_H_SEND_PCIEHDR",
+               "IDMA_FL_H_PUSH_CPL_FIFO",
+               "IDMA_FL_H_SEND_CPL",
+               "IDMA_FL_H_SEND_IP_HDR_FIRST",
+               "IDMA_FL_H_SEND_IP_HDR",
+               "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+               "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+               "IDMA_FL_H_SEND_IP_HDR_PADDING",
+               "IDMA_FL_D_SEND_PCIEHDR",
+               "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+               "IDMA_FL_D_REQ_NEXT_DATA_FL",
+               "IDMA_FL_SEND_PCIEHDR",
+               "IDMA_FL_PUSH_CPL_FIFO",
+               "IDMA_FL_SEND_CPL",
+               "IDMA_FL_SEND_PAYLOAD_FIRST",
+               "IDMA_FL_SEND_PAYLOAD",
+               "IDMA_FL_REQ_NEXT_DATA_FL",
+               "IDMA_FL_SEND_NEXT_PCIEHDR",
+               "IDMA_FL_SEND_PADDING",
+               "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+               "IDMA_FL_SEND_FIFO_TO_IMSG",
+               "IDMA_FL_REQ_DATAFL_DONE",
+               "IDMA_FL_REQ_HEADERFL_DONE",
+       };
+       static const char * const t5_decode[] = {
+               "IDMA_IDLE",
+               "IDMA_ALMOST_IDLE",
+               "IDMA_PUSH_MORE_CPL_FIFO",
+               "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+               "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+               "IDMA_PHYSADDR_SEND_PCIEHDR",
+               "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+               "IDMA_PHYSADDR_SEND_PAYLOAD",
+               "IDMA_SEND_FIFO_TO_IMSG",
+               "IDMA_FL_REQ_DATA_FL",
+               "IDMA_FL_DROP",
+               "IDMA_FL_DROP_SEND_INC",
+               "IDMA_FL_H_REQ_HEADER_FL",
+               "IDMA_FL_H_SEND_PCIEHDR",
+               "IDMA_FL_H_PUSH_CPL_FIFO",
+               "IDMA_FL_H_SEND_CPL",
+               "IDMA_FL_H_SEND_IP_HDR_FIRST",
+               "IDMA_FL_H_SEND_IP_HDR",
+               "IDMA_FL_H_REQ_NEXT_HEADER_FL",
+               "IDMA_FL_H_SEND_NEXT_PCIEHDR",
+               "IDMA_FL_H_SEND_IP_HDR_PADDING",
+               "IDMA_FL_D_SEND_PCIEHDR",
+               "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+               "IDMA_FL_D_REQ_NEXT_DATA_FL",
+               "IDMA_FL_SEND_PCIEHDR",
+               "IDMA_FL_PUSH_CPL_FIFO",
+               "IDMA_FL_SEND_CPL",
+               "IDMA_FL_SEND_PAYLOAD_FIRST",
+               "IDMA_FL_SEND_PAYLOAD",
+               "IDMA_FL_REQ_NEXT_DATA_FL",
+               "IDMA_FL_SEND_NEXT_PCIEHDR",
+               "IDMA_FL_SEND_PADDING",
+               "IDMA_FL_SEND_COMPLETION_TO_IMSG",
+       };
+       static const u32 sge_regs[] = {
+               SGE_DEBUG_DATA_LOW_INDEX_2,
+               SGE_DEBUG_DATA_LOW_INDEX_3,
+               SGE_DEBUG_DATA_HIGH_INDEX_10,
+       };
+       const char **sge_idma_decode;
+       int sge_idma_decode_nstates;
+       int i;
+
+       if (is_t4(adapter->params.chip)) {
+               sge_idma_decode = (const char **)t4_decode;
+               sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+       } else {
+               sge_idma_decode = (const char **)t5_decode;
+               sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+       }
+
+       if (state < sge_idma_decode_nstates)
+               CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+       else
+               CH_WARN(adapter, "idma state %d unknown\n", state);
+
+       for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+               CH_WARN(adapter, "SGE register %#x value %#x\n",
+                       sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
+}
+
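
t4_sge_decode_idma_state() above amounts to picking a per-generation string table and bounds-checking the reported state before printing it. A trimmed-down illustration of that lookup shape, with abbreviated made-up state names rather than the full T4/T5 tables:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Two per-generation decode tables with abbreviated stand-in names. */
static const char * const gen_a[] = { "IDLE", "PUSH_CPL", "SEND_PAYLOAD" };
static const char * const gen_b[] = { "IDLE", "ALMOST_IDLE", "PUSH_CPL" };

static void decode_state(int is_gen_a, int state)
{
        const char * const *tbl = is_gen_a ? gen_a : gen_b;
        size_t n = is_gen_a ? ARRAY_SIZE(gen_a) : ARRAY_SIZE(gen_b);

        if (state >= 0 && (size_t)state < n)
                printf("idma state %s\n", tbl[state]);
        else
                printf("idma state %d unknown\n", state);
}

int main(void)
{
        decode_state(1, 2);             /* known state in the first table */
        decode_state(0, 7);             /* out of range: reported unknown */
        return 0;
}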
 /**
  *      t4_fw_hello - establish communication with FW
  *      @adap: the adapter
@@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
                if (stat & FW_PORT_CMD_TXPAUSE)
                        fc |= PAUSE_TX;
                if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
-                       speed = SPEED_100;
+                       speed = 100;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
-                       speed = SPEED_1000;
+                       speed = 1000;
                else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
-                       speed = SPEED_10000;
+                       speed = 10000;
+               else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+                       speed = 40000;
 
                if (link_ok != lc->link_ok || speed != lc->speed ||
                    fc != lc->fc) {                    /* something changed */
index cd6874b571ee2585c3ef1c9f8c418c7c643b8d46..f2738c7107898f2cf9112c74e33edc2f5ab16053 100644 (file)
@@ -116,6 +116,7 @@ enum CPL_error {
        CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
        CPL_ERR_RTX_NEG_ADVICE     = 35,
        CPL_ERR_PERSIST_NEG_ADVICE = 36,
+       CPL_ERR_KEEPALV_NEG_ADVICE = 37,
        CPL_ERR_ABORT_FAILED       = 42,
        CPL_ERR_IWARP_FLM          = 50,
 };
index 4082522d81408bf0a23cff8eaf9700bef14533a9..225ad8a5722de026bf0302b4f4f49886100e66f5 100644 (file)
 #define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
 #define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
 
+#define EGRTHRESHOLDPACKING_MASK       0x3fU
+#define EGRTHRESHOLDPACKING_SHIFT      14
+#define EGRTHRESHOLDPACKING(x)         ((x) << EGRTHRESHOLDPACKING_SHIFT)
+#define EGRTHRESHOLDPACKING_GET(x)     (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
+                                         EGRTHRESHOLDPACKING_MASK)
+
 #define SGE_DBFIFO_STATUS 0x10a4
 #define  HP_INT_THRESH_SHIFT 28
 #define  HP_INT_THRESH_MASK  0xfU
 #define SGE_DEBUG_INDEX 0x10cc
 #define SGE_DEBUG_DATA_HIGH 0x10d0
 #define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_DEBUG_DATA_LOW_INDEX_2     0x12c8
+#define SGE_DEBUG_DATA_LOW_INDEX_3     0x12cc
+#define SGE_DEBUG_DATA_HIGH_INDEX_10   0x12a8
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
 
 #define S_HP_INT_THRESH    28
index 74fea74ce0aa25a676045868fa0b48c4a3800860..9cc973fbcf26761b322810fa91dcf61f528b3fc7 100644 (file)
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
        FW_PARAMS_PARAM_DEV_CF = 0x0D,
+       FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
 };
 
 /*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
        FW_PORT_TYPE_SFP,
        FW_PORT_TYPE_BP_AP,
        FW_PORT_TYPE_BP4_AP,
+       FW_PORT_TYPE_QSFP_10G,
+       FW_PORT_TYPE_QSFP,
+       FW_PORT_TYPE_BP40_BA,
 
        FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
 };
index 0899c098359446346f7abe1939ac214705fd0acb..52859288de7b4d5b8c550c9a1e53fe5845225da0 100644 (file)
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
  */
 static int enable_msix(struct adapter *adapter)
 {
-       int i, err, want, need;
+       int i, want, need, nqsets;
        struct msix_entry entries[MSIX_ENTRIES];
        struct sge *s = &adapter->sge;
 
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
         */
        want = s->max_ethqsets + MSIX_EXTRAS;
        need = adapter->params.nports + MSIX_EXTRAS;
-       while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
-               want = err;
 
-       if (err == 0) {
-               int nqsets = want - MSIX_EXTRAS;
-               if (nqsets < s->max_ethqsets) {
-                       dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
-                                " for %d Queue Sets\n", nqsets);
-                       s->max_ethqsets = nqsets;
-                       if (nqsets < s->ethqsets)
-                               reduce_ethqs(adapter, nqsets);
-               }
-               for (i = 0; i < want; ++i)
-                       adapter->msix_info[i].vec = entries[i].vector;
-       } else if (err > 0) {
-               pci_disable_msix(adapter->pdev);
-               dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
-                        " not using MSI-X\n", err);
+       want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+       if (want < 0)
+               return want;
+
+       nqsets = want - MSIX_EXTRAS;
+       if (nqsets < s->max_ethqsets) {
+               dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+                        " for %d Queue Sets\n", nqsets);
+               s->max_ethqsets = nqsets;
+               if (nqsets < s->ethqsets)
+                       reduce_ethqs(adapter, nqsets);
        }
-       return err;
+       for (i = 0; i < want; ++i)
+               adapter->msix_info[i].vec = entries[i].vector;
+
+       return 0;
 }
 
 static const struct net_device_ops cxgb4vf_netdev_ops  = {
@@ -2947,6 +2944,14 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
        CH_DEVICE(0x5811, 0),   /* T520-lp-cr */
        CH_DEVICE(0x5812, 0),   /* T560-cr */
        CH_DEVICE(0x5813, 0),   /* T580-cr */
+       CH_DEVICE(0x5814, 0),   /* T580-so-cr */
+       CH_DEVICE(0x5815, 0),   /* T502-bt */
+       CH_DEVICE(0x5880, 0),
+       CH_DEVICE(0x5881, 0),
+       CH_DEVICE(0x5882, 0),
+       CH_DEVICE(0x5883, 0),
+       CH_DEVICE(0x5884, 0),
+       CH_DEVICE(0x5885, 0),
        { 0, }
 };
 
index 0a89963c48ce78148d1abe6cd86151e850c24062..9cfa4b4bb089d398a1b687a71d32f0856d15fa47 100644 (file)
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
                if (sdesc->skb) {
                        if (need_unmap)
                                unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
-                       kfree_skb(sdesc->skb);
+                       dev_consume_skb_any(sdesc->skb);
                        sdesc->skb = NULL;
                }
 
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 * need it any longer.
                 */
                inline_tx_skb(skb, &txq->q, cpl + 1);
-               dev_kfree_skb(skb);
+               dev_consume_skb_any(skb);
        } else {
                /*
                 * Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
         * An error of some sort happened.  Free the TX skb and tell the
         * OS that we've "dealt" with the packet ...
         */
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
index 19f642a45f40859874a94663cd3fc503f58014ed..fe84fbabc0d4d95761d35c1f55a5c04d60f63221 100644 (file)
@@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
        writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
        spin_unlock_irqrestore(&lp->lock, flags);
        dev->stats.tx_bytes += skb->len;
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        /* We DO NOT call netif_wake_queue() here.
         * We also DO NOT call netif_start_queue().
index b740bfce72ef371a82077fe4b3c7f498c9430423..2945718ce8068e4355628852ed200d539c9c2273 100644 (file)
@@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        unsigned int txq_map;
 
        if (skb->len <= 0) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
        if (skb_shinfo(skb)->gso_size == 0 &&
            skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
            skb_linearize(skb)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -1086,14 +1086,15 @@ static int enic_poll(struct napi_struct *napi, int budget)
        unsigned int intr = enic_legacy_io_intr();
        unsigned int rq_work_to_do = budget;
        unsigned int wq_work_to_do = -1; /* no limit */
-       unsigned int  work_done, rq_work_done, wq_work_done;
+       unsigned int  work_done, rq_work_done = 0, wq_work_done;
        int err;
 
        /* Service RQ (first) and WQ
         */
 
-       rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
-               rq_work_to_do, enic_rq_service, NULL);
+       if (budget > 0)
+               rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+                       rq_work_to_do, enic_rq_service, NULL);
 
        wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
                wq_work_to_do, enic_wq_service, NULL);
@@ -1141,14 +1142,15 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
        unsigned int cq = enic_cq_rq(enic, rq);
        unsigned int intr = enic_msix_rq_intr(enic, rq);
        unsigned int work_to_do = budget;
-       unsigned int work_done;
+       unsigned int work_done = 0;
        int err;
 
        /* Service RQ
         */
 
-       work_done = vnic_cq_service(&enic->cq[cq],
-               work_to_do, enic_rq_service, NULL);
+       if (budget > 0)
+               work_done = vnic_cq_service(&enic->cq[cq],
+                       work_to_do, enic_rq_service, NULL);
 
        /* Return intr event credits for this polling
         * cycle.  An intr event is the completion of a
@@ -1796,7 +1798,8 @@ static int enic_set_intr_mode(struct enic *enic)
            enic->cq_count >= n + m &&
            enic->intr_count >= n + m + 2) {
 
-               if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+               if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+                                         n + m + 2, n + m + 2) > 0) {
 
                        enic->rq_count = n;
                        enic->wq_count = m;
@@ -1815,7 +1818,8 @@ static int enic_set_intr_mode(struct enic *enic)
            enic->wq_count >= m &&
            enic->cq_count >= 1 + m &&
            enic->intr_count >= 1 + m + 2) {
-               if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+               if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+                                         1 + m + 2, 1 + m + 2) > 0) {
 
                        enic->rq_count = 1;
                        enic->wq_count = m;
index a1a2b4028a5c6675cbf84e35c1cbca9d8f2663f0..8c4b93be333bc704a736fa8333977f7b76800aad 100644 (file)
@@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
        spin_unlock_irqrestore(&db->lock, flags);
 
        /* free this SKB */
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
index 5ad9e3e3c0b8a5095564ae4c4369c3ac531c9591..53f0c618045c937b7edfbcbdb35083914c92bdd9 100644 (file)
@@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
        /* Too large packet check */
        if (skb->len > MAX_PACKET_SIZE) {
                pr_err("big packet = %d\n", (u16)skb->len);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
        dw32(DCR7, db->cr7_data);
 
        /* free this SKB */
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
index aa4ee385091faa850c0ddd9e6f89146be4c401cf..aa801a6af7b9904752c7f3acebc2d01940527128 100644 (file)
@@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
        /* Too large packet check */
        if (skb->len > MAX_PACKET_SIZE) {
                netdev_err(dev, "big packet = %d\n", (u16)skb->len);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
        uw32(DCR7, db->cr7_data);
 
        /* free this SKB */
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
index 113cd799a131f925fcbd3b5946d4f185249d376f..d9e5ca0d48c125c88e55b319975fcab5e0a4ad99 100644 (file)
@@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop_frame:
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        np->tx_skbuff[entry] = NULL;
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
index 8a79a32a5674127e58992bac133fadce0d049ef1..e9b0faba3078aa5757f80be9d3355bc21efdd1db 100644 (file)
@@ -170,11 +170,6 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        return 0;
 }
 
-static int dnet_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static void dnet_handle_link_change(struct net_device *dev)
 {
        struct dnet *bp = netdev_priv(dev);
@@ -322,7 +317,6 @@ static int dnet_mii_init(struct dnet *bp)
        bp->mii_bus->name = "dnet_mii_bus";
        bp->mii_bus->read = &dnet_mdio_read;
        bp->mii_bus->write = &dnet_mdio_write;
-       bp->mii_bus->reset = &dnet_mdio_reset;
 
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                bp->pdev->name, bp->pdev->id);
index 05529e273050e3718c0ca1e105ce05174fdfe618..8ccaa2520dc3e9f154ae10b5e866e99173531274 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.0.600.0u"
+#define DRV_VER                        "10.2u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
 #define BE_MIN_MTU             256
 
 #define BE_NUM_VLANS_SUPPORTED 64
-#define BE_UMC_NUM_VLANS_SUPPORTED     15
 #define BE_MAX_EQD             128u
 #define        BE_MAX_TX_FRAG_COUNT    30
 
@@ -262,9 +261,10 @@ struct be_tx_obj {
 /* Struct to remember the pages posted for rx frags */
 struct be_rx_page_info {
        struct page *page;
+       /* set to page-addr for last frag of the page & frag-addr otherwise */
        DEFINE_DMA_UNMAP_ADDR(bus);
        u16 page_offset;
-       bool last_page_user;
+       bool last_frag;         /* last frag of the page */
 };
 
 struct be_rx_stats {
@@ -293,9 +293,10 @@ struct be_rx_compl_info {
        u8 ip_csum;
        u8 l4_csum;
        u8 ipv6;
-       u8 vtm;
+       u8 qnq;
        u8 pkt_type;
        u8 ip_frag;
+       u8 tunneled;
 };
 
 struct be_rx_obj {
@@ -359,6 +360,7 @@ struct be_vf_cfg {
        int pmac_id;
        u16 vlan_tag;
        u32 tx_rate;
+       u32 plink_tracking;
 };
 
 enum vf_state {
@@ -370,10 +372,11 @@ enum vf_state {
 #define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
 #define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
 #define BE_FLAGS_NAPI_ENABLED                  (1 << 9)
-#define BE_UC_PMAC_COUNT               30
-#define BE_VF_UC_PMAC_COUNT            2
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD            (1 << 11)
+#define BE_FLAGS_VXLAN_OFFLOADS                        (1 << 12)
 
+#define BE_UC_PMAC_COUNT                       30
+#define BE_VF_UC_PMAC_COUNT                    2
 /* Ethtool set_dump flags */
 #define LANCER_INITIATE_FW_DUMP                        0x1
 
@@ -467,6 +470,7 @@ struct be_adapter {
 
        u32 port_num;
        bool promiscuous;
+       u8 mc_type;
        u32 function_mode;
        u32 function_caps;
        u32 rx_fc;              /* Rx flow control */
@@ -492,6 +496,7 @@ struct be_adapter {
        u32 sli_family;
        u8 hba_port_num;
        u16 pvid;
+       __be16 vxlan_port;
        struct phy_info phy;
        u8 wol_cap;
        bool wol_en;
@@ -536,6 +541,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
        return min_t(u16, num, num_online_cpus());
 }
 
+/* Is BE in pvid_tagging mode */
+#define be_pvid_tagging_enabled(adapter)       (adapter->pvid)
+
+/* Is BE in QNQ multi-channel mode */
+#define be_is_qnq_mode(adapter)                (adapter->mc_type == FLEX10 ||  \
+                                        adapter->mc_type == vNIC1 ||   \
+                                        adapter->mc_type == UFP)
+
 #define lancer_chip(adapter)   (adapter->pdev->device == OC_DEVICE_ID3 || \
                                 adapter->pdev->device == OC_DEVICE_ID4)
 
index 48076a6370c3546df9fadef933337d9b77ffca6c..d1ec15af0d2482f46c425bb7d2f6c99976c16329 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -202,8 +202,12 @@ static void be_async_link_state_process(struct be_adapter *adapter,
        /* When link status changes, link speed must be re-queried from FW */
        adapter->phy.link_speed = -1;
 
-       /* Ignore physical link event */
-       if (lancer_chip(adapter) &&
+       /* On BEx the FW does not send a separate link status
+        * notification for physical and logical link.
+        * On other chips just process the logical link
+        * status notification
+        */
+       if (!BEx_chip(adapter) &&
            !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
                return;
 
@@ -211,7 +215,8 @@ static void be_async_link_state_process(struct be_adapter *adapter,
         * it may not be received in some cases.
         */
        if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
-               be_link_status_update(adapter, evt->port_link_status);
+               be_link_status_update(adapter,
+                                     evt->port_link_status & LINK_STATUS_MASK);
 }
 
 /* Grp5 CoS Priority evt */
@@ -239,10 +244,12 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
                struct be_async_event_grp5_pvid_state *evt)
 {
-       if (evt->enabled)
+       if (evt->enabled) {
                adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
-       else
+               dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
+       } else {
                adapter->pvid = 0;
+       }
 }
 
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
@@ -3296,6 +3303,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
        return NULL;
 }
 
+static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
+{
+       struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+       int i;
+
+       for (i = 0; i < desc_count; i++) {
+               if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
+                       return (struct be_port_res_desc *)hdr;
+
+               hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+               hdr = (void *)hdr + hdr->desc_len;
+       }
+       return NULL;
+}
+
 static void be_copy_nic_desc(struct be_resources *res,
                             struct be_nic_res_desc *desc)
 {
@@ -3439,6 +3461,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 {
        struct be_cmd_resp_get_profile_config *resp;
        struct be_pcie_res_desc *pcie;
+       struct be_port_res_desc *port;
        struct be_nic_res_desc *nic;
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_dma_mem cmd;
@@ -3466,6 +3489,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (pcie)
                res->max_vfs = le16_to_cpu(pcie->num_vfs);
 
+       port = be_get_port_desc(resp->func_param, desc_count);
+       if (port)
+               adapter->mc_type = port->mc_type;
+
        nic = be_get_nic_desc(resp->func_param, desc_count);
        if (nic)
                be_copy_nic_desc(res, nic);
@@ -3476,14 +3503,11 @@ err:
        return status;
 }
 
-/* Currently only Lancer uses this command and it supports version 0 only
- * Uses sync mcc
- */
-int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
-                             u8 domain)
+int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+                             int size, u8 version, u8 domain)
 {
-       struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_profile_config *req;
+       struct be_mcc_wrb *wrb;
        int status;
 
        spin_lock_bh(&adapter->mcc_lock);
@@ -3495,44 +3519,116 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
        }
 
        req = embedded_payload(wrb);
-
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
                               wrb, NULL);
+       req->hdr.version = version;
        req->hdr.domain = domain;
        req->desc_count = cpu_to_le32(1);
-       req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
-       req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
-       req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
-       req->nic_desc.pf_num = adapter->pf_number;
-       req->nic_desc.vf_num = domain;
-
-       /* Mark fields invalid */
-       req->nic_desc.unicast_mac_count = 0xFFFF;
-       req->nic_desc.mcc_count = 0xFFFF;
-       req->nic_desc.vlan_count = 0xFFFF;
-       req->nic_desc.mcast_mac_count = 0xFFFF;
-       req->nic_desc.txq_count = 0xFFFF;
-       req->nic_desc.rq_count = 0xFFFF;
-       req->nic_desc.rssq_count = 0xFFFF;
-       req->nic_desc.lro_count = 0xFFFF;
-       req->nic_desc.cq_count = 0xFFFF;
-       req->nic_desc.toe_conn_count = 0xFFFF;
-       req->nic_desc.eq_count = 0xFFFF;
-       req->nic_desc.link_param = 0xFF;
-       req->nic_desc.bw_min = 0xFFFFFFFF;
-       req->nic_desc.acpi_params = 0xFF;
-       req->nic_desc.wol_param = 0x0F;
-
-       /* Change BW */
-       req->nic_desc.bw_min = cpu_to_le32(bps);
-       req->nic_desc.bw_max = cpu_to_le32(bps);
+       memcpy(req->desc, desc, size);
+
        status = be_mcc_notify_wait(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
 
+/* Mark all fields invalid */
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
+{
+       memset(nic, 0, sizeof(*nic));
+       nic->unicast_mac_count = 0xFFFF;
+       nic->mcc_count = 0xFFFF;
+       nic->vlan_count = 0xFFFF;
+       nic->mcast_mac_count = 0xFFFF;
+       nic->txq_count = 0xFFFF;
+       nic->rq_count = 0xFFFF;
+       nic->rssq_count = 0xFFFF;
+       nic->lro_count = 0xFFFF;
+       nic->cq_count = 0xFFFF;
+       nic->toe_conn_count = 0xFFFF;
+       nic->eq_count = 0xFFFF;
+       nic->link_param = 0xFF;
+       nic->acpi_params = 0xFF;
+       nic->wol_param = 0x0F;
+       nic->bw_min = 0xFFFFFFFF;
+       nic->bw_max = 0xFFFFFFFF;
+}
+
+int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+{
+       if (lancer_chip(adapter)) {
+               struct be_nic_res_desc nic_desc;
+
+               be_reset_nic_desc(&nic_desc);
+               nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+               nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
+               nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
+                                       (1 << NOSV_SHIFT);
+               nic_desc.pf_num = adapter->pf_number;
+               nic_desc.vf_num = domain;
+               nic_desc.bw_max = cpu_to_le32(bps);
+
+               return be_cmd_set_profile_config(adapter, &nic_desc,
+                                                RESOURCE_DESC_SIZE_V0,
+                                                0, domain);
+       } else {
+               return be_cmd_set_qos(adapter, bps, domain);
+       }
+}
+
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_manage_iface_filters *req;
+       int status;
+
+       if (iface == 0xFFFFFFFF)
+               return -1;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
+                              wrb, NULL);
+       req->op = op;
+       req->target_iface_id = cpu_to_le32(iface);
+
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
+{
+       struct be_port_res_desc port_desc;
+
+       memset(&port_desc, 0, sizeof(port_desc));
+       port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
+       port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       port_desc.link_num = adapter->hba_port_num;
+       if (port) {
+               port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
+                                       (1 << RCVID_SHIFT);
+               port_desc.nv_port = swab16(port);
+       } else {
+               port_desc.nv_flags = NV_TYPE_DISABLED;
+               port_desc.nv_port = 0;
+       }
+
+       return be_cmd_set_profile_config(adapter, &port_desc,
+                                        RESOURCE_DESC_SIZE_V1, 1, 0);
+}
+
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num)
 {
@@ -3723,6 +3819,45 @@ err:
        return status;
 }
 
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+                                  int link_state, u8 domain)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_ll_link *req;
+       int status;
+
+       if (BEx_chip(adapter) || lancer_chip(adapter))
+               return 0;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
+                              sizeof(*req), wrb, NULL);
+
+       req->hdr.version = 1;
+       req->hdr.domain = domain;
+
+       if (link_state == IFLA_VF_LINK_STATE_ENABLE)
+               req->link_config |= 1;
+
+       if (link_state == IFLA_VF_LINK_STATE_AUTO)
+               req->link_config |= 1 << PLINK_TRACK_SHIFT;
+
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
                        int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
 {
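be_cmd_set_profile_config() now takes an opaque (descriptor, size, version) triple instead of a bandwidth value, so callers such as be_cmd_config_qos() and be_cmd_set_vxlan_port() build their own NIC or port resource descriptor and hand it down. A rough user-space sketch of that calling convention, with deliberately simplified descriptor layouts: only the 0x55 type code (PORT_RESOURCE_DESC_TYPE_V1) and the conventional VXLAN UDP port 4789 are real values, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct desc_hdr {
	uint8_t desc_type;
	uint8_t desc_len;
};

struct port_desc {
	struct desc_hdr hdr;
	uint8_t flags;
	uint16_t nv_port;	/* tunnel UDP port */
};

/* Mirror of the new convention: the caller builds any typed descriptor and
 * the command routine just copies it into the request payload. */
static int set_profile_config(const void *desc, int size, int version)
{
	uint8_t wrb_payload[64] = { 0 };

	if (size <= 0 || size > (int)sizeof(wrb_payload))
		return -1;
	memcpy(wrb_payload, desc, size);
	printf("issue cmd: version=%d desc_type=0x%02x len=%d\n",
	       version, (unsigned int)wrb_payload[0], size);
	return 0;
}

int main(void)
{
	struct port_desc pd = {
		.hdr = { .desc_type = 0x55, .desc_len = sizeof(pd) },
		.nv_port = 4789,
	};

	return set_profile_config(&pd, (int)sizeof(pd), 1);
}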
index fc4e076dc202624721c7d505a482ca64d6c984d4..b60e4d53c1c9a9f29be9d1c2c07099dab28b378a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_BEACON_STATE                 70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA              73
 #define OPCODE_COMMON_GET_PORT_NAME                    77
+#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG          80
 #define OPCODE_COMMON_SET_INTERRUPT_ENABLE             89
 #define OPCODE_COMMON_SET_FN_PRIVILEGES                        100
 #define OPCODE_COMMON_GET_PHY_DETAILS                  102
@@ -221,6 +222,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_FN_PRIVILEGES                        170
 #define OPCODE_COMMON_READ_OBJECT                      171
 #define OPCODE_COMMON_WRITE_OBJECT                     172
+#define OPCODE_COMMON_MANAGE_IFACE_FILTERS             193
 #define OPCODE_COMMON_GET_IFACE_LIST                   194
 #define OPCODE_COMMON_ENABLE_DISABLE_VF                        196
 
@@ -1098,14 +1100,6 @@ struct be_cmd_resp_query_fw_cfg {
        u32 function_caps;
 };
 
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter)
-{
-       return adapter->function_mode & FLEX10_MODE ||
-               adapter->function_mode & VNIC_MODE ||
-               adapter->function_mode & UMC_ENABLED;
-}
-
 /******************** RSS Config ****************************************/
 /* RSS type            Input parameters used to compute RX hash
  * RSS_ENABLE_IPV4     SRC IPv4, DST IPv4
@@ -1828,20 +1822,36 @@ struct be_cmd_req_set_ext_fat_caps {
 #define NIC_RESOURCE_DESC_TYPE_V0              0x41
 #define PCIE_RESOURCE_DESC_TYPE_V1             0x50
 #define NIC_RESOURCE_DESC_TYPE_V1              0x51
+#define PORT_RESOURCE_DESC_TYPE_V1             0x55
 #define MAX_RESOURCE_DESC                      264
 
-/* QOS unit number */
-#define QUN                                    4
-/* Immediate */
-#define IMM                                    6
-/* No save */
-#define NOSV                                   7
+#define IMM_SHIFT                              6       /* Immediate */
+#define NOSV_SHIFT                             7       /* No save */
 
 struct be_res_desc_hdr {
        u8 desc_type;
        u8 desc_len;
 } __packed;
 
+struct be_port_res_desc {
+       struct be_res_desc_hdr hdr;
+       u8 rsvd0;
+       u8 flags;
+       u8 link_num;
+       u8 mc_type;
+       u16 rsvd1;
+
+#define NV_TYPE_MASK                           0x3     /* bits 0-1 */
+#define NV_TYPE_DISABLED                       1
+#define NV_TYPE_VXLAN                          3
+#define SOCVID_SHIFT                           2       /* Strip outer vlan */
+#define RCVID_SHIFT                            4       /* Report vlan */
+       u8 nv_flags;
+       u8 rsvd2;
+       __le16 nv_port;                                 /* vxlan/gre port */
+       u32 rsvd3[19];
+} __packed;
+
 struct be_pcie_res_desc {
        struct be_res_desc_hdr hdr;
        u8 rsvd0;
@@ -1862,6 +1872,8 @@ struct be_pcie_res_desc {
 struct be_nic_res_desc {
        struct be_res_desc_hdr hdr;
        u8 rsvd1;
+
+#define QUN_SHIFT                              4 /* QoS is in absolute units */
        u8 flags;
        u8 vf_num;
        u8 rsvd2;
@@ -1891,6 +1903,23 @@ struct be_nic_res_desc {
        u32 rsvd8[7];
 } __packed;
 
+/************ Multi-Channel type ***********/
+enum mc_type {
+       MC_NONE = 0x01,
+       UMC = 0x02,
+       FLEX10 = 0x03,
+       vNIC1 = 0x04,
+       nPAR = 0x05,
+       UFP = 0x06,
+       vNIC2 = 0x07
+};
+
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+       return adapter->mc_type > MC_NONE;
+}
+
 struct be_cmd_req_get_func_config {
        struct be_cmd_req_hdr hdr;
 };
@@ -1919,7 +1948,7 @@ struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
        u32 desc_count;
-       struct be_nic_res_desc nic_desc;
+       u8 desc[RESOURCE_DESC_SIZE_V1];
 };
 
 struct be_cmd_resp_set_profile_config {
@@ -1971,6 +2000,33 @@ struct be_cmd_resp_get_iface_list {
        struct be_if_desc if_desc;
 };
 
+/*************** Set logical link ********************/
+#define PLINK_TRACK_SHIFT      8
+struct be_cmd_req_set_ll_link {
+       struct be_cmd_req_hdr hdr;
+       u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
+};
+
+/************** Manage IFACE Filters *******************/
+#define OP_CONVERT_NORMAL_TO_TUNNEL            0
+#define OP_CONVERT_TUNNEL_TO_NORMAL            1
+
+struct be_cmd_req_manage_iface_filters {
+       struct be_cmd_req_hdr hdr;
+       u8  op;
+       u8  rsvd0;
+       u8  flags;
+       u8  rsvd1;
+       u32 tunnel_iface_id;
+       u32 target_iface_id;
+       u8  mac[6];
+       u16 vlan_tag;
+       u32 tenant_id;
+       u32 filter_id;
+       u32 cap_flags;
+       u32 cap_control_flags;
+} __packed;
+
 int be_pci_fnum_get(struct be_adapter *adapter);
 int be_fw_wait_ready(struct be_adapter *adapter);
 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2045,7 +2101,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
                        u8 loopback_type, u8 enable);
 int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
 void be_detect_error(struct be_adapter *adapter);
 int be_cmd_get_die_temperature(struct be_adapter *adapter);
 int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
@@ -2086,9 +2142,14 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain);
-int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+                             int size, u8 version, u8 domain);
 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num);
 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_logical_link_config(struct be_adapter *adapter,
+                                         int link_state, u8 domain);
+int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
+int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
index 05be0070f55fd453ed1cb35427c44d921ba08443..15ba96cba65df1ba051cc3b2e49b72640d92c217 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev,
                struct be_rx_stats *stats = rx_stats(rxo);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->sync);
+                       start = u64_stats_fetch_begin_irq(&stats->sync);
                        data[base] = stats->rx_bytes;
                        data[base + 1] = stats->rx_pkts;
-               } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+               } while (u64_stats_fetch_retry_irq(&stats->sync, start));
 
                for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
                        p = (u8 *)stats + et_rx_stats[i].offset;
@@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev,
                struct be_tx_stats *stats = tx_stats(txo);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->sync_compl);
+                       start = u64_stats_fetch_begin_irq(&stats->sync_compl);
                        data[base] = stats->tx_compl;
-               } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
+               } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->sync);
+                       start = u64_stats_fetch_begin_irq(&stats->sync);
                        for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
                                p = (u8 *)stats + et_tx_stats[i].offset;
                                data[base + i] =
                                        (et_tx_stats[i].size == sizeof(u64)) ?
                                                *(u64 *)p : *(u32 *)p;
                        }
-               } while (u64_stats_fetch_retry_bh(&stats->sync, start));
+               } while (u64_stats_fetch_retry_irq(&stats->sync, start));
                base += ETHTOOL_TXSTATS_NUM;
        }
 }
@@ -802,16 +802,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 
        if (test->flags & ETH_TEST_FL_OFFLINE) {
                if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
-                                               &data[0]) != 0) {
+                                    &data[0]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
-               }
+
                if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
-                                               &data[1]) != 0) {
-                       test->flags |= ETH_TEST_FL_FAILED;
-               }
-               if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
-                                               &data[2]) != 0) {
+                                    &data[1]) != 0)
                        test->flags |= ETH_TEST_FL_FAILED;
+
+               if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+                       if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+                                            &data[2]) != 0)
+                               test->flags |= ETH_TEST_FL_FAILED;
+                       test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
                }
        }
 
index dc88782185f26f000e4e58cd793f90f6064bbfb6..3bd198550edbb95a64602e28535e8eb56758a220 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
        u8 numfrags[3];         /* dword 1 */
        u8 rss_flush;           /* dword 2 */
        u8 cast_enc[2];         /* dword 2 */
-       u8 vtm;                 /* dword 2 */
+       u8 qnq;                 /* dword 2 */
        u8 rss_bank;            /* dword 2 */
        u8 rsvd1[23];           /* dword 2 */
        u8 lro_pkt;             /* dword 2 */
@@ -401,13 +401,14 @@ struct amap_eth_rx_compl_v1 {
        u8 numfrags[3];         /* dword 1 */
        u8 rss_flush;           /* dword 2 */
        u8 cast_enc[2];         /* dword 2 */
-       u8 vtm;                 /* dword 2 */
+       u8 qnq;                 /* dword 2 */
        u8 rss_bank;            /* dword 2 */
        u8 port[2];             /* dword 2 */
        u8 vntagp;              /* dword 2 */
        u8 header_len[8];       /* dword 2 */
        u8 header_split[2];     /* dword 2 */
-       u8 rsvd1[13];           /* dword 2 */
+       u8 rsvd1[12];           /* dword 2 */
+       u8 tunneled;
        u8 valid;               /* dword 2 */
        u8 rsshash[32];         /* dword 3 */
 } __packed;
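In the v1 RX completion layout the new one-bit tunneled flag is carved out of rsvd1, which shrinks from 13 to 12 bits, so dword 2 still adds up to 32 bits. A small check of that bookkeeping, using only the field widths visible in the hunk above:

#include <stdio.h>

int main(void)
{
	/* Bit widths of the dword 2 fields of amap_eth_rx_compl_v1 after
	 * the change; rsvd1 dropped a bit to make room for 'tunneled'. */
	const int widths[] = {
		1,	/* rss_flush */
		2,	/* cast_enc */
		1,	/* qnq (was vtm) */
		1,	/* rss_bank */
		2,	/* port */
		1,	/* vntagp */
		8,	/* header_len */
		2,	/* header_split */
		12,	/* rsvd1 */
		1,	/* tunneled */
		1,	/* valid */
	};
	int i, sum = 0;

	for (i = 0; i < (int)(sizeof(widths) / sizeof(widths[0])); i++)
		sum += widths[i];

	printf("dword 2 bits: %d\n", sum);	/* expect 32 */
	return sum == 32 ? 0 : 1;
}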
index 36c80612e21a3ebe6ee52447e9075dc4022f005e..c89dc85ad8d60439f02039f96f7d756591415bf6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -23,6 +23,7 @@
 #include <linux/aer.h>
 #include <linux/if_bridge.h>
 #include <net/busy_poll.h>
+#include <net/vxlan.h>
 
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -591,10 +592,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
-                       start = u64_stats_fetch_begin_bh(&rx_stats->sync);
+                       start = u64_stats_fetch_begin_irq(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
-               } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
+               } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -605,10 +606,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
-                       start = u64_stats_fetch_begin_bh(&tx_stats->sync);
+                       start = u64_stats_fetch_begin_irq(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
+               } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }
@@ -652,7 +653,7 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }
 
-       if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+       if (link_status)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
@@ -718,10 +719,23 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
        return vlan_tag;
 }
 
+/* Used only for IP tunnel packets */
+static u16 skb_inner_ip_proto(struct sk_buff *skb)
+{
+       return (inner_ip_hdr(skb)->version == 4) ?
+               inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
+}
+
+static u16 skb_ip_proto(struct sk_buff *skb)
+{
+       return (ip_hdr(skb)->version == 4) ?
+               ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
 {
-       u16 vlan_tag;
+       u16 vlan_tag, proto;
 
        memset(hdr, 0, sizeof(*hdr));
 
@@ -734,9 +748,15 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               if (is_tcp_pkt(skb))
+               if (skb->encapsulation) {
+                       AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+                       proto = skb_inner_ip_proto(skb);
+               } else {
+                       proto = skb_ip_proto(skb);
+               }
+               if (proto == IPPROTO_TCP)
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
-               else if (is_udp_pkt(skb))
+               else if (proto == IPPROTO_UDP)
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }
 
@@ -935,9 +955,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
        }
 
        /* If vlan tag is already inlined in the packet, skip HW VLAN
-        * tagging in UMC mode
+        * tagging in pvid-tagging mode
         */
-       if ((adapter->function_mode & UMC_ENABLED) &&
+       if (be_pvid_tagging_enabled(adapter) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                        *skip_hw_vlan = true;
 
@@ -1138,7 +1158,10 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
-               goto ret;
+               return status;
+
+       if (adapter->vlan_tag[vid])
+               return status;
 
        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;
@@ -1148,7 +1171,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
                adapter->vlans_added--;
                adapter->vlan_tag[vid] = 0;
        }
-ret:
+
        return status;
 }
 
@@ -1288,6 +1311,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
+       vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
 
        return 0;
 }
@@ -1342,11 +1366,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
                return -EINVAL;
        }
 
-       if (lancer_chip(adapter))
-               status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
-       else
-               status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
-
+       status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
@@ -1354,6 +1374,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
 }
+static int be_set_vf_link_state(struct net_device *netdev, int vf,
+                               int link_state)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       int status;
+
+       if (!sriov_enabled(adapter))
+               return -EPERM;
+
+       if (vf >= adapter->num_vfs)
+               return -EINVAL;
+
+       status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
+       if (!status)
+               adapter->vf_cfg[vf].plink_tracking = link_state;
+
+       return status;
+}
 
 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
@@ -1386,15 +1424,15 @@ static void be_eqd_update(struct be_adapter *adapter)
 
                rxo = &adapter->rx_obj[eqo->idx];
                do {
-                       start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+                       start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
                        rx_pkts = rxo->stats.rx_pkts;
-               } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
+               } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
 
                txo = &adapter->tx_obj[eqo->idx];
                do {
-                       start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+                       start = u64_stats_fetch_begin_irq(&txo->stats.sync);
                        tx_pkts = txo->stats.tx_reqs;
-               } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
+               } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
 
 
                /* Skip, if wrapped around or first calculation */
@@ -1449,9 +1487,10 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
 {
        /* L4 checksum is not reliable for non TCP/UDP packets.
-        * Also ignore ipcksm for ipv6 pkts */
+        * Also ignore ipcksm for ipv6 pkts
+        */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
-                               (rxcp->ip_csum || rxcp->ipv6);
+               (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
 }
 
 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
@@ -1464,11 +1503,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);
 
-       if (rx_page_info->last_page_user) {
+       if (rx_page_info->last_frag) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
-               rx_page_info->last_page_user = false;
+               rx_page_info->last_frag = false;
+       } else {
+               dma_sync_single_for_cpu(&adapter->pdev->dev,
+                                       dma_unmap_addr(rx_page_info, bus),
+                                       rx_frag_size, DMA_FROM_DEVICE);
        }
 
        queue_tail_inc(rxq);
@@ -1590,6 +1633,8 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+       skb->encapsulation = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);
 
        if (rxcp->vlanf)
@@ -1646,6 +1691,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
+
+       skb->encapsulation = rxcp->tunneled;
        skb_mark_napi_id(skb, napi);
 
        if (rxcp->vlanf)
@@ -1676,12 +1723,14 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
-               rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+               rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
+       rxcp->tunneled =
+               AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
 }
 
 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
@@ -1706,7 +1755,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
-               rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+               rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
@@ -1739,9 +1788,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
                rxcp->l4_csum = 0;
 
        if (rxcp->vlanf) {
-               /* vlanf could be wrongly set in some cards.
-                * ignore if vtm is not set */
-               if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
+               /* In QNQ modes, if qnq bit is not set, then the packet was
+                * tagged only with the transparent outer vlan-tag and must
+                * not be treated as a vlan packet by host
+                */
+               if (be_is_qnq_mode(adapter) && !rxcp->qnq)
                        rxcp->vlanf = 0;
 
                if (!lancer_chip(adapter))
@@ -1800,17 +1851,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
-                       page_info->page_offset = 0;
+                       page_offset = 0;
                } else {
                        get_page(pagep);
-                       page_info->page_offset = page_offset + rx_frag_size;
+                       page_offset += rx_frag_size;
                }
-               page_offset = page_info->page_offset;
+               page_info->page_offset = page_offset;
                page_info->page = pagep;
-               dma_unmap_addr_set(page_info, bus, page_dmaaddr);
-               frag_dmaaddr = page_dmaaddr + page_info->page_offset;
 
                rxd = queue_head_node(rxq);
+               frag_dmaaddr = page_dmaaddr + page_info->page_offset;
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
 
@@ -1818,15 +1868,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
-                       page_info->last_page_user = true;
+                       page_info->last_frag = true;
+                       dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+               } else {
+                       dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
                }
 
                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &rxo->page_info_tbl[rxq->head];
        }
-       if (pagep)
-               prev_page_info->last_page_user = true;
+
+       /* Mark the last frag of a page when we break out of the above loop
+        * with no more slots available in the RXQ
+        */
+       if (pagep) {
+               prev_page_info->last_frag = true;
+               dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+       }
 
        if (posted) {
                atomic_add(posted, &rxq->used);
@@ -1883,7 +1942,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
                queue_tail_inc(txq);
        } while (cur_index != last_index);
 
-       kfree_skb(sent_skb);
+       dev_kfree_skb_any(sent_skb);
        return num_wrbs;
 }
 
@@ -2439,6 +2498,9 @@ void be_detect_error(struct be_adapter *adapter)
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
+       bool error_detected = false;
+       struct device *dev = &adapter->pdev->dev;
+       struct net_device *netdev = adapter->netdev;
 
        if (be_hw_error(adapter))
                return;
@@ -2450,6 +2512,21 @@ void be_detect_error(struct be_adapter *adapter)
                                        SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
                                        SLIPORT_ERROR2_OFFSET);
+                       adapter->hw_error = true;
+                       /* Do not log error messages if it's a FW reset */
+                       if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+                           sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+                               dev_info(dev, "Firmware update in progress\n");
+                       } else {
+                               error_detected = true;
+                               dev_err(dev, "Error detected in the card\n");
+                               dev_err(dev, "ERR: sliport status 0x%x\n",
+                                       sliport_status);
+                               dev_err(dev, "ERR: sliport error1 0x%x\n",
+                                       sliport_err1);
+                               dev_err(dev, "ERR: sliport error2 0x%x\n",
+                                       sliport_err2);
+                       }
                }
        } else {
                pci_read_config_dword(adapter->pdev,
@@ -2463,51 +2540,33 @@ void be_detect_error(struct be_adapter *adapter)
 
                ue_lo = (ue_lo & ~ue_lo_mask);
                ue_hi = (ue_hi & ~ue_hi_mask);
-       }
-
-       /* On certain platforms BE hardware can indicate spurious UEs.
-        * Allow the h/w to stop working completely in case of a real UE.
-        * Hence not setting the hw_error for UE detection.
-        */
-       if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-               adapter->hw_error = true;
-               /* Do not log error messages if its a FW reset */
-               if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
-                   sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
-                       dev_info(&adapter->pdev->dev,
-                                "Firmware update in progress\n");
-                       return;
-               } else {
-                       dev_err(&adapter->pdev->dev,
-                               "Error detected in the card\n");
-               }
-       }
-
-       if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
-               dev_err(&adapter->pdev->dev,
-                       "ERR: sliport status 0x%x\n", sliport_status);
-               dev_err(&adapter->pdev->dev,
-                       "ERR: sliport error1 0x%x\n", sliport_err1);
-               dev_err(&adapter->pdev->dev,
-                       "ERR: sliport error2 0x%x\n", sliport_err2);
-       }
 
-       if (ue_lo) {
-               for (i = 0; ue_lo; ue_lo >>= 1, i++) {
-                       if (ue_lo & 1)
-                               dev_err(&adapter->pdev->dev,
-                               "UE: %s bit set\n", ue_status_low_desc[i]);
-               }
-       }
+               /* On certain platforms BE hardware can indicate spurious UEs.
+                * Allow HW to stop working completely in case of a real UE.
+                * Hence not setting the hw_error for UE detection.
+                */
 
-       if (ue_hi) {
-               for (i = 0; ue_hi; ue_hi >>= 1, i++) {
-                       if (ue_hi & 1)
-                               dev_err(&adapter->pdev->dev,
-                               "UE: %s bit set\n", ue_status_hi_desc[i]);
+               if (ue_lo || ue_hi) {
+                       error_detected = true;
+                       dev_err(dev,
+                               "Unrecoverable Error detected in the adapter");
+                       dev_err(dev, "Please reboot server to recover");
+                       if (skyhawk_chip(adapter))
+                               adapter->hw_error = true;
+                       for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+                               if (ue_lo & 1)
+                                       dev_err(dev, "UE: %s bit set\n",
+                                               ue_status_low_desc[i]);
+                       }
+                       for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+                               if (ue_hi & 1)
+                                       dev_err(dev, "UE: %s bit set\n",
+                                               ue_status_hi_desc[i]);
+                       }
                }
        }
-
+       if (error_detected)
+               netif_carrier_off(netdev);
 }
 
 static void be_msix_disable(struct be_adapter *adapter)
@@ -2521,7 +2580,7 @@ static void be_msix_disable(struct be_adapter *adapter)
 
 static int be_msix_enable(struct be_adapter *adapter)
 {
-       int i, status, num_vec;
+       int i, num_vec;
        struct device *dev = &adapter->pdev->dev;
 
        /* If RoCE is supported, program the max number of NIC vectors that
@@ -2537,24 +2596,11 @@ static int be_msix_enable(struct be_adapter *adapter)
        for (i = 0; i < num_vec; i++)
                adapter->msix_entries[i].entry = i;
 
-       status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
-       if (status == 0) {
-               goto done;
-       } else if (status >= MIN_MSIX_VECTORS) {
-               num_vec = status;
-               status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                        num_vec);
-               if (!status)
-                       goto done;
-       }
-
-       dev_warn(dev, "MSIx enable failed\n");
+       num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                       MIN_MSIX_VECTORS, num_vec);
+       if (num_vec < 0)
+               goto fail;
 
-       /* INTx is not supported in VFs, so fail probe if enable_msix fails */
-       if (!be_physfn(adapter))
-               return status;
-       return 0;
-done:
        if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
                adapter->num_msix_roce_vec = num_vec / 2;
                dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2566,6 +2612,14 @@ done:
        dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
                 adapter->num_msix_vec);
        return 0;
+
+fail:
+       dev_warn(dev, "MSIx enable failed\n");
+
+       /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+       if (!be_physfn(adapter))
+               return num_vec;
+       return 0;
 }
 
 static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -2807,6 +2861,9 @@ static int be_open(struct net_device *netdev)
 
        netif_tx_start_all_queues(netdev);
        be_roce_dev_open(adapter);
+
+       if (skyhawk_chip(adapter))
+               vxlan_get_rx_port(netdev);
        return 0;
 err:
        be_close(adapter->netdev);
@@ -2962,6 +3019,19 @@ static void be_mac_clear(struct be_adapter *adapter)
        }
 }
 
+static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+{
+       if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
+               be_cmd_manage_iface(adapter, adapter->if_handle,
+                                   OP_CONVERT_TUNNEL_TO_NORMAL);
+
+       if (adapter->vxlan_port)
+               be_cmd_set_vxlan_port(adapter, 0);
+
+       adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
+       adapter->vxlan_port = 0;
+}
+
 static int be_clear(struct be_adapter *adapter)
 {
        be_cancel_worker(adapter);
@@ -2969,6 +3039,8 @@ static int be_clear(struct be_adapter *adapter)
        if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
+       be_disable_vxlan_offloads(adapter);
+
        /* delete the primary mac along with the uc-mac list */
        be_mac_clear(adapter);
 
@@ -3093,15 +3165,19 @@ static int be_vf_setup(struct be_adapter *adapter)
                 * Allow full available bandwidth
                 */
                if (BE3_chip(adapter) && !old_vfs)
-                       be_cmd_set_qos(adapter, 1000, vf+1);
+                       be_cmd_config_qos(adapter, 1000, vf + 1);
 
                status = be_cmd_link_status_query(adapter, &lnk_speed,
                                                  NULL, vf + 1);
                if (!status)
                        vf_cfg->tx_rate = lnk_speed;
 
-               if (!old_vfs)
+               if (!old_vfs) {
                        be_cmd_enable_vf(adapter, vf + 1);
+                       be_cmd_set_logical_link_config(adapter,
+                                                      IFLA_VF_LINK_STATE_AUTO,
+                                                      vf+1);
+               }
        }
 
        if (!old_vfs) {
@@ -3119,19 +3195,38 @@ err:
        return status;
 }
 
+/* Converting function_mode bits on BE3 to SH mc_type enums */
+
+static u8 be_convert_mc_type(u32 function_mode)
+{
+       if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+               return vNIC1;
+       else if (function_mode & FLEX10_MODE)
+               return FLEX10;
+       else if (function_mode & VNIC_MODE)
+               return vNIC2;
+       else if (function_mode & UMC_ENABLED)
+               return UMC;
+       else
+               return MC_NONE;
+}
+
 /* On BE2/BE3 FW does not suggest the supported limits */
 static void BEx_get_resources(struct be_adapter *adapter,
                              struct be_resources *res)
 {
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
-       int max_vfs;
-
-       max_vfs = pci_sriov_get_totalvfs(pdev);
-
-       if (BE3_chip(adapter) && sriov_want(adapter)) {
-               res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               use_sriov = res->max_vfs;
+       int max_vfs = 0;
+
+       if (be_physfn(adapter) && BE3_chip(adapter)) {
+               be_cmd_get_profile_config(adapter, res, 0);
+               /* Some old versions of BE3 FW don't report max_vfs value */
+               if (res->max_vfs == 0) {
+                       max_vfs = pci_sriov_get_totalvfs(pdev);
+                       res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+               }
+               use_sriov = res->max_vfs && sriov_want(adapter);
        }
 
        if (be_physfn(adapter))
@@ -3139,17 +3234,32 @@ static void BEx_get_resources(struct be_adapter *adapter,
        else
                res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
 
-       if (adapter->function_mode & FLEX10_MODE)
-               res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
-       else if (adapter->function_mode & UMC_ENABLED)
-               res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
-       else
+       adapter->mc_type = be_convert_mc_type(adapter->function_mode);
+
+       if (be_is_mc(adapter)) {
+               /* Assuming that there are 4 channels per port,
+                * when multi-channel is enabled
+                */
+               if (be_is_qnq_mode(adapter))
+                       res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+               else
+                       /* In a non-qnq multichannel mode, the pvid
+                        * takes up one vlan entry
+                        */
+                       res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
+       } else {
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+       }
+
        res->max_mcast_mac = BE_MAX_MC;
 
-       /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
-       if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
-           !be_physfn(adapter) || (adapter->port_num > 1))
+       /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
+        * 2) Create multiple TX rings on a BE3-R multi-channel interface
+        *    *only* if it is RSS-capable.
+        */
+       if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
+           !be_physfn(adapter) || (be_is_mc(adapter) &&
+           !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3161,7 +3271,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
        res->max_rx_qs = res->max_rss_qs + 1;
 
        if (be_physfn(adapter))
-               res->max_evt_qs = (max_vfs > 0) ?
+               res->max_evt_qs = (res->max_vfs > 0) ?
                                        BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
        else
                res->max_evt_qs = 1;
@@ -3252,9 +3362,8 @@ static int be_get_config(struct be_adapter *adapter)
        if (status)
                return status;
 
-       /* primary mac needs 1 pmac entry */
-       adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
-                                  GFP_KERNEL);
+       adapter->pmac_id = kcalloc(be_max_uc(adapter),
+                                  sizeof(*adapter->pmac_id), GFP_KERNEL);
        if (!adapter->pmac_id)
                return -ENOMEM;
 
@@ -3428,6 +3537,10 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
 
+       if (be_physfn(adapter))
+               be_cmd_set_logical_link_config(adapter,
+                                              IFLA_VF_LINK_STATE_AUTO, 0);
+
        if (sriov_want(adapter)) {
                if (be_max_vfs(adapter))
                        be_vf_setup(adapter);
@@ -4052,6 +4165,65 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
 }
 
+static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
+                             __be16 port)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       if (lancer_chip(adapter) || BEx_chip(adapter))
+               return;
+
+       if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
+               dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
+                        be16_to_cpu(port));
+               dev_info(dev,
+                        "Only one UDP port supported for VxLAN offloads\n");
+               return;
+       }
+
+       status = be_cmd_manage_iface(adapter, adapter->if_handle,
+                                    OP_CONVERT_NORMAL_TO_TUNNEL);
+       if (status) {
+               dev_warn(dev, "Failed to convert normal interface to tunnel\n");
+               goto err;
+       }
+
+       status = be_cmd_set_vxlan_port(adapter, port);
+       if (status) {
+               dev_warn(dev, "Failed to add VxLAN port\n");
+               goto err;
+       }
+       adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
+       adapter->vxlan_port = port;
+
+       dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
+                be16_to_cpu(port));
+       return;
+err:
+       be_disable_vxlan_offloads(adapter);
+       return;
+}
+
+static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
+                             __be16 port)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+
+       if (lancer_chip(adapter) || BEx_chip(adapter))
+               return;
+
+       if (adapter->vxlan_port != port)
+               return;
+
+       be_disable_vxlan_offloads(adapter);
+
+       dev_info(&adapter->pdev->dev,
+                "Disabled VxLAN offloads for UDP port %d\n",
+                be16_to_cpu(port));
+}
+
 static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
@@ -4067,20 +4239,29 @@ static const struct net_device_ops be_netdev_ops = {
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
+       .ndo_set_vf_link_state  = be_set_vf_link_state,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
 #endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = be_busy_poll
+       .ndo_busy_poll          = be_busy_poll,
 #endif
+       .ndo_add_vxlan_port     = be_add_vxlan_port,
+       .ndo_del_vxlan_port     = be_del_vxlan_port,
 };
 
 static void be_netdev_init(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
+       if (skyhawk_chip(adapter)) {
+               netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                          NETIF_F_TSO | NETIF_F_TSO6 |
+                                          NETIF_F_GSO_UDP_TUNNEL;
+               netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+       }
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
@@ -4427,14 +4608,32 @@ static bool be_reset_required(struct be_adapter *adapter)
 
 static char *mc_name(struct be_adapter *adapter)
 {
-       if (adapter->function_mode & FLEX10_MODE)
-               return "FLEX10";
-       else if (adapter->function_mode & VNIC_MODE)
-               return "vNIC";
-       else if (adapter->function_mode & UMC_ENABLED)
-               return "UMC";
-       else
-               return "";
+       char *str = ""; /* default */
+
+       switch (adapter->mc_type) {
+       case UMC:
+               str = "UMC";
+               break;
+       case FLEX10:
+               str = "FLEX10";
+               break;
+       case vNIC1:
+               str = "vNIC-1";
+               break;
+       case nPAR:
+               str = "nPAR";
+               break;
+       case UFP:
+               str = "UFP";
+               break;
+       case vNIC2:
+               str = "vNIC-2";
+               break;
+       default:
+               str = "";
+       }
+
+       return str;
 }
 
 static inline char *func_name(struct be_adapter *adapter)
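be_convert_mc_type() maps the legacy BE3 function_mode bits onto the Skyhawk-style mc_type enum so the rest of the driver (be_is_mc(), be_is_qnq_mode(), mc_name()) can key off a single field. A standalone sketch of that mapping; the *_MODE bit positions below are made up for illustration, and only the precedence of the checks mirrors the patch.

#include <stdio.h>

#define FLEX10_MODE	(1u << 0)	/* illustrative bit values */
#define VNIC_MODE	(1u << 1)
#define UMC_ENABLED	(1u << 2)

enum mc_type { MC_NONE = 0x01, UMC, FLEX10, vNIC1, nPAR, UFP, vNIC2 };

static enum mc_type convert_mc_type(unsigned int function_mode)
{
	if ((function_mode & VNIC_MODE) && (function_mode & FLEX10_MODE))
		return vNIC1;
	if (function_mode & FLEX10_MODE)
		return FLEX10;
	if (function_mode & VNIC_MODE)
		return vNIC2;
	if (function_mode & UMC_ENABLED)
		return UMC;
	return MC_NONE;
}

int main(void)
{
	/* FLEX10 + VNIC together is the vNIC1 (QnQ) case */
	printf("mc_type=%d\n", convert_mc_type(FLEX10_MODE | VNIC_MODE));
	return 0;
}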
index 9cd5415fe017dc359d2aae2e679eddc40e75f5e0..a5dae4a62bb37923c5d1df5f2aa5fdfa343b64e0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 2cd1129e19af960185a3f706d9ab5ed165e409a7..a3ef8f804b9e68f79a40691509725c817a1eb927 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 55e0fa03dc90d1323bacaf129f00e745c5a52c58..8b70ca7e342b6f16a8d6624293ee1b940bcff3d5 100644 (file)
@@ -660,11 +660,6 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
        return -EBUSY;
 }
 
-static int ethoc_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static void ethoc_mdio_poll(struct net_device *dev)
 {
 }
@@ -1210,7 +1205,6 @@ static int ethoc_probe(struct platform_device *pdev)
                        priv->mdio->name, pdev->id);
        priv->mdio->read = ethoc_mdio_read;
        priv->mdio->write = ethoc_mdio_write;
-       priv->mdio->reset = ethoc_mdio_reset;
        priv->mdio->priv = priv;
 
        priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
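The ethoc hunks above and the ftgmac100/fec/fs_enet hunks below all drop empty mii_bus .reset callbacks; the mdio core only invokes bus->reset when it is non-NULL, so a stub that just returns 0 adds nothing. A toy mirror of that pattern; the struct and register function here are stand-ins, not the kernel API.

#include <stddef.h>
#include <stdio.h>

struct mii_bus_stub {
	const char *name;
	int (*reset)(struct mii_bus_stub *bus);
};

/* Same shape as the check the mdio core makes at registration time */
static int register_bus(struct mii_bus_stub *bus)
{
	if (bus->reset)
		return bus->reset(bus);
	return 0;	/* no reset hook: nothing to do */
}

int main(void)
{
	struct mii_bus_stub bus = { .name = "toy-mdio", .reset = NULL };

	printf("register: %d\n", register_bus(&bus));
	return 0;
}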
index c11ecbc98149ccd7b3e7a884979d362c8da724cf..68069eabc4f855c0636d12b69854e44919ffb863 100644 (file)
@@ -940,11 +940,6 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
        return -EIO;
 }
 
-static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 /******************************************************************************
  * struct ethtool_ops functions
  *****************************************************************************/
@@ -1262,7 +1257,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
        priv->mii_bus->priv = netdev;
        priv->mii_bus->read = ftgmac100_mdiobus_read;
        priv->mii_bus->write = ftgmac100_mdiobus_write;
-       priv->mii_bus->reset = ftgmac100_mdiobus_reset;
        priv->mii_bus->irq = priv->phy_irq;
 
        for (i = 0; i < PHY_MAX_ADDR; i++)
index 549ce13b92acee78b6559e360c427c8720b7493b..71debd1c18c9923a895f313c71f3b4617974433f 100644 (file)
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
 gianfar_driver-objs := gianfar.o \
-               gianfar_ethtool.o \
-               gianfar_sysfs.o
+               gianfar_ethtool.o
 obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
 ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
index 03a351300013c82999474fb411731f1487755550..8d69e439f0c518d4b3e46c9ae21d85e4013b7e06 100644 (file)
@@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* Protocol checksum off-load for TCP and UDP. */
        if (fec_enet_clear_csum(skb, ndev)) {
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -1255,11 +1255,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        return 0;
 }
 
-static int fec_enet_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static int fec_enet_mii_probe(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1384,7 +1379,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->name = "fec_enet_mii_bus";
        fep->mii_bus->read = fec_enet_mdio_read;
        fep->mii_bus->write = fec_enet_mdio_write;
-       fep->mii_bus->reset = fec_enet_mdio_reset;
        snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                pdev->name, fep->dev_id + 1);
        fep->mii_bus->priv = fep;
@@ -1904,10 +1898,11 @@ fec_set_mac_address(struct net_device *ndev, void *p)
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct sockaddr *addr = p;
 
-       if (!is_valid_ether_addr(addr->sa_data))
-               return -EADDRNOTAVAIL;
-
-       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+       if (addr) {
+               if (!is_valid_ether_addr(addr->sa_data))
+                       return -EADDRNOTAVAIL;
+               memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+       }
 
        writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
                (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
@@ -2006,6 +2001,8 @@ static int fec_enet_init(struct net_device *ndev)
 
        /* Get the Ethernet address */
        fec_get_mac(ndev);
+       /* make sure MAC we just acquired is programmed into the hw */
+       fec_set_mac_address(ndev, NULL);
 
        /* init the tx & rx ring size */
        fep->tx_ring_size = TX_RING_SIZE;
index 89ccb5b087080005f1629605804957a7487d71a8..82386b29914a8640bd2e17f956bdd0946fc5c3ce 100644 (file)
@@ -372,6 +372,7 @@ void fec_ptp_init(struct platform_device *pdev)
        fep->ptp_caps.n_alarm = 0;
        fep->ptp_caps.n_ext_ts = 0;
        fep->ptp_caps.n_per_out = 0;
+       fep->ptp_caps.n_pins = 0;
        fep->ptp_caps.pps = 0;
        fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
        fep->ptp_caps.adjtime = fec_ptp_adjtime;
index 62f042d4aaa93cc85e79b079272ac26150b263a3..dc80db41d6b3397388b0210283c4c7fd3ce07680 100644 (file)
@@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
        u16 pkt_len, sc;
        int curidx;
 
+       if (budget <= 0)
+               return received;
+
        /*
         * First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
index 7e69c983d12a81d35f2a9863e7e914892266f0ca..ebf5d6429a8df569ad1d024fb270caac8f7aced8 100644 (file)
@@ -95,12 +95,6 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
 
 }
 
-static int fs_enet_fec_mii_reset(struct mii_bus *bus)
-{
-       /* nothing here - for now */
-       return 0;
-}
-
 static struct of_device_id fs_enet_mdio_fec_match[];
 static int fs_enet_mdio_probe(struct platform_device *ofdev)
 {
@@ -128,7 +122,6 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
        new_bus->name = "FEC MII Bus";
        new_bus->read = &fs_enet_fec_mii_read;
        new_bus->write = &fs_enet_fec_mii_write;
-       new_bus->reset = &fs_enet_fec_mii_reset;
 
        ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
        if (ret)
index ad5a5aadc7e15901a3722ac673f36a25a3a99884..9125d9abf0998d31e3179bd9c712af487855d5a9 100644 (file)
@@ -9,7 +9,7 @@
  * Maintainer: Kumar Gala
  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  * Copyright 2007 MontaVista Software, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
 static irqreturn_t gfar_transmit(int irq, void *dev_id);
 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
-static void init_registers(struct net_device *dev);
 static int init_phy(struct net_device *dev);
 static int gfar_probe(struct platform_device *ofdev);
 static int gfar_remove(struct platform_device *ofdev);
@@ -129,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 static void gfar_configure_serdes(struct net_device *dev);
-static int gfar_poll(struct napi_struct *napi, int budget);
-static int gfar_poll_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_rx(struct napi_struct *napi, int budget);
+static int gfar_poll_tx(struct napi_struct *napi, int budget);
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void gfar_netpoll(struct net_device *dev);
 #endif
@@ -138,9 +139,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                               int amount_pull, struct napi_struct *napi);
-void gfar_halt(struct net_device *dev);
-static void gfar_halt_nodisable(struct net_device *dev);
-void gfar_start(struct net_device *dev);
+static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
                                  const u8 *addr);
@@ -332,72 +331,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
        }
 }
 
-static void gfar_init_mac(struct net_device *ndev)
+static void gfar_rx_buff_size_config(struct gfar_private *priv)
 {
-       struct gfar_private *priv = netdev_priv(ndev);
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       u32 rctrl = 0;
-       u32 tctrl = 0;
-       u32 attrs = 0;
-
-       /* write the tx/rx base registers */
-       gfar_init_tx_rx_base(priv);
-
-       /* Configure the coalescing support */
-       gfar_configure_coalescing_all(priv);
+       int frame_size = priv->ndev->mtu + ETH_HLEN;
 
        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;
 
+       if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
+               priv->uses_rxfcb = 1;
+
+       if (priv->hwts_rx_en)
+               priv->uses_rxfcb = 1;
+
+       if (priv->uses_rxfcb)
+               frame_size += GMAC_FCB_LEN;
+
+       frame_size += priv->padding;
+
+       frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+                    INCREMENTAL_BUFFER_SIZE;
+
+       priv->rx_buffer_size = frame_size;
+}
+
+static void gfar_mac_rx_config(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 rctrl = 0;
+
        if (priv->rx_filer_enable) {
                rctrl |= RCTRL_FILREN;
                /* Program the RIR0 reg with the required distribution */
-               gfar_write(&regs->rir0, DEFAULT_RIR0);
+               if (priv->poll_mode == GFAR_SQ_POLLING)
+                       gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+               else /* GFAR_MQ_POLLING */
+                       gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
        }
 
        /* Restore PROMISC mode */
-       if (ndev->flags & IFF_PROMISC)
+       if (priv->ndev->flags & IFF_PROMISC)
                rctrl |= RCTRL_PROM;
 
-       if (ndev->features & NETIF_F_RXCSUM) {
+       if (priv->ndev->features & NETIF_F_RXCSUM)
                rctrl |= RCTRL_CHECKSUMMING;
-               priv->uses_rxfcb = 1;
-       }
-
-       if (priv->extended_hash) {
-               rctrl |= RCTRL_EXTHASH;
 
-               gfar_clear_exact_match(ndev);
-               rctrl |= RCTRL_EMEN;
-       }
+       if (priv->extended_hash)
+               rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
 
        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }
 
-       /* Insert receive time stamps into padding alignment bytes */
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
-               rctrl &= ~RCTRL_PAL_MASK;
-               rctrl |= RCTRL_PADDING(8);
-               priv->padding = 8;
-       }
-
        /* Enable HW time stamping if requested from user space */
-       if (priv->hwts_rx_en) {
+       if (priv->hwts_rx_en)
                rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
-               priv->uses_rxfcb = 1;
-       }
 
-       if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+       if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
                rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
-               priv->uses_rxfcb = 1;
-       }
 
        /* Init rctrl based on our settings */
        gfar_write(&regs->rctrl, rctrl);
+}
+
+static void gfar_mac_tx_config(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 tctrl = 0;
 
-       if (ndev->features & NETIF_F_IP_CSUM)
+       if (priv->ndev->features & NETIF_F_IP_CSUM)
                tctrl |= TCTRL_INIT_CSUM;
 
        if (priv->prio_sched_en)
@@ -408,30 +411,51 @@ static void gfar_init_mac(struct net_device *ndev)
                gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
        }
 
-       gfar_write(&regs->tctrl, tctrl);
+       if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+               tctrl |= TCTRL_VLINS;
 
-       /* Set the extraction length and index */
-       attrs = ATTRELI_EL(priv->rx_stash_size) |
-               ATTRELI_EI(priv->rx_stash_index);
+       gfar_write(&regs->tctrl, tctrl);
+}
 
-       gfar_write(&regs->attreli, attrs);
+static void gfar_configure_coalescing(struct gfar_private *priv,
+                              unsigned long tx_mask, unsigned long rx_mask)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 __iomem *baddr;
 
-       /* Start with defaults, and add stashing or locking
-        * depending on the approprate variables
-        */
-       attrs = ATTR_INIT_SETTINGS;
+       if (priv->mode == MQ_MG_MODE) {
+               int i = 0;
 
-       if (priv->bd_stash_en)
-               attrs |= ATTR_BDSTASH;
+               baddr = &regs->txic0;
+               for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->tx_queue[i]->txcoalescing))
+                               gfar_write(baddr + i, priv->tx_queue[i]->txic);
+               }
 
-       if (priv->rx_stash_size != 0)
-               attrs |= ATTR_BUFSTASH;
+               baddr = &regs->rxic0;
+               for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+                       gfar_write(baddr + i, 0);
+                       if (likely(priv->rx_queue[i]->rxcoalescing))
+                               gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+               }
+       } else {
+               /* Backward compatible case -- even if we enable
+                * multiple queues, there's only single reg to program
+                * multiple queues, there's only a single reg to program
+               gfar_write(&regs->txic, 0);
+               if (likely(priv->tx_queue[0]->txcoalescing))
+                       gfar_write(&regs->txic, priv->tx_queue[0]->txic);
 
-       gfar_write(&regs->attr, attrs);
+               gfar_write(&regs->rxic, 0);
+               if (unlikely(priv->rx_queue[0]->rxcoalescing))
+                       gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+       }
+}
 
-       gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
-       gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
-       gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+       gfar_configure_coalescing(priv, 0xFF, 0xFF);
 }
 
 static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,12 +503,27 @@ static const struct net_device_ops gfar_netdev_ops = {
 #endif
 };
 
-void lock_rx_qs(struct gfar_private *priv)
+static void gfar_ints_disable(struct gfar_private *priv)
 {
        int i;
+       for (i = 0; i < priv->num_grps; i++) {
+               struct gfar __iomem *regs = priv->gfargrp[i].regs;
+               /* Clear IEVENT */
+               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
 
-       for (i = 0; i < priv->num_rx_queues; i++)
-               spin_lock(&priv->rx_queue[i]->rxlock);
+               /* Initialize IMASK */
+               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+       int i;
+       for (i = 0; i < priv->num_grps; i++) {
+               struct gfar __iomem *regs = priv->gfargrp[i].regs;
+               /* Unmask the interrupts we look for */
+               gfar_write(&regs->imask, IMASK_DEFAULT);
+       }
 }
 
 void lock_tx_qs(struct gfar_private *priv)
@@ -495,23 +534,50 @@ void lock_tx_qs(struct gfar_private *priv)
                spin_lock(&priv->tx_queue[i]->txlock);
 }
 
-void unlock_rx_qs(struct gfar_private *priv)
+void unlock_tx_qs(struct gfar_private *priv)
 {
        int i;
 
-       for (i = 0; i < priv->num_rx_queues; i++)
-               spin_unlock(&priv->rx_queue[i]->rxlock);
+       for (i = 0; i < priv->num_tx_queues; i++)
+               spin_unlock(&priv->tx_queue[i]->txlock);
 }
 
-void unlock_tx_qs(struct gfar_private *priv)
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
 {
        int i;
 
-       for (i = 0; i < priv->num_tx_queues; i++)
-               spin_unlock(&priv->tx_queue[i]->txlock);
+       for (i = 0; i < priv->num_tx_queues; i++) {
+               priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+                                           GFP_KERNEL);
+               if (!priv->tx_queue[i])
+                       return -ENOMEM;
+
+               priv->tx_queue[i]->tx_skbuff = NULL;
+               priv->tx_queue[i]->qindex = i;
+               priv->tx_queue[i]->dev = priv->ndev;
+               spin_lock_init(&(priv->tx_queue[i]->txlock));
+       }
+       return 0;
 }
 
-static void free_tx_pointers(struct gfar_private *priv)
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->num_rx_queues; i++) {
+               priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+                                           GFP_KERNEL);
+               if (!priv->rx_queue[i])
+                       return -ENOMEM;
+
+               priv->rx_queue[i]->rx_skbuff = NULL;
+               priv->rx_queue[i]->qindex = i;
+               priv->rx_queue[i]->dev = priv->ndev;
+       }
+       return 0;
+}
+
+static void gfar_free_tx_queues(struct gfar_private *priv)
 {
        int i;
 
@@ -519,7 +585,7 @@ static void free_tx_pointers(struct gfar_private *priv)
                kfree(priv->tx_queue[i]);
 }
 
-static void free_rx_pointers(struct gfar_private *priv)
+static void gfar_free_rx_queues(struct gfar_private *priv)
 {
        int i;
 
@@ -553,23 +619,26 @@ static void disable_napi(struct gfar_private *priv)
 {
        int i;
 
-       for (i = 0; i < priv->num_grps; i++)
-               napi_disable(&priv->gfargrp[i].napi);
+       for (i = 0; i < priv->num_grps; i++) {
+               napi_disable(&priv->gfargrp[i].napi_rx);
+               napi_disable(&priv->gfargrp[i].napi_tx);
+       }
 }
 
 static void enable_napi(struct gfar_private *priv)
 {
        int i;
 
-       for (i = 0; i < priv->num_grps; i++)
-               napi_enable(&priv->gfargrp[i].napi);
+       for (i = 0; i < priv->num_grps; i++) {
+               napi_enable(&priv->gfargrp[i].napi_rx);
+               napi_enable(&priv->gfargrp[i].napi_tx);
+       }
 }
 
 static int gfar_parse_group(struct device_node *np,
                            struct gfar_private *priv, const char *model)
 {
        struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
-       u32 *queue_mask;
        int i;
 
        for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -598,16 +667,52 @@ static int gfar_parse_group(struct device_node *np,
        grp->priv = priv;
        spin_lock_init(&grp->grplock);
        if (priv->mode == MQ_MG_MODE) {
-               queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-               grp->rx_bit_map = queue_mask ?
-                       *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
-               queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
-               grp->tx_bit_map = queue_mask ?
-                       *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+               u32 *rxq_mask, *txq_mask;
+               rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+               txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+
+               if (priv->poll_mode == GFAR_SQ_POLLING) {
+                       /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+                       grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+                       grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+               } else { /* GFAR_MQ_POLLING */
+                       grp->rx_bit_map = rxq_mask ?
+                       *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+                       grp->tx_bit_map = txq_mask ?
+                       *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+               }
        } else {
                grp->rx_bit_map = 0xFF;
                grp->tx_bit_map = 0xFF;
        }
+
+       /* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
+        * right to left, so we need to reverse the 8 bits to get the q index
+        */
+       grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+       grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+       /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+        * also assign queues to groups
+        */
+       for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+               if (!grp->rx_queue)
+                       grp->rx_queue = priv->rx_queue[i];
+               grp->num_rx_queues++;
+               grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+               priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+               priv->rx_queue[i]->grp = grp;
+       }
+
+       for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+               if (!grp->tx_queue)
+                       grp->tx_queue = priv->tx_queue[i];
+               grp->num_tx_queues++;
+               grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+               priv->tqueue |= (TQUEUE_EN0 >> i);
+               priv->tx_queue[i]->grp = grp;
+       }
+
        priv->num_grps++;
 
        return 0;
@@ -628,13 +733,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
        const u32 *stash_idx;
        unsigned int num_tx_qs, num_rx_qs;
        u32 *tx_queues, *rx_queues;
+       unsigned short mode, poll_mode;
 
        if (!np || !of_device_is_available(np))
                return -ENODEV;
 
-       /* parse the num of tx and rx queues */
+       if (of_device_is_compatible(np, "fsl,etsec2")) {
+               mode = MQ_MG_MODE;
+               poll_mode = GFAR_SQ_POLLING;
+       } else {
+               mode = SQ_SG_MODE;
+               poll_mode = GFAR_SQ_POLLING;
+       }
+
+       /* parse the num of HW tx and rx queues */
        tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-       num_tx_qs = tx_queues ? *tx_queues : 1;
+       rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+
+       if (mode == SQ_SG_MODE) {
+               num_tx_qs = 1;
+               num_rx_qs = 1;
+       } else { /* MQ_MG_MODE */
+               /* get the actual number of supported groups */
+               unsigned int num_grps = of_get_available_child_count(np);
+
+               if (num_grps == 0 || num_grps > MAXGROUPS) {
+                       dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
+                               num_grps);
+                       pr_err("Cannot do alloc_etherdev, aborting\n");
+                       return -EINVAL;
+               }
+
+               if (poll_mode == GFAR_SQ_POLLING) {
+                       num_tx_qs = num_grps; /* one txq per int group */
+                       num_rx_qs = num_grps; /* one rxq per int group */
+               } else { /* GFAR_MQ_POLLING */
+                       num_tx_qs = tx_queues ? *tx_queues : 1;
+                       num_rx_qs = rx_queues ? *rx_queues : 1;
+               }
+       }
 
        if (num_tx_qs > MAX_TX_QS) {
                pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -643,9 +780,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                return -EINVAL;
        }
 
-       rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-       num_rx_qs = rx_queues ? *rx_queues : 1;
-
        if (num_rx_qs > MAX_RX_QS) {
                pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
                       num_rx_qs, MAX_RX_QS);
@@ -661,10 +795,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
        priv = netdev_priv(dev);
        priv->ndev = dev;
 
+       priv->mode = mode;
+       priv->poll_mode = poll_mode;
+
        priv->num_tx_queues = num_tx_qs;
        netif_set_real_num_rx_queues(dev, num_rx_qs);
        priv->num_rx_queues = num_rx_qs;
-       priv->num_grps = 0x0;
+
+       err = gfar_alloc_tx_queues(priv);
+       if (err)
+               goto tx_alloc_failed;
+
+       err = gfar_alloc_rx_queues(priv);
+       if (err)
+               goto rx_alloc_failed;
 
        /* Init Rx queue filer rule set linked list */
        INIT_LIST_HEAD(&priv->rx_list.list);
@@ -677,52 +821,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                priv->gfargrp[i].regs = NULL;
 
        /* Parse and initialize group specific information */
-       if (of_device_is_compatible(np, "fsl,etsec2")) {
-               priv->mode = MQ_MG_MODE;
+       if (priv->mode == MQ_MG_MODE) {
                for_each_child_of_node(np, child) {
                        err = gfar_parse_group(child, priv, model);
                        if (err)
                                goto err_grp_init;
                }
-       } else {
-               priv->mode = SQ_SG_MODE;
+       } else { /* SQ_SG_MODE */
                err = gfar_parse_group(np, priv, model);
                if (err)
                        goto err_grp_init;
        }
 
-       for (i = 0; i < priv->num_tx_queues; i++)
-               priv->tx_queue[i] = NULL;
-       for (i = 0; i < priv->num_rx_queues; i++)
-               priv->rx_queue[i] = NULL;
-
-       for (i = 0; i < priv->num_tx_queues; i++) {
-               priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
-                                           GFP_KERNEL);
-               if (!priv->tx_queue[i]) {
-                       err = -ENOMEM;
-                       goto tx_alloc_failed;
-               }
-               priv->tx_queue[i]->tx_skbuff = NULL;
-               priv->tx_queue[i]->qindex = i;
-               priv->tx_queue[i]->dev = dev;
-               spin_lock_init(&(priv->tx_queue[i]->txlock));
-       }
-
-       for (i = 0; i < priv->num_rx_queues; i++) {
-               priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
-                                           GFP_KERNEL);
-               if (!priv->rx_queue[i]) {
-                       err = -ENOMEM;
-                       goto rx_alloc_failed;
-               }
-               priv->rx_queue[i]->rx_skbuff = NULL;
-               priv->rx_queue[i]->qindex = i;
-               priv->rx_queue[i]->dev = dev;
-               spin_lock_init(&(priv->rx_queue[i]->rxlock));
-       }
-
-
        stash = of_get_property(np, "bd-stash", NULL);
 
        if (stash) {
@@ -749,17 +859,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
                memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        if (model && !strcasecmp(model, "TSEC"))
-               priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+               priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                     FSL_GIANFAR_DEV_HAS_COALESCE |
                                     FSL_GIANFAR_DEV_HAS_RMON |
                                     FSL_GIANFAR_DEV_HAS_MULTI_INTR;
 
        if (model && !strcasecmp(model, "eTSEC"))
-               priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+               priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
                                     FSL_GIANFAR_DEV_HAS_COALESCE |
                                     FSL_GIANFAR_DEV_HAS_RMON |
                                     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
-                                    FSL_GIANFAR_DEV_HAS_PADDING |
                                     FSL_GIANFAR_DEV_HAS_CSUM |
                                     FSL_GIANFAR_DEV_HAS_VLAN |
                                     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +893,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
 
        return 0;
 
-rx_alloc_failed:
-       free_rx_pointers(priv);
-tx_alloc_failed:
-       free_tx_pointers(priv);
 err_grp_init:
        unmap_group_regs(priv);
+rx_alloc_failed:
+       gfar_free_rx_queues(priv);
+tx_alloc_failed:
+       gfar_free_tx_queues(priv);
        free_gfar_dev(priv);
        return err;
 }
@@ -822,18 +931,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                if (priv->hwts_rx_en) {
-                       stop_gfar(netdev);
                        priv->hwts_rx_en = 0;
-                       startup_gfar(netdev);
+                       reset_gfar(netdev);
                }
                break;
        default:
                if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
                        return -ERANGE;
                if (!priv->hwts_rx_en) {
-                       stop_gfar(netdev);
                        priv->hwts_rx_en = 1;
-                       startup_gfar(netdev);
+                       reset_gfar(netdev);
                }
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
@@ -875,19 +982,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        return phy_mii_ioctl(priv->phydev, rq, cmd);
 }
 
-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
-{
-       unsigned int new_bit_map = 0x0;
-       int mask = 0x1 << (max_qs - 1), i;
-
-       for (i = 0; i < max_qs; i++) {
-               if (bit_map & mask)
-                       new_bit_map = new_bit_map + (1 << i);
-               mask = mask >> 0x1;
-       }
-       return new_bit_map;
-}
-
 static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
                                   u32 class)
 {
@@ -995,14 +1089,185 @@ static void gfar_detect_errata(struct gfar_private *priv)
        /* no plans to fix */
        priv->errata |= GFAR_ERRATA_A002;
 
-       if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
-               __gfar_detect_errata_85xx(priv);
-       else /* non-mpc85xx parts, i.e. e300 core based */
-               __gfar_detect_errata_83xx(priv);
+       if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+               __gfar_detect_errata_85xx(priv);
+       else /* non-mpc85xx parts, i.e. e300 core based */
+               __gfar_detect_errata_83xx(priv);
+
+       if (priv->errata)
+               dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
+                        priv->errata);
+}
+
+void gfar_mac_reset(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 tempval;
+
+       /* Reset MAC layer */
+       gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
+
+       /* We need to delay at least 3 TX clocks */
+       udelay(3);
+
+       /* the soft reset bit is not self-resetting, so we need to
+        * clear it before resuming normal operation
+        */
+       gfar_write(&regs->maccfg1, 0);
+
+       udelay(3);
+
+       /* Compute rx_buff_size based on config flags */
+       gfar_rx_buff_size_config(priv);
+
+       /* Initialize the max receive frame/buffer lengths */
+       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+       gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+       /* Initialize the Minimum Frame Length Register */
+       gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
+
+       /* Initialize MACCFG2. */
+       tempval = MACCFG2_INIT_SETTINGS;
+
+       /* If the mtu is larger than the max size for standard
+        * ethernet frames (ie, a jumbo frame), then set maccfg2
+        * to allow huge frames, and to check the length
+        */
+       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+           gfar_has_errata(priv, GFAR_ERRATA_74))
+               tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
+       gfar_write(&regs->maccfg2, tempval);
+
+       /* Clear mac addr hash registers */
+       gfar_write(&regs->igaddr0, 0);
+       gfar_write(&regs->igaddr1, 0);
+       gfar_write(&regs->igaddr2, 0);
+       gfar_write(&regs->igaddr3, 0);
+       gfar_write(&regs->igaddr4, 0);
+       gfar_write(&regs->igaddr5, 0);
+       gfar_write(&regs->igaddr6, 0);
+       gfar_write(&regs->igaddr7, 0);
+
+       gfar_write(&regs->gaddr0, 0);
+       gfar_write(&regs->gaddr1, 0);
+       gfar_write(&regs->gaddr2, 0);
+       gfar_write(&regs->gaddr3, 0);
+       gfar_write(&regs->gaddr4, 0);
+       gfar_write(&regs->gaddr5, 0);
+       gfar_write(&regs->gaddr6, 0);
+       gfar_write(&regs->gaddr7, 0);
+
+       if (priv->extended_hash)
+               gfar_clear_exact_match(priv->ndev);
+
+       gfar_mac_rx_config(priv);
+
+       gfar_mac_tx_config(priv);
+
+       gfar_set_mac_address(priv->ndev);
+
+       gfar_set_multi(priv->ndev);
+
+       /* clear ievent and imask before configuring coalescing */
+       gfar_ints_disable(priv);
+
+       /* Configure the coalescing support */
+       gfar_configure_coalescing_all(priv);
+}
+
+static void gfar_hw_init(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 attrs;
+
+       /* Stop the DMA engine now, in case it was running before
+        * (The firmware could have used it, and left it running).
+        */
+       gfar_halt(priv);
+
+       gfar_mac_reset(priv);
+
+       /* Zero out the rmon mib registers if it has them */
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+               memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+
+               /* Mask off the CAM interrupts */
+               gfar_write(&regs->rmon.cam1, 0xffffffff);
+               gfar_write(&regs->rmon.cam2, 0xffffffff);
+       }
+
+       /* Initialize ECNTRL */
+       gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
+
+       /* Set the extraction length and index */
+       attrs = ATTRELI_EL(priv->rx_stash_size) |
+               ATTRELI_EI(priv->rx_stash_index);
+
+       gfar_write(&regs->attreli, attrs);
+
+       /* Start with defaults, and add stashing
+        * depending on driver parameters
+        */
+       attrs = ATTR_INIT_SETTINGS;
+
+       if (priv->bd_stash_en)
+               attrs |= ATTR_BDSTASH;
+
+       if (priv->rx_stash_size != 0)
+               attrs |= ATTR_BUFSTASH;
+
+       gfar_write(&regs->attr, attrs);
+
+       /* FIFO configs */
+       gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+       gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+       gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+       /* Program the interrupt steering regs, only for MG devices */
+       if (priv->num_grps > 1)
+               gfar_write_isrg(priv);
+}
+
+static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
+               priv->extended_hash = 1;
+               priv->hash_width = 9;
+
+               priv->hash_regs[0] = &regs->igaddr0;
+               priv->hash_regs[1] = &regs->igaddr1;
+               priv->hash_regs[2] = &regs->igaddr2;
+               priv->hash_regs[3] = &regs->igaddr3;
+               priv->hash_regs[4] = &regs->igaddr4;
+               priv->hash_regs[5] = &regs->igaddr5;
+               priv->hash_regs[6] = &regs->igaddr6;
+               priv->hash_regs[7] = &regs->igaddr7;
+               priv->hash_regs[8] = &regs->gaddr0;
+               priv->hash_regs[9] = &regs->gaddr1;
+               priv->hash_regs[10] = &regs->gaddr2;
+               priv->hash_regs[11] = &regs->gaddr3;
+               priv->hash_regs[12] = &regs->gaddr4;
+               priv->hash_regs[13] = &regs->gaddr5;
+               priv->hash_regs[14] = &regs->gaddr6;
+               priv->hash_regs[15] = &regs->gaddr7;
+
+       } else {
+               priv->extended_hash = 0;
+               priv->hash_width = 8;
 
-       if (priv->errata)
-               dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
-                        priv->errata);
+               priv->hash_regs[0] = &regs->gaddr0;
+               priv->hash_regs[1] = &regs->gaddr1;
+               priv->hash_regs[2] = &regs->gaddr2;
+               priv->hash_regs[3] = &regs->gaddr3;
+               priv->hash_regs[4] = &regs->gaddr4;
+               priv->hash_regs[5] = &regs->gaddr5;
+               priv->hash_regs[6] = &regs->gaddr6;
+               priv->hash_regs[7] = &regs->gaddr7;
+       }
 }
 
 /* Set up the ethernet device structure, private data,
@@ -1010,14 +1275,9 @@ static void gfar_detect_errata(struct gfar_private *priv)
  */
 static int gfar_probe(struct platform_device *ofdev)
 {
-       u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
-       struct gfar __iomem *regs = NULL;
-       int err = 0, i, grp_idx = 0;
-       u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
-       u32 isrg = 0;
-       u32 __iomem *baddr;
+       int err = 0, i;
 
        err = gfar_of_init(ofdev, &dev);
 
@@ -1034,42 +1294,11 @@ static int gfar_probe(struct platform_device *ofdev)
        INIT_WORK(&priv->reset_task, gfar_reset_task);
 
        platform_set_drvdata(ofdev, priv);
-       regs = priv->gfargrp[0].regs;
 
        gfar_detect_errata(priv);
 
-       /* Stop the DMA engine now, in case it was running before
-        * (The firmware could have used it, and left it running).
-        */
-       gfar_halt(dev);
-
-       /* Reset MAC layer */
-       gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
-
-       /* We need to delay at least 3 TX clocks */
-       udelay(2);
-
-       tempval = 0;
-       if (!priv->pause_aneg_en && priv->tx_pause_en)
-               tempval |= MACCFG1_TX_FLOW;
-       if (!priv->pause_aneg_en && priv->rx_pause_en)
-               tempval |= MACCFG1_RX_FLOW;
-       /* the soft reset bit is not self-resetting, so we need to
-        * clear it before resuming normal operation
-        */
-       gfar_write(&regs->maccfg1, tempval);
-
-       /* Initialize MACCFG2. */
-       tempval = MACCFG2_INIT_SETTINGS;
-       if (gfar_has_errata(priv, GFAR_ERRATA_74))
-               tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
-       gfar_write(&regs->maccfg2, tempval);
-
-       /* Initialize ECNTRL */
-       gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
-
        /* Set the dev->base_addr to the gfar reg region */
-       dev->base_addr = (unsigned long) regs;
+       dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
 
        /* Fill in the dev structure */
        dev->watchdog_timeo = TX_TIMEOUT;
@@ -1078,13 +1307,19 @@ static int gfar_probe(struct platform_device *ofdev)
        dev->ethtool_ops = &gfar_ethtool_ops;
 
        /* Register for napi ...We are registering NAPI for each grp */
-       if (priv->mode == SQ_SG_MODE)
-               netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
-                              GFAR_DEV_WEIGHT);
-       else
-               for (i = 0; i < priv->num_grps; i++)
-                       netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
-                                      GFAR_DEV_WEIGHT);
+       for (i = 0; i < priv->num_grps; i++) {
+               if (priv->poll_mode == GFAR_SQ_POLLING) {
+                       netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+                                      gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+                       netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+                                      gfar_poll_tx_sq, 2);
+               } else {
+                       netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+                                      gfar_poll_rx, GFAR_DEV_WEIGHT);
+                       netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+                                      gfar_poll_tx, 2);
+               }
+       }
 
        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -1099,103 +1334,16 @@ static int gfar_probe(struct platform_device *ofdev)
                dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
        }
 
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
-               priv->extended_hash = 1;
-               priv->hash_width = 9;
-
-               priv->hash_regs[0] = &regs->igaddr0;
-               priv->hash_regs[1] = &regs->igaddr1;
-               priv->hash_regs[2] = &regs->igaddr2;
-               priv->hash_regs[3] = &regs->igaddr3;
-               priv->hash_regs[4] = &regs->igaddr4;
-               priv->hash_regs[5] = &regs->igaddr5;
-               priv->hash_regs[6] = &regs->igaddr6;
-               priv->hash_regs[7] = &regs->igaddr7;
-               priv->hash_regs[8] = &regs->gaddr0;
-               priv->hash_regs[9] = &regs->gaddr1;
-               priv->hash_regs[10] = &regs->gaddr2;
-               priv->hash_regs[11] = &regs->gaddr3;
-               priv->hash_regs[12] = &regs->gaddr4;
-               priv->hash_regs[13] = &regs->gaddr5;
-               priv->hash_regs[14] = &regs->gaddr6;
-               priv->hash_regs[15] = &regs->gaddr7;
-
-       } else {
-               priv->extended_hash = 0;
-               priv->hash_width = 8;
-
-               priv->hash_regs[0] = &regs->gaddr0;
-               priv->hash_regs[1] = &regs->gaddr1;
-               priv->hash_regs[2] = &regs->gaddr2;
-               priv->hash_regs[3] = &regs->gaddr3;
-               priv->hash_regs[4] = &regs->gaddr4;
-               priv->hash_regs[5] = &regs->gaddr5;
-               priv->hash_regs[6] = &regs->gaddr6;
-               priv->hash_regs[7] = &regs->gaddr7;
-       }
+       gfar_init_addr_hash_table(priv);
 
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
-               priv->padding = DEFAULT_PADDING;
-       else
-               priv->padding = 0;
+       /* Insert receive time stamps into padding alignment bytes */
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+               priv->padding = 8;
 
        if (dev->features & NETIF_F_IP_CSUM ||
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->needed_headroom = GMAC_FCB_LEN;
 
-       /* Program the isrg regs only if number of grps > 1 */
-       if (priv->num_grps > 1) {
-               baddr = &regs->isrg0;
-               for (i = 0; i < priv->num_grps; i++) {
-                       isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
-                       isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
-                       gfar_write(baddr, isrg);
-                       baddr++;
-                       isrg = 0x0;
-               }
-       }
-
-       /* Need to reverse the bit maps as  bit_map's MSB is q0
-        * but, for_each_set_bit parses from right to left, which
-        * basically reverses the queue numbers
-        */
-       for (i = 0; i< priv->num_grps; i++) {
-               priv->gfargrp[i].tx_bit_map =
-                       reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
-               priv->gfargrp[i].rx_bit_map =
-                       reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
-       }
-
-       /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
-        * also assign queues to groups
-        */
-       for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
-               priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-
-               for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
-                                priv->num_rx_queues) {
-                       priv->gfargrp[grp_idx].num_rx_queues++;
-                       priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
-                       rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
-                       rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
-               }
-               priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-
-               for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
-                                priv->num_tx_queues) {
-                       priv->gfargrp[grp_idx].num_tx_queues++;
-                       priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
-                       tstat = tstat | (TSTAT_CLEAR_THALT >> i);
-                       tqueue = tqueue | (TQUEUE_EN0 >> i);
-               }
-               priv->gfargrp[grp_idx].rstat = rstat;
-               priv->gfargrp[grp_idx].tstat = tstat;
-               rstat = tstat =0;
-       }
-
-       gfar_write(&regs->rqueue, rqueue);
-       gfar_write(&regs->tqueue, tqueue);
-
        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
 
        /* Initializing some of the rx/tx queue level parameters */
@@ -1220,8 +1368,9 @@ static int gfar_probe(struct platform_device *ofdev)
        if (priv->num_tx_queues == 1)
                priv->prio_sched_en = 1;
 
-       /* Carrier starts down, phylib will bring it up */
-       netif_carrier_off(dev);
+       set_bit(GFAR_DOWN, &priv->state);
+
+       gfar_hw_init(priv);
 
        err = register_netdev(dev);
 
@@ -1230,6 +1379,9 @@ static int gfar_probe(struct platform_device *ofdev)
                goto register_fail;
        }
 
+       /* Carrier starts down, phylib will bring it up */
+       netif_carrier_off(dev);
+
        device_init_wakeup(&dev->dev,
                           priv->device_flags &
                           FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1251,9 +1403,6 @@ static int gfar_probe(struct platform_device *ofdev)
        /* Initialize the filer table */
        gfar_init_filer_table(priv);
 
-       /* Create all the sysfs files */
-       gfar_init_sysfs(dev);
-
        /* Print out the device info */
        netdev_info(dev, "mac: %pM\n", dev->dev_addr);
 
@@ -1272,8 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
 
 register_fail:
        unmap_group_regs(priv);
-       free_tx_pointers(priv);
-       free_rx_pointers(priv);
+       gfar_free_rx_queues(priv);
+       gfar_free_tx_queues(priv);
        if (priv->phy_node)
                of_node_put(priv->phy_node);
        if (priv->tbi_node)
@@ -1293,6 +1442,8 @@ static int gfar_remove(struct platform_device *ofdev)
 
        unregister_netdev(priv->ndev);
        unmap_group_regs(priv);
+       gfar_free_rx_queues(priv);
+       gfar_free_tx_queues(priv);
        free_gfar_dev(priv);
 
        return 0;
@@ -1318,9 +1469,8 @@ static int gfar_suspend(struct device *dev)
 
                local_irq_save(flags);
                lock_tx_qs(priv);
-               lock_rx_qs(priv);
 
-               gfar_halt_nodisable(ndev);
+               gfar_halt_nodisable(priv);
 
                /* Disable Tx, and Rx if wake-on-LAN is disabled. */
                tempval = gfar_read(&regs->maccfg1);
@@ -1332,7 +1482,6 @@ static int gfar_suspend(struct device *dev)
 
                gfar_write(&regs->maccfg1, tempval);
 
-               unlock_rx_qs(priv);
                unlock_tx_qs(priv);
                local_irq_restore(flags);
 
@@ -1378,15 +1527,13 @@ static int gfar_resume(struct device *dev)
         */
        local_irq_save(flags);
        lock_tx_qs(priv);
-       lock_rx_qs(priv);
 
        tempval = gfar_read(&regs->maccfg2);
        tempval &= ~MACCFG2_MPEN;
        gfar_write(&regs->maccfg2, tempval);
 
-       gfar_start(ndev);
+       gfar_start(priv);
 
-       unlock_rx_qs(priv);
        unlock_tx_qs(priv);
        local_irq_restore(flags);
 
@@ -1413,10 +1560,11 @@ static int gfar_restore(struct device *dev)
                return -ENOMEM;
        }
 
-       init_registers(ndev);
-       gfar_set_mac_address(ndev);
-       gfar_init_mac(ndev);
-       gfar_start(ndev);
+       gfar_mac_reset(priv);
+
+       gfar_init_tx_rx_base(priv);
+
+       gfar_start(priv);
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
@@ -1574,57 +1722,6 @@ static void gfar_configure_serdes(struct net_device *dev)
                  BMCR_SPEED1000);
 }
 
-static void init_registers(struct net_device *dev)
-{
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = NULL;
-       int i;
-
-       for (i = 0; i < priv->num_grps; i++) {
-               regs = priv->gfargrp[i].regs;
-               /* Clear IEVENT */
-               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
-
-               /* Initialize IMASK */
-               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
-       }
-
-       regs = priv->gfargrp[0].regs;
-       /* Init hash registers to zero */
-       gfar_write(&regs->igaddr0, 0);
-       gfar_write(&regs->igaddr1, 0);
-       gfar_write(&regs->igaddr2, 0);
-       gfar_write(&regs->igaddr3, 0);
-       gfar_write(&regs->igaddr4, 0);
-       gfar_write(&regs->igaddr5, 0);
-       gfar_write(&regs->igaddr6, 0);
-       gfar_write(&regs->igaddr7, 0);
-
-       gfar_write(&regs->gaddr0, 0);
-       gfar_write(&regs->gaddr1, 0);
-       gfar_write(&regs->gaddr2, 0);
-       gfar_write(&regs->gaddr3, 0);
-       gfar_write(&regs->gaddr4, 0);
-       gfar_write(&regs->gaddr5, 0);
-       gfar_write(&regs->gaddr6, 0);
-       gfar_write(&regs->gaddr7, 0);
-
-       /* Zero out the rmon mib registers if it has them */
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
-               memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
-
-               /* Mask off the CAM interrupts */
-               gfar_write(&regs->rmon.cam1, 0xffffffff);
-               gfar_write(&regs->rmon.cam2, 0xffffffff);
-       }
-
-       /* Initialize the max receive buffer length */
-       gfar_write(&regs->mrblr, priv->rx_buffer_size);
-
-       /* Initialize the Minimum Frame Length Register */
-       gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
-}
-
 static int __gfar_is_rx_idle(struct gfar_private *priv)
 {
        u32 res;
@@ -1648,23 +1745,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
 }
 
 /* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct net_device *dev)
+static void gfar_halt_nodisable(struct gfar_private *priv)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = NULL;
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
-       int i;
-
-       for (i = 0; i < priv->num_grps; i++) {
-               regs = priv->gfargrp[i].regs;
-               /* Mask all interrupts */
-               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
 
-               /* Clear all interrupts */
-               gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
-       }
+       gfar_ints_disable(priv);
 
-       regs = priv->gfargrp[0].regs;
        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,56 +1772,41 @@ static void gfar_halt_nodisable(struct net_device *dev)
 }
 
 /* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+void gfar_halt(struct gfar_private *priv)
 {
-       struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
 
-       gfar_halt_nodisable(dev);
+       /* Disable the Rx/Tx hw queues */
+       gfar_write(&regs->rqueue, 0);
+       gfar_write(&regs->tqueue, 0);
 
-       /* Disable Rx and Tx */
+       mdelay(10);
+
+       gfar_halt_nodisable(priv);
+
+       /* Disable Rx/Tx DMA */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
 }
 
-static void free_grp_irqs(struct gfar_priv_grp *grp)
-{
-       free_irq(gfar_irq(grp, TX)->irq, grp);
-       free_irq(gfar_irq(grp, RX)->irq, grp);
-       free_irq(gfar_irq(grp, ER)->irq, grp);
-}
-
 void stop_gfar(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       unsigned long flags;
-       int i;
-
-       phy_stop(priv->phydev);
 
+       netif_tx_stop_all_queues(dev);
 
-       /* Lock it down */
-       local_irq_save(flags);
-       lock_tx_qs(priv);
-       lock_rx_qs(priv);
+       smp_mb__before_clear_bit();
+       set_bit(GFAR_DOWN, &priv->state);
+       smp_mb__after_clear_bit();
 
-       gfar_halt(dev);
+       disable_napi(priv);
 
-       unlock_rx_qs(priv);
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
+       /* disable ints and gracefully shut down Rx/Tx DMA */
+       gfar_halt(priv);
 
-       /* Free the IRQs */
-       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
-               for (i = 0; i < priv->num_grps; i++)
-                       free_grp_irqs(&priv->gfargrp[i]);
-       } else {
-               for (i = 0; i < priv->num_grps; i++)
-                       free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
-                                &priv->gfargrp[i]);
-       }
+       phy_stop(priv->phydev);
 
        free_skb_resources(priv);
 }
@@ -1825,17 +1897,15 @@ static void free_skb_resources(struct gfar_private *priv)
                          priv->tx_queue[0]->tx_bd_dma_base);
 }
 
-void gfar_start(struct net_device *dev)
+void gfar_start(struct gfar_private *priv)
 {
-       struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
        int i = 0;
 
-       /* Enable Rx and Tx in MACCFG1 */
-       tempval = gfar_read(&regs->maccfg1);
-       tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
-       gfar_write(&regs->maccfg1, tempval);
+       /* Enable Rx/Tx hw queues */
+       gfar_write(&regs->rqueue, priv->rqueue);
+       gfar_write(&regs->tqueue, priv->tqueue);
 
        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&regs->dmactrl);
@@ -1852,52 +1922,23 @@ void gfar_start(struct net_device *dev)
                /* Clear THLT/RHLT, so that the DMA starts polling now */
                gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
                gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
-               /* Unmask the interrupts we look for */
-               gfar_write(&regs->imask, IMASK_DEFAULT);
-       }
-
-       dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-static void gfar_configure_coalescing(struct gfar_private *priv,
-                              unsigned long tx_mask, unsigned long rx_mask)
-{
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       u32 __iomem *baddr;
-
-       if (priv->mode == MQ_MG_MODE) {
-               int i = 0;
-
-               baddr = &regs->txic0;
-               for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
-                       gfar_write(baddr + i, 0);
-                       if (likely(priv->tx_queue[i]->txcoalescing))
-                               gfar_write(baddr + i, priv->tx_queue[i]->txic);
-               }
-
-               baddr = &regs->rxic0;
-               for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
-                       gfar_write(baddr + i, 0);
-                       if (likely(priv->rx_queue[i]->rxcoalescing))
-                               gfar_write(baddr + i, priv->rx_queue[i]->rxic);
-               }
-       } else {
-               /* Backward compatible case -- even if we enable
-                * multiple queues, there's only single reg to program
-                */
-               gfar_write(&regs->txic, 0);
-               if (likely(priv->tx_queue[0]->txcoalescing))
-                       gfar_write(&regs->txic, priv->tx_queue[0]->txic);
-
-               gfar_write(&regs->rxic, 0);
-               if (unlikely(priv->rx_queue[0]->rxcoalescing))
-                       gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
        }
+
+       /* Enable Rx/Tx DMA */
+       tempval = gfar_read(&regs->maccfg1);
+       tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+       gfar_write(&regs->maccfg1, tempval);
+
+       gfar_ints_enable(priv);
+
+       priv->ndev->trans_start = jiffies; /* prevent tx timeout */
 }
 
-void gfar_configure_coalescing_all(struct gfar_private *priv)
+static void free_grp_irqs(struct gfar_priv_grp *grp)
 {
-       gfar_configure_coalescing(priv, 0xFF, 0xFF);
+       free_irq(gfar_irq(grp, TX)->irq, grp);
+       free_irq(gfar_irq(grp, RX)->irq, grp);
+       free_irq(gfar_irq(grp, ER)->irq, grp);
 }
 
 static int register_grp_irqs(struct gfar_priv_grp *grp)
@@ -1956,46 +1997,65 @@ err_irq_fail:
 
 }
 
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
+static void gfar_free_irq(struct gfar_private *priv)
 {
-       struct gfar_private *priv = netdev_priv(ndev);
-       struct gfar __iomem *regs = NULL;
-       int err, i, j;
+       int i;
 
-       for (i = 0; i < priv->num_grps; i++) {
-               regs= priv->gfargrp[i].regs;
-               gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+       /* Free the IRQs */
+       if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+               for (i = 0; i < priv->num_grps; i++)
+                       free_grp_irqs(&priv->gfargrp[i]);
+       } else {
+               for (i = 0; i < priv->num_grps; i++)
+                       free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+                                &priv->gfargrp[i]);
        }
+}
 
-       regs= priv->gfargrp[0].regs;
-       err = gfar_alloc_skb_resources(ndev);
-       if (err)
-               return err;
-
-       gfar_init_mac(ndev);
+static int gfar_request_irq(struct gfar_private *priv)
+{
+       int err, i, j;
 
        for (i = 0; i < priv->num_grps; i++) {
                err = register_grp_irqs(&priv->gfargrp[i]);
                if (err) {
                        for (j = 0; j < i; j++)
                                free_grp_irqs(&priv->gfargrp[j]);
-                       goto irq_fail;
+                       return err;
                }
        }
 
-       /* Start the controller */
-       gfar_start(ndev);
+       return 0;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
+       int err;
+
+       gfar_mac_reset(priv);
+
+       err = gfar_alloc_skb_resources(ndev);
+       if (err)
+               return err;
+
+       gfar_init_tx_rx_base(priv);
+
+       smp_mb__before_clear_bit();
+       clear_bit(GFAR_DOWN, &priv->state);
+       smp_mb__after_clear_bit();
+
+       /* Start Rx/Tx DMA and enable the interrupts */
+       gfar_start(priv);
 
        phy_start(priv->phydev);
 
-       gfar_configure_coalescing_all(priv);
+       enable_napi(priv);
 
-       return 0;
+       netif_tx_wake_all_queues(ndev);
 
-irq_fail:
-       free_skb_resources(priv);
-       return err;
+       return 0;
 }
 
 /* Called when something needs to use the ethernet device
@@ -2006,27 +2066,17 @@ static int gfar_enet_open(struct net_device *dev)
        struct gfar_private *priv = netdev_priv(dev);
        int err;
 
-       enable_napi(priv);
-
-       /* Initialize a bunch of registers */
-       init_registers(dev);
-
-       gfar_set_mac_address(dev);
-
        err = init_phy(dev);
+       if (err)
+               return err;
 
-       if (err) {
-               disable_napi(priv);
+       err = gfar_request_irq(priv);
+       if (err)
                return err;
-       }
 
        err = startup_gfar(dev);
-       if (err) {
-               disable_napi(priv);
+       if (err)
                return err;
-       }
-
-       netif_tx_start_all_queues(dev);
 
        device_set_wakeup_enable(&dev->dev, priv->wol_en);
 
@@ -2152,13 +2202,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
                skb_new = skb_realloc_headroom(skb, fcb_len);
                if (!skb_new) {
                        dev->stats.tx_errors++;
-                       kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
 
                if (skb->sk)
                        skb_set_owner_w(skb_new, skb->sk);
-               consume_skb(skb);
+               dev_consume_skb_any(skb);
                skb = skb_new;
        }
 
@@ -2351,8 +2401,6 @@ static int gfar_close(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
 
-       disable_napi(priv);
-
        cancel_work_sync(&priv->reset_task);
        stop_gfar(dev);
 
@@ -2360,7 +2408,7 @@ static int gfar_close(struct net_device *dev)
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;
 
-       netif_tx_stop_all_queues(dev);
+       gfar_free_irq(priv);
 
        return 0;
 }
@@ -2373,77 +2421,9 @@ static int gfar_set_mac_address(struct net_device *dev)
        return 0;
 }
 
-/* Check if rx parser should be activated */
-void gfar_check_rx_parser_mode(struct gfar_private *priv)
-{
-       struct gfar __iomem *regs;
-       u32 tempval;
-
-       regs = priv->gfargrp[0].regs;
-
-       tempval = gfar_read(&regs->rctrl);
-       /* If parse is no longer required, then disable parser */
-       if (tempval & RCTRL_REQ_PARSER) {
-               tempval |= RCTRL_PRSDEP_INIT;
-               priv->uses_rxfcb = 1;
-       } else {
-               tempval &= ~RCTRL_PRSDEP_INIT;
-               priv->uses_rxfcb = 0;
-       }
-       gfar_write(&regs->rctrl, tempval);
-}
-
-/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
-{
-       struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = NULL;
-       unsigned long flags;
-       u32 tempval;
-
-       regs = priv->gfargrp[0].regs;
-       local_irq_save(flags);
-       lock_rx_qs(priv);
-
-       if (features & NETIF_F_HW_VLAN_CTAG_TX) {
-               /* Enable VLAN tag insertion */
-               tempval = gfar_read(&regs->tctrl);
-               tempval |= TCTRL_VLINS;
-               gfar_write(&regs->tctrl, tempval);
-       } else {
-               /* Disable VLAN tag insertion */
-               tempval = gfar_read(&regs->tctrl);
-               tempval &= ~TCTRL_VLINS;
-               gfar_write(&regs->tctrl, tempval);
-       }
-
-       if (features & NETIF_F_HW_VLAN_CTAG_RX) {
-               /* Enable VLAN tag extraction */
-               tempval = gfar_read(&regs->rctrl);
-               tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
-               gfar_write(&regs->rctrl, tempval);
-               priv->uses_rxfcb = 1;
-       } else {
-               /* Disable VLAN tag extraction */
-               tempval = gfar_read(&regs->rctrl);
-               tempval &= ~RCTRL_VLEX;
-               gfar_write(&regs->rctrl, tempval);
-
-               gfar_check_rx_parser_mode(priv);
-       }
-
-       gfar_change_mtu(dev, dev->mtu);
-
-       unlock_rx_qs(priv);
-       local_irq_restore(flags);
-}
-
 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 {
-       int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;
 
        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
@@ -2451,45 +2431,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
                return -EINVAL;
        }
 
-       if (priv->uses_rxfcb)
-               frame_size += GMAC_FCB_LEN;
-
-       frame_size += priv->padding;
-
-       tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-                  INCREMENTAL_BUFFER_SIZE;
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
 
-       /* Only stop and start the controller if it isn't already
-        * stopped, and we changed something
-        */
-       if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+       if (dev->flags & IFF_UP)
                stop_gfar(dev);
 
-       priv->rx_buffer_size = tempsize;
-
        dev->mtu = new_mtu;
 
-       gfar_write(&regs->mrblr, priv->rx_buffer_size);
-       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+       if (dev->flags & IFF_UP)
+               startup_gfar(dev);
 
-       /* If the mtu is larger than the max size for standard
-        * ethernet frames (ie, a jumbo frame), then set maccfg2
-        * to allow huge frames, and to check the length
-        */
-       tempval = gfar_read(&regs->maccfg2);
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
 
-       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-           gfar_has_errata(priv, GFAR_ERRATA_74))
-               tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
-       else
-               tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+       return 0;
+}
 
-       gfar_write(&regs->maccfg2, tempval);
+void reset_gfar(struct net_device *ndev)
+{
+       struct gfar_private *priv = netdev_priv(ndev);
 
-       if ((oldsize != tempsize) && (dev->flags & IFF_UP))
-               startup_gfar(dev);
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
 
-       return 0;
+       stop_gfar(ndev);
+       startup_gfar(ndev);
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
 }
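
reset_gfar() and gfar_change_mtu() above serialize against each other (and against the ethtool paths further down) by spinning on test_and_set_bit_lock(GFAR_RESETTING, &priv->state) and releasing with clear_bit_unlock(), instead of the removed per-queue rx/tx spinlocks. A minimal userspace sketch of that acquire/release bit-lock idea using C11 atomics; demo_* names are illustrative, the kernel helpers themselves are not reimplemented here:

/* Userspace sketch of a GFAR_RESETTING-style bit lock with C11 atomics. */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_RESETTING 1UL      /* bit 0 plays the role of GFAR_RESETTING */

static atomic_ulong demo_state;

static void demo_reconfigure(void)
{
        /* Spin until we own the "resetting" bit (acquire semantics). */
        while (atomic_fetch_or_explicit(&demo_state, DEMO_RESETTING,
                                        memory_order_acquire) & DEMO_RESETTING)
                ;               /* a cpu_relax() equivalent would go here */

        puts("reconfiguring while other reset paths are locked out");

        /* Release the bit so the other reconfiguration paths may proceed. */
        atomic_fetch_and_explicit(&demo_state, ~DEMO_RESETTING,
                                  memory_order_release);
}

int main(void)
{
        demo_reconfigure();
        return 0;
}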
 
 /* gfar_reset_task gets scheduled when a packet has not been
@@ -2501,16 +2469,7 @@ static void gfar_reset_task(struct work_struct *work)
 {
        struct gfar_private *priv = container_of(work, struct gfar_private,
                                                 reset_task);
-       struct net_device *dev = priv->ndev;
-
-       if (dev->flags & IFF_UP) {
-               netif_tx_stop_all_queues(dev);
-               stop_gfar(dev);
-               startup_gfar(dev);
-               netif_tx_start_all_queues(dev);
-       }
-
-       netif_tx_schedule_all(dev);
+       reset_gfar(priv->ndev);
 }
 
 static void gfar_timeout(struct net_device *dev)
@@ -2623,8 +2582,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        }
 
        /* If we freed a buffer, we can restart transmission, if necessary */
-       if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
-               netif_wake_subqueue(dev, tqi);
+       if (tx_queue->num_txbdfree &&
+           netif_tx_queue_stopped(txq) &&
+           !(test_bit(GFAR_DOWN, &priv->state)))
+               netif_wake_subqueue(priv->ndev, tqi);
 
        /* Update dirty indicators */
        tx_queue->skb_dirtytx = skb_dirtytx;
@@ -2633,31 +2594,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&gfargrp->grplock, flags);
-       if (napi_schedule_prep(&gfargrp->napi)) {
-               gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
-               __napi_schedule(&gfargrp->napi);
-       } else {
-               /* Clear IEVENT, so interrupts aren't called again
-                * because of the packets that have already arrived.
-                */
-               gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
-       }
-       spin_unlock_irqrestore(&gfargrp->grplock, flags);
-
-}
-
-/* Interrupt Handler for Transmit complete */
-static irqreturn_t gfar_transmit(int irq, void *grp_id)
-{
-       gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
-       return IRQ_HANDLED;
-}
-
 static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
                           struct sk_buff *skb)
 {
@@ -2728,7 +2664,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
 
 irqreturn_t gfar_receive(int irq, void *grp_id)
 {
-       gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
+       struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+       unsigned long flags;
+       u32 imask;
+
+       if (likely(napi_schedule_prep(&grp->napi_rx))) {
+               spin_lock_irqsave(&grp->grplock, flags);
+               imask = gfar_read(&grp->regs->imask);
+               imask &= IMASK_RX_DISABLED;
+               gfar_write(&grp->regs->imask, imask);
+               spin_unlock_irqrestore(&grp->grplock, flags);
+               __napi_schedule(&grp->napi_rx);
+       } else {
+               /* Clear IEVENT, so interrupts aren't called again
+                * because of the packets that have already arrived.
+                */
+               gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* Interrupt Handler for Transmit complete */
+static irqreturn_t gfar_transmit(int irq, void *grp_id)
+{
+       struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
+       unsigned long flags;
+       u32 imask;
+
+       if (likely(napi_schedule_prep(&grp->napi_tx))) {
+               spin_lock_irqsave(&grp->grplock, flags);
+               imask = gfar_read(&grp->regs->imask);
+               imask &= IMASK_TX_DISABLED;
+               gfar_write(&grp->regs->imask, imask);
+               spin_unlock_irqrestore(&grp->grplock, flags);
+               __napi_schedule(&grp->napi_tx);
+       } else {
+               /* Clear IEVENT, so interrupts aren't called again
+                * because of the packets that have already arrived.
+                */
+               gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
+       }
+
        return IRQ_HANDLED;
 }
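
The reworked gfar_receive()/gfar_transmit() handlers each mask only their own interrupt sources under grplock and schedule a dedicated NAPI instance; if a poll is already pending they merely ack IEVENT. A single-threaded sketch of that ack-or-mask-and-schedule control flow; there is no real hardware or NAPI here, and the demo_* names and mask values are illustrative:

/* Sketch of the "ack or mask-and-schedule" shape of the handlers above. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_IMASK_RX  0x1u
#define DEMO_IMASK_TX  0x2u

static unsigned int demo_imask = DEMO_IMASK_RX | DEMO_IMASK_TX;
static bool demo_rx_poll_scheduled;

static void demo_rx_irq(void)
{
        if (!demo_rx_poll_scheduled) {
                /* First interrupt: silence further Rx interrupts and let
                 * the poller do the work.
                 */
                demo_imask &= ~DEMO_IMASK_RX;
                demo_rx_poll_scheduled = true;
                printf("scheduled rx poll, imask=%#x\n", demo_imask);
        } else {
                /* Poll already pending: just acknowledge the event. */
                printf("rx poll already scheduled, ack only\n");
        }
}

static void demo_rx_poll(void)
{
        printf("polling rx ring...\n");
        demo_rx_poll_scheduled = false;
        demo_imask |= DEMO_IMASK_RX;    /* re-enable Rx interrupts */
}

int main(void)
{
        demo_rx_irq();
        demo_rx_irq();
        demo_rx_poll();
        return 0;
}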
 
@@ -2852,7 +2829,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                                rx_queue->stats.rx_bytes += pkt_len;
                                skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull,
-                                                  &rx_queue->grp->napi);
+                                                  &rx_queue->grp->napi_rx);
 
                        } else {
                                netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2881,66 +2858,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
        return howmany;
 }
 
-static int gfar_poll_sq(struct napi_struct *napi, int budget)
+static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
 {
        struct gfar_priv_grp *gfargrp =
-               container_of(napi, struct gfar_priv_grp, napi);
+               container_of(napi, struct gfar_priv_grp, napi_rx);
        struct gfar __iomem *regs = gfargrp->regs;
-       struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
-       struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+       struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
        int work_done = 0;
 
        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
-       gfar_write(&regs->ievent, IEVENT_RTX_MASK);
-
-       /* run Tx cleanup to completion */
-       if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
-               gfar_clean_tx_ring(tx_queue);
+       gfar_write(&regs->ievent, IEVENT_RX_MASK);
 
        work_done = gfar_clean_rx_ring(rx_queue, budget);
 
        if (work_done < budget) {
+               u32 imask;
                napi_complete(napi);
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);
 
-               gfar_write(&regs->imask, IMASK_DEFAULT);
-
-               /* If we are coalescing interrupts, update the timer
-                * Otherwise, clear it
-                */
-               gfar_write(&regs->txic, 0);
-               if (likely(tx_queue->txcoalescing))
-                       gfar_write(&regs->txic, tx_queue->txic);
-
-               gfar_write(&regs->rxic, 0);
-               if (unlikely(rx_queue->rxcoalescing))
-                       gfar_write(&regs->rxic, rx_queue->rxic);
+               spin_lock_irq(&gfargrp->grplock);
+               imask = gfar_read(&regs->imask);
+               imask |= IMASK_RX_DEFAULT;
+               gfar_write(&regs->imask, imask);
+               spin_unlock_irq(&gfargrp->grplock);
        }
 
        return work_done;
 }
 
-static int gfar_poll(struct napi_struct *napi, int budget)
+static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
+{
+       struct gfar_priv_grp *gfargrp =
+               container_of(napi, struct gfar_priv_grp, napi_tx);
+       struct gfar __iomem *regs = gfargrp->regs;
+       struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
+       u32 imask;
+
+       /* Clear IEVENT, so interrupts aren't called again
+        * because of the packets that have already arrived
+        */
+       gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+       /* run Tx cleanup to completion */
+       if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
+               gfar_clean_tx_ring(tx_queue);
+
+       napi_complete(napi);
+
+       spin_lock_irq(&gfargrp->grplock);
+       imask = gfar_read(&regs->imask);
+       imask |= IMASK_TX_DEFAULT;
+       gfar_write(&regs->imask, imask);
+       spin_unlock_irq(&gfargrp->grplock);
+
+       return 0;
+}
+
+static int gfar_poll_rx(struct napi_struct *napi, int budget)
 {
        struct gfar_priv_grp *gfargrp =
-               container_of(napi, struct gfar_priv_grp, napi);
+               container_of(napi, struct gfar_priv_grp, napi_rx);
        struct gfar_private *priv = gfargrp->priv;
        struct gfar __iomem *regs = gfargrp->regs;
-       struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int work_done = 0, work_done_per_q = 0;
        int i, budget_per_q = 0;
-       int has_tx_work = 0;
        unsigned long rstat_rxf;
        int num_act_queues;
 
        /* Clear IEVENT, so interrupts aren't called again
         * because of the packets that have already arrived
         */
-       gfar_write(&regs->ievent, IEVENT_RTX_MASK);
+       gfar_write(&regs->ievent, IEVENT_RX_MASK);
 
        rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
 
@@ -2948,15 +2940,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        if (num_act_queues)
                budget_per_q = budget/num_act_queues;
 
-       for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-               tx_queue = priv->tx_queue[i];
-               /* run Tx cleanup to completion */
-               if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-                       gfar_clean_tx_ring(tx_queue);
-                       has_tx_work = 1;
-               }
-       }
-
        for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
                /* skip queue if not active */
                if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
@@ -2979,25 +2962,62 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                }
        }
 
-       if (!num_act_queues && !has_tx_work) {
-
+       if (!num_act_queues) {
+               u32 imask;
                napi_complete(napi);
 
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);
 
-               gfar_write(&regs->imask, IMASK_DEFAULT);
-
-               /* If we are coalescing interrupts, update the timer
-                * Otherwise, clear it
-                */
-               gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
-                                         gfargrp->tx_bit_map);
+               spin_lock_irq(&gfargrp->grplock);
+               imask = gfar_read(&regs->imask);
+               imask |= IMASK_RX_DEFAULT;
+               gfar_write(&regs->imask, imask);
+               spin_unlock_irq(&gfargrp->grplock);
        }
 
        return work_done;
 }
 
+static int gfar_poll_tx(struct napi_struct *napi, int budget)
+{
+       struct gfar_priv_grp *gfargrp =
+               container_of(napi, struct gfar_priv_grp, napi_tx);
+       struct gfar_private *priv = gfargrp->priv;
+       struct gfar __iomem *regs = gfargrp->regs;
+       struct gfar_priv_tx_q *tx_queue = NULL;
+       int has_tx_work = 0;
+       int i;
+
+       /* Clear IEVENT, so interrupts aren't called again
+        * because of the packets that have already arrived
+        */
+       gfar_write(&regs->ievent, IEVENT_TX_MASK);
+
+       for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+               tx_queue = priv->tx_queue[i];
+               /* run Tx cleanup to completion */
+               if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+                       gfar_clean_tx_ring(tx_queue);
+                       has_tx_work = 1;
+               }
+       }
+
+       if (!has_tx_work) {
+               u32 imask;
+               napi_complete(napi);
+
+               spin_lock_irq(&gfargrp->grplock);
+               imask = gfar_read(&regs->imask);
+               imask |= IMASK_TX_DEFAULT;
+               gfar_write(&regs->imask, imask);
+               spin_unlock_irq(&gfargrp->grplock);
+       }
+
+       return 0;
+}
+
+
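
Both poll routines above follow the usual NAPI contract: consume at most budget packets, and only once the backlog is fully drained complete the poll and re-enable the interrupt sources under grplock. A toy model of that budget/complete loop; names and numbers are illustrative, not taken from the driver:

/* Toy model of the budget/complete pattern used by gfar_poll_rx()/tx(). */
#include <stdio.h>

static int demo_backlog = 45;          /* pretend packets are waiting */
static int demo_irq_enabled;

static int demo_poll(int budget)
{
        int work_done = 0;

        while (work_done < budget && demo_backlog > 0) {
                demo_backlog--;        /* "process" one packet */
                work_done++;
        }

        if (work_done < budget) {
                /* Ring drained: the equivalent of napi_complete() followed
                 * by restoring the default IMASK under the group lock.
                 */
                demo_irq_enabled = 1;
        }
        return work_done;
}

int main(void)
{
        int budget = 16, done;

        do {
                done = demo_poll(budget);
                printf("poll: %d done, backlog %d, irq %s\n",
                       done, demo_backlog,
                       demo_irq_enabled ? "enabled" : "masked");
        } while (done == budget);      /* keep polling while still busy */

        return 0;
}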
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /* Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
@@ -3101,12 +3121,11 @@ static void adjust_link(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;
 
-       local_irq_save(flags);
-       lock_tx_qs(priv);
+       if (test_bit(GFAR_RESETTING, &priv->state))
+               return;
 
        if (phydev->link) {
                u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3178,8 +3197,6 @@ static void adjust_link(struct net_device *dev)
 
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
 }
 
 /* Update the hash table based on the current list of multicast
index 52bb2b0195cccf3e2749e39d37700bf0186aba4d..84632c569f2c3ba43225c2a9a4f773dd24ea1580 100644 (file)
@@ -9,7 +9,7 @@
  * Maintainer: Kumar Gala
  * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  *
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -377,8 +377,11 @@ extern const char gfar_driver_version[];
                IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
                IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
                | IMASK_PERR)
-#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
-                          & IMASK_DEFAULT)
+#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
+#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
+
+#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
+#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
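
The split IMASK_RX_DISABLED/IMASK_TX_DISABLED masks replace the old combined IMASK_RTX_DISABLED: ANDing the current imask with one of them silences only that direction's sources while every other IMASK_DEFAULT source stays set, which is what the new gfar_receive()/gfar_transmit() handlers rely on. A quick self-check of that arithmetic with invented bit values (the real IMASK_* constants are defined elsewhere in this header and are not reproduced here):

/* Self-check of the IMASK_*_DISABLED macro shape with made-up bit values. */
#include <assert.h>
#include <stdio.h>

#define DEMO_RXFEN0    0x08000000u    /* illustrative values only */
#define DEMO_BSY       0x20000000u
#define DEMO_TXFEN     0x00800000u
#define DEMO_TXBEN     0x00400000u
#define DEMO_EBERR     0x00000010u    /* stands in for the error sources */

#define DEMO_DEFAULT      (DEMO_RXFEN0 | DEMO_BSY | DEMO_TXFEN | \
                           DEMO_TXBEN | DEMO_EBERR)
#define DEMO_RX_DEFAULT   (DEMO_RXFEN0 | DEMO_BSY)
#define DEMO_TX_DEFAULT   (DEMO_TXFEN | DEMO_TXBEN)
#define DEMO_RX_DISABLED  ((~DEMO_RX_DEFAULT) & DEMO_DEFAULT)
#define DEMO_TX_DISABLED  ((~DEMO_TX_DEFAULT) & DEMO_DEFAULT)

int main(void)
{
        unsigned int imask = DEMO_DEFAULT;

        imask &= DEMO_RX_DISABLED;     /* what gfar_receive() does */
        assert((imask & DEMO_RX_DEFAULT) == 0);                /* Rx masked */
        assert((imask & DEMO_TX_DEFAULT) == DEMO_TX_DEFAULT);  /* Tx still on */

        printf("default %#x -> rx-disabled %#x, tx-disabled %#x\n",
               DEMO_DEFAULT, DEMO_RX_DISABLED, DEMO_TX_DISABLED);
        return 0;
}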
 
 /* Fifo management */
 #define FIFO_TX_THR_MASK       0x01ff
@@ -409,7 +412,9 @@ extern const char gfar_driver_version[];
 
 /* This default RIR value directly corresponds
  * to the 3-bit hash value generated */
-#define DEFAULT_RIR0   0x05397700
+#define DEFAULT_8RXQ_RIR0      0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0      0x04104100
 
 /* RQFCR register bits */
 #define RQFCR_GPI              0x80000000
@@ -880,7 +885,6 @@ struct gfar {
 #define FSL_GIANFAR_DEV_HAS_CSUM               0x00000010
 #define FSL_GIANFAR_DEV_HAS_VLAN               0x00000020
 #define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH      0x00000040
-#define FSL_GIANFAR_DEV_HAS_PADDING            0x00000080
 #define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET       0x00000100
 #define FSL_GIANFAR_DEV_HAS_BD_STASHING                0x00000200
 #define FSL_GIANFAR_DEV_HAS_BUF_STASHING       0x00000400
@@ -892,8 +896,8 @@ struct gfar {
 #define DEFAULT_MAPPING        0xFF
 #endif
 
-#define ISRG_SHIFT_TX  0x10
-#define ISRG_SHIFT_RX  0x18
+#define ISRG_RR0       0x80000000
+#define ISRG_TR0       0x00800000
 
 /* The same driver can operate in two modes */
 /* SQ_SG_MODE: Single Queue Single Group Mode
@@ -905,6 +909,22 @@ enum {
        MQ_MG_MODE
 };
 
+/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
+ *     The driver supports a single pair of Rx/Tx queues
+ *     per interrupt group (Rx/Tx int line). MQ_MG mode
+ *     devices have 2 interrupt groups, so the device will
+ *     have a total of 2 Tx and 2 Rx queues in this case.
+ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
+ *     The driver supports all 8 Rx and Tx HW queues,
+ *     each queue mapped by the Device Tree to one of
+ *     the 2 interrupt groups. This mode implies significant
+ *     processing overhead (CPU and controller level).
+ */
+enum gfar_poll_mode {
+       GFAR_SQ_POLLING = 0,
+       GFAR_MQ_POLLING
+};
+
 /*
  * Per TX queue stats
  */
@@ -966,7 +986,6 @@ struct rx_q_stats {
 
 /**
  *     struct gfar_priv_rx_q - per rx queue structure
- *     @rxlock: per queue rx spin lock
  *     @rx_skbuff: skb pointers
  *     @skb_currx: currently use skb pointer
  *     @rx_bd_base: First rx buffer descriptor
@@ -979,8 +998,7 @@ struct rx_q_stats {
  */
 
 struct gfar_priv_rx_q {
-       spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
-       struct  sk_buff ** rx_skbuff;
+       struct  sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
        dma_addr_t rx_bd_dma_base;
        struct  rxbd8 *rx_bd_base;
        struct  rxbd8 *cur_rx;
@@ -1016,17 +1034,20 @@ struct gfar_irqinfo {
  */
 
 struct gfar_priv_grp {
-       spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
-       struct  napi_struct napi;
-       struct gfar_private *priv;
+       spinlock_t grplock __aligned(SMP_CACHE_BYTES);
+       struct  napi_struct napi_rx;
+       struct  napi_struct napi_tx;
        struct gfar __iomem *regs;
-       unsigned int rstat;
-       unsigned long num_rx_queues;
-       unsigned long rx_bit_map;
-       /* cacheline 3 */
+       struct gfar_priv_tx_q *tx_queue;
+       struct gfar_priv_rx_q *rx_queue;
        unsigned int tstat;
+       unsigned int rstat;
+
+       struct gfar_private *priv;
        unsigned long num_tx_queues;
        unsigned long tx_bit_map;
+       unsigned long num_rx_queues;
+       unsigned long rx_bit_map;
 
        struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
 };
@@ -1041,6 +1062,11 @@ enum gfar_errata {
        GFAR_ERRATA_12          = 0x08, /* a.k.a errata eTSEC49 */
 };
 
+enum gfar_dev_state {
+       GFAR_DOWN = 1,
+       GFAR_RESETTING
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblance)
  * The GFAR buffer descriptors track the ring buffers.  The rx_bd_base
@@ -1051,8 +1077,6 @@ enum gfar_errata {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-       unsigned int num_rx_queues;
-
        struct device *dev;
        struct net_device *ndev;
        enum gfar_errata errata;
@@ -1060,6 +1084,7 @@ struct gfar_private {
 
        u16 uses_rxfcb;
        u16 padding;
+       u32 device_flags;
 
        /* HW time stamping enabled flag */
        int hwts_rx_en;
@@ -1069,10 +1094,12 @@ struct gfar_private {
        struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
        struct gfar_priv_grp gfargrp[MAXGROUPS];
 
-       u32 device_flags;
+       unsigned long state;
 
-       unsigned int mode;
+       unsigned short mode;
+       unsigned short poll_mode;
        unsigned int num_tx_queues;
+       unsigned int num_rx_queues;
        unsigned int num_grps;
 
        /* Network Statistics */
@@ -1113,6 +1140,9 @@ struct gfar_private {
        unsigned int total_tx_ring_size;
        unsigned int total_rx_ring_size;
 
+       u32 rqueue;
+       u32 tqueue;
+
        /* RX per device parameters */
        unsigned int rx_stash_size;
        unsigned int rx_stash_index;
@@ -1127,11 +1157,6 @@ struct gfar_private {
        u32 __iomem *hash_regs[16];
        int hash_width;
 
-       /* global parameters */
-       unsigned int fifo_threshold;
-       unsigned int fifo_starve;
-       unsigned int fifo_starve_off;
-
        /*Filer table*/
        unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
        unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,21 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv,
        *fpr = gfar_read(&regs->rqfpr);
 }
 
-void lock_rx_qs(struct gfar_private *priv);
-void lock_tx_qs(struct gfar_private *priv);
-void unlock_rx_qs(struct gfar_private *priv);
-void unlock_tx_qs(struct gfar_private *priv);
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+       struct gfar __iomem *regs = priv->gfargrp[0].regs;
+       u32 __iomem *baddr = &regs->isrg0;
+       u32 isrg = 0;
+       int grp_idx, i;
+
+       for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+               struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+               for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+                       isrg |= (ISRG_RR0 >> i);
+               }
+
+               for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+                       isrg |= (ISRG_TR0 >> i);
+               }
+
+               gfar_write(baddr, isrg);
+
+               baddr++;
+               isrg = 0;
+       }
+}
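+
+The new gfar_write_isrg() above steers each Rx/Tx queue to its interrupt group by ORing ISRG_RR0 >> i (Rx queue i) and ISRG_TR0 >> i (Tx queue i) into one ISRG word per group. A standalone sketch that computes those words for a hypothetical 2-group, 2-Rx/2-Tx layout; the bit maps are invented for the example, only the ISRG_RR0/ISRG_TR0 values come from the hunk above:
+
+/* Compute ISRG-style per-group values for a hypothetical queue layout:
+ * group 0 owns Rx0/Tx0, group 1 owns Rx1/Tx1.
+ */
+#include <stdio.h>
+
+#define ISRG_RR0 0x80000000u
+#define ISRG_TR0 0x00800000u
+
+int main(void)
+{
+        unsigned long rx_bit_map[2] = { 0x1, 0x2 };  /* per-group Rx queues */
+        unsigned long tx_bit_map[2] = { 0x1, 0x2 };  /* per-group Tx queues */
+        int grp, q;
+
+        for (grp = 0; grp < 2; grp++) {
+                unsigned int isrg = 0;
+
+                for (q = 0; q < 2; q++) {
+                        if (rx_bit_map[grp] & (1UL << q))
+                                isrg |= ISRG_RR0 >> q;
+                        if (tx_bit_map[grp] & (1UL << q))
+                                isrg |= ISRG_TR0 >> q;
+                }
+                printf("ISRG%d = %#010x\n", grp, isrg);
+        }
+        return 0;
+}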
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
-void gfar_halt(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
+void gfar_halt(struct gfar_private *priv);
+void gfar_start(struct gfar_private *priv);
 void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
                   u32 regnum, u32 read);
 void gfar_configure_coalescing_all(struct gfar_private *priv);
-void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, netdev_features_t features);
-void gfar_check_rx_parser_mode(struct gfar_private *priv);
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 63d234419cc1febaab1a9f143a607c19790289a6..891dbee6e6c14d2394cc2dff00092f448faf3dc2 100644 (file)
 
 #include "gianfar.h"
 
-extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
-                             int rx_work_limit);
-
 #define GFAR_MAX_COAL_USECS 0xffff
 #define GFAR_MAX_COAL_FRAMES 0xff
 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev,
                          struct ethtool_coalesce *cvals)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       int i = 0;
+       int i, err = 0;
 
        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
                return -EOPNOTSUPP;
 
-       /* Set up rx coalescing */
-       /* As of now, we will enable/disable coalescing for all
-        * queues together in case of eTSEC2, this will be modified
-        * along with the ethtool interface
-        */
-       if ((cvals->rx_coalesce_usecs == 0) ||
-           (cvals->rx_max_coalesced_frames == 0)) {
-               for (i = 0; i < priv->num_rx_queues; i++)
-                       priv->rx_queue[i]->rxcoalescing = 0;
-       } else {
-               for (i = 0; i < priv->num_rx_queues; i++)
-                       priv->rx_queue[i]->rxcoalescing = 1;
-       }
-
        if (NULL == priv->phydev)
                return -ENODEV;
 
@@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev,
                return -EINVAL;
        }
 
+       /* Check the bounds of the values */
+       if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+               netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+                           GFAR_MAX_COAL_USECS);
+               return -EINVAL;
+       }
+
+       if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+               netdev_info(dev, "Coalescing is limited to %d frames\n",
+                           GFAR_MAX_COAL_FRAMES);
+               return -EINVAL;
+       }
+
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
+
+       /* Set up rx coalescing */
+       if ((cvals->rx_coalesce_usecs == 0) ||
+           (cvals->rx_max_coalesced_frames == 0)) {
+               for (i = 0; i < priv->num_rx_queues; i++)
+                       priv->rx_queue[i]->rxcoalescing = 0;
+       } else {
+               for (i = 0; i < priv->num_rx_queues; i++)
+                       priv->rx_queue[i]->rxcoalescing = 1;
+       }
+
        for (i = 0; i < priv->num_rx_queues; i++) {
                priv->rx_queue[i]->rxic = mk_ic_value(
                        cvals->rx_max_coalesced_frames,
@@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev,
                        priv->tx_queue[i]->txcoalescing = 1;
        }
 
-       /* Check the bounds of the values */
-       if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
-               netdev_info(dev, "Coalescing is limited to %d microseconds\n",
-                           GFAR_MAX_COAL_USECS);
-               return -EINVAL;
-       }
-
-       if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
-               netdev_info(dev, "Coalescing is limited to %d frames\n",
-                           GFAR_MAX_COAL_FRAMES);
-               return -EINVAL;
-       }
-
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->txic = mk_ic_value(
                        cvals->tx_max_coalesced_frames,
                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
        }
 
-       gfar_configure_coalescing_all(priv);
+       if (dev->flags & IFF_UP) {
+               stop_gfar(dev);
+               err = startup_gfar(dev);
+       } else {
+               gfar_mac_reset(priv);
+       }
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
 
-       return 0;
+       return err;
 }
 
 /* Fills in rvals with the current ring parameters.  Currently,
@@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev,
 }
 
 /* Change the current ring parameters, stopping the controller if
- * necessary so that we don't mess things up while we're in
- * motion.  We wait for the ring to be clean before reallocating
- * the rings.
+ * necessary so that we don't mess things up while we're in motion.
  */
 static int gfar_sringparam(struct net_device *dev,
                           struct ethtool_ringparam *rvals)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       int err = 0, i = 0;
+       int err = 0, i;
 
        if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
                return -EINVAL;
@@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev,
                return -EINVAL;
        }
 
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
 
-       if (dev->flags & IFF_UP) {
-               unsigned long flags;
-
-               /* Halt TX and RX, and process the frames which
-                * have already been received
-                */
-               local_irq_save(flags);
-               lock_tx_qs(priv);
-               lock_rx_qs(priv);
-
-               gfar_halt(dev);
-
-               unlock_rx_qs(priv);
-               unlock_tx_qs(priv);
-               local_irq_restore(flags);
-
-               for (i = 0; i < priv->num_rx_queues; i++)
-                       gfar_clean_rx_ring(priv->rx_queue[i],
-                                          priv->rx_queue[i]->rx_ring_size);
-
-               /* Now we take down the rings to rebuild them */
+       if (dev->flags & IFF_UP)
                stop_gfar(dev);
-       }
 
-       /* Change the size */
-       for (i = 0; i < priv->num_rx_queues; i++) {
+       /* Change the sizes */
+       for (i = 0; i < priv->num_rx_queues; i++)
                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+       for (i = 0; i < priv->num_tx_queues; i++)
                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
-               priv->tx_queue[i]->num_txbdfree =
-                       priv->tx_queue[i]->tx_ring_size;
-       }
 
        /* Rebuild the rings with the new size */
-       if (dev->flags & IFF_UP) {
+       if (dev->flags & IFF_UP)
                err = startup_gfar(dev);
-               netif_tx_wake_all_queues(dev);
-       }
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
        return err;
 }
 
@@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev,
 
 int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       unsigned long flags;
-       int err = 0, i = 0;
        netdev_features_t changed = dev->features ^ features;
+       struct gfar_private *priv = netdev_priv(dev);
+       int err = 0;
 
-       if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
-               gfar_vlan_mode(dev, features);
-
-       if (!(changed & NETIF_F_RXCSUM))
+       if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+                        NETIF_F_RXCSUM)))
                return 0;
 
-       if (dev->flags & IFF_UP) {
-               /* Halt TX and RX, and process the frames which
-                * have already been received
-                */
-               local_irq_save(flags);
-               lock_tx_qs(priv);
-               lock_rx_qs(priv);
-
-               gfar_halt(dev);
+       while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+               cpu_relax();
 
-               unlock_tx_qs(priv);
-               unlock_rx_qs(priv);
-               local_irq_restore(flags);
-
-               for (i = 0; i < priv->num_rx_queues; i++)
-                       gfar_clean_rx_ring(priv->rx_queue[i],
-                                          priv->rx_queue[i]->rx_ring_size);
+       dev->features = features;
 
+       if (dev->flags & IFF_UP) {
                /* Now we take down the rings to rebuild them */
                stop_gfar(dev);
-
-               dev->features = features;
-
                err = startup_gfar(dev);
-               netif_tx_wake_all_queues(dev);
+       } else {
+               gfar_mac_reset(priv);
        }
+
+       clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
        return err;
 }
 
@@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
        if (tab->index > MAX_FILER_IDX - 1)
                return -EBUSY;
 
-       /* Avoid inconsistent filer table to be processed */
-       lock_rx_qs(priv);
-
        /* Fill regular entries */
        for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
             i++)
@@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
         */
        gfar_write_filer(priv, i, 0x20, 0x0);
 
-       unlock_rx_qs(priv);
-
        return 0;
 }
 
@@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
        struct gfar_private *priv = netdev_priv(dev);
        int ret = 0;
 
+       if (test_bit(GFAR_RESETTING, &priv->state))
+               return -EBUSY;
+
        mutex_lock(&priv->rx_queue_access);
 
        switch (cmd->cmd) {
index abc28da2704210e6a7143bc140b22a5c500d33ce..bb568006f37df605e808d1d6c7195a09b6e8d48f 100644 (file)
@@ -414,6 +414,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
        .n_alarm        = 0,
        .n_ext_ts       = N_EXT_TS,
        .n_per_out      = 0,
+       .n_pins         = 0,
        .pps            = 1,
        .adjfreq        = ptp_gianfar_adjfreq,
        .adjtime        = ptp_gianfar_adjtime,
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644 (file)
index e02dd13..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * drivers/net/ethernet/freescale/gianfar_sysfs.c
- *
- * Gianfar Ethernet Driver
- * This driver is designed for the non-CPM ethernet controllers
- * on the 85xx and 83xx family of integrated processors
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
- *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- * Sysfs file creation and management
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-
-#include "gianfar.h"
-
-static ssize_t gfar_show_bd_stash(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-       return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
-}
-
-static ssize_t gfar_set_bd_stash(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       int new_setting = 0;
-       u32 temp;
-       unsigned long flags;
-
-       if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
-               return count;
-
-
-       /* Find out the new setting */
-       if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
-               new_setting = 1;
-       else if (!strncmp("off", buf, count - 1) ||
-                !strncmp("0", buf, count - 1))
-               new_setting = 0;
-       else
-               return count;
-
-
-       local_irq_save(flags);
-       lock_rx_qs(priv);
-
-       /* Set the new stashing value */
-       priv->bd_stash_en = new_setting;
-
-       temp = gfar_read(&regs->attr);
-
-       if (new_setting)
-               temp |= ATTR_BDSTASH;
-       else
-               temp &= ~(ATTR_BDSTASH);
-
-       gfar_write(&regs->attr, temp);
-
-       unlock_rx_qs(priv);
-       local_irq_restore(flags);
-
-       return count;
-}
-
-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
-
-static ssize_t gfar_show_rx_stash_size(struct device *dev,
-                                      struct device_attribute *attr, char *buf)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-       return sprintf(buf, "%d\n", priv->rx_stash_size);
-}
-
-static ssize_t gfar_set_rx_stash_size(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned int length = simple_strtoul(buf, NULL, 0);
-       u32 temp;
-       unsigned long flags;
-
-       if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
-               return count;
-
-       local_irq_save(flags);
-       lock_rx_qs(priv);
-
-       if (length > priv->rx_buffer_size)
-               goto out;
-
-       if (length == priv->rx_stash_size)
-               goto out;
-
-       priv->rx_stash_size = length;
-
-       temp = gfar_read(&regs->attreli);
-       temp &= ~ATTRELI_EL_MASK;
-       temp |= ATTRELI_EL(length);
-       gfar_write(&regs->attreli, temp);
-
-       /* Turn stashing on/off as appropriate */
-       temp = gfar_read(&regs->attr);
-
-       if (length)
-               temp |= ATTR_BUFSTASH;
-       else
-               temp &= ~(ATTR_BUFSTASH);
-
-       gfar_write(&regs->attr, temp);
-
-out:
-       unlock_rx_qs(priv);
-       local_irq_restore(flags);
-
-       return count;
-}
-
-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
-                  gfar_set_rx_stash_size);
-
-/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-       return sprintf(buf, "%d\n", priv->rx_stash_index);
-}
-
-static ssize_t gfar_set_rx_stash_index(struct device *dev,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned short index = simple_strtoul(buf, NULL, 0);
-       u32 temp;
-       unsigned long flags;
-
-       if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
-               return count;
-
-       local_irq_save(flags);
-       lock_rx_qs(priv);
-
-       if (index > priv->rx_stash_size)
-               goto out;
-
-       if (index == priv->rx_stash_index)
-               goto out;
-
-       priv->rx_stash_index = index;
-
-       temp = gfar_read(&regs->attreli);
-       temp &= ~ATTRELI_EI_MASK;
-       temp |= ATTRELI_EI(index);
-       gfar_write(&regs->attreli, temp);
-
-out:
-       unlock_rx_qs(priv);
-       local_irq_restore(flags);
-
-       return count;
-}
-
-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
-                  gfar_set_rx_stash_index);
-
-static ssize_t gfar_show_fifo_threshold(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-       return sprintf(buf, "%d\n", priv->fifo_threshold);
-}
-
-static ssize_t gfar_set_fifo_threshold(struct device *dev,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned int length = simple_strtoul(buf, NULL, 0);
-       u32 temp;
-       unsigned long flags;
-
-       if (length > GFAR_MAX_FIFO_THRESHOLD)
-               return count;
-
-       local_irq_save(flags);
-       lock_tx_qs(priv);
-
-       priv->fifo_threshold = length;
-
-       temp = gfar_read(&regs->fifo_tx_thr);
-       temp &= ~FIFO_TX_THR_MASK;
-       temp |= length;
-       gfar_write(&regs->fifo_tx_thr, temp);
-
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
-
-       return count;
-}
-
-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
-                  gfar_set_fifo_threshold);
-
-static ssize_t gfar_show_fifo_starve(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-       return sprintf(buf, "%d\n", priv->fifo_starve);
-}
-
-static ssize_t gfar_set_fifo_starve(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t count)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned int num = simple_strtoul(buf, NULL, 0);
-       u32 temp;
-       unsigned long flags;
-
-       if (num > GFAR_MAX_FIFO_STARVE)
-               return count;
-
-       local_irq_save(flags);
-       lock_tx_qs(priv);
-
-       priv->fifo_starve = num;
-
-       temp = gfar_read(&regs->fifo_tx_starve);
-       temp &= ~FIFO_TX_STARVE_MASK;
-       temp |= num;
-       gfar_write(&regs->fifo_tx_starve, temp);
-
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
-
-       return count;
-}
-
-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
-                  gfar_set_fifo_starve);
-
-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
-                                        struct device_attribute *attr,
-                                        char *buf)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-       return sprintf(buf, "%d\n", priv->fifo_starve_off);
-}
-
-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf, size_t count)
-{
-       struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-       struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned int num = simple_strtoul(buf, NULL, 0);
-       u32 temp;
-       unsigned long flags;
-
-       if (num > GFAR_MAX_FIFO_STARVE_OFF)
-               return count;
-
-       local_irq_save(flags);
-       lock_tx_qs(priv);
-
-       priv->fifo_starve_off = num;
-
-       temp = gfar_read(&regs->fifo_tx_starve_shutoff);
-       temp &= ~FIFO_TX_STARVE_OFF_MASK;
-       temp |= num;
-       gfar_write(&regs->fifo_tx_starve_shutoff, temp);
-
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
-
-       return count;
-}
-
-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
-                  gfar_set_fifo_starve_off);
-
-void gfar_init_sysfs(struct net_device *dev)
-{
-       struct gfar_private *priv = netdev_priv(dev);
-       int rc;
-
-       /* Initialize the default values */
-       priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
-       priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
-       priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
-
-       /* Create our sysfs files */
-       rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
-       rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
-       rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
-       rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
-       rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
-       rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
-       if (rc)
-               dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
-}
index 72291a8904a90118f88913af0e3c453d9ce3ffff..c8299c31b21f9f5c52dc380f9b867597c1b8bcdf 100644 (file)
@@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
                dev->stats.tx_packets++;
 
-               dev_kfree_skb(skb);
+               dev_consume_skb_any(skb);
 
                ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
                ugeth->skb_dirtytx[txQ] =
index 17fca323c1431047046b03511aac4b52aaf91135..c984998b34a02dfe4095be9479a71eb0326a1fbb 100644 (file)
@@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                       dev->name));
                dev->stats.tx_dropped++;
 
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
        } else {
                if (++lp->next_tx_cmd == TX_RING_SIZE)
                        lp->next_tx_cmd = 0;
index 7628e0fd84554fd56eca5f4181f2ff31c85eca0d..538903bf13bce736161a96af324a2b9ebc9aecd9 100644 (file)
@@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
                skb_arr[index] = skb;
                tmp_addr = ehea_map_vaddr(skb->data);
                if (tmp_addr == -1) {
-                       dev_kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = 0;
                        break;
@@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 
                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
-                       dev_kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                        pr->sq_skba.arr[index] = NULL;
                }
 
@@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
                skb_copy_bits(skb, 0, imm_data, skb->len);
 
        swqe->immediate_data_length = skb->len;
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 }
 
 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
index 1fc8334fc181ad6d949ecffa2a289eb899c2a160..c9127562bd22cb51114249d35264bcbb679dcc3d 100644 (file)
@@ -1044,7 +1044,7 @@ retry_bounce:
                               DMA_TO_DEVICE);
 
 out:
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
        return NETDEV_TX_OK;
 
 map_failed_frags:
@@ -1072,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
        unsigned long lpar_rc;
 
 restart_poll:
-       do {
+       while (frames_processed < budget) {
                if (!ibmveth_rxq_pending_buffer(adapter))
                        break;
 
@@ -1121,7 +1121,7 @@ restart_poll:
                        netdev->stats.rx_bytes += length;
                        frames_processed++;
                }
-       } while (frames_processed < budget);
+       }
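
Switching the receive loop from do { ... } while (frames_processed < budget) to while (frames_processed < budget) { ... } moves the budget test before the first iteration, so a poll entered with a zero or already-exhausted budget (as netpoll can do) no longer processes a frame past its budget. A toy comparison of the two loop shapes, purely illustrative:

/* Toy comparison: with budget == 0 the do/while form still runs once,
 * the while form does not.
 */
#include <stdio.h>

static int count_do_while(int budget)
{
        int n = 0;

        do {
                n++;            /* "process one frame" */
        } while (n < budget);
        return n;
}

static int count_while(int budget)
{
        int n = 0;

        while (n < budget)
                n++;            /* "process one frame" */
        return n;
}

int main(void)
{
        printf("budget 0: do/while=%d while=%d\n",
               count_do_while(0), count_while(0));
        return 0;
}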
 
        ibmveth_replenish_task(adapter);
 
index bf7a01ef9a57fd532e3b8433733a22f0bfcefc27..b56461ce674c7832152cdab4c0b4c3b27789d9a4 100644 (file)
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
         * testing, ie sending frames with bad CRC.
         */
        if (unlikely(skb->no_fcs))
-               cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+               cb->command |= cpu_to_le16(cb_tx_nc);
        else
-               cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+               cb->command &= ~cpu_to_le16(cb_tx_nc);
 
        /* interrupt every 16 packets regardless of delay */
        if ((nic->cbs_avail & ~15) == nic->cbs_avail)
index ff2d806eaef71bc2a7a861a8971c228cd6856e2a..a5f6b11d6992e63aa9af8da9f52540697455b2d1 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* 80003ES2LAN Gigabit Ethernet Controller (Copper)
  * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
index 90d363b2d2802c238cda0b1e792a21ac5afb2562..535a9430976df7653671e23d62adc1140973579e 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_80003ES2LAN_H_
 #define _E1000E_80003ES2LAN_H_
index 8fed74e3fa53da73ff781a215ac26714911a015f..e0aa7f1efb08ceb50d9049128dcbdb66f442b17d 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* 82571EB Gigabit Ethernet Controller
  * 82571EB Gigabit Ethernet Controller (Copper)
index 08e24dc3dc0e9d49c7742fed7b829826e2cea095..2e758f796d6099bc3dfb05107ea3a6ebc81dec09 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_82571_H_
 #define _E1000E_82571_H_
index c2dcfcc10857fc17608194c0a4c09140f6606143..106de493373ce6c73e85e7e2be6f6e8c3e61df0c 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 #
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
 #
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
index 351c94a0cf74944b95c278c69709257216af64ef..d18e89212575626b880a173f08af119be2bdd20a 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_DEFINES_H_
 #define _E1000_DEFINES_H_
 
 /* Definitions for power management and wakeup registers */
 /* Wake Up Control */
-#define E1000_WUC_APME       0x00000001 /* APM Enable */
-#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
-#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+#define E1000_WUC_APME         0x00000001      /* APM Enable */
+#define E1000_WUC_PME_EN       0x00000002      /* PME Enable */
+#define E1000_WUC_PME_STATUS   0x00000004      /* PME Status */
+#define E1000_WUC_APMPME       0x00000008      /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE     0x00000100      /* if PHY supports wakeup */
 
 /* Wake Up Filter Control */
 #define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
index 0150f7fc893d4ae6985096f17e5a4678f39fc3b3..1471c5464a89e72d87aa571d4a1b15d791a3f015 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* Linux PRO/1000 Ethernet Driver main header file */
 
@@ -269,6 +262,7 @@ struct e1000_adapter {
        u32 tx_head_addr;
        u32 tx_fifo_size;
        u32 tx_dma_failed;
+       u32 tx_hwtstamp_timeouts;
 
        /* Rx */
        bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
@@ -333,7 +327,6 @@ struct e1000_adapter {
        struct work_struct update_phy_task;
        struct work_struct print_hang_task;
 
-       bool idle_check;
        int phy_hang_count;
 
        u16 tx_ring_count;
@@ -342,6 +335,7 @@ struct e1000_adapter {
        struct hwtstamp_config hwtstamp_config;
        struct delayed_work systim_overflow_work;
        struct sk_buff *tx_hwtstamp_skb;
+       unsigned long tx_hwtstamp_start;
        struct work_struct tx_hwtstamp_work;
        spinlock_t systim_lock; /* protects SYSTIML/H registers */
        struct cyclecounter cc;
@@ -476,7 +470,7 @@ void e1000e_check_options(struct e1000_adapter *adapter);
 void e1000e_set_ethtool_ops(struct net_device *netdev);
 
 int e1000e_up(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter, bool reset);
 void e1000e_reinit_locked(struct e1000_adapter *adapter);
 void e1000e_reset(struct e1000_adapter *adapter);
 void e1000e_power_up_phy(struct e1000_adapter *adapter);
index d14c8f53384cd415c3933015c02bf9d2a369ba11..cad250bc1b99fc81d51fb8956eee74c9acc3bc7e 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* ethtool support for e1000 */
 
@@ -111,6 +104,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
        E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
        E1000_STAT("uncorr_ecc_errors", uncorr_errors),
        E1000_STAT("corr_ecc_errors", corr_errors),
+       E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
 };
 
 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
@@ -332,7 +326,7 @@ static int e1000_set_settings(struct net_device *netdev,
 
        /* reset the link */
        if (netif_running(adapter->netdev)) {
-               e1000e_down(adapter);
+               e1000e_down(adapter, true);
                e1000e_up(adapter);
        } else {
                e1000e_reset(adapter);
@@ -380,7 +374,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
                hw->fc.requested_mode = e1000_fc_default;
                if (netif_running(adapter->netdev)) {
-                       e1000e_down(adapter);
+                       e1000e_down(adapter, true);
                        e1000e_up(adapter);
                } else {
                        e1000e_reset(adapter);
@@ -726,7 +720,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
 
        pm_runtime_get_sync(netdev->dev.parent);
 
-       e1000e_down(adapter);
+       e1000e_down(adapter, true);
 
        /* We can't just free everything and then setup again, because the
         * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
@@ -924,15 +918,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
                }
                if (mac->type == e1000_pch2lan) {
                        /* SHRAH[0,1,2] different than previous */
-                       if (i == 7)
+                       if (i == 1)
                                mask &= 0xFFF4FFFF;
                        /* SHRAH[3] different than SHRAH[0,1,2] */
-                       if (i == 10)
+                       if (i == 4)
                                mask |= (1 << 30);
+                       /* RAR[1-6] owned by management engine - skipping */
+                       if (i > 0)
+                               i += 6;
                }
 
                REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
                                       0xFFFFFFFF);
+               /* reset index to actual value */
+               if ((mac->type == e1000_pch2lan) && (i > 6))
+                       i -= 6;
        }
 
        for (i = 0; i < mac->mta_reg_count; i++)
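The reworked register test above walks the receive-address array on pch2lan parts but skips RAR[1-6], which are owned by the management engine: the loop index is bumped past them before the pattern test and restored afterwards so the counter still advances one driver-visible entry at a time. A minimal standalone sketch of that skip-and-restore pattern, with pattern_test() standing in for REG_PATTERN_TEST_ARRAY and the entry count taken from the new E1000_PCH2_RAR_ENTRIES value of 5:

#include <stdio.h>

#define DRIVER_VISIBLE_ENTRIES 5        /* RAR[0] + SHRA[0-3], per E1000_PCH2_RAR_ENTRIES */

static void pattern_test(int idx)
{
        printf("pattern-testing RA pair %d\n", idx);    /* stand-in for REG_PATTERN_TEST_ARRAY */
}

int main(void)
{
        for (int i = 0; i < DRIVER_VISIBLE_ENTRIES; i++) {
                /* RAR[1-6] are owned by the management engine - skip them */
                if (i > 0)
                        i += 6;

                pattern_test(i);

                /* put the index back so the loop advances one entry at a time */
                if (i > 6)
                        i -= 6;
        }
        return 0;
}

This tests entries 0 and 7-10, i.e. RAR[0] plus SHRA[0-3], mirroring the hunk above.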
index b7f38435d1fdb90336dd0c9511d78c7cd273e8d7..6b3de5f39a97862e2f5742b14a24530ba6ecc33f 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
@@ -655,12 +648,20 @@ struct e1000_shadow_ram {
 
 #define E1000_ICH8_SHADOW_RAM_WORDS            2048
 
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+       e1000_ulp_state_unknown,
+       e1000_ulp_state_off,
+       e1000_ulp_state_on,
+};
+
 struct e1000_dev_spec_ich8lan {
        bool kmrn_lock_loss_workaround_enabled;
        struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
        bool nvm_k1_enabled;
        bool eee_disable;
        u16 eee_lp_ability;
+       enum e1000_ulp_state ulp_state;
 };
 
 struct e1000_hw {
index 42f0f6717511c21bb0f16edbeee050cc4878db96..9866f264f55e33a8e564757730ada0d6ab7c6a92 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* 82562G 10/100 Network Connection
  * 82562G-2 10/100 Network Connection
  * 82578DC Gigabit Network Connection
  * 82579LM Gigabit Network Connection
  * 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
  */
 
 #include "e1000.h"
@@ -142,7 +143,9 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -238,6 +241,47 @@ out:
        return true;
 }
 
+/**
+ *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ *  @hw: pointer to the HW structure
+ *
+ *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ *  used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+       u32 mac_reg;
+
+       /* Set Phy Config Counter to 50msec */
+       mac_reg = er32(FEXTNVM3);
+       mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+       mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+       ew32(FEXTNVM3, mac_reg);
+
+       /* Toggle LANPHYPC Value bit */
+       mac_reg = er32(CTRL);
+       mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+       mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+       ew32(CTRL, mac_reg);
+       e1e_flush();
+       usleep_range(10, 20);
+       mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+       ew32(CTRL, mac_reg);
+       e1e_flush();
+
+       if (hw->mac.type < e1000_pch_lpt) {
+               msleep(50);
+       } else {
+               u16 count = 20;
+
+               do {
+                       usleep_range(5000, 10000);
+               } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
+
+               msleep(30);
+       }
+}
+
 /**
  *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
  *  @hw: pointer to the HW structure
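The helper added in the hunk above power-cycles the PHY by pulsing the LANPHYPC value bit under software override and then waiting for the PHY to come back: a fixed 50 ms sleep on pre-LPT MACs, or a bounded poll of the LPCD indication followed by a 30 ms settle on LPT and newer. A rough userspace sketch of that toggle-then-poll shape follows; reg_read()/reg_write()/sleep_ms() are hypothetical stubs standing in for er32/ew32 and the kernel delay helpers, and the bit positions are placeholders, not the real register layout.

#include <stdbool.h>
#include <stdio.h>

#define CTRL_LANPHYPC_OVERRIDE  (1u << 11)      /* placeholder bit positions */
#define CTRL_LANPHYPC_VALUE     (1u << 12)
#define CTRL_EXT_LPCD           (1u << 2)

static unsigned int fake_ctrl;
static unsigned int fake_ctrl_ext = CTRL_EXT_LPCD;

static unsigned int reg_read(unsigned int *reg) { return *reg; }
static void reg_write(unsigned int *reg, unsigned int val) { *reg = val; }
static void sleep_ms(unsigned int ms) { (void)ms; /* stand-in for msleep/usleep_range */ }

static void toggle_lanphypc(bool lpt_or_newer)
{
        unsigned int ctrl = reg_read(&fake_ctrl);

        /* drive LANPHYPC low under software override, then release the override */
        ctrl |= CTRL_LANPHYPC_OVERRIDE;
        ctrl &= ~CTRL_LANPHYPC_VALUE;
        reg_write(&fake_ctrl, ctrl);
        sleep_ms(1);
        reg_write(&fake_ctrl, ctrl & ~CTRL_LANPHYPC_OVERRIDE);

        if (!lpt_or_newer) {
                sleep_ms(50);                   /* fixed settle time */
        } else {
                int count = 20;

                /* bounded poll for the LAN power-cycle-done indication */
                do {
                        sleep_ms(10);
                } while (!(reg_read(&fake_ctrl_ext) & CTRL_EXT_LPCD) && count--);

                sleep_ms(30);
        }
}

int main(void)
{
        toggle_lanphypc(true);
        puts("LANPHYPC toggled");
        return 0;
}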
@@ -247,6 +291,7 @@ out:
  **/
 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 {
+       struct e1000_adapter *adapter = hw->adapter;
        u32 mac_reg, fwsm = er32(FWSM);
        s32 ret_val;
 
@@ -255,6 +300,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
         */
        e1000_gate_hw_phy_config_ich8lan(hw, true);
 
+       /* It is not possible to be certain of the current state of ULP
+        * so forcibly disable it.
+        */
+       hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+       e1000_disable_ulp_lpt_lp(hw, true);
+
        ret_val = hw->phy.ops.acquire(hw);
        if (ret_val) {
                e_dbg("Failed to initialize PHY flow\n");
@@ -300,33 +351,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
                        break;
                }
 
-               e_dbg("Toggling LANPHYPC\n");
-
-               /* Set Phy Config Counter to 50msec */
-               mac_reg = er32(FEXTNVM3);
-               mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
-               mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
-               ew32(FEXTNVM3, mac_reg);
-
                /* Toggle LANPHYPC Value bit */
-               mac_reg = er32(CTRL);
-               mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
-               mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
-               ew32(CTRL, mac_reg);
-               e1e_flush();
-               usleep_range(10, 20);
-               mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
-               ew32(CTRL, mac_reg);
-               e1e_flush();
-               if (hw->mac.type < e1000_pch_lpt) {
-                       msleep(50);
-               } else {
-                       u16 count = 20;
-                       do {
-                               usleep_range(5000, 10000);
-                       } while (!(er32(CTRL_EXT) &
-                                  E1000_CTRL_EXT_LPCD) && count--);
-                       usleep_range(30000, 60000);
+               e1000_toggle_lanphypc_pch_lpt(hw);
+               if (hw->mac.type >= e1000_pch_lpt) {
                        if (e1000_phy_is_accessible_pchlan(hw))
                                break;
 
@@ -349,12 +376,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 
        hw->phy.ops.release(hw);
        if (!ret_val) {
+
+               /* Check to see if able to reset PHY.  Print error if not */
+               if (hw->phy.ops.check_reset_block(hw)) {
+                       e_err("Reset blocked by ME\n");
+                       goto out;
+               }
+
                /* Reset the PHY before any access to it.  Doing so, ensures
                 * that the PHY is in a known good state before we read/write
                 * PHY registers.  The generic reset is sufficient here,
                 * because we haven't determined the PHY type yet.
                 */
                ret_val = e1000e_phy_hw_reset_generic(hw);
+               if (ret_val)
+                       goto out;
+
+               /* On a successful reset, possibly need to wait for the PHY
+                * to quiesce to an accessible state before returning control
+                * to the calling function.  If the PHY does not quiesce, then
+                * return E1000_BLK_PHY_RESET, as this is the condition that
+                * the PHY is in.
+                */
+               ret_val = hw->phy.ops.check_reset_block(hw);
+               if (ret_val)
+                       e_err("ME blocked access to PHY after reset\n");
        }
 
 out:
@@ -724,8 +770,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
  *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
  *  the link and the EEE capabilities of the link partner.  The LPI Control
  *  register bits will remain set only if/when link is up.
+ *
+ *  EEE LPI must not be asserted earlier than one second after link is up.
+ *  On 82579, EEE LPI should not be enabled until such time otherwise there
+ *  can be link issues with some switches.  Other devices can have EEE LPI
+ *  enabled immediately upon link up since they have a timer in hardware which
+ *  prevents LPI from being asserted too early.
  **/
-static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 {
        struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
        s32 ret_val;
@@ -978,6 +1030,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
        return 0;
 }
 
+/**
+ *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
+ *  @hw: pointer to the HW structure
+ *  @to_sx: boolean indicating a system power state transition to Sx
+ *
+ *  When link is down, configure ULP mode to significantly reduce the power
+ *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
+ *  ME firmware to start the ULP configuration.  If not on an ME enabled
+ *  system, configure the ULP mode by software.
+ */
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
+{
+       u32 mac_reg;
+       s32 ret_val = 0;
+       u16 phy_reg;
+
+       if ((hw->mac.type < e1000_pch_lpt) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+           (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
+               return 0;
+
+       if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+               /* Request ME configure ULP mode in the PHY */
+               mac_reg = er32(H2ME);
+               mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
+               ew32(H2ME, mac_reg);
+
+               goto out;
+       }
+
+       if (!to_sx) {
+               int i = 0;
+
+               /* Poll up to 5 seconds for Cable Disconnected indication */
+               while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
+                       /* Bail if link is re-acquired */
+                       if (er32(STATUS) & E1000_STATUS_LU)
+                               return -E1000_ERR_PHY;
+
+                       if (i++ == 100)
+                               break;
+
+                       msleep(50);
+               }
+               e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
+                     (er32(FEXT) &
+                      E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       /* Force SMBus mode in PHY */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
+       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+       /* Force SMBus mode in MAC */
+       mac_reg = er32(CTRL_EXT);
+       mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+       ew32(CTRL_EXT, mac_reg);
+
+       /* Set Inband ULP Exit, Reset to SMBus mode and
+        * Disable SMBus Release on PERST# in PHY
+        */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
+                   I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+       if (to_sx) {
+               if (er32(WUFC) & E1000_WUFC_LNKC)
+                       phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
+
+               phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
+       } else {
+               phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
+       }
+       e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+       /* Set Disable SMBus Release on PERST# in MAC */
+       mac_reg = er32(FEXTNVM7);
+       mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
+       ew32(FEXTNVM7, mac_reg);
+
+       /* Commit ULP changes in PHY by starting auto ULP configuration */
+       phy_reg |= I218_ULP_CONFIG1_START;
+       e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+release:
+       hw->phy.ops.release(hw);
+out:
+       if (ret_val)
+               e_dbg("Error in ULP enable flow: %d\n", ret_val);
+       else
+               hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
+ *  @hw: pointer to the HW structure
+ *  @force: boolean indicating whether or not to force disabling ULP
+ *
+ *  Un-configure ULP mode when link is up, the system is transitioned from
+ *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
+ *  system, poll for an indication from ME that ULP has been un-configured.
+ *  If not on an ME enabled system, un-configure the ULP mode by software.
+ *
+ *  During nominal operation, this function is called when link is acquired
+ *  to disable ULP mode (force=false); otherwise, for example when unloading
+ *  the driver or during Sx->S0 transitions, this is called with force=true
+ *  to forcibly disable ULP.
+ */
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
+{
+       s32 ret_val = 0;
+       u32 mac_reg;
+       u16 phy_reg;
+       int i = 0;
+
+       if ((hw->mac.type < e1000_pch_lpt) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
+           (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
+           (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
+               return 0;
+
+       if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+               if (force) {
+                       /* Request ME un-configure ULP mode in the PHY */
+                       mac_reg = er32(H2ME);
+                       mac_reg &= ~E1000_H2ME_ULP;
+                       mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
+                       ew32(H2ME, mac_reg);
+               }
+
+               /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+               while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
+                       if (i++ == 10) {
+                               ret_val = -E1000_ERR_PHY;
+                               goto out;
+                       }
+
+                       usleep_range(10000, 20000);
+               }
+               e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+
+               if (force) {
+                       mac_reg = er32(H2ME);
+                       mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
+                       ew32(H2ME, mac_reg);
+               } else {
+                       /* Clear H2ME.ULP after ME ULP configuration */
+                       mac_reg = er32(H2ME);
+                       mac_reg &= ~E1000_H2ME_ULP;
+                       ew32(H2ME, mac_reg);
+               }
+
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       if (force)
+               /* Toggle LANPHYPC Value bit */
+               e1000_toggle_lanphypc_pch_lpt(hw);
+
+       /* Unforce SMBus mode in PHY */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
+       if (ret_val) {
+               /* The MAC might be in PCIe mode, so temporarily force to
+                * SMBus mode in order to access the PHY.
+                */
+               mac_reg = er32(CTRL_EXT);
+               mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
+               ew32(CTRL_EXT, mac_reg);
+
+               msleep(50);
+
+               ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
+                                                      &phy_reg);
+               if (ret_val)
+                       goto release;
+       }
+       phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+       e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
+
+       /* Unforce SMBus mode in MAC */
+       mac_reg = er32(CTRL_EXT);
+       mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+       ew32(CTRL_EXT, mac_reg);
+
+       /* When ULP mode was previously entered, K1 was disabled by the
+        * hardware.  Re-Enable K1 in the PHY when exiting ULP.
+        */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg |= HV_PM_CTRL_K1_ENABLE;
+       e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
+
+       /* Clear ULP enabled configuration */
+       ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
+       if (ret_val)
+               goto release;
+       phy_reg &= ~(I218_ULP_CONFIG1_IND |
+                    I218_ULP_CONFIG1_STICKY_ULP |
+                    I218_ULP_CONFIG1_RESET_TO_SMBUS |
+                    I218_ULP_CONFIG1_WOL_HOST |
+                    I218_ULP_CONFIG1_INBAND_EXIT |
+                    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
+       e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+       /* Commit ULP changes by starting auto ULP configuration */
+       phy_reg |= I218_ULP_CONFIG1_START;
+       e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
+
+       /* Clear Disable SMBus Release on PERST# in MAC */
+       mac_reg = er32(FEXTNVM7);
+       mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
+       ew32(FEXTNVM7, mac_reg);
+
+release:
+       hw->phy.ops.release(hw);
+       if (force) {
+               e1000_phy_hw_reset(hw);
+               msleep(50);
+       }
+out:
+       if (ret_val)
+               e_dbg("Error in ULP disable flow: %d\n", ret_val);
+       else
+               hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
+
+       return ret_val;
+}
+
 /**
  *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  *  @hw: pointer to the HW structure
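Both new ULP routines above branch first on whether valid ME firmware is present. With firmware present the driver only talks to the ME: the enable path sets the ULP and ENFORCE_SETTINGS bits in H2ME, and the disable path clears ULP and then polls FWSM until ULP_CFG_DONE drops, giving up after roughly 100 ms. Without firmware the driver forces the PHY into or out of SMBus mode itself. A condensed userspace sketch of the forced-disable ME handshake; rd()/wr()/sleep_10ms() are hypothetical stubs for er32/ew32 and usleep_range, and the bit positions are placeholders.

#include <stdbool.h>
#include <stdio.h>

#define H2ME_ULP                (1u << 11)      /* placeholder bit layout */
#define H2ME_ENFORCE_SETTINGS   (1u << 12)
#define FWSM_ULP_CFG_DONE       (1u << 10)

static unsigned int h2me;
static unsigned int fwsm = FWSM_ULP_CFG_DONE;

static unsigned int rd(unsigned int *r) { return *r; }
static void wr(unsigned int *r, unsigned int v) { *r = v; }

static void sleep_10ms(void)
{
        /* pretend the firmware finishes while we wait */
        fwsm &= ~FWSM_ULP_CFG_DONE;
}

/* ask the ME to un-configure ULP, then wait (bounded) for it to finish */
static int me_disable_ulp(void)
{
        int i = 0;

        wr(&h2me, (rd(&h2me) | H2ME_ENFORCE_SETTINGS) & ~H2ME_ULP);

        while (rd(&fwsm) & FWSM_ULP_CFG_DONE) {
                if (i++ == 10)
                        return -1;              /* firmware never acknowledged */
                sleep_10ms();
        }
        printf("ULP_CFG_DONE cleared after %d polls\n", i);

        wr(&h2me, rd(&h2me) & ~H2ME_ENFORCE_SETTINGS);
        return 0;
}

int main(void)
{
        return me_disable_ulp() ? 1 : 0;
}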
@@ -1106,9 +1405,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
        e1000e_check_downshift(hw);
 
        /* Enable/Disable EEE after link up */
-       ret_val = e1000_set_eee_pchlan(hw);
-       if (ret_val)
-               return ret_val;
+       if (hw->phy.type > e1000_phy_82579) {
+               ret_val = e1000_set_eee_pchlan(hw);
+               if (ret_val)
+                       return ret_val;
+       }
 
        /* If we are forcing speed/duplex, then we simply return since
         * we have already determined whether we have link or not.
@@ -1374,7 +1675,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
        /* RAR[1-6] are owned by manageability.  Skip those and program the
         * next address into the SHRA register array.
         */
-       if (index < (u32)(hw->mac.rar_entry_count - 6)) {
+       if (index < (u32)(hw->mac.rar_entry_count)) {
                s32 ret_val;
 
                ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1484,11 +1785,13 @@ out:
  **/
 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
 {
-       u32 fwsm;
+       bool blocked = false;
+       int i = 0;
 
-       fwsm = er32(FWSM);
-
-       return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+       while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
+              (i++ < 10))
+               usleep_range(10000, 20000);
+       return blocked ? E1000_BLK_PHY_RESET : 0;
 }
 
 /**
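e1000_check_reset_block_ich8lan above turns a single FWSM read into a bounded retry: the ME can hold RSPCIPHY deasserted only transiently, so the function now re-reads the register up to ten more times, 10-20 ms apart, before reporting that PHY reset is blocked. A small standalone sketch of that retry shape, with read_fwsm() as a hypothetical stub that models the ME releasing the PHY after a few polls:

#include <stdbool.h>
#include <stdio.h>

#define FWSM_RSPCIPHY   (1u << 6)       /* placeholder bit position */
#define BLK_PHY_RESET   (-13)           /* stand-in for the driver's error code */

static int reads;

static unsigned int read_fwsm(void)
{
        return ++reads >= 3 ? FWSM_RSPCIPHY : 0;
}

static int check_reset_block(void)
{
        bool blocked = false;
        int i = 0;

        while ((blocked = !(read_fwsm() & FWSM_RSPCIPHY)) && (i++ < 10))
                ;       /* the driver sleeps 10-20 ms between reads here */

        return blocked ? BLK_PHY_RESET : 0;
}

int main(void)
{
        printf("PHY reset %s\n", check_reset_block() ? "blocked" : "allowed");
        return 0;
}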
index 217090df33e788d46603f1c0369a2cf4e8041719..bead50f9187b527291596da67351339b64482707 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_ICH8LAN_H_
 #define _E1000E_ICH8LAN_H_
 
 #define E1000_FWSM_WLOCK_MAC_MASK      0x0380
 #define E1000_FWSM_WLOCK_MAC_SHIFT     7
+#define E1000_FWSM_ULP_CFG_DONE                0x00000400      /* Low power cfg done */
 
 /* Shared Receive Address Registers */
 #define E1000_SHRAL_PCH_LPT(_i)                (0x05408 + ((_i) * 8))
 #define E1000_SHRAH_PCH_LPT(_i)                (0x0540C + ((_i) * 8))
 
+#define E1000_H2ME             0x05B50 /* Host to ME */
+#define E1000_H2ME_ULP         0x00000800      /* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS    0x00001000      /* Enforce Settings */
+
 #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
                                 (ID_LED_OFF1_OFF2 <<  8) | \
                                 (ID_LED_OFF1_ON2  <<  4) | \
@@ -82,6 +80,9 @@
 
 #define E1000_ICH8_LAN_INIT_TIMEOUT    1500
 
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED      0x00000004
+
 #define E1000_FEXTNVM_SW_CONFIG                1
 #define E1000_FEXTNVM_SW_CONFIG_ICH8M  (1 << 27)       /* different on ICH8M */
 
 #define E1000_FEXTNVM6_REQ_PLL_CLK     0x00000100
 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION       0x00000200
 
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST       0x00000020
+
 #define PCIE_ICH8_SNOOP_ALL    PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES  7
-#define E1000_PCH2_RAR_ENTRIES 11      /* RAR[0-6], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 5       /* RAR[0], SHRA[0-3] */
 #define E1000_PCH_LPT_RAR_ENTRIES      12      /* RAR[0], SHRA[0-10] */
 
 #define PHY_PAGE_SHIFT         5
 #define CV_SMB_CTRL            PHY_REG(769, 23)
 #define CV_SMB_CTRL_FORCE_SMBUS        0x0001
 
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1               PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START         0x0001  /* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND           0x0004  /* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP    0x0010  /* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT   0x0020  /* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST      0x0040  /* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS        0x0100  /* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST     0x1000  /* Disable on PERST# */
+
 /* SMBus Address Phy Register */
 #define HV_SMB_ADDR            PHY_REG(768, 26)
 #define HV_SMB_ADDR_MASK       0x007F
 /* PHY Power Management Control */
 #define HV_PM_CTRL             PHY_REG(770, 17)
 #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_ENABLE           0x4000
 
 #define SW_FLAG_TIMEOUT                1000    /* SW Semaphore flag timeout in ms */
 
@@ -268,4 +282,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
 #endif /* _E1000E_ICH8LAN_H_ */
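The header above defines the I218 ULP_CONFIG1 bits that e1000_enable_ulp_lpt_lp writes in ich8lan.c: reset-to-SMBus and disable-SMBus-release-on-PERST# are always set, sticky ULP (plus host wake-on-link-change when WUFC.LNKC is armed) is added for Sx transitions, and inband exit is used instead for runtime link-down entry. The small helper below restates that composition using the bit values defined above; the function name and boolean parameters are illustrative, not driver API.

#include <stdbool.h>
#include <stdio.h>

/* values as defined in ich8lan.h above */
#define I218_ULP_CONFIG1_START             0x0001   /* Start auto ULP config */
#define I218_ULP_CONFIG1_STICKY_ULP        0x0010   /* Set sticky ULP mode */
#define I218_ULP_CONFIG1_INBAND_EXIT       0x0020   /* Inband on ULP exit */
#define I218_ULP_CONFIG1_WOL_HOST          0x0040   /* WoL Host on ULP exit */
#define I218_ULP_CONFIG1_RESET_TO_SMBUS    0x0100   /* Reset to SMBus mode */
#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000   /* Disable on PERST# */

static unsigned short ulp_config1_for_enable(unsigned short cur, bool to_sx,
                                             bool wake_on_link_change)
{
        cur |= I218_ULP_CONFIG1_RESET_TO_SMBUS |
               I218_ULP_CONFIG1_DISABLE_SMB_PERST;

        if (to_sx) {
                if (wake_on_link_change)
                        cur |= I218_ULP_CONFIG1_WOL_HOST;
                cur |= I218_ULP_CONFIG1_STICKY_ULP;
        } else {
                cur |= I218_ULP_CONFIG1_INBAND_EXIT;
        }

        /* the driver writes this value, then sets START in a second write */
        return cur;
}

int main(void)
{
        printf("Sx entry with link wake:  0x%04x\n",
               (unsigned int)ulp_config1_for_enable(0, true, true));
        printf("runtime link-down entry:  0x%04x\n",
               (unsigned int)ulp_config1_for_enable(0, false, false));
        return 0;
}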
index 2480c1091873864991d9342b6ce15df61b07c000..baa0a466d1d05ca533999b9ffcac3846a476ab5f 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000.h"
 
index a61fee404ebeddf31b8ad636582fb740552e2c7c..4e81c2825b7a1ca60b9a870ce61737e29d4e7972 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_MAC_H_
 #define _E1000E_MAC_H_
index e4b0f1ef92f6fdcc95b193910c7a7947e0063dbd..cb37ff1f1321991c34f17b80d820e9d923d0d3aa 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000.h"
 
index 326897c29ea81e5f319e2cad932f6870a8e69fe9..a8c27f98f7b05e15d9548ba78df62c1c351ac91d 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_MANAGE_H_
 #define _E1000E_MANAGE_H_
index 6d91933c4cdd3873990ba36100dd5ae7f21a7782..f1cce5928e201be7086fcfc6d3269a33960c5f2f 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
                                 struct sk_buff *skb)
 {
        if (netdev->features & NETIF_F_RXHASH)
-               skb->rxhash = le32_to_cpu(rss);
+               skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
 }
 
 /**
@@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
                adapter->tx_hang_recheck = true;
                return;
        }
-       /* Real hang detected */
        adapter->tx_hang_recheck = false;
+
+       if (er32(TDH(0)) == er32(TDT(0))) {
+               e_dbg("false hang detected, ignoring\n");
+               return;
+       }
+
+       /* Real hang detected */
        netif_stop_queue(netdev);
 
        e1e_rphy(hw, MII_BMSR, &phy_status);
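The hang checker above now recognizes an empty ring as a false alarm: when the head pointer (TDH) has caught up with the tail pointer (TDT) there is nothing left for the hardware to transmit, so the missing completion is not a real hang and the queue is left running. A tiny sketch of that emptiness test, with plain variables standing in for the TDH/TDT register reads:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for er32(TDH(0)) and er32(TDT(0)) */
static unsigned int tdh = 42;
static unsigned int tdt = 42;

static bool tx_hang_is_real(void)
{
        if (tdh == tdt) {
                /* head caught up with tail: ring is drained, not a hang */
                printf("false hang detected, ignoring\n");
                return false;
        }
        /* descriptors outstanding but no completion: treat as a real hang */
        return true;
}

int main(void)
{
        if (tx_hang_is_real())
                printf("real hang: dump state and stop the queue\n");
        return 0;
}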
@@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
              eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
              phy_status, phy_1000t_status, phy_ext_status, pci_status);
 
+       e1000e_dump(adapter);
+
        /* Suggest workaround for known h/w issue */
        if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
                e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -1147,9 +1148,6 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
                                                     tx_hwtstamp_work);
        struct e1000_hw *hw = &adapter->hw;
 
-       if (!adapter->tx_hwtstamp_skb)
-               return;
-
        if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
                struct skb_shared_hwtstamps shhwtstamps;
                u64 txstmp;
@@ -1162,6 +1160,12 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
                skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
                dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
                adapter->tx_hwtstamp_skb = NULL;
+       } else if (time_after(jiffies, adapter->tx_hwtstamp_start
+                             + adapter->tx_timeout_factor * HZ)) {
+               dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+               adapter->tx_hwtstamp_skb = NULL;
+               adapter->tx_hwtstamp_timeouts++;
+               e_warn("clearing Tx timestamp hang\n");
        } else {
                /* reschedule to check later */
                schedule_work(&adapter->tx_hwtstamp_work);
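For a sense of scale on the new timeout branch above, a rough worked example with illustrative values only (HZ and tx_timeout_factor depend on the kernel configuration and the negotiated link speed):

/* Illustrative arithmetic, not part of the patch:
 * with HZ == 1000 and a hypothetical tx_timeout_factor of 1, the work item
 * gives up once
 *
 *	jiffies > tx_hwtstamp_start + 1 * 1000
 *
 * i.e. roughly one second after the timestamp request was queued, freeing the
 * deferred skb and incrementing tx_hwtstamp_timeouts instead of rescheduling
 * itself indefinitely.
 */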
@@ -1701,7 +1705,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
        adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
        writel(0, rx_ring->head);
-       if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                e1000e_update_rdt_wa(rx_ring, 0);
        else
                writel(0, rx_ring->tail);
@@ -2038,13 +2042,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                                                               msix_entry),
                                                        GFP_KERNEL);
                        if (adapter->msix_entries) {
+                               struct e1000_adapter *a = adapter;
+
                                for (i = 0; i < adapter->num_vectors; i++)
                                        adapter->msix_entries[i].entry = i;
 
-                               err = pci_enable_msix(adapter->pdev,
-                                                     adapter->msix_entries,
-                                                     adapter->num_vectors);
-                               if (err == 0)
+                               err = pci_enable_msix_range(a->pdev,
+                                                           a->msix_entries,
+                                                           a->num_vectors,
+                                                           a->num_vectors);
+                               if (err > 0)
                                        return;
                        }
                        /* MSI-X failed, so fall through and try MSI */
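For readers tracking the API conversion in this hunk: pci_enable_msix() reported success with a return value of 0, while pci_enable_msix_range() is expected to return the number of vectors actually allocated (at least minvec) on success and a negative errno on failure, which is why the success test becomes err > 0. A minimal sketch of that contract follows; request_exact_msix(), pdev, entries and nvec are illustrative names, and the snippet assumes the usual <linux/pci.h> declarations.

/* Illustrative sketch only - not part of this patch. */
static int request_exact_msix(struct pci_dev *pdev,
			      struct msix_entry *entries, int nvec)
{
	int ret;

	/* minvec == maxvec == nvec: succeed only if all nvec vectors are granted */
	ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
	if (ret < 0)
		return ret;	/* negative errno: allocation failed */

	return 0;		/* ret == nvec vectors were allocated */
}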
@@ -2402,7 +2409,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
        tx_ring->next_to_clean = 0;
 
        writel(0, tx_ring->head);
-       if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+       if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                e1000e_update_tdt_wa(tx_ring, 0);
        else
                writel(0, tx_ring->tail);
@@ -2894,7 +2901,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        struct e1000_hw *hw = &adapter->hw;
        struct e1000_ring *tx_ring = adapter->tx_ring;
        u64 tdba;
-       u32 tdlen, tarc;
+       u32 tdlen, tctl, tarc;
 
        /* Setup the HW Tx Head and Tail descriptor pointers */
        tdba = tx_ring->dma;
@@ -2931,6 +2938,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        /* erratum work around: set txdctl the same for both queues */
        ew32(TXDCTL(1), er32(TXDCTL(0)));
 
+       /* Program the Transmit Control Register */
+       tctl = er32(TCTL);
+       tctl &= ~E1000_TCTL_CT;
+       tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+               (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
        if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
                tarc = er32(TARC(0));
                /* set the speed mode bit, we'll clear it if we're not at
@@ -2961,6 +2974,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
        /* enable Report Status bit */
        adapter->txd_cmd |= E1000_TXD_CMD_RS;
 
+       ew32(TCTL, tctl);
+
        hw->mac.ops.config_collision_dist(hw);
 }
 
@@ -3331,6 +3346,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
        struct e1000_hw *hw = &adapter->hw;
        u32 rctl;
 
+       if (pm_runtime_suspended(netdev->dev.parent))
+               return;
+
        /* Check for Promiscuous and All Multicast modes */
        rctl = er32(RCTL);
 
@@ -3691,10 +3709,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
  */
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
-       /* WoL is enabled */
-       if (adapter->wol)
-               return;
-
        if (adapter->hw.phy.ops.power_down)
                adapter->hw.phy.ops.power_down(&adapter->hw);
 }
@@ -3911,10 +3925,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
        }
 
        if (!netif_running(adapter->netdev) &&
-           !test_bit(__E1000_TESTING, &adapter->state)) {
+           !test_bit(__E1000_TESTING, &adapter->state))
                e1000_power_down_phy(adapter);
-               return;
-       }
 
        e1000_get_phy_info(hw);
 
@@ -3981,7 +3993,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
 
 static void e1000e_update_stats(struct e1000_adapter *adapter);
 
-void e1000e_down(struct e1000_adapter *adapter)
+/**
+ * e1000e_down - quiesce the device and optionally reset the hardware
+ * @adapter: board private structure
+ * @reset: boolean flag to reset the hardware or not
+ */
+void e1000e_down(struct e1000_adapter *adapter, bool reset)
 {
        struct net_device *netdev = adapter->netdev;
        struct e1000_hw *hw = &adapter->hw;
@@ -4035,12 +4052,8 @@ void e1000e_down(struct e1000_adapter *adapter)
            e1000_lv_jumbo_workaround_ich8lan(hw, false))
                e_dbg("failed to disable jumbo frame workaround mode\n");
 
-       if (!pci_channel_offline(adapter->pdev))
+       if (reset && !pci_channel_offline(adapter->pdev))
                e1000e_reset(adapter);
-
-       /* TODO: for power management, we could drop the link and
-        * pci_disable_device here.
-        */
 }
 
 void e1000e_reinit_locked(struct e1000_adapter *adapter)
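As a reading aid for the new boolean (the calls themselves appear in later hunks of this file): the reinit, close, MTU-change and I/O-error paths keep the old quiesce-and-reset behaviour, while the suspend paths quiesce only.

/* Illustrative summary of the two call patterns used elsewhere in this patch. */
e1000e_down(adapter, true);	/* reinit_locked, close, MTU change, I/O error: quiesce and reset */
e1000e_down(adapter, false);	/* pm_freeze / runtime suspend: quiesce without resetting the hardware */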
@@ -4048,7 +4061,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
        might_sleep();
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
-       e1000e_down(adapter);
+       e1000e_down(adapter, true);
        e1000e_up(adapter);
        clear_bit(__E1000_RESETTING, &adapter->state);
 }
@@ -4326,7 +4339,6 @@ static int e1000_open(struct net_device *netdev)
        adapter->tx_hang_recheck = false;
        netif_start_queue(netdev);
 
-       adapter->idle_check = true;
        hw->mac.get_link_status = true;
        pm_runtime_put(&pdev->dev);
 
@@ -4376,14 +4388,15 @@ static int e1000_close(struct net_device *netdev)
        pm_runtime_get_sync(&pdev->dev);
 
        if (!test_bit(__E1000_DOWN, &adapter->state)) {
-               e1000e_down(adapter);
+               e1000e_down(adapter, true);
                e1000_free_irq(adapter);
+
+               /* Link status message must follow this format */
+               pr_info("%s NIC Link is Down\n", adapter->netdev->name);
        }
 
        napi_disable(&adapter->napi);
 
-       e1000_power_down_phy(adapter);
-
        e1000e_free_tx_resources(adapter->tx_ring);
        e1000e_free_rx_resources(adapter->rx_ring);
 
@@ -4460,11 +4473,16 @@ static void e1000e_update_phy_task(struct work_struct *work)
        struct e1000_adapter *adapter = container_of(work,
                                                     struct e1000_adapter,
                                                     update_phy_task);
+       struct e1000_hw *hw = &adapter->hw;
 
        if (test_bit(__E1000_DOWN, &adapter->state))
                return;
 
-       e1000_get_phy_info(&adapter->hw);
+       e1000_get_phy_info(hw);
+
+       /* Enable EEE on 82579 after link up */
+       if (hw->phy.type == e1000_phy_82579)
+               e1000_set_eee_pchlan(hw);
 }
 
 /**
@@ -4799,6 +4817,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
 
        if (adapter->phy_hang_count > 1) {
                adapter->phy_hang_count = 0;
+               e_dbg("PHY appears hung - resetting\n");
                schedule_work(&adapter->reset_task);
        }
 }
@@ -4957,15 +4976,11 @@ static void e1000_watchdog_task(struct work_struct *work)
                                mod_timer(&adapter->phy_info_timer,
                                          round_jiffies(jiffies + 2 * HZ));
 
-                       /* The link is lost so the controller stops DMA.
-                        * If there is queued Tx work that cannot be done
-                        * or if on an 8000ES2LAN which requires a Rx packet
-                        * buffer work-around on link down event, reset the
-                        * controller to flush the Tx/Rx packet buffers.
-                        * (Do the reset outside of interrupt context).
+                       /* 8000ES2LAN requires a Rx packet buffer work-around
+                        * on link down event; reset the controller to flush
+                        * the Rx packet buffer.
                         */
-                       if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
-                           (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+                       if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                adapter->flags |= FLAG_RESTART_NOW;
                        else
                                pm_schedule_suspend(netdev->dev.parent,
@@ -4988,6 +5003,15 @@ link_up:
        adapter->gotc_old = adapter->stats.gotc;
        spin_unlock(&adapter->stats64_lock);
 
+       /* If the link is lost the controller stops DMA, but
+        * if there is queued Tx work it cannot be done.  So
+        * reset the controller to flush the Tx packet buffers.
+        */
+       if (!netif_carrier_ok(netdev) &&
+           (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+               adapter->flags |= FLAG_RESTART_NOW;
+
+       /* If reset is necessary, do it outside of interrupt context. */
        if (adapter->flags & FLAG_RESTART_NOW) {
                schedule_work(&adapter->reset_task);
                /* return immediately since reset is imminent */
@@ -5546,6 +5570,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
                        adapter->tx_hwtstamp_skb = skb_get(skb);
+                       adapter->tx_hwtstamp_start = jiffies;
                        schedule_work(&adapter->tx_hwtstamp_work);
                } else {
                        skb_tx_timestamp(skb);
@@ -5684,8 +5709,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        adapter->max_frame_size = max_frame;
        e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
+
+       pm_runtime_get_sync(netdev->dev.parent);
+
        if (netif_running(netdev))
-               e1000e_down(adapter);
+               e1000e_down(adapter, true);
 
        /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
         * means we reserve 2 more, this pushes us to allocate from the next
@@ -5711,6 +5739,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
        else
                e1000e_reset(adapter);
 
+       pm_runtime_put_sync(netdev->dev.parent);
+
        clear_bit(__E1000_RESETTING, &adapter->state);
 
        return 0;
@@ -5852,7 +5882,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 i, mac_reg;
+       u32 i, mac_reg, wuc;
        u16 phy_reg, wuc_enable;
        int retval;
 
@@ -5899,13 +5929,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
                phy_reg |= BM_RCTL_RFCE;
        hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
 
+       wuc = E1000_WUC_PME_EN;
+       if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
+               wuc |= E1000_WUC_APME;
+
        /* enable PHY wakeup in MAC register */
        ew32(WUFC, wufc);
-       ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+       ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
+                  E1000_WUC_PME_STATUS | wuc));
 
        /* configure and enable PHY wakeup in PHY registers */
        hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
-       hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+       hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
 
        /* activate PHY wakeup */
        wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
@@ -5918,15 +5953,10 @@ release:
        return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+static int e1000e_pm_freeze(struct device *dev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
-       u32 ctrl, ctrl_ext, rctl, status;
-       /* Runtime suspend should only enable wakeup for link changes */
-       u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-       int retval = 0;
 
        netif_device_detach(netdev);
 
@@ -5937,11 +5967,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
                        usleep_range(10000, 20000);
 
                WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-               e1000e_down(adapter);
+
+               /* Quiesce the device without resetting the hardware */
+               e1000e_down(adapter, false);
                e1000_free_irq(adapter);
        }
        e1000e_reset_interrupt_capability(adapter);
 
+       /* Allow time for pending master requests to run */
+       e1000e_disable_pcie_master(&adapter->hw);
+
+       return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       u32 ctrl, ctrl_ext, rctl, status;
+       /* Runtime suspend should only enable wakeup for link changes */
+       u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+       int retval = 0;
+
        status = er32(STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -5972,12 +6020,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
                        ew32(CTRL_EXT, ctrl_ext);
                }
 
+               if (!runtime)
+                       e1000e_power_up_phy(adapter);
+
                if (adapter->flags & FLAG_IS_ICH)
                        e1000_suspend_workarounds_ich8lan(&adapter->hw);
 
-               /* Allow time for pending master requests to run */
-               e1000e_disable_pcie_master(&adapter->hw);
-
                if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
                        /* enable wakeup by the PHY */
                        retval = e1000_init_phy_wakeup(adapter, wufc);
@@ -5991,10 +6039,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
        } else {
                ew32(WUC, 0);
                ew32(WUFC, 0);
+
+               e1000_power_down_phy(adapter);
        }
 
-       if (adapter->hw.phy.type == e1000_phy_igp_3)
+       if (adapter->hw.phy.type == e1000_phy_igp_3) {
                e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+       } else if (hw->mac.type == e1000_pch_lpt) {
+               if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+                       /* ULP does not support wake from unicast, multicast
+                        * or broadcast.
+                        */
+                       retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+
+               if (retval)
+                       return retval;
+       }
+
 
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
@@ -6102,18 +6163,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 }
 
 #ifdef CONFIG_PM
-static bool e1000e_pm_ready(struct e1000_adapter *adapter)
-{
-       return !!adapter->tx_ring->buffer_info;
-}
-
 static int __e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u16 aspm_disable_flag = 0;
-       u32 err;
 
        if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
                aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6124,13 +6179,6 @@ static int __e1000_resume(struct pci_dev *pdev)
 
        pci_set_master(pdev);
 
-       e1000e_set_interrupt_capability(adapter);
-       if (netif_running(netdev)) {
-               err = e1000_request_irq(adapter);
-               if (err)
-                       return err;
-       }
-
        if (hw->mac.type >= e1000_pch2lan)
                e1000_resume_workarounds_pchlan(&adapter->hw);
 
@@ -6169,11 +6217,6 @@ static int __e1000_resume(struct pci_dev *pdev)
 
        e1000_init_manageability_pt(adapter);
 
-       if (netif_running(netdev))
-               e1000e_up(adapter);
-
-       netif_device_attach(netdev);
-
        /* If the controller has AMT, do not set DRV_LOAD until the interface
         * is up.  For all other cases, let the f/w know that the h/w is now
         * under the control of the driver.
@@ -6184,75 +6227,111 @@ static int __e1000_resume(struct pci_dev *pdev)
        return 0;
 }
 
+static int e1000e_pm_thaw(struct device *dev)
+{
+       struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+
+       e1000e_set_interrupt_capability(adapter);
+       if (netif_running(netdev)) {
+               u32 err = e1000_request_irq(adapter);
+
+               if (err)
+                       return err;
+
+               e1000e_up(adapter);
+       }
+
+       netif_device_attach(netdev);
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
-static int e1000_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
 
+       e1000e_pm_freeze(dev);
+
        return __e1000_shutdown(pdev, false);
 }
 
-static int e1000_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct e1000_adapter *adapter = netdev_priv(netdev);
+       int rc;
 
-       if (e1000e_pm_ready(adapter))
-               adapter->idle_check = true;
+       rc = __e1000_resume(pdev);
+       if (rc)
+               return rc;
 
-       return __e1000_resume(pdev);
+       return e1000e_pm_thaw(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_RUNTIME
-static int e1000_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_idle(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       if (!e1000e_pm_ready(adapter))
-               return 0;
+       if (!e1000e_has_link(adapter))
+               pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
 
-       return __e1000_shutdown(pdev, true);
+       return -EBUSY;
 }
 
-static int e1000_idle(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       int rc;
 
-       if (!e1000e_pm_ready(adapter))
-               return 0;
+       rc = __e1000_resume(pdev);
+       if (rc)
+               return rc;
 
-       if (adapter->idle_check) {
-               adapter->idle_check = false;
-               if (!e1000e_has_link(adapter))
-                       pm_schedule_suspend(dev, MSEC_PER_SEC);
-       }
+       if (netdev->flags & IFF_UP)
+               rc = e1000e_up(adapter);
 
-       return -EBUSY;
+       return rc;
 }
 
-static int e1000_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       if (!e1000e_pm_ready(adapter))
-               return 0;
+       if (netdev->flags & IFF_UP) {
+               int count = E1000_CHECK_RESET_COUNT;
 
-       adapter->idle_check = !dev->power.runtime_auto;
-       return __e1000_resume(pdev);
+               while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+                       usleep_range(10000, 20000);
+
+               WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+               /* Down the device without resetting the hardware */
+               e1000e_down(adapter, false);
+       }
+
+       if (__e1000_shutdown(pdev, true)) {
+               e1000e_pm_runtime_resume(dev);
+               return -EBUSY;
+       }
+
+       return 0;
 }
 #endif /* CONFIG_PM_RUNTIME */
 #endif /* CONFIG_PM */
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
+       e1000e_pm_freeze(&pdev->dev);
+
        __e1000_shutdown(pdev, false);
 }
 
@@ -6338,7 +6417,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
                return PCI_ERS_RESULT_DISCONNECT;
 
        if (netif_running(netdev))
-               e1000e_down(adapter);
+               e1000e_down(adapter, true);
        pci_disable_device(pdev);
 
        /* Request a slot reset. */
@@ -6350,7 +6429,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
  * @pdev: Pointer to PCI device
  *
  * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
+ * resembles the first-half of the e1000e_pm_resume routine.
  */
 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 {
@@ -6397,7 +6476,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
  *
  * This callback is called when the error recovery driver tells us that
  * its OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
+ * second-half of the e1000e_pm_resume routine.
  */
 static void e1000_io_resume(struct pci_dev *pdev)
 {
@@ -6902,9 +6981,6 @@ static void e1000_remove(struct pci_dev *pdev)
                }
        }
 
-       if (!(netdev->flags & IFF_UP))
-               e1000_power_down_phy(adapter);
-
        /* Don't lie to e1000_close() down the road. */
        if (!down)
                clear_bit(__E1000_DOWN, &adapter->state);
@@ -7026,9 +7102,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
 static const struct dev_pm_ops e1000_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
-       SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
-                          e1000_idle)
+#ifdef CONFIG_PM_SLEEP
+       .suspend        = e1000e_pm_suspend,
+       .resume         = e1000e_pm_resume,
+       .freeze         = e1000e_pm_freeze,
+       .thaw           = e1000e_pm_thaw,
+       .poweroff       = e1000e_pm_suspend,
+       .restore        = e1000e_pm_resume,
+#endif
+       SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+                          e1000e_pm_runtime_idle)
 };
 
 /* PCI Device API Driver */
@@ -7055,7 +7138,7 @@ static int __init e1000_init_module(void)
        int ret;
        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_version);
-       pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
+       pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
 
        return ret;
index d70a03906ac0a0cecfb3f078fa584e376eae061a..a9a976f04bffe957e22b0852bd8918c6d6f63bd0 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000.h"
 
index 45fc69561627d87b9d59dcbfb3a74b34f48e175d..342bf69efab545efcd460eaeb3d50198a22af408 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_NVM_H_
 #define _E1000E_NVM_H_
index c16bd75b6caa3f85f4f510da7fb56bafcd0f2b40..d0ac0f3249c886415d308c4a0cd376feda3d44db 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include <linux/netdevice.h>
 #include <linux/module.h>
@@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter)
                                 "%s set to dynamic mode\n", opt.name);
                        adapter->itr = 20000;
                        break;
+               case 2:
+                       dev_info(&adapter->pdev->dev,
+                                "%s Invalid mode - setting default\n",
+                                opt.name);
+                       adapter->itr_setting = opt.def;
+                       /* fall-through */
                case 3:
                        dev_info(&adapter->pdev->dev,
                                 "%s set to dynamic conservative mode\n",
index 20e71f4ca4261f99694b04eccd3647906326239d..00b3fc98bf309bf3d371a984f0da7245caf07013 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #include "e1000.h"
 
index f4f71b9991e3733bc35d7b17381e4cae04a28113..3841bccf058c7aa0fe3b2f90e2c70c38e9b6f209 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_PHY_H_
 #define _E1000E_PHY_H_
index 065f8c80d4f2d751e2cd2763f3bd6d1e73b6ab9b..fb1a914a3ad4dc98cf4da0cfc0ad7271a8a12488 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* PTP 1588 Hardware Clock (PHC)
  * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
@@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
                                                     ptp_clock_info);
        struct e1000_hw *hw = &adapter->hw;
        bool neg_adj = false;
+       unsigned long flags;
        u64 adjustment;
        u32 timinca, incvalue;
        s32 ret_val;
@@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        if (ret_val)
                return ret_val;
 
+       spin_lock_irqsave(&adapter->systim_lock, flags);
+
        incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
 
        adjustment = incvalue;
@@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 
        ew32(TIMINCA, timinca);
 
+       spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
        return 0;
 }
 
@@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = e1000e_phc_adjfreq,
        .adjtime        = e1000e_phc_adjtime,
index a7e6a3e37257b34f200ba01c3525fd3269964943..ea235bbe50d3c3d32361f505cd98ddfcec5d9744 100644 (file)
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_REGS_H_
 #define _E1000E_REGS_H_
@@ -39,6 +32,7 @@
 #define E1000_SCTL     0x00024 /* SerDes Control - RW */
 #define E1000_FCAL     0x00028 /* Flow Control Address Low - RW */
 #define E1000_FCAH     0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXT     0x0002C /* Future Extended - RW */
 #define E1000_FEXTNVM  0x00028 /* Future Extended NVM - RW */
 #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
 #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
index 72dae4d97b43e37690fe74818b818d3a9a1e14dc..beb7b4393a6c26fc46c917a798a6fd7bfaee28ea 100644 (file)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT  8
-#define I40E_NVM_VERSION_HI_MASK   (0xff << I40E_NVM_VERSION_HI_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT  12
+#define I40E_NVM_VERSION_HI_MASK   (0xf << I40E_NVM_VERSION_HI_SHIFT)
 
 /* The values in here are decimal coded as hex, as is the case in the NVM map */
 #define I40E_CURRENT_NVM_VERSION_HI 0x2
-#define I40E_CURRENT_NVM_VERSION_LO 0x30
+#define I40E_CURRENT_NVM_VERSION_LO 0x40
 
 /* magic for getting defines into strings */
 #define STRINGIFY(foo)  #foo
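A worked example of the re-encoded version word (the input value is illustrative, chosen to match the current-version constants above):

/* Illustrative decode only - not part of this patch. */
u16 version = 0x2040;	/* sample hw->nvm.version word */
u8 hi = (version & I40E_NVM_VERSION_HI_MASK) >> I40E_NVM_VERSION_HI_SHIFT;	/* (0x2040 & 0xf000) >> 12 = 0x2  */
u8 lo = (version & I40E_NVM_VERSION_LO_MASK) >> I40E_NVM_VERSION_LO_SHIFT;	/* (0x2040 & 0x00ff) >>  0 = 0x40 */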
@@ -136,6 +136,7 @@ enum i40e_state_t {
        __I40E_EMP_RESET_REQUESTED,
        __I40E_FILTER_OVERFLOW_PROMISC,
        __I40E_SUSPENDED,
+       __I40E_BAD_EEPROM,
 };
 
 enum i40e_interrupt_policy {
@@ -152,8 +153,21 @@ struct i40e_lump_tracking {
 };
 
 #define I40E_DEFAULT_ATR_SAMPLE_RATE   20
-#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
-struct i40e_fdir_data {
+#define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
+#define I40E_FDIR_BUFFER_FULL_MARGIN   10
+#define I40E_FDIR_BUFFER_HEAD_ROOM     200
+
+struct i40e_fdir_filter {
+       struct hlist_node fdir_node;
+       /* filter ipnut set */
+       u8 flow_type;
+       /* filter input set */
+       __be32 dst_ip[4];
+       __be32 src_ip[4];
+       __be16 src_port;
+       __be16 dst_port;
+       __be32 sctp_v_tag;
+       /* filter control */
        u16 q_index;
        u8  flex_off;
        u8  pctype;
@@ -162,7 +176,6 @@ struct i40e_fdir_data {
        u8  fd_status;
        u16 cnt_index;
        u32 fd_id;
-       u8  *raw_packet;
 };
 
 #define I40E_ETH_P_LLDP                        0x88cc
@@ -196,7 +209,7 @@ struct i40e_pf {
        bool fc_autoneg_status;
 
        u16 eeprom_version;
-       u16 num_vmdq_vsis;         /* num vmdq pools this pf has set up */
+       u16 num_vmdq_vsis;         /* num vmdq vsis this pf has set up */
        u16 num_vmdq_qps;          /* num queue pairs per vmdq pool */
        u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
        u16 num_req_vfs;           /* num vfs requested for this vf */
@@ -210,6 +223,9 @@ struct i40e_pf {
        u8 atr_sample_rate;
        bool wol_en;
 
+       struct hlist_head fdir_filter_list;
+       u16 fdir_pf_active_filters;
+
 #ifdef CONFIG_I40E_VXLAN
        __be16  vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
        u16 pending_vxlan_bitmap;
@@ -251,6 +267,9 @@ struct i40e_pf {
 #define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
 #endif
 
+       /* tracks features that get auto disabled by errors */
+       u64 auto_disable_flags;
+
        bool stat_offsets_loaded;
        struct i40e_hw_port_stats stats;
        struct i40e_hw_port_stats stats_offsets;
@@ -477,10 +496,10 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
                 "f%d.%d a%d.%d n%02x.%02x e%08x",
                 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
                 hw->aq.api_maj_ver, hw->aq.api_min_ver,
-                (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
-                                               >> I40E_NVM_VERSION_HI_SHIFT,
-                (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
-                                               >> I40E_NVM_VERSION_LO_SHIFT,
+                (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
+                       I40E_NVM_VERSION_HI_SHIFT,
+                (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
+                       I40E_NVM_VERSION_LO_SHIFT,
                 hw->nvm.eetrack);
 
        return buf;
@@ -534,9 +553,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
 int i40e_fetch_switch_configuration(struct i40e_pf *pf,
                                    bool printconfig);
 
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                             struct i40e_pf *pf, bool add);
-
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+                     struct i40e_fdir_filter *input, bool add);
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
+int i40e_get_current_fd_count(struct i40e_pf *pf);
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
@@ -575,6 +598,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
index a50e6b3479ae9c779475681634535f1c8181f309..ed3902bf249b3e4ceb047ed14762d6ea50b1a4c0 100644 (file)
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
                        desc_cb = *desc;
                        cb_func(hw, &desc_cb);
                }
-               memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
-               memset((void *)details, 0,
-                      sizeof(struct i40e_asq_cmd_details));
+               memset(desc, 0, sizeof(*desc));
+               memset(details, 0, sizeof(*details));
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
index e7f38b57834d978a82c541df8bb26ffc3ce46937..bb948dd924743f2305491a4836897116bdbaac30 100644 (file)
@@ -162,6 +162,372 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
        return status;
 }
 
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ *      Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
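A minimal sketch of the workflow described above (illustrative only: decode_example() is a made-up name, the .known and .outer_ip fields are taken from the comment itself, and the .inner_prot access is an assumption based on the I40E_PTT() initializer below):

/* Illustrative sketch only - not part of this patch. */
static void decode_example(u8 ptype)
{
	struct i40e_rx_ptype_decoded d = i40e_ptype_lookup[ptype];

	if (!d.known)
		return;			/* packet type is unknown */

	if (d.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
		/* IP packet: inspect the tunnel and inner protocol fields,
		 * e.g. d.inner_prot, to classify further
		 */
	} else {
		/* non-IP: decode via enum i40e_rx_l2_ptype instead */
	}
}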
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+       {       PTYPE, \
+               1, \
+               I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+               I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+               I40E_RX_PTYPE_##OUTER_FRAG, \
+               I40E_RX_PTYPE_TUNNEL_##T, \
+               I40E_RX_PTYPE_TUNNEL_END_##TE, \
+               I40E_RX_PTYPE_##TEF, \
+               I40E_RX_PTYPE_INNER_PROT_##I, \
+               I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+               { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define I40E_RX_PTYPE_NOF              I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG              I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS    I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+       /* L2 Packet types */
+       I40E_PTT_UNUSED_ENTRY(0),
+       I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
+       I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT_UNUSED_ENTRY(4),
+       I40E_PTT_UNUSED_ENTRY(5),
+       I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT_UNUSED_ENTRY(8),
+       I40E_PTT_UNUSED_ENTRY(9),
+       I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+       I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+       /* Non Tunneled IPv4 */
+       I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(25),
+       I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
+       I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+       I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+       /* IPv4 --> IPv4 */
+       I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(32),
+       I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> IPv6 */
+       I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(39),
+       I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT */
+       I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> IPv4 */
+       I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(47),
+       I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> IPv6 */
+       I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(54),
+       I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> MAC */
+       I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+       I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(62),
+       I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+       I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(69),
+       I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> MAC/VLAN */
+       I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+       I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(77),
+       I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+       I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(84),
+       I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+       /* Non Tunneled IPv6 */
+       I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+       I40E_PTT_UNUSED_ENTRY(91),
+       I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+       I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+       I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+       /* IPv6 --> IPv4 */
+       I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(98),
+       I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> IPv6 */
+       I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(105),
+       I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT */
+       I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> IPv4 */
+       I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(113),
+       I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> IPv6 */
+       I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(120),
+       I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC */
+       I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+       I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(128),
+       I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+       I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(135),
+       I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN */
+       I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+       I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(143),
+       I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+       I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(150),
+       I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+       /* unused entries */
+       I40E_PTT_UNUSED_ENTRY(154),
+       I40E_PTT_UNUSED_ENTRY(155),
+       I40E_PTT_UNUSED_ENTRY(156),
+       I40E_PTT_UNUSED_ENTRY(157),
+       I40E_PTT_UNUSED_ENTRY(158),
+       I40E_PTT_UNUSED_ENTRY(159),
+
+       I40E_PTT_UNUSED_ENTRY(160),
+       I40E_PTT_UNUSED_ENTRY(161),
+       I40E_PTT_UNUSED_ENTRY(162),
+       I40E_PTT_UNUSED_ENTRY(163),
+       I40E_PTT_UNUSED_ENTRY(164),
+       I40E_PTT_UNUSED_ENTRY(165),
+       I40E_PTT_UNUSED_ENTRY(166),
+       I40E_PTT_UNUSED_ENTRY(167),
+       I40E_PTT_UNUSED_ENTRY(168),
+       I40E_PTT_UNUSED_ENTRY(169),
+
+       I40E_PTT_UNUSED_ENTRY(170),
+       I40E_PTT_UNUSED_ENTRY(171),
+       I40E_PTT_UNUSED_ENTRY(172),
+       I40E_PTT_UNUSED_ENTRY(173),
+       I40E_PTT_UNUSED_ENTRY(174),
+       I40E_PTT_UNUSED_ENTRY(175),
+       I40E_PTT_UNUSED_ENTRY(176),
+       I40E_PTT_UNUSED_ENTRY(177),
+       I40E_PTT_UNUSED_ENTRY(178),
+       I40E_PTT_UNUSED_ENTRY(179),
+
+       I40E_PTT_UNUSED_ENTRY(180),
+       I40E_PTT_UNUSED_ENTRY(181),
+       I40E_PTT_UNUSED_ENTRY(182),
+       I40E_PTT_UNUSED_ENTRY(183),
+       I40E_PTT_UNUSED_ENTRY(184),
+       I40E_PTT_UNUSED_ENTRY(185),
+       I40E_PTT_UNUSED_ENTRY(186),
+       I40E_PTT_UNUSED_ENTRY(187),
+       I40E_PTT_UNUSED_ENTRY(188),
+       I40E_PTT_UNUSED_ENTRY(189),
+
+       I40E_PTT_UNUSED_ENTRY(190),
+       I40E_PTT_UNUSED_ENTRY(191),
+       I40E_PTT_UNUSED_ENTRY(192),
+       I40E_PTT_UNUSED_ENTRY(193),
+       I40E_PTT_UNUSED_ENTRY(194),
+       I40E_PTT_UNUSED_ENTRY(195),
+       I40E_PTT_UNUSED_ENTRY(196),
+       I40E_PTT_UNUSED_ENTRY(197),
+       I40E_PTT_UNUSED_ENTRY(198),
+       I40E_PTT_UNUSED_ENTRY(199),
+
+       I40E_PTT_UNUSED_ENTRY(200),
+       I40E_PTT_UNUSED_ENTRY(201),
+       I40E_PTT_UNUSED_ENTRY(202),
+       I40E_PTT_UNUSED_ENTRY(203),
+       I40E_PTT_UNUSED_ENTRY(204),
+       I40E_PTT_UNUSED_ENTRY(205),
+       I40E_PTT_UNUSED_ENTRY(206),
+       I40E_PTT_UNUSED_ENTRY(207),
+       I40E_PTT_UNUSED_ENTRY(208),
+       I40E_PTT_UNUSED_ENTRY(209),
+
+       I40E_PTT_UNUSED_ENTRY(210),
+       I40E_PTT_UNUSED_ENTRY(211),
+       I40E_PTT_UNUSED_ENTRY(212),
+       I40E_PTT_UNUSED_ENTRY(213),
+       I40E_PTT_UNUSED_ENTRY(214),
+       I40E_PTT_UNUSED_ENTRY(215),
+       I40E_PTT_UNUSED_ENTRY(216),
+       I40E_PTT_UNUSED_ENTRY(217),
+       I40E_PTT_UNUSED_ENTRY(218),
+       I40E_PTT_UNUSED_ENTRY(219),
+
+       I40E_PTT_UNUSED_ENTRY(220),
+       I40E_PTT_UNUSED_ENTRY(221),
+       I40E_PTT_UNUSED_ENTRY(222),
+       I40E_PTT_UNUSED_ENTRY(223),
+       I40E_PTT_UNUSED_ENTRY(224),
+       I40E_PTT_UNUSED_ENTRY(225),
+       I40E_PTT_UNUSED_ENTRY(226),
+       I40E_PTT_UNUSED_ENTRY(227),
+       I40E_PTT_UNUSED_ENTRY(228),
+       I40E_PTT_UNUSED_ENTRY(229),
+
+       I40E_PTT_UNUSED_ENTRY(230),
+       I40E_PTT_UNUSED_ENTRY(231),
+       I40E_PTT_UNUSED_ENTRY(232),
+       I40E_PTT_UNUSED_ENTRY(233),
+       I40E_PTT_UNUSED_ENTRY(234),
+       I40E_PTT_UNUSED_ENTRY(235),
+       I40E_PTT_UNUSED_ENTRY(236),
+       I40E_PTT_UNUSED_ENTRY(237),
+       I40E_PTT_UNUSED_ENTRY(238),
+       I40E_PTT_UNUSED_ENTRY(239),
+
+       I40E_PTT_UNUSED_ENTRY(240),
+       I40E_PTT_UNUSED_ENTRY(241),
+       I40E_PTT_UNUSED_ENTRY(242),
+       I40E_PTT_UNUSED_ENTRY(243),
+       I40E_PTT_UNUSED_ENTRY(244),
+       I40E_PTT_UNUSED_ENTRY(245),
+       I40E_PTT_UNUSED_ENTRY(246),
+       I40E_PTT_UNUSED_ENTRY(247),
+       I40E_PTT_UNUSED_ENTRY(248),
+       I40E_PTT_UNUSED_ENTRY(249),
+
+       I40E_PTT_UNUSED_ENTRY(250),
+       I40E_PTT_UNUSED_ENTRY(251),
+       I40E_PTT_UNUSED_ENTRY(252),
+       I40E_PTT_UNUSED_ENTRY(253),
+       I40E_PTT_UNUSED_ENTRY(254),
+       I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
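The table above is a dense decode array indexed by the 8-bit hardware packet type (ptype) carried in each Rx descriptor: defined ptypes are filled in with I40E_PTT() and every unused slot decodes to a zeroed "unknown" entry. A standalone C sketch of the same technique follows; the struct and field names are invented for the illustration (not the driver's decode type), while the two example indexes mirror entries 88 (fragmented IPv6, no L4) and 90 (IPv6/UDP) above.

/* Illustrative decode table -- names and layout are assumptions for the
 * sketch, not the i40e driver's types. */
#include <stdint.h>
#include <stdio.h>

struct ptype_decoded {
	uint8_t known;          /* 0 for unused/reserved slots   */
	uint8_t outer_ip_ver;   /* 4 or 6, 0 when not IP         */
	uint8_t l4_proto;       /* 0 = none, 6 = TCP, 17 = UDP   */
};

/* Designated initializers fill only the defined ptypes; the remaining
 * slots of the 256-entry array stay zeroed and decode as "unknown". */
static const struct ptype_decoded ptype_lookup[256] = {
	[88] = { .known = 1, .outer_ip_ver = 6, .l4_proto = 0  },
	[90] = { .known = 1, .outer_ip_ver = 6, .l4_proto = 17 },
};

int main(void)
{
	uint8_t hw_ptype = 90;  /* as if read from an Rx descriptor */
	const struct ptype_decoded *d = &ptype_lookup[hw_ptype];

	if (d->known)
		printf("IPv%u, L4 protocol %u\n", d->outer_ip_ver, d->l4_proto);
	else
		printf("unknown ptype %u\n", hw_ptype);
	return 0;
}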
 /**
  * i40e_init_shared_code - Initialize the shared code
  * @hw: pointer to hardware structure
index 50730141bb7b2ea9c726ed070d641579fc9a28bd..036570d76176cd211ef2ee22328aeecd96b7174a 100644 (file)
@@ -332,6 +332,7 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
        u16 type;
        u16 length;
        u16 typelength;
+       u16 offset = 0;
 
        if (!lldpmib || !dcbcfg)
                return I40E_ERR_PARAM;
@@ -339,15 +340,17 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
        /* set to the start of LLDPDU */
        lldpmib += ETH_HLEN;
        tlv = (struct i40e_lldp_org_tlv *)lldpmib;
-       while (tlv) {
+       while (1) {
                typelength = ntohs(tlv->typelength);
                type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
                             I40E_LLDP_TLV_TYPE_SHIFT);
                length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
                               I40E_LLDP_TLV_LEN_SHIFT);
+               offset += sizeof(typelength) + length;
 
-               if (type == I40E_TLV_TYPE_END)
-                       break;/* END TLV break out */
+               /* END TLV or beyond LLDPDU size */
+               if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
+                       break;
 
                switch (type) {
                case I40E_TLV_TYPE_ORG:
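The change above hardens the TLV walk: previously the loop relied solely on finding an END TLV, so a malformed LLDP MIB without one could run past the buffer; the new offset accumulator also stops once the walk passes the maximum LLDPDU size. A self-contained sketch of that bounded walk, with the buffer layout and size constant as stand-ins for the i40e definitions:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define TLV_TYPE_END  0
#define MAX_PDU_SIZE  512            /* stand-in for I40E_LLDPDU_SIZE */

/* Each LLDP TLV starts with a 16-bit big-endian type(7)/length(9) word. */
static void walk_tlvs(const uint8_t *buf, size_t buflen)
{
	size_t offset = 0;

	while (1) {
		if (offset + 2 > buflen)
			break;                          /* truncated header */

		uint16_t typelength = (uint16_t)(buf[offset] << 8) | buf[offset + 1];
		unsigned type   = typelength >> 9;
		unsigned length = typelength & 0x1ff;

		offset += 2 + length;

		/* END TLV or beyond the maximum PDU size: stop walking. */
		if (type == TLV_TYPE_END || offset > MAX_PDU_SIZE)
			break;

		printf("TLV type %u, length %u\n", type, length);
	}
}

int main(void)
{
	/* one org TLV (type 127, length 2) followed by an END TLV */
	const uint8_t mib[] = { 0xfe, 0x02, 0xaa, 0xbb, 0x00, 0x00 };

	walk_tlvs(mib, sizeof(mib));
	return 0;
}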
index da22c3fa2c004a23adb8545ee01058d3dd0b2e3f..3c37386fd138fdeb3941170e5789687d90385fd5 100644 (file)
@@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
  **/
 static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
 {
-       if (enable)
+       if (enable) {
                pf->flags |= flag;
-       else
+       } else {
                pf->flags &= ~flag;
+               pf->auto_disable_flags |= flag;
+       }
        dev_info(&pf->pdev->dev, "requesting a pf reset\n");
        i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
 }
@@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                 pf->msg_enable);
                }
        } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
-               dev_info(&pf->pdev->dev, "forcing PFR\n");
+               dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
                i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "corer", 5) == 0) {
-               dev_info(&pf->pdev->dev, "forcing CoreR\n");
+               dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
                i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "globr", 5) == 0) {
-               dev_info(&pf->pdev->dev, "forcing GlobR\n");
+               dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
                i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "empr", 4) == 0) {
-               dev_info(&pf->pdev->dev, "forcing EMPR\n");
+               dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
                i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "read", 4) == 0) {
@@ -1663,28 +1665,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                desc = NULL;
        } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
                   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
-               struct i40e_fdir_data fd_data;
+               struct i40e_fdir_filter fd_data;
                u16 packet_len, i, j = 0;
                char *asc_packet;
+               u8 *raw_packet;
                bool add = false;
                int ret;
 
-               asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+               if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+                       goto command_write_done;
+
+               if (strncmp(cmd_buf, "add", 3) == 0)
+                       add = true;
+
+               if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+                       goto command_write_done;
+
+               asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
                                     GFP_KERNEL);
                if (!asc_packet)
                        goto command_write_done;
 
-               fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
-                                            GFP_KERNEL);
+               raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
+                                    GFP_KERNEL);
 
-               if (!fd_data.raw_packet) {
+               if (!raw_packet) {
                        kfree(asc_packet);
                        asc_packet = NULL;
                        goto command_write_done;
                }
 
-               if (strncmp(cmd_buf, "add", 3) == 0)
-                       add = true;
                cnt = sscanf(&cmd_buf[13],
                             "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
                             &fd_data.q_index,
@@ -1698,36 +1708,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                 cnt);
                        kfree(asc_packet);
                        asc_packet = NULL;
-                       kfree(fd_data.raw_packet);
+                       kfree(raw_packet);
                        goto command_write_done;
                }
 
                /* fix packet length if user entered 0 */
                if (packet_len == 0)
-                       packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+                       packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
 
                /* make sure to check the max as well */
                packet_len = min_t(u16,
-                                  packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+                                  packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
 
                for (i = 0; i < packet_len; i++) {
                        sscanf(&asc_packet[j], "%2hhx ",
-                              &fd_data.raw_packet[i]);
+                              &raw_packet[i]);
                        j += 3;
                }
                dev_info(&pf->pdev->dev, "FD raw packet dump\n");
                print_hex_dump(KERN_INFO, "FD raw packet: ",
                               DUMP_PREFIX_OFFSET, 16, 1,
-                              fd_data.raw_packet, packet_len, true);
-               ret = i40e_program_fdir_filter(&fd_data, pf, add);
+                              raw_packet, packet_len, true);
+               ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
                if (!ret) {
                        dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
                } else {
                        dev_info(&pf->pdev->dev,
                                 "Filter command send failed %d\n", ret);
                }
-               kfree(fd_data.raw_packet);
-               fd_data.raw_packet = NULL;
+               kfree(raw_packet);
+               raw_packet = NULL;
                kfree(asc_packet);
                asc_packet = NULL;
        } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
@@ -2077,9 +2087,13 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                if (!vsi) {
                        dev_info(&pf->pdev->dev,
                                 "tx_timeout: VSI %d not found\n", vsi_seid);
-                       goto netdev_ops_write_done;
-               }
-               if (rtnl_trylock()) {
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else if (test_bit(__I40E_DOWN, &vsi->state)) {
+                       dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
+                                vsi_seid);
+               } else if (rtnl_trylock()) {
                        vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
                        rtnl_unlock();
                        dev_info(&pf->pdev->dev, "tx_timeout called\n");
@@ -2098,9 +2112,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                if (!vsi) {
                        dev_info(&pf->pdev->dev,
                                 "change_mtu: VSI %d not found\n", vsi_seid);
-                       goto netdev_ops_write_done;
-               }
-               if (rtnl_trylock()) {
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else if (rtnl_trylock()) {
                        vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
                                                                mtu);
                        rtnl_unlock();
@@ -2119,9 +2134,10 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                if (!vsi) {
                        dev_info(&pf->pdev->dev,
                                 "set_rx_mode: VSI %d not found\n", vsi_seid);
-                       goto netdev_ops_write_done;
-               }
-               if (rtnl_trylock()) {
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else if (rtnl_trylock()) {
                        vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
                        rtnl_unlock();
                        dev_info(&pf->pdev->dev, "set_rx_mode called\n");
@@ -2139,11 +2155,14 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                if (!vsi) {
                        dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
                                 vsi_seid);
-                       goto netdev_ops_write_done;
+               } else if (!vsi->netdev) {
+                       dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
+                                vsi_seid);
+               } else {
+                       for (i = 0; i < vsi->num_q_vectors; i++)
+                               napi_schedule(&vsi->q_vectors[i]->napi);
+                       dev_info(&pf->pdev->dev, "napi called\n");
                }
-               for (i = 0; i < vsi->num_q_vectors; i++)
-                       napi_schedule(&vsi->q_vectors[i]->napi);
-               dev_info(&pf->pdev->dev, "napi called\n");
        } else {
                dev_info(&pf->pdev->dev, "unknown command '%s'\n",
                         i40e_dbg_netdev_ops_buf);
index b1d7d8c5cb9b64a362a614f8a412698723582ae8..aa123f43fb8e34e802259e34f7e29d603b0a6de3 100644 (file)
@@ -62,6 +62,9 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
        I40E_NETDEV_STAT(rx_crc_errors),
 };
 
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+                                struct ethtool_rxnfc *cmd);
+
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
  * but they are separate.  This device supports Virtualization, and
  * as such might have several netdevs supporting VMDq and FCoE going
@@ -84,6 +87,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
        I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
        I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+       I40E_PF_STAT("tx_timeout", tx_timeout_count),
        I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
        I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
        I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -110,6 +114,11 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
        I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+       /* LPI stats */
+       I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
+       I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
+       I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
+       I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
 };
 
 #define I40E_QUEUE_STATS_LEN(n) \
@@ -649,18 +658,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 
                /* process Tx ring statistics */
                do {
-                       start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
                        data[i] = tx_ring->stats.packets;
                        data[i + 1] = tx_ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
 
                /* Rx ring is the 2nd half of the queue pair */
                rx_ring = &tx_ring[1];
                do {
-                       start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
                        data[i + 2] = rx_ring->stats.packets;
                        data[i + 3] = rx_ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
        }
        rcu_read_unlock();
        if (vsi == pf->vsi[pf->lan_vsi]) {
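The u64_stats_fetch_begin_bh()/..._retry_bh() to _irq conversions above keep the same reader protocol: snapshot the ring's sequence counter, copy the 64-bit counters, and retry if a writer bumped the counter in the meantime. A minimal userspace illustration of that begin/retry shape, using plain C11 atomics rather than the kernel's u64_stats API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	_Atomic unsigned int seq;   /* writer increments before and after an update */
	uint64_t packets;
	uint64_t bytes;
};

/* Reader: keep re-copying until the sequence is even (no writer in progress)
 * and unchanged across the copy -- the same loop shape as the hunk above. */
static void read_stats(struct ring_stats *s, uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*packets = s->packets;
		*bytes   = s->bytes;
	} while ((start & 1) || atomic_load(&s->seq) != start);
}

int main(void)
{
	struct ring_stats s = { .packets = 42, .bytes = 4200 };
	uint64_t p, b;

	read_stats(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}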
@@ -1111,6 +1120,84 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
        return 0;
 }
 
+/**
+ * i40e_get_ethtool_fdir_all - Populates the rule count of a command
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ * @rule_locs: Array of used rule locations
+ *
+ * This function populates both the total and actual rule count of
+ * the ethtool flow classification command
+ *
+ * Returns 0 on success, or -EMSGSIZE if there are more rules than fit in @rule_locs
+ **/
+static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
+                                    struct ethtool_rxnfc *cmd,
+                                    u32 *rule_locs)
+{
+       struct i40e_fdir_filter *rule;
+       struct hlist_node *node2;
+       int cnt = 0;
+
+       /* report total rule count */
+       cmd->data = pf->hw.fdir_shared_filter_count +
+                   pf->fdir_pf_filter_count;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               if (cnt == cmd->rule_cnt)
+                       return -EMSGSIZE;
+
+               rule_locs[cnt] = rule->fd_id;
+               cnt++;
+       }
+
+       cmd->rule_cnt = cnt;
+
+       return 0;
+}
+
+/**
+ * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
+ * @pf: Pointer to the physical function struct
+ * @cmd: The command to get or set Rx flow classification rules
+ *
+ * This function looks up a filter based on the Rx flow classification
+ * command and fills the flow spec info for it if found
+ *
+ * Returns 0 on success or -EINVAL if filter not found
+ **/
+static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
+                                      struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+                       (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct i40e_fdir_filter *rule = NULL;
+       struct hlist_node *node2;
+
+       /* report total rule count */
+       cmd->data = pf->hw.fdir_shared_filter_count +
+                   pf->fdir_pf_filter_count;
+
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               if (fsp->location <= rule->fd_id)
+                       break;
+       }
+
+       if (!rule || fsp->location != rule->fd_id)
+               return -EINVAL;
+
+       fsp->flow_type = rule->flow_type;
+       fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
+       fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
+       fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
+       fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
+       fsp->ring_cookie = rule->q_index;
+
+       return 0;
+}
+
 /**
  * i40e_get_rxnfc - command to get RX flow classification rules
  * @netdev: network interface device structure
@@ -1135,15 +1222,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
                ret = i40e_get_rss_hash_opts(pf, cmd);
                break;
        case ETHTOOL_GRXCLSRLCNT:
-               cmd->rule_cnt = 10;
+               cmd->rule_cnt = pf->fdir_pf_active_filters;
                ret = 0;
                break;
        case ETHTOOL_GRXCLSRULE:
-               ret = 0;
+               ret = i40e_get_ethtool_fdir_entry(pf, cmd);
                break;
        case ETHTOOL_GRXCLSRLALL:
-               cmd->data = 500;
-               ret = 0;
+               ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
+               break;
        default:
                break;
        }
@@ -1274,289 +1361,182 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        return 0;
 }
 
-#define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
 /**
- * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_match_fdir_input_set - Match a new filter against an existing one
+ * @rule: The filter already added
+ * @input: The new filter to compare against
  *
- * Returns 0 if the filters were successfully added or removed
+ * Returns true if the two input sets match
  **/
-static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
-                                  struct i40e_fdir_data *fd_data,
-                                  struct ethtool_rx_flow_spec *fsp, bool add)
+static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
+                                     struct i40e_fdir_filter *input)
 {
-       struct i40e_pf *pf = vsi->back;
-       struct udphdr *udp;
-       struct iphdr *ip;
-       bool err = false;
-       int ret;
-       int i;
-       char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
-                        0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0};
-
-       memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
-
-       ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
-       udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
-             + sizeof(struct iphdr));
-
-       ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
-       ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
-       udp->source = fsp->h_u.tcp_ip4_spec.psrc;
-       udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
-
-       for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
-            i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
-               fd_data->pctype = i;
-               ret = i40e_program_fdir_filter(fd_data, pf, add);
-
-               if (ret) {
-                       dev_info(&pf->pdev->dev,
-                                "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                                fd_data->pctype, ret);
-                       err = true;
-               } else {
-                       dev_info(&pf->pdev->dev,
-                                "Filter OK for PCTYPE %d (ret = %d)\n",
-                                fd_data->pctype, ret);
-               }
-       }
-
-       return err ? -EOPNOTSUPP : 0;
+       if ((rule->dst_ip[0] != input->dst_ip[0]) ||
+           (rule->src_ip[0] != input->src_ip[0]) ||
+           (rule->dst_port != input->dst_port) ||
+           (rule->src_port != input->src_port))
+               return false;
+       return true;
 }
 
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
 /**
- * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
+ * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @input: The filter to update or NULL to indicate deletion
+ * @sw_idx: Software index to the filter
+ * @cmd: The command to get or set Rx flow classification rules
  *
- * Returns 0 if the filters were successfully added or removed
+ * This function updates (or deletes) a Flow Director entry from
+ * the hlist of the corresponding PF
+ *
+ * Returns 0 on success
  **/
-static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
-                                  struct i40e_fdir_data *fd_data,
-                                  struct ethtool_rx_flow_spec *fsp, bool add)
+static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
+                                         struct i40e_fdir_filter *input,
+                                         u16 sw_idx,
+                                         struct ethtool_rxnfc *cmd)
 {
+       struct i40e_fdir_filter *rule, *parent;
        struct i40e_pf *pf = vsi->back;
-       struct tcphdr *tcp;
-       struct iphdr *ip;
-       bool err = false;
-       int ret;
-       /* Dummy packet */
-       char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
-                        0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
-
-       memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
-
-       ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
-       tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
-             + sizeof(struct iphdr));
-
-       ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
-       tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
-       ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
-       tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
-
-       if (add) {
-               if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
-                       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-               }
-       }
+       struct hlist_node *node2;
+       int err = -EINVAL;
 
-       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
-       ret = i40e_program_fdir_filter(fd_data, pf, add);
+       parent = NULL;
+       rule = NULL;
 
-       if (ret) {
-               dev_info(&pf->pdev->dev,
-                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                        fd_data->pctype, ret);
-               err = true;
-       } else {
-               dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
-                        fd_data->pctype, ret);
+       hlist_for_each_entry_safe(rule, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               /* hash found, or no matching entry */
+               if (rule->fd_id >= sw_idx)
+                       break;
+               parent = rule;
        }
 
-       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-
-       ret = i40e_program_fdir_filter(fd_data, pf, add);
-       if (ret) {
-               dev_info(&pf->pdev->dev,
-                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                        fd_data->pctype, ret);
-               err = true;
-       } else {
-               dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
-                         fd_data->pctype, ret);
+       /* if there is an old rule occupying our place remove it */
+       if (rule && (rule->fd_id == sw_idx)) {
+               if (input && !i40e_match_fdir_input_set(rule, input))
+                       err = i40e_add_del_fdir(vsi, rule, false);
+               else if (!input)
+                       err = i40e_add_del_fdir(vsi, rule, false);
+               hlist_del(&rule->fdir_node);
+               kfree(rule);
+               pf->fdir_pf_active_filters--;
        }
 
-       return err ? -EOPNOTSUPP : 0;
-}
+       /* If no input, this was a delete; err should be 0 if a rule was
+        * successfully found and removed from the list, else -EINVAL
+        */
+       if (!input)
+               return err;
 
-/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required from the FDir descriptor
- * @ethtool_rx_flow_spec: the flow spec
- * @add: true adds a filter, false removes it
- *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
-                                   struct i40e_fdir_data *fd_data,
-                                   struct ethtool_rx_flow_spec *fsp, bool add)
-{
-       return -EOPNOTSUPP;
+       /* initialize node and set software index */
+       INIT_HLIST_NODE(&input->fdir_node);
+
+       /* add filter to the list */
+       if (parent)
+               hlist_add_after(&parent->fdir_node, &input->fdir_node);
+       else
+               hlist_add_head(&input->fdir_node,
+                              &pf->fdir_filter_list);
+
+       /* update counts */
+       pf->fdir_pf_active_filters++;
+
+       return 0;
 }
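i40e_update_ethtool_fdir_entry() above keeps the PF's filter hlist sorted by fd_id: it walks until the first node whose id is not smaller than the new one, remembers the previous node as parent, removes an exact match, and then links the new node after parent or at the head. A tiny userspace sketch of that parent-tracking insert on a plain singly linked list (type and function names invented for the illustration):

#include <stdio.h>
#include <stdlib.h>

struct filter {
	int id;
	struct filter *next;
};

/* Insert @node into the id-sorted list at *head, replacing any existing
 * entry with the same id -- the same bookkeeping the hlist code performs. */
static void insert_sorted(struct filter **head, struct filter *node)
{
	struct filter *parent = NULL, *cur = *head;

	while (cur && cur->id < node->id) {
		parent = cur;
		cur = cur->next;
	}

	if (cur && cur->id == node->id) {       /* replace the old entry */
		node->next = cur->next;
		free(cur);
	} else {
		node->next = cur;
	}

	if (parent)
		parent->next = node;            /* link after the parent     */
	else
		*head = node;                   /* new smallest id: new head */
}

int main(void)
{
	struct filter *head = NULL;
	int ids[] = { 5, 1, 3, 3 };             /* second 3 replaces the first */

	for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		struct filter *f = calloc(1, sizeof(*f));
		f->id = ids[i];
		insert_sorted(&head, f);
	}
	for (struct filter *f = head; f; f = f->next)
		printf("%d ", f->id);
	printf("\n");                           /* prints: 1 3 5 */
	return 0;
}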
 
-#define I40E_IP_DUMMY_PACKET_LEN 34
 /**
- * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
- * a specific flow spec
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @fsp: the ethtool flow spec
- * @add: true adds a filter, false removes it
+ * i40e_del_fdir_entry - Deletes a Flow Director filter entry
+ * @vsi: Pointer to the targeted VSI
+ * @cmd: The command to get or set Rx flow classification rules
  *
- * Returns 0 if the filters were successfully added or removed
- **/
-static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
-                                 struct i40e_fdir_data *fd_data,
-                                 struct ethtool_rx_flow_spec *fsp, bool add)
+ * The function removes a Flow Director filter entry from the
+ * hlist of the corresponding PF
+ *
+ * Returns 0 on success
+ **/
+static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
+                              struct ethtool_rxnfc *cmd)
 {
+       struct ethtool_rx_flow_spec *fsp =
+               (struct ethtool_rx_flow_spec *)&cmd->fs;
        struct i40e_pf *pf = vsi->back;
-       struct iphdr *ip;
-       bool err = false;
-       int ret;
-       int i;
-       char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
-                        0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-
-       memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
-       ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+       int ret = 0;
 
-       ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
-       ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
-       ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+       ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
 
-       for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
-            i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
-               fd_data->pctype = i;
-               ret = i40e_program_fdir_filter(fd_data, pf, add);
-
-               if (ret) {
-                       dev_info(&pf->pdev->dev,
-                                "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                                fd_data->pctype, ret);
-                       err = true;
-               } else {
-                       dev_info(&pf->pdev->dev,
-                                "Filter OK for PCTYPE %d (ret = %d)\n",
-                                fd_data->pctype, ret);
-               }
-       }
-
-       return err ? -EOPNOTSUPP : 0;
+       i40e_fdir_check_and_reenable(pf);
+       return ret;
 }
 
 /**
- * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
- * a specific flow spec based on their protocol
+ * i40e_add_fdir_ethtool - Add Flow Director filters
  * @vsi: pointer to the targeted VSI
  * @cmd: command to get or set RX flow classification rules
- * @add: true adds a filter, false removes it
  *
- * Returns 0 if the filters were successfully added or removed
+ * Add Flow Director filters for a specific flow spec based on their
+ * protocol.  Returns 0 if the filters were successfully added.
  **/
-static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
-                       struct ethtool_rxnfc *cmd, bool add)
+static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
+                                struct ethtool_rxnfc *cmd)
 {
-       struct i40e_fdir_data fd_data;
-       int ret = -EINVAL;
+       struct ethtool_rx_flow_spec *fsp;
+       struct i40e_fdir_filter *input;
        struct i40e_pf *pf;
-       struct ethtool_rx_flow_spec *fsp =
-               (struct ethtool_rx_flow_spec *)&cmd->fs;
+       int ret = -EINVAL;
 
        if (!vsi)
                return -EINVAL;
 
        pf = vsi->back;
 
-       if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-           (fsp->ring_cookie >= vsi->num_queue_pairs))
-               return -EINVAL;
+       if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               return -EOPNOTSUPP;
 
-       /* Populate the Flow Director that we have at the moment
-        * and allocate the raw packet buffer for the calling functions
-        */
-       fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
-                                    GFP_KERNEL);
+       if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)
+               return -ENOSPC;
 
-       if (!fd_data.raw_packet) {
-               dev_info(&pf->pdev->dev, "Could not allocate memory\n");
-               return -ENOMEM;
+       fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+       if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
+                             pf->hw.func_caps.fd_filters_guaranteed)) {
+               return -EINVAL;
        }
 
-       fd_data.q_index = fsp->ring_cookie;
-       fd_data.flex_off = 0;
-       fd_data.pctype = 0;
-       fd_data.dest_vsi = vsi->id;
-       fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
-       fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-       fd_data.cnt_index = 0;
-       fd_data.fd_id = 0;
+       if (fsp->ring_cookie >= vsi->num_queue_pairs)
+               return -EINVAL;
 
-       switch (fsp->flow_type & ~FLOW_EXT) {
-       case TCP_V4_FLOW:
-               ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
-               break;
-       case UDP_V4_FLOW:
-               ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
-               break;
-       case SCTP_V4_FLOW:
-               ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
-               break;
-       case IPV4_FLOW:
-               ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
-               break;
-       case IP_USER_FLOW:
-               switch (fsp->h_u.usr_ip4_spec.proto) {
-               case IPPROTO_TCP:
-                       ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
-                       break;
-               case IPPROTO_UDP:
-                       ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
-                       break;
-               case IPPROTO_SCTP:
-                       ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
-                       break;
-               default:
-                       ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
-                       break;
-               }
-               break;
-       default:
-               dev_info(&pf->pdev->dev, "Could not specify spec type\n");
-               ret = -EINVAL;
-       }
+       input = kzalloc(sizeof(*input), GFP_KERNEL);
+
+       if (!input)
+               return -ENOMEM;
 
-       kfree(fd_data.raw_packet);
-       fd_data.raw_packet = NULL;
+       input->fd_id = fsp->location;
+
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+               input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
+       else
+               input->dest_ctl =
+                            I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
+
+       input->q_index = fsp->ring_cookie;
+       input->flex_off = 0;
+       input->pctype = 0;
+       input->dest_vsi = vsi->id;
+       input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
+       input->cnt_index = 0;
+       input->flow_type = fsp->flow_type;
+       input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
+       input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
+       input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+       input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+       input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+       ret = i40e_add_del_fdir(vsi, input, true);
+       if (ret)
+               kfree(input);
+       else
+               i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
 
        return ret;
 }
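i40e_add_fdir_ethtool() above and i40e_del_fdir_entry() earlier are reached from userspace through the standard ethtool rxnfc ioctl, as the next hunk wires up. A minimal sketch of inserting one TCPv4 rule that way; the interface name, addresses, queue and location are made-up example values and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;          /* insert an Rx classification rule */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4src = inet_addr("192.0.2.1");   /* example */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.2");   /* example */
	nfc.fs.h_u.tcp_ip4_spec.psrc = htons(1024);
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.ring_cookie = 3;                 /* deliver to Rx queue 3 (example) */
	nfc.fs.location = 10;                   /* becomes the rule's fd_id */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* example interface */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	return 0;
}

Deleting the rule again is the same ioctl with nfc.cmd = ETHTOOL_SRXCLSRLDEL and only nfc.fs.location filled in; both operations need CAP_NET_ADMIN.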
@@ -1580,10 +1560,10 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
                ret = i40e_set_rss_hash_opt(pf, cmd);
                break;
        case ETHTOOL_SRXCLSRLINS:
-               ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+               ret = i40e_add_fdir_ethtool(vsi, cmd);
                break;
        case ETHTOOL_SRXCLSRLDEL:
-               ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+               ret = i40e_del_fdir_entry(vsi, cmd);
                break;
        default:
                break;
index b901371ca361a1e6eafa411f5b144f6e8866aa9c..a1ec793b93db234ab3144066c558c93236b21184 100644 (file)
@@ -26,6 +26,7 @@
 
 /* Local includes */
 #include "i40e.h"
+#include "i40e_diag.h"
 #ifdef CONFIG_I40E_VXLAN
 #include <net/vxlan.h>
 #endif
@@ -38,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 30
+#define DRV_VERSION_BUILD 36
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -305,6 +306,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+               set_bit(__I40E_DOWN, &vsi->state);
                i40e_down(vsi);
                break;
        }
@@ -375,20 +377,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                        continue;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
                        packets = tx_ring->stats.packets;
                        bytes   = tx_ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
 
                stats->tx_packets += packets;
                stats->tx_bytes   += bytes;
                rx_ring = &tx_ring[1];
 
                do {
-                       start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
                        packets = rx_ring->stats.packets;
                        bytes   = rx_ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
 
                stats->rx_packets += packets;
                stats->rx_bytes   += bytes;
@@ -739,6 +741,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
        u32 rx_page, rx_buf;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
+       u32 val;
        int i;
        u16 q;
 
@@ -769,10 +772,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                p = ACCESS_ONCE(vsi->tx_rings[q]);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                tx_b += bytes;
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
@@ -781,10 +784,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
                do {
-                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                rx_b += bytes;
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_buff_failed;
@@ -971,6 +974,20 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                                   pf->stat_offsets_loaded,
                                   &osd->rx_jabber, &nsd->rx_jabber);
+
+               val = rd32(hw, I40E_PRTPM_EEE_STAT);
+               nsd->tx_lpi_status =
+                              (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+                               I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+               nsd->rx_lpi_status =
+                              (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+                               I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+               i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+                                  pf->stat_offsets_loaded,
+                                  &osd->tx_lpi_count, &nsd->tx_lpi_count);
+               i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+                                  pf->stat_offsets_loaded,
+                                  &osd->rx_lpi_count, &nsd->rx_lpi_count);
        }
 
        pf->stat_offsets_loaded = true;
@@ -1964,11 +1981,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
 
        netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
 
-       /* If the network stack called us with vid = 0, we should
-        * indicate to i40e_vsi_add_vlan() that we want to receive
-        * any traffic (i.e. with any vlan tag, or untagged)
+       /* If the network stack called us with vid = 0 then
+        * it is asking to receive priority tagged packets with
+        * vlan id 0.  Our HW receives them by default when configured
+        * to receive untagged packets so there is no need to add an
+        * extra filter for vlan 0 tagged packets.
         */
-       ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+       if (vid)
+               ret = i40e_vsi_add_vlan(vsi, vid);
 
        if (!ret && (vid < VLAN_N_VID))
                set_bit(vid, vsi->active_vlans);
@@ -1981,7 +2001,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
  * @netdev: network interface to be adjusted
  * @vid: vlan id to be removed
  *
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for removing vlan ids
  **/
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
@@ -2177,6 +2197,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
                                               I40E_FLAG_FD_ATR_ENABLED));
        tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
+       /* FDIR VSI tx ring can still use RS bit and writebacks */
+       if (vsi->type != I40E_VSI_FDIR)
+               tx_ctx.head_wb_ena = 1;
+       tx_ctx.head_wb_addr = ring->dma +
+                             (ring->count * sizeof(struct i40e_tx_desc));
 
        /* As part of VSI creation/update, FW allocates certain
         * Tx arbitration queue sets for each TC enabled for
@@ -2419,6 +2444,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
                i40e_set_rx_mode(vsi->netdev);
 }
 
+/**
+ * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
+ * @vsi: Pointer to the targeted VSI
+ *
+ * This function replays to the hardware the hlist in which all the sideband
+ * Flow Director filters were saved.
+ **/
+static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
+{
+       struct i40e_fdir_filter *filter;
+       struct i40e_pf *pf = vsi->back;
+       struct hlist_node *node;
+
+       if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               return;
+
+       hlist_for_each_entry_safe(filter, node,
+                                 &pf->fdir_filter_list, fdir_node) {
+               i40e_add_del_fdir(vsi, filter, true);
+       }
+}
+
 /**
  * i40e_vsi_configure - Set up the VSI for action
  * @vsi: the VSI being configured
@@ -2557,7 +2604,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
        /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
        wr32(hw, I40E_PFINT_LNKLST0, 0);
 
-       /* Associate the queue pair to the vector and enable the q int */
+       /* Associate the queue pair to the vector and enable the queue int */
        val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
              (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
              (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2831,12 +2878,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
                val = rd32(hw, I40E_GLGEN_RSTAT);
                val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
                       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
-               if (val == I40E_RESET_CORER)
+               if (val == I40E_RESET_CORER) {
                        pf->corer_count++;
-               else if (val == I40E_RESET_GLOBR)
+               } else if (val == I40E_RESET_GLOBR) {
                        pf->globr_count++;
-               else if (val == I40E_RESET_EMPR)
+               } else if (val == I40E_RESET_EMPR) {
                        pf->empr_count++;
+                       set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+               }
        }
 
        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
@@ -2866,8 +2915,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
                         icr0_remaining);
                if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
                    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
-                   (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
-                   (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+                   (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
                        dev_info(&pf->pdev->dev, "device will be reset\n");
                        set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                        i40e_service_event_schedule(pf);
@@ -3107,13 +3155,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               j = 1000;
-               do {
-                       usleep_range(1000, 2000);
+               for (j = 0; j < 50; j++) {
                        tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-               } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
-                              ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
-
+                       if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+                           ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+                               break;
+                       usleep_range(1000, 2000);
+               }
                /* Skip if the queue is already in the requested state */
                if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
                        continue;
@@ -3123,8 +3171,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                /* turn on/off the queue */
                if (enable) {
                        wr32(hw, I40E_QTX_HEAD(pf_q), 0);
-                       tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
-                                 I40E_QTX_ENA_QENA_STAT_MASK;
+                       tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
                } else {
                        tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
                }
@@ -3171,12 +3218,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               j = 1000;
-               do {
-                       usleep_range(1000, 2000);
+               for (j = 0; j < 50; j++) {
                        rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-               } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
-                              ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+                       if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+                           ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+                               break;
+                       usleep_range(1000, 2000);
+               }
 
                if (enable) {
                        /* is STAT set ? */
@@ -3190,11 +3238,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 
                /* turn on/off the queue */
                if (enable)
-                       rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
-                                 I40E_QRX_ENA_QENA_STAT_MASK;
+                       rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
                else
-                       rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
-                                 I40E_QRX_ENA_QENA_STAT_MASK);
+                       rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 
                /* wait for the change to finish */
@@ -3732,8 +3778,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                          NULL);
        if (aq_ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "AQ command Config VSI BW allocation per TC failed = %d\n",
+                        vsi->back->hw.aq.asq_last_status);
                return -EINVAL;
        }
 
@@ -4062,6 +4108,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
        } else if (vsi->netdev) {
                netdev_info(vsi->netdev, "NIC Link is Down\n");
        }
+
+       /* replay FDIR SB filters */
+       if (vsi->type == I40E_VSI_FDIR)
+               i40e_fdir_filter_restore(vsi);
        i40e_service_event_schedule(pf);
 
        return 0;
@@ -4208,15 +4258,40 @@ static int i40e_open(struct net_device *netdev)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       char int_name[IFNAMSIZ];
        int err;
 
-       /* disallow open during test */
-       if (test_bit(__I40E_TESTING, &pf->state))
+       /* disallow open during test or if eeprom is broken */
+       if (test_bit(__I40E_TESTING, &pf->state) ||
+           test_bit(__I40E_BAD_EEPROM, &pf->state))
                return -EBUSY;
 
        netif_carrier_off(netdev);
 
+       err = i40e_vsi_open(vsi);
+       if (err)
+               return err;
+
+#ifdef CONFIG_I40E_VXLAN
+       vxlan_get_rx_port(netdev);
+#endif
+
+       return 0;
+}
+
+/**
+ * i40e_vsi_open - finish initialization of the VSI and bring it up
+ * @vsi: the VSI to open
+ *
+ * Finish initialization of the VSI.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+int i40e_vsi_open(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       char int_name[IFNAMSIZ];
+       int err;
+
        /* allocate descriptors */
        err = i40e_vsi_setup_tx_resources(vsi);
        if (err)
@@ -4229,18 +4304,22 @@ static int i40e_open(struct net_device *netdev)
        if (err)
                goto err_setup_rx;
 
+       if (!vsi->netdev) {
+               err = -EINVAL;
+               goto err_setup_rx;
+       }
        snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
-                dev_driver_string(&pf->pdev->dev), netdev->name);
+                dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
        err = i40e_vsi_request_irq(vsi, int_name);
        if (err)
                goto err_setup_rx;
 
        /* Notify the stack of the actual queue counts. */
-       err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs);
+       err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
        if (err)
                goto err_set_queues;
 
-       err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs);
+       err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
        if (err)
                goto err_set_queues;
 
@@ -4248,10 +4327,6 @@ static int i40e_open(struct net_device *netdev)
        if (err)
                goto err_up_complete;
 
-#ifdef CONFIG_I40E_VXLAN
-       vxlan_get_rx_port(netdev);
-#endif
-
        return 0;
 
 err_up_complete:
@@ -4268,6 +4343,26 @@ err_setup_tx:
        return err;
 }
 
+/**
+ * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
+ * @pf: Pointer to pf
+ *
+ * This function destroys the hlist where all the Flow Director
+ * filters were saved.
+ **/
+static void i40e_fdir_filter_exit(struct i40e_pf *pf)
+{
+       struct i40e_fdir_filter *filter;
+       struct hlist_node *node2;
+
+       hlist_for_each_entry_safe(filter, node2,
+                                 &pf->fdir_filter_list, fdir_node) {
+               hlist_del(&filter->fdir_node);
+               kfree(filter);
+       }
+       pf->fdir_pf_active_filters = 0;
+}
+
 /**
  * i40e_close - Disables a network interface
  * @netdev: network interface device structure
@@ -4321,7 +4416,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                 * for the warning interrupt will deal with the shutdown
                 * and recovery of the switch setup.
                 */
-               dev_info(&pf->pdev->dev, "GlobalR requested\n");
+               dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4332,7 +4427,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                 *
                 * Same as Global Reset, except does *not* include the MAC/PHY
                 */
-               dev_info(&pf->pdev->dev, "CoreR requested\n");
+               dev_dbg(&pf->pdev->dev, "CoreR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_CORER_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4366,7 +4461,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                 * the switch, since we need to do all the recovery as
                 * for the Core Reset.
                 */
-               dev_info(&pf->pdev->dev, "PFR requested\n");
+               dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);
 
        } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -4415,18 +4510,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
                           &old_cfg->etscfg.prioritytable,
                           sizeof(new_cfg->etscfg.prioritytable))) {
                        need_reconfig = true;
-                       dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n");
+                       dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
                }
 
                if (memcmp(&new_cfg->etscfg.tcbwtable,
                           &old_cfg->etscfg.tcbwtable,
                           sizeof(new_cfg->etscfg.tcbwtable)))
-                       dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+                       dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
 
                if (memcmp(&new_cfg->etscfg.tsatable,
                           &old_cfg->etscfg.tsatable,
                           sizeof(new_cfg->etscfg.tsatable)))
-                       dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n");
+                       dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
        }
 
        /* Check if PFC configuration has changed */
@@ -4434,7 +4529,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
                   &old_cfg->pfc,
                   sizeof(new_cfg->pfc))) {
                need_reconfig = true;
-               dev_info(&pf->pdev->dev, "PFC config change detected.\n");
+               dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
        }
 
        /* Check if APP Table has changed */
@@ -4442,7 +4537,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
                   &old_cfg->app,
                   sizeof(new_cfg->app))) {
                need_reconfig = true;
-               dev_info(&pf->pdev->dev, "APP Table change detected.\n");
+               dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
        }
 
        return need_reconfig;
@@ -4492,7 +4587,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
        /* No change detected in DCBX configs */
        if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
-               dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
+               dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
                goto exit;
        }
 
@@ -4550,8 +4645,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
        struct i40e_vf *vf;
        u16 vf_id;
 
-       dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
-                __func__, queue, qtx_ctl);
+       dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
+               queue, qtx_ctl);
 
        /* Queue belongs to VF, find the VF and issue VF reset */
        if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4580,6 +4675,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
 }
 
+/**
+ * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * @pf: board private structure
+ **/
+int i40e_get_current_fd_count(struct i40e_pf *pf)
+{
+       int val, fcnt_prog;
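+
+       /* FDSTAT holds the guaranteed-filter count in its low bits and the
+        * best-effort count in its high bits; sum them for the total number
+        * of filters currently programmed in the hardware
+        */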
+       val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+       fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
+                   ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+                     I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
+ * @pf: board private structure
+ **/
+void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
+{
+       u32 fcnt_prog, fcnt_avail;
+
+       /* Check if FD SB or ATR was auto-disabled and if there is enough room
+        * to re-enable
+        */
+       if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+           (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               return;
+       fcnt_prog = i40e_get_current_fd_count(pf);
+       fcnt_avail = pf->hw.fdir_shared_filter_count +
+                                              pf->fdir_pf_filter_count;
+       if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
+               if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+               }
+       }
+       /* Wait for some more space to be available to turn on ATR */
+       if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
+               if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+               }
+       }
+}
+
 /**
  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
  * @pf: board private structure
@@ -4589,11 +4732,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
        if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
                return;
 
-       pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
-
        /* if interface is down do nothing */
        if (test_bit(__I40E_DOWN, &pf->state))
                return;
+       i40e_fdir_check_and_reenable(pf);
+
+       if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+           (pf->flags & I40E_FLAG_FD_SB_ENABLED))
+               pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
 }
 
 /**
@@ -4903,7 +5049,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
                                        event.msg_size);
                        break;
                case i40e_aqc_opc_lldp_update_mib:
-                       dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+                       dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
 #ifdef CONFIG_I40E_DCB
                        rtnl_lock();
                        ret = i40e_handle_lldp_event(pf, &event);
@@ -4911,7 +5057,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 #endif /* CONFIG_I40E_DCB */
                        break;
                case i40e_aqc_opc_event_lan_overflow:
-                       dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+                       dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
                        i40e_handle_lan_overflow_event(pf, &event);
                        break;
                case i40e_aqc_opc_send_msg_to_peer:
@@ -4935,6 +5081,31 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        kfree(event.msg_buf);
 }
 
+/**
+ * i40e_verify_eeprom - make sure eeprom is good to use
+ * @pf: board private structure
+ **/
+static void i40e_verify_eeprom(struct i40e_pf *pf)
+{
+       int err;
+
+       err = i40e_diag_eeprom_test(&pf->hw);
+       if (err) {
+               /* retry in case of garbage read */
+               err = i40e_diag_eeprom_test(&pf->hw);
+               if (err) {
+                       dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
+                                err);
+                       set_bit(__I40E_BAD_EEPROM, &pf->state);
+               }
+       }
+
+       if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
+               clear_bit(__I40E_BAD_EEPROM, &pf->state);
+       }
+}
+
 /**
  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
  * @veb: pointer to the VEB instance
@@ -5053,6 +5224,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
        /* increment MSI-X count because current FW skips one */
        pf->hw.func_caps.num_msix_vectors++;
 
+       if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
+           (pf->hw.aq.fw_maj_ver < 2)) {
+               pf->hw.func_caps.num_msix_vectors++;
+               pf->hw.func_caps.num_msix_vectors_vf++;
+       }
+
        if (pf->hw.debug_mask & I40E_DEBUG_USER)
                dev_info(&pf->pdev->dev,
                         "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -5132,9 +5309,9 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
                err = i40e_up_complete(vsi);
                if (err)
                        goto err_up_complete;
+               clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
        }
 
-       clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
        return;
 
 err_up_complete:
@@ -5157,6 +5334,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 {
        int i;
 
+       i40e_fdir_filter_exit(pf);
        for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
                if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
                        i40e_vsi_release(pf->vsi[i]);
@@ -5181,7 +5359,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
                return 0;
 
-       dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+       dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
        if (i40e_check_asq_alive(hw))
                i40e_vc_notify_reset(pf);
@@ -5228,7 +5406,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 
        if (test_bit(__I40E_DOWN, &pf->state))
                goto end_core_reset;
-       dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+       dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
        /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
        ret = i40e_init_adminq(&pf->hw);
@@ -5237,6 +5415,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                goto end_core_reset;
        }
 
+       /* re-verify the eeprom if we just had an EMP reset */
+       if (test_bit(__I40E_EMP_RESET_REQUESTED, &pf->state)) {
+               clear_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
+               i40e_verify_eeprom(pf);
+       }
+
        ret = i40e_get_capabilities(pf);
        if (ret) {
                dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5278,7 +5462,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
         * try to recover minimal use by getting the basic PF VSI working.
         */
        if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
-               dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+               dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
                /* find the one VEB connected to the MAC, and find orphans */
                for (v = 0; v < I40E_MAX_VEB; v++) {
                        if (!pf->veb[v])
@@ -5331,6 +5515,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* restart the VSIs that were rebuilt and running before the reset */
        i40e_pf_unquiesce_all_vsi(pf);
 
+       if (pf->num_alloc_vfs) {
+               for (v = 0; v < pf->num_alloc_vfs; v++)
+                       i40e_reset_vf(&pf->vf[v], true);
+       }
+
        /* tell the firmware that we're starting */
        dv.major_version = DRV_VERSION_MAJOR;
        dv.minor_version = DRV_VERSION_MINOR;
@@ -5338,7 +5527,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        dv.subbuild_version = 0;
        i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 
-       dev_info(&pf->pdev->dev, "PF reset done\n");
+       dev_info(&pf->pdev->dev, "reset complete\n");
 
 end_core_reset:
        clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5387,7 +5576,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
                                >> I40E_GL_MDET_TX_QUEUE_SHIFT;
                dev_info(&pf->pdev->dev,
-                        "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+                        "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
                         event, queue, func);
                wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
                mdd_detected = true;
@@ -5401,7 +5590,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
                                >> I40E_GL_MDET_RX_QUEUE_SHIFT;
                dev_info(&pf->pdev->dev,
-                        "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+                        "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
                         event, queue, func);
                wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
                mdd_detected = true;
@@ -5850,37 +6039,16 @@ err_out:
  **/
 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
 {
-       int err = 0;
-
-       pf->num_msix_entries = 0;
-       while (vectors >= I40E_MIN_MSIX) {
-               err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
-               if (err == 0) {
-                       /* good to go */
-                       pf->num_msix_entries = vectors;
-                       break;
-               } else if (err < 0) {
-                       /* total failure */
-                       dev_info(&pf->pdev->dev,
-                                "MSI-X vector reservation failed: %d\n", err);
-                       vectors = 0;
-                       break;
-               } else {
-                       /* err > 0 is the hint for retry */
-                       dev_info(&pf->pdev->dev,
-                                "MSI-X vectors wanted %d, retrying with %d\n",
-                                vectors, err);
-                       vectors = err;
-               }
-       }
-
-       if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+       vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+                                       I40E_MIN_MSIX, vectors);
+       if (vectors < 0) {
                dev_info(&pf->pdev->dev,
-                        "Couldn't get enough vectors, only %d available\n",
-                        vectors);
+                        "MSI-X vector reservation failed: %d\n", vectors);
                vectors = 0;
        }
 
+       pf->num_msix_entries = vectors;
+
        return vectors;
 }
 
@@ -5942,7 +6110,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
 
        } else if (vec == I40E_MIN_MSIX) {
                /* Adjust for minimal MSIX use */
-               dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+               dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
                pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
                pf->num_vmdq_vsis = 0;
                pf->num_vmdq_qps = 0;
@@ -5978,13 +6146,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
 }
 
 /**
- * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the vsi struct
  *
  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
  **/
-static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 {
        struct i40e_q_vector *q_vector;
 
@@ -6010,13 +6178,13 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 }
 
 /**
- * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
  *
  * We allocate one q_vector per queue interrupt.  If allocation fails we
  * return -ENOMEM.
  **/
-static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int v_idx, num_q_vectors;
@@ -6031,7 +6199,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
                return -EINVAL;
 
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               err = i40e_alloc_q_vector(vsi, v_idx);
+               err = i40e_vsi_alloc_q_vector(vsi, v_idx);
                if (err)
                        goto err_out;
        }
@@ -6071,7 +6239,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            (pf->flags & I40E_FLAG_MSI_ENABLED)) {
-               dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
+               dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
                err = pci_enable_msi(pf->pdev);
                if (err) {
                        dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6248,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
        }
 
        if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
-               dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+               dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
 
        /* track first vector for misc interrupts */
        err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6275,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
                                  i40e_intr, 0, pf->misc_int_name, pf);
                if (err) {
                        dev_info(&pf->pdev->dev,
-                                "request_irq for msix_misc failed: %d\n", err);
+                                "request_irq for %s failed: %d\n",
+                                pf->misc_int_name, err);
                        return -EFAULT;
                }
        }
@@ -6258,15 +6427,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-               dev_info(&pf->pdev->dev,
-                       "Flow Director ATR mode Enabled\n");
                if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-                       dev_info(&pf->pdev->dev,
-                                "Flow Director Side Band mode Enabled\n");
                } else {
                        dev_info(&pf->pdev->dev,
-                                "Flow Director Side Band mode Disabled in MFP mode\n");
+                                "Flow Director Sideband mode Disabled in MFP mode\n");
                }
                pf->fdir_pf_filter_count =
                                 pf->hw.func_caps.fd_filters_guaranteed;
@@ -6287,9 +6452,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->num_req_vfs = min_t(int,
                                        pf->hw.func_caps.num_vfs,
                                        I40E_MAX_VF_COUNT);
-               dev_info(&pf->pdev->dev,
-                        "Number of VFs being requested for PF[%d] = %d\n",
-                        pf->hw.pf_id, pf->num_req_vfs);
        }
 #endif /* CONFIG_PCI_IOV */
        pf->eeprom_version = 0xDEAD;
@@ -6325,6 +6487,39 @@ sw_init_done:
        return err;
 }
 
+/**
+ * i40e_set_ntuple - set the ntuple feature flag and take action
+ * @pf: board private structure to initialize
+ * @features: the feature set that the stack is suggesting
+ *
+ * returns a bool to indicate if reset needs to happen
+ **/
+bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
+{
+       bool need_reset = false;
+
+       /* Check if Flow Director n-tuple support was enabled or disabled.  If
+        * the state changed, we need to reset.
+        */
+       if (features & NETIF_F_NTUPLE) {
+               /* Enable filters and mark for reset */
+               if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
+                       need_reset = true;
+               pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+       } else {
+               /* turn off filters, mark for reset and clear SW filter list */
+               if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+                       need_reset = true;
+                       i40e_fdir_filter_exit(pf);
+               }
+               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               /* if ATR was disabled it can be re-enabled. */
+               if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
+                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+       }
+       return need_reset;
+}
+
 /**
  * i40e_set_features - set the netdev feature flags
  * @netdev: ptr to the netdev being adjusted
@@ -6335,12 +6530,19 @@ static int i40e_set_features(struct net_device *netdev,
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       bool need_reset;
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                i40e_vlan_stripping_enable(vsi);
        else
                i40e_vlan_stripping_disable(vsi);
 
+       need_reset = i40e_set_ntuple(pf, features);
+
+       if (need_reset)
+               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
        return 0;
 }
 
@@ -6464,6 +6666,7 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
        .ndo_set_vf_tx_rate     = i40e_ndo_set_vf_bw,
        .ndo_get_vf_config      = i40e_ndo_get_vf_config,
+       .ndo_set_vf_link_state  = i40e_ndo_set_vf_link_state,
 #ifdef CONFIG_I40E_VXLAN
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
@@ -6495,10 +6698,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 
-       netdev->hw_enc_features = NETIF_F_IP_CSUM        |
+       netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
                                  NETIF_F_GSO_UDP_TUNNEL |
-                                 NETIF_F_TSO            |
-                                 NETIF_F_SG;
+                                 NETIF_F_TSO;
 
        netdev->features = NETIF_F_SG                  |
                           NETIF_F_IP_CSUM             |
@@ -6512,6 +6714,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                           NETIF_F_TSO                 |
                           NETIF_F_TSO6                |
                           NETIF_F_RXCSUM              |
+                          NETIF_F_NTUPLE              |
                           NETIF_F_RXHASH              |
                           0;
 
@@ -6771,8 +6974,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                        if (vsi->netdev) {
                                /* results in a call to i40e_close() */
                                unregister_netdev(vsi->netdev);
-                               free_netdev(vsi->netdev);
-                               vsi->netdev = NULL;
                        }
                } else {
                        if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
@@ -6791,6 +6992,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
 
        i40e_vsi_delete(vsi);
        i40e_vsi_free_q_vectors(vsi);
+       if (vsi->netdev) {
+               free_netdev(vsi->netdev);
+               vsi->netdev = NULL;
+       }
        i40e_vsi_clear_rings(vsi);
        i40e_vsi_clear(vsi);
 
@@ -6845,13 +7050,12 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
        }
 
        if (vsi->base_vector) {
-               dev_info(&pf->pdev->dev,
-                        "VSI %d has non-zero base vector %d\n",
+               dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
                         vsi->seid, vsi->base_vector);
                return -EEXIST;
        }
 
-       ret = i40e_alloc_q_vectors(vsi);
+       ret = i40e_vsi_alloc_q_vectors(vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "failed to allocate %d q_vector for VSI %d, ret=%d\n",
@@ -6865,7 +7069,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
                                                 vsi->num_q_vectors, vsi->idx);
        if (vsi->base_vector < 0) {
                dev_info(&pf->pdev->dev,
-                        "failed to get q tracking for VSI %d, err=%d\n",
+                        "failed to get queue tracking for VSI %d, err=%d\n",
                         vsi->seid, vsi->base_vector);
                i40e_vsi_free_q_vectors(vsi);
                ret = -ENOENT;
@@ -7822,6 +8026,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
        return 0;
 }
 
+#define INFO_STRING_LEN 255
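+/**
+ * i40e_print_features - report enabled features in one log line
+ * @pf: board private structure
+ **/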
+static void i40e_print_features(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       char *buf, *string;
+
+       string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
+       if (!string) {
+               dev_err(&pf->pdev->dev, "Features string allocation failed\n");
+               return;
+       }
+
+       buf = string;
+
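+       /* each sprintf() returns the number of characters it wrote, so buf
+        * always points at the end of the string built so far
+        */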
+       buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
+#ifdef CONFIG_PCI_IOV
+       buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
+#endif
+       buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
+                      pf->vsi[pf->lan_vsi]->num_queue_pairs);
+
+       if (pf->flags & I40E_FLAG_RSS_ENABLED)
+               buf += sprintf(buf, "RSS ");
+       buf += sprintf(buf, "FDir ");
+       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
+               buf += sprintf(buf, "ATR ");
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+               buf += sprintf(buf, "NTUPLE ");
+       if (pf->flags & I40E_FLAG_DCB_ENABLED)
+               buf += sprintf(buf, "DCB ");
+       if (pf->flags & I40E_FLAG_PTP)
+               buf += sprintf(buf, "PTP ");
+
+       BUG_ON(buf > (string + INFO_STRING_LEN));
+       dev_info(&pf->pdev->dev, "%s\n", string);
+       kfree(string);
+}
+
 /**
  * i40e_probe - Device initialization routine
  * @pdev: PCI device information struct
@@ -7848,16 +8090,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        /* set up for high or low dma */
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               /* coherent mask for the same size will always succeed if
-                * dma_set_mask does
-                */
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-       } else {
-               dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
-               err = -EIO;
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err)
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&pdev->dev,
+                       "DMA configuration failed: 0x%x\n", err);
                goto err_dma;
        }
 
@@ -7946,13 +8184,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = i40e_init_adminq(hw);
        dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
-       if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
-                >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
-               dev_info(&pdev->dev,
-                        "warning: NVM version not supported, supported version: %02x.%02x\n",
-                        I40E_CURRENT_NVM_VERSION_HI,
-                        I40E_CURRENT_NVM_VERSION_LO);
-       }
        if (err) {
                dev_info(&pdev->dev,
                         "init_adminq failed: %d expecting API %02x.%02x\n",
@@ -7961,6 +8192,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_pf_reset;
        }
 
+       i40e_verify_eeprom(pf);
+
        i40e_clear_pxe_mode(hw);
        err = i40e_get_capabilities(pf);
        if (err)
@@ -8062,7 +8295,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* prep for VF support */
        if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
-           (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+           (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
                u32 val;
 
                /* disable link interrupts for VFs */
@@ -8070,6 +8304,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
                wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
                i40e_flush(hw);
+
+               if (pci_num_vf(pdev)) {
+                       dev_info(&pdev->dev,
+                                "Active VFs found, allocating resources.\n");
+                       err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+                       if (err)
+                               dev_info(&pdev->dev,
+                                        "Error %d allocating resources for existing VFs\n",
+                                        err);
+               }
        }
 
        pfs_found++;
@@ -8092,7 +8336,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        i40e_set_pci_config_data(hw, link_status);
 
-       dev_info(&pdev->dev, "PCI Express: %s %s\n",
+       dev_info(&pdev->dev, "PCI-Express: %s %s\n",
                (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
                 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
                 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
@@ -8109,6 +8353,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
        }
 
+       /* print a string summarizing features */
+       i40e_print_features(pf);
+
        return 0;
 
        /* Unwind what we've done if something failed in the setup */
@@ -8165,16 +8412,16 @@ static void i40e_remove(struct pci_dev *pdev)
 
        i40e_ptp_stop(pf);
 
-       if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
-               i40e_free_vfs(pf);
-               pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
-       }
-
        /* no more scheduling of any task */
        set_bit(__I40E_DOWN, &pf->state);
        del_timer_sync(&pf->service_timer);
        cancel_work_sync(&pf->service_task);
 
+       if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+               i40e_free_vfs(pf);
+               pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+       }
+
        i40e_fdir_teardown(pf);
 
        /* If there is a switch structure or any orphans, remove them.
index 73f95b081927c13413f7a006a6638195b3929507..262bdf11d221e5a30be53a2f57a09e1865b32e79 100644 (file)
 #include "i40e_prototype.h"
 
 /**
- *  i40e_init_nvm_ops - Initialize NVM function pointers.
- *  @hw: pointer to the HW structure.
+ * i40e_init_nvm_ops - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
  *
- *  Setups the function pointers and the NVM info structure. Should be called
- *  once per NVM initialization, e.g. inside the i40e_init_shared_code().
- *  Please notice that the NVM term is used here (& in all methods covered
- *  in this file) as an equivalent of the FLASH part mapped into the SR.
- *  We are accessing FLASH always thru the Shadow RAM.
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Please note that the NVM term is used here (and in all methods covered
+ * in this file) as an equivalent of the FLASH part mapped into the SR.
+ * We always access the FLASH through the Shadow RAM.
  **/
 i40e_status i40e_init_nvm(struct i40e_hw *hw)
 {
@@ -49,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
        gens = rd32(hw, I40E_GLNVM_GENS);
        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
                           I40E_GLNVM_GENS_SR_SIZE_SHIFT);
-       /* Switching to words (sr_size contains power of 2KB). */
+       /* Switching to words (sr_size contains power of 2KB) */
        nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
 
-       /* Check if we are in the normal or blank NVM programming mode. */
+       /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, I40E_GLNVM_FLA);
-       if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
-               /* Max NVM timeout. */
+       if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+               /* Max NVM timeout */
                nvm->timeout = I40E_MAX_NVM_TIMEOUT;
                nvm->blank_nvm_mode = false;
-       } else { /* Blank programming mode. */
+       } else { /* Blank programming mode */
                nvm->blank_nvm_mode = true;
                ret_code = I40E_ERR_NVM_BLANK_MODE;
                hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
@@ -68,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
- *  @hw: pointer to the HW structure.
- *  @access: NVM access type (read or write).
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
  *
- *  This function will request NVM ownership for reading
- *  via the proper Admin Command.
+ * This function will request NVM ownership for the given access type
+ * (read or write) via the proper Admin Command.
  **/
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
                                       enum i40e_aq_resource_access_type access)
@@ -87,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
 
        ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
                                            0, &time, NULL);
-       /* Reading the Global Device Timer. */
+       /* Reading the Global Device Timer */
        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
 
-       /* Store the timeout. */
+       /* Store the timeout */
        hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
 
        if (ret_code) {
-               /* Set the polling timeout. */
+               /* Set the polling timeout */
                if (time > I40E_MAX_NVM_TIMEOUT)
                        timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
                                  + gtime;
                else
                        timeout = hw->nvm.hw_semaphore_timeout;
-               /* Poll until the current NVM owner timeouts. */
+               /* Poll until the current NVM owner times out */
                while (gtime < timeout) {
                        usleep_range(10000, 20000);
                        ret_code = i40e_aq_request_resource(hw,
@@ -128,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:
 }
 
 /**
- *  i40e_release_nvm - Generic request for releasing the NVM ownership.
- *  @hw: pointer to the HW structure.
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
  *
- *  This function will release NVM resource via the proper Admin Command.
+ * This function will release NVM resource via the proper Admin Command.
  **/
 void i40e_release_nvm(struct i40e_hw *hw)
 {
@@ -140,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
- *  @hw: pointer to the HW structure.
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
  *
- *  Polls the SRCTL Shadow RAM register done bit.
+ * Polls the SRCTL Shadow RAM register done bit.
  **/
 static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 {
        i40e_status ret_code = I40E_ERR_TIMEOUT;
        u32 srctl, wait_cnt;
 
-       /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+       /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
        for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
                srctl = rd32(hw, I40E_GLNVM_SRCTL);
                if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
@@ -165,12 +165,12 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
 }
 
 /**
- *  i40e_read_nvm_word - Reads Shadow RAM
- *  @hw: pointer to the HW structure.
- *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- *  @data: word read from the Shadow RAM.
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
  *
- *  Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
  **/
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data)
@@ -184,15 +184,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                goto read_nvm_exit;
        }
 
-       /* Poll the done bit first. */
+       /* Poll the done bit first */
        ret_code = i40e_poll_sr_srctl_done_bit(hw);
        if (!ret_code) {
-               /* Write the address and start reading. */
+               /* Write the address and start reading */
                sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
                         (1 << I40E_GLNVM_SRCTL_START_SHIFT);
                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
-               /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+               /* Poll I40E_GLNVM_SRCTL until the done bit is set */
                ret_code = i40e_poll_sr_srctl_done_bit(hw);
                if (!ret_code) {
                        sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
@@ -210,16 +210,15 @@ read_nvm_exit:
 }
 
 /**
- *  i40e_read_nvm_buffer - Reads Shadow RAM buffer.
- *  @hw: pointer to the HW structure.
- *  @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
- *  @words: number of words to read (in) &
- *          number of words read before the NVM ownership timeout (out).
- *  @data: words read from the Shadow RAM.
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
  *
- *  Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
- *  method. The buffer read is preceded by the NVM ownership take
- *  and followed by the release.
+ * Reads 16 bit words (data buffer) from the Shadow RAM using the
+ * i40e_read_nvm_word() method. The buffer read is preceded by taking
+ * NVM ownership and followed by its release.
  **/
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                           u16 *words, u16 *data)
@@ -227,7 +226,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
        i40e_status ret_code = 0;
        u16 index, word;
 
-       /* Loop thru the selected region. */
+       /* Loop thru the selected region */
        for (word = 0; word < *words; word++) {
                index = offset + word;
                ret_code = i40e_read_nvm_word(hw, index, &data[word]);
@@ -235,21 +234,21 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                        break;
        }
 
-       /* Update the number of words read from the Shadow RAM. */
+       /* Update the number of words read from the Shadow RAM */
        *words = word;
 
        return ret_code;
 }
 
 /**
- *  i40e_calc_nvm_checksum - Calculates and returns the checksum
- *  @hw: pointer to hardware structure
- *  @checksum: pointer to the checksum
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
  *
- *  This function calculate SW Checksum that covers the whole 64kB shadow RAM
- *  except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
- *  is customer specific and unknown. Therefore, this function skips all maximum
- *  possible size of VPD (1kB).
+ * This function calculates the SW checksum that covers the whole 64kB shadow
+ * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ * of the VPD are customer specific and unknown, so this function skips the
+ * maximum possible size of the VPD (1kB).
  **/
 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
                                                    u16 *checksum)
@@ -311,12 +310,12 @@ i40e_calc_nvm_checksum_exit:
 }
 
 /**
- *  i40e_validate_nvm_checksum - Validate EEPROM checksum
- *  @hw: pointer to hardware structure
- *  @checksum: calculated checksum
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
  *
- *  Performs checksum calculation and validates the NVM SW checksum. If the
- *  caller does not need checksum, the value can be NULL.
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need checksum, the value can be NULL.
  **/
 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
                                                 u16 *checksum)
index ed91f93ede2bd6bc839676560e3da077a873a245..9cd57e617959b1622ec57f8c8f6e2146c9c800a6 100644 (file)
@@ -231,6 +231,13 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
                                                 u16 *checksum);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
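+/* translate a hardware Rx packet type index into its decoded description */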
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+       return i40e_ptype_lookup[ptype];
+}
+
 /* prototype for functions used for SW locks */
 
 /* i40e_common for VF drivers*/
index d4bb482b1a7f277e301b4f221030b242c9e2d187..a329aacb392f3400332047954eab72190ce42bae 100644 (file)
@@ -25,6 +25,7 @@
  ******************************************************************************/
 
 #include "i40e.h"
+#include "i40e_prototype.h"
 
 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                                u32 td_tag)
@@ -39,11 +40,12 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
- * @fdir_input: Packet data that will be filter parameters
+ * @fdir_data: Flow Director filter data to be programmed
+ * @raw_packet: the pre-allocated packet buffer for FDir
  * @pf: The pf pointer
  * @add: True for add/update, False for remove
  **/
-int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                             struct i40e_pf *pf, bool add)
 {
        struct i40e_filter_program_desc *fdir_desc;
@@ -68,8 +70,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        tx_ring = vsi->tx_rings[0];
        dev = tx_ring->dev;
 
-       dma = dma_map_single(dev, fdir_data->raw_packet,
-                            I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+       dma = dma_map_single(dev, raw_packet,
+                            I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                goto dma_fail;
 
@@ -132,14 +134,14 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
        /* record length, and DMA address */
-       dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+       dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
        dma_unmap_addr_set(tx_buf, dma, dma);
 
        tx_desc->buffer_addr = cpu_to_le64(dma);
        td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
        tx_desc->cmd_type_offset_bsz =
-               build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+               build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
        /* set the timestamp */
        tx_buf->time_stamp = jiffies;
@@ -161,26 +163,329 @@ dma_fail:
        return -1;
 }
 
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+                                  struct i40e_fdir_filter *fd_data,
+                                  u8 *raw_packet, bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct udphdr *udp;
+       struct iphdr *ip;
+       bool err = false;
+       int ret;
+       int i;
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
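+       /* start from a minimal Ethernet/IPv4/UDP header template, then patch
+        * in the addresses and ports from the user-supplied filter below
+        */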
+       memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+
+       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+       udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
+             + sizeof(struct iphdr));
+
+       ip->daddr = fd_data->dst_ip[0];
+       udp->dest = fd_data->dst_port;
+       ip->saddr = fd_data->src_ip[0];
+       udp->source = fd_data->src_port;
+
+       for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+            i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+               fd_data->pctype = i;
+               ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Filter command send failed for PCTYPE %d (ret = %d)\n",
+                                fd_data->pctype, ret);
+                       err = true;
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "Filter OK for PCTYPE %d (ret = %d)\n",
+                                fd_data->pctype, ret);
+               }
+       }
+
+       return err ? -EOPNOTSUPP : 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+                                  struct i40e_fdir_filter *fd_data,
+                                  u8 *raw_packet, bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct tcphdr *tcp;
+       struct iphdr *ip;
+       bool err = false;
+       int ret;
+       /* Dummy packet */
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
+               0x0, 0x72, 0, 0, 0, 0};
+
+       memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
+
+       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+       tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
+             + sizeof(struct iphdr));
+
+       ip->daddr = fd_data->dst_ip[0];
+       tcp->dest = fd_data->dst_port;
+       ip->saddr = fd_data->src_ip[0];
+       tcp->source = fd_data->src_port;
+
+       if (add) {
+               if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+                       pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               }
+       }
+
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
+                        fd_data->pctype, ret);
+               err = true;
+       } else {
+               dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+                        fd_data->pctype, ret);
+       }
+
+       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+       if (ret) {
+               dev_info(&pf->pdev->dev,
+                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
+                        fd_data->pctype, ret);
+               err = true;
+       } else {
+               dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+                         fd_data->pctype, ret);
+       }
+
+       return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+                                   struct i40e_fdir_filter *fd_data,
+                                   u8 *raw_packet, bool add)
+{
+       return -EOPNOTSUPP;
+}
+
+#define I40E_IP_DUMMY_PACKET_LEN 34
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @raw_packet: the pre-allocated packet buffer for FDir
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+                                 struct i40e_fdir_filter *fd_data,
+                                 u8 *raw_packet, bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct iphdr *ip;
+       bool err = false;
+       int ret;
+       int i;
+       static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
+               0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
+               0, 0, 0, 0};
+
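+       /* minimal Ethernet/IPv4 header template; the addresses come from the
+        * filter data and the protocol field is cleared below
+        */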
+       memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+       ip->saddr = fd_data->src_ip[0];
+       ip->daddr = fd_data->dst_ip[0];
+       ip->protocol = 0;
+
+       for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+            i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+               fd_data->pctype = i;
+               ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Filter command send failed for PCTYPE %d (ret = %d)\n",
+                                fd_data->pctype, ret);
+                       err = true;
+               } else {
+                       dev_info(&pf->pdev->dev,
+                                "Filter OK for PCTYPE %d (ret = %d)\n",
+                                fd_data->pctype, ret);
+               }
+       }
+
+       return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir - Build raw packets to add/del fdir filter
+ * @vsi: pointer to the targeted VSI
+ * @input: the Flow Director filter data to add or delete
+ * @add: true adds a filter, false removes it
+ *
+ **/
+int i40e_add_del_fdir(struct i40e_vsi *vsi,
+                     struct i40e_fdir_filter *input, bool add)
+{
+       struct i40e_pf *pf = vsi->back;
+       u8 *raw_packet;
+       int ret;
+
+       /* Populate the Flow Director that we have at the moment
+        * and allocate the raw packet buffer for the calling functions
+        */
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
+
+       switch (input->flow_type & ~FLOW_EXT) {
+       case TCP_V4_FLOW:
+               ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
+                                             add);
+               break;
+       case UDP_V4_FLOW:
+               ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
+                                             add);
+               break;
+       case SCTP_V4_FLOW:
+               ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
+                                              add);
+               break;
+       case IPV4_FLOW:
+               ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
+                                            add);
+               break;
+       case IP_USER_FLOW:
+               switch (input->ip4_proto) {
+               case IPPROTO_TCP:
+                       ret = i40e_add_del_fdir_tcpv4(vsi, input,
+                                                     raw_packet, add);
+                       break;
+               case IPPROTO_UDP:
+                       ret = i40e_add_del_fdir_udpv4(vsi, input,
+                                                     raw_packet, add);
+                       break;
+               case IPPROTO_SCTP:
+                       ret = i40e_add_del_fdir_sctpv4(vsi, input,
+                                                      raw_packet, add);
+                       break;
+               default:
+                       ret = i40e_add_del_fdir_ipv4(vsi, input,
+                                                    raw_packet, add);
+                       break;
+               }
+               break;
+       default:
+               dev_info(&pf->pdev->dev, "Unsupported flow type %d\n",
+                        input->flow_type);
+               ret = -EINVAL;
+       }
+
+       kfree(raw_packet);
+       return ret;
+}
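
For reference, an annotated view of the 34-byte IPv4 dummy packet template that i40e_add_del_fdir_ipv4() above copies into raw_packet (illustrative sketch, not part of the patch; the addresses and the protocol byte are subsequently overwritten from fd_data):

/* Byte values copied from the static template above; field names are
 * plain Ethernet II / IPv4.
 */
static const u8 example_ipv4_dummy[34] = {
	0, 0, 0, 0, 0, 0,	/* destination MAC (don't care)            */
	0, 0, 0, 0, 0, 0,	/* source MAC (don't care)                 */
	0x08, 0,		/* ethertype 0x0800 = IPv4                 */
	0x45, 0,		/* version 4, IHL 5; TOS 0                 */
	0, 0x14,		/* total length 20, header only            */
	0, 0, 0x40, 0,		/* id 0; DF set, fragment offset 0         */
	0x40, 0x10,		/* TTL 64; protocol, overwritten by helper */
	0, 0,			/* header checksum left zero               */
	0, 0, 0, 0,		/* source IP, filled from fd_data          */
	0, 0, 0, 0		/* destination IP, filled from fd_data     */
};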
+
 /**
  * i40e_fd_handle_status - check the Programming Status for FD
  * @rx_ring: the Rx ring for this descriptor
- * @qw: the descriptor data
+ * @rx_desc: the Rx descriptor for the programming status, not a packet descriptor.
  * @prog_id: the id originally used for programming
  *
  * This is used to verify if the FD programming or invalidation
  * requested by SW to the HW is successful or not and take actions accordingly.
  **/
-static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
+                                 union i40e_rx_desc *rx_desc, u8 prog_id)
 {
-       struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+       struct i40e_pf *pf = rx_ring->vsi->back;
+       struct pci_dev *pdev = pf->pdev;
+       u32 fcnt_prog, fcnt_avail;
        u32 error;
+       u64 qw;
 
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
                I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-       /* for now just print the Status */
-       dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
-                prog_id, error);
+       if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+               dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
+                        rx_desc->wb.qword0.hi_dword.fd_id);
+
+               /* filter programming failed most likely due to table full */
+               fcnt_prog = i40e_get_current_fd_count(pf);
+               fcnt_avail = pf->hw.fdir_shared_filter_count +
+                                                      pf->fdir_pf_filter_count;
+
+               /* If ATR is running, fcnt_prog can change quickly; when we
+                * are very close to full it makes sense to disable FD ATR/SB
+                * and re-enable them once there is room again.
+                */
+               if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
+                       /* Turn off ATR first */
+                       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
+                               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                               dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
+                               pf->auto_disable_flags |=
+                                                      I40E_FLAG_FD_ATR_ENABLED;
+                               pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+                       } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+                               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                               dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+                               pf->auto_disable_flags |=
+                                                       I40E_FLAG_FD_SB_ENABLED;
+                               pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
+                       }
+               } else {
+                       dev_info(&pdev->dev, "FD filter programming error\n");
+               }
+       } else if (error ==
+                         (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
+                                rx_desc->wb.qword0.hi_dword.fd_id);
+       }
 }
 
 /**
@@ -314,6 +619,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
        return ret;
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
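
As background for the head write-back scheme used by i40e_get_head() and the reworked i40e_clean_tx_irq() below, a minimal sketch of the ring memory layout it relies on (illustrative names and a fixed count; the real buffer is the dma_alloc_coherent() allocation that this patch grows by sizeof(u32)):

/* Illustrative layout only: count descriptors followed by one u32.
 * The hardware writes the current head index into the trailing u32
 * (head_wb_ena/head_wb_addr in the Tx queue context), so the cleanup
 * path can compare indices instead of testing the DD bit of every
 * descriptor.
 */
struct example_tx_ring_mem {
	struct i40e_tx_desc desc[512];	/* tx_ring->count descriptors */
	u32 head_wb;			/* head write-back location   */
};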
+
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
  * @tx_ring:  tx ring to clean
@@ -325,6 +644,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 {
        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf;
+       struct i40e_tx_desc *tx_head;
        struct i40e_tx_desc *tx_desc;
        unsigned int total_packets = 0;
        unsigned int total_bytes = 0;
@@ -333,6 +653,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_desc = I40E_TX_DESC(tx_ring, i);
        i -= tx_ring->count;
 
+       tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
        do {
                struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 
@@ -343,9 +665,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();
 
-               /* if the descriptor isn't done, no work yet to do */
-               if (!(eop_desc->cmd_type_offset_bsz &
-                     cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+               /* we have caught up to head, no work left to do */
+               if (tx_head == tx_desc)
                        break;
 
                /* clear next_to_watch to prevent false hangs */
@@ -577,7 +898,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
                  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
        if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
-               i40e_fd_handle_status(rx_ring, qw, id);
+               i40e_fd_handle_status(rx_ring, rx_desc, id);
 }
 
 /**
@@ -601,6 +922,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+       /* add a u32 for the head writeback location; the 4K alignment
+        * below guarantees this is at least one cache line in size
+        */
+       tx_ring->size += sizeof(u32);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
@@ -892,7 +1217,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
              rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
-       /* likely incorrect csum if alternate IP extention headers found */
+       /* likely incorrect csum if alternate IP extension headers found */
        if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                return;
 
@@ -955,6 +1280,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
                return 0;
 }
 
+/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+       if (!decoded.known)
+               return PKT_HASH_TYPE_NONE;
+
+       if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+           decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+               return PKT_HASH_TYPE_L4;
+       else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+                decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+               return PKT_HASH_TYPE_L3;
+       else
+               return PKT_HASH_TYPE_L2;
+}
+
 /**
  * i40e_clean_rx_irq - Reclaim resources after receive completes
  * @rx_ring:  rx ring to clean
@@ -972,8 +1320,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        u16 i = rx_ring->next_to_clean;
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
+       u8 rx_ptype;
        u64 qword;
-       u16 rx_ptype;
+
+       if (budget <= 0)
+               return 0;
 
        rx_desc = I40E_RX_DESC(rx_ring, i);
        qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -1087,7 +1438,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        goto next_desc;
                }
 
-               skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+                            i40e_ptype_to_hash(rx_ptype));
                if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
                        i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
                                           I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1246,8 +1598,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!tx_ring->atr_sample_rate)
                return;
 
-       tx_ring->atr_count++;
-
        /* snag network header to get L4 type and address */
        hdr.network = skb_network_header(skb);
 
@@ -1269,8 +1619,17 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        th = (struct tcphdr *)(hdr.network + hlen);
 
-       /* sample on all syn/fin packets or once every atr sample rate */
-       if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+       /* Due to lack of space, no more new filters can be programmed */
+       if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               return;
+
+       tx_ring->atr_count++;
+
+       /* sample on all syn/fin/rst packets or once every atr sample rate */
+       if (!th->fin &&
+           !th->syn &&
+           !th->rst &&
+           (tx_ring->atr_count < tx_ring->atr_sample_rate))
                return;
 
        tx_ring->atr_count = 0;
@@ -1294,7 +1653,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
-       dtype_cmd |= th->fin ?
+       dtype_cmd |= (th->fin || th->rst) ?
                     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
                      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
                     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
@@ -1596,7 +1955,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        struct i40e_tx_context_desc *context_desc;
        int i = tx_ring->next_to_use;
 
-       if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+       if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+           !cd_tunneling && !cd_l2tag2)
                return;
 
        /* grab the next descriptor */
@@ -1707,9 +2067,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                tx_bi = &tx_ring->tx_bi[i];
        }
 
-       tx_desc->cmd_type_offset_bsz =
-               build_ctob(td_cmd, td_offset, size, td_tag) |
-               cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+       /* Set the RS bit only on the last descriptor of packets that cross
+        * or end on a 4-descriptor write-back stride boundary (WB_STRIDE aka
+        * 0x3, i.e. one 64B cacheline); other packets get EOP alone.
+        */
+#define WB_STRIDE 0x3
+       if (((i & WB_STRIDE) != WB_STRIDE) &&
+           (first <= &tx_ring->tx_bi[i]) &&
+           (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+               tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+                                        I40E_TXD_QW1_CMD_SHIFT);
+       } else {
+               tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)I40E_TXD_CMD <<
+                                        I40E_TXD_QW1_CMD_SHIFT);
+       }
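
A small worked example of the stride rule above, simplified to ring indices and ignoring wrap (illustrative only; WB_STRIDE = 0x3, so descriptors group in fours):

/* True when the packet occupying descriptors [first_idx, last_idx]
 * stays inside one 4-descriptor write-back group and does not end on
 * the group boundary, i.e. the case where only EOP is set above.
 * Examples: first 4, last 6 -> true  (EOP only);
 *           first 6, last 9 -> false (crosses a group, EOP|RS);
 *           first 4, last 7 -> false (ends on the boundary, EOP|RS).
 */
static bool example_eop_only(u16 first_idx, u16 last_idx)
{
	return ((last_idx & 0x3) != 0x3) &&
	       ((first_idx & ~0x3) == (last_idx & ~0x3));
}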
 
        netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                 tx_ring->queue_index),
@@ -1812,7 +2186,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 
        /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-        *       + 2 desc gap to keep tail from touching head,
+        *       + 4 desc gap to avoid the cache line where head is,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
@@ -1823,7 +2197,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
        count += skb_shinfo(skb)->nr_frags;
 #endif
        count += TXD_USE_COUNT(skb_headlen(skb));
-       if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
index 181a825d3160dc377d4b909821a15959b3c49fe2..71a968fe557f33e12f6feb81d416f4f5a55946fb 100644 (file)
@@ -91,6 +91,7 @@ enum i40e_debug_mask {
        I40E_DEBUG_FLOW                 = 0x00000200,
        I40E_DEBUG_DCB                  = 0x00000400,
        I40E_DEBUG_DIAG                 = 0x00000800,
+       I40E_DEBUG_FD                   = 0x00001000,
 
        I40E_DEBUG_AQ_MESSAGE           = 0x01000000,
        I40E_DEBUG_AQ_DESCRIPTOR        = 0x02000000,
@@ -458,6 +459,10 @@ union i40e_32byte_rx_desc {
                        union {
                                __le32 rss; /* RSS Hash */
                                __le32 fcoe_param; /* FCoE DDP Context id */
+                               /* Flow director filter id in case of
+                                * Programming status desc WB
+                                */
+                               __le32 fd_id;
                        } hi_dword;
                } qword0;
                struct {
@@ -698,7 +703,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
 enum i40e_rx_prog_status_desc_error_bits {
        /* Note: These are predefined bit offsets */
        I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT      = 0,
-       I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT      = 1,
+       I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT      = 1,
        I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT    = 2,
        I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT    = 3
 };
@@ -1010,6 +1015,11 @@ struct i40e_hw_port_stats {
        u64 tx_size_big;                /* ptc9522 */
        u64 mac_short_packet_dropped;   /* mspdc */
        u64 checksum_error;             /* xec */
+       /* EEE LPI */
+       bool tx_lpi_status;
+       bool rx_lpi_status;
+       u64 tx_lpi_count;               /* etlpic */
+       u64 rx_lpi_count;               /* erlpic */
 };
 
 /* Checksum and Shadow RAM pointers */
index b9d1c1c8ca5a69f0d4d801f3cba0e4bc960ab46a..02c11a7f7d29e80c533b48894e5ee0b5619a9c91 100644 (file)
@@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
 {
        struct i40e_pf *pf = vf->pf;
 
-       return vector_id <= pf->hw.func_caps.num_msix_vectors_vf;
+       return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
 }
 
 /***********************vf resource mgmt routines*****************/
@@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
        else
                reg_idx = I40E_VPINT_LNKLSTN(
-                                          (pf->hw.func_caps.num_msix_vectors_vf
-                                             * vf->vf_id) + (vector_id - 1));
+                    ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
+                    (vector_id - 1));
 
        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
                /* Special case - No queues mapped on this vector */
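
The register index arithmetic above is easier to see with numbers; a hypothetical helper mirroring it (vector 0 of every VF goes through LNKLST0, so each VF only consumes num_msix_vectors_vf - 1 LNKLSTN slots):

/* Illustrative only, mirrors the LNKLSTN index computation above. */
static u32 example_lnklstn_index(u16 num_msix_vectors_vf, u16 vf_id,
				 u16 vector_id)
{
	/* e.g. num_msix_vectors_vf = 5, vf_id = 2, vector_id = 3:
	 *      (5 - 1) * 2 + (3 - 1) = 10
	 */
	return ((num_msix_vectors_vf - 1) * vf_id) + (vector_id - 1);
}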
@@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
        tx_ctx.qlen = info->ring_len;
        tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
        tx_ctx.rdylist_act = 0;
+       tx_ctx.head_wb_ena = 1;
+       tx_ctx.head_wb_addr = info->dma_ring_addr +
+                             (info->ring_len * sizeof(struct i40e_tx_desc));
 
        /* clear the context in the HMC */
        ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -408,18 +411,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                                 "Could not allocate VF broadcast filter\n");
        }
 
-       if (!f) {
-               dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
-               ret = -ENOMEM;
-               goto error_alloc_vsi_res;
-       }
-
        /* program mac filter */
        ret = i40e_sync_vsi_filters(vsi);
-       if (ret) {
+       if (ret)
                dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
-               goto error_alloc_vsi_res;
-       }
 
 error_alloc_vsi_res:
        return ret;
@@ -514,7 +509,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
                vf->lan_vsi_index = 0;
                vf->lan_vsi_id = 0;
        }
-       msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1;
+       msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
        /* disable interrupts so the VF starts in a known state */
        for (i = 0; i < msix_vf; i++) {
                /* format is same for both registers */
@@ -679,9 +675,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
 complete_reset:
        /* reallocate vf resources to reset the VSI state */
        i40e_free_vf_res(vf);
-       mdelay(10);
        i40e_alloc_vf_res(vf);
        i40e_enable_vf_mappings(vf);
+       set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
 
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +843,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
  *
  * allocate vf resources
  **/
-static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
 {
        struct i40e_vf *vfs;
        int i, ret = 0;
@@ -855,16 +851,18 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
        /* Disable interrupt 0 so we don't try to handle the VFLR. */
        i40e_irq_dynamic_disable_icr0(pf);
 
-       ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
-       if (ret) {
-               dev_err(&pf->pdev->dev,
-                       "pci_enable_sriov failed with error %d!\n", ret);
-               pf->num_alloc_vfs = 0;
-               goto err_iov;
+       /* Check to see if we're just allocating resources for extant VFs */
+       if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+               ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+               if (ret) {
+                       dev_err(&pf->pdev->dev,
+                               "Failed to enable SR-IOV, error %d.\n", ret);
+                       pf->num_alloc_vfs = 0;
+                       goto err_iov;
+               }
        }
-
        /* allocate memory */
-       vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+       vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
        if (!vfs) {
                ret = -ENOMEM;
                goto err_alloc;
@@ -1776,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen)
 {
        struct i40e_hw *hw = &pf->hw;
-       int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+       unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
        struct i40e_vf *vf;
        int ret;
 
@@ -1873,7 +1871,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
                        /* clear the bit in GLGEN_VFLRSTAT */
                        wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
 
-                       i40e_reset_vf(vf, true);
+                       if (!test_bit(__I40E_DOWN, &pf->state))
+                               i40e_reset_vf(vf, true);
                }
        }
 
@@ -1924,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 void i40e_vc_notify_link_state(struct i40e_pf *pf)
 {
        struct i40e_virtchnl_pf_event pfe;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf = pf->vf;
+       struct i40e_link_status *ls = &pf->hw.phy.link_info;
+       int i;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
-       pfe.event_data.link_event.link_status =
-           pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
-       pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
-
-       i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
-                            (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+       for (i = 0; i < pf->num_alloc_vfs; i++) {
+               if (vf->link_forced) {
+                       pfe.event_data.link_event.link_status = vf->link_up;
+                       pfe.event_data.link_event.link_speed =
+                               (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
+               } else {
+                       pfe.event_data.link_event.link_status =
+                               ls->link_info & I40E_AQ_LINK_UP;
+                       pfe.event_data.link_event.link_speed = ls->link_speed;
+               }
+               i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+                                      0, (u8 *)&pfe, sizeof(pfe),
+                                      NULL);
+               vf++;
+       }
 }
 
 /**
@@ -2197,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
 error_param:
        return ret;
 }
+
+/**
+ * i40e_ndo_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @link: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_virtchnl_pf_event pfe;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_vf *vf;
+       int ret = 0;
+
+       /* validate the request */
+       if (vf_id >= pf->num_alloc_vfs) {
+               dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+               ret = -EINVAL;
+               goto error_out;
+       }
+
+       vf = &pf->vf[vf_id];
+
+       pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+       pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+
+       switch (link) {
+       case IFLA_VF_LINK_STATE_AUTO:
+               vf->link_forced = false;
+               pfe.event_data.link_event.link_status =
+                       pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+               pfe.event_data.link_event.link_speed =
+                       pf->hw.phy.link_info.link_speed;
+               break;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               vf->link_forced = true;
+               vf->link_up = true;
+               pfe.event_data.link_event.link_status = true;
+               pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+               break;
+       case IFLA_VF_LINK_STATE_DISABLE:
+               vf->link_forced = true;
+               vf->link_up = false;
+               pfe.event_data.link_event.link_status = false;
+               pfe.event_data.link_event.link_speed = 0;
+               break;
+       default:
+               ret = -EINVAL;
+               goto error_out;
+       }
+       /* Notify the VF of its new link state */
+       i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+                              0, (u8 *)&pfe, sizeof(pfe), NULL);
+
+error_out:
+       return ret;
+}
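
The new callback is presumably hooked into the PF's net_device_ops elsewhere in the series (not visible in this hunk); a sketch of what that hookup typically looks like, with the remaining callbacks omitted. From user space the three cases correspond to "ip link set dev <pf> vf <n> state auto|enable|disable".

/* Sketch only: the actual net_device_ops table lives in i40e_main.c
 * and is not part of this hunk.
 */
static const struct net_device_ops example_i40e_netdev_ops = {
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	/* ... remaining callbacks omitted ... */
};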
index cc1feee36e12b69e0ffaa300a4aa8509ba97bd4b..389c47f396d5261d708228f48d8e1bf8ed90e4c5 100644 (file)
@@ -98,10 +98,13 @@ struct i40e_vf {
 
        unsigned long vf_caps;  /* vf's adv. capabilities */
        unsigned long vf_states;        /* vf's runtime states */
+       bool link_forced;
+       bool link_up;           /* only valid if vf link is forced */
 };
 
 void i40e_free_vfs(struct i40e_pf *pf);
 int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
                           u32 v_retval, u8 *msg, u16 msglen);
 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
@@ -115,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
 int i40e_ndo_get_vf_config(struct net_device *netdev,
                           int vf_id, struct ifla_vf_info *ivi);
+int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+
 void i40e_vc_notify_link_state(struct i40e_pf *pf);
 void i40e_vc_notify_reset(struct i40e_pf *pf);
 
index f7cea1bca38d08303f20c5dca3d08470a2e3a03a..97662b6bd98a3e5badd0660bfcc9c932500ae124 100644 (file)
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                 2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                  3
 
-       __le32 tenant_id ;
+       __le32 tenant_id;
        u8     reserved[4];
        __le16 queue_number;
 #define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT                  0
index 7b13953b28c47a34af943a684b376dcb1d3ebf15..c688a0fc5c2965a2ddf42d935b1fabd6902ace23 100644 (file)
@@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
 }
 
 
+/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40evf_ptype_lookup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc.
+ * ELSE
+ *      Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
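
The stored-PTYPE-equals-index property mentioned above lends itself to a trivial sanity check; a sketch of one (not in the patch; the 'ptype' field name is an assumption based on how the I40E_PTT macros below fill the struct):

/* Illustrative self-check: every row's stored ptype must equal its
 * index, including the unused entries, otherwise a row is missing.
 */
static bool example_table_is_dense(void)
{
	int i;

	for (i = 0; i < 256; i++)
		if (i40evf_ptype_lookup[i].ptype != i)
			return false;
	return true;
}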
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+       {       PTYPE, \
+               1, \
+               I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+               I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+               I40E_RX_PTYPE_##OUTER_FRAG, \
+               I40E_RX_PTYPE_TUNNEL_##T, \
+               I40E_RX_PTYPE_TUNNEL_END_##TE, \
+               I40E_RX_PTYPE_##TEF, \
+               I40E_RX_PTYPE_INNER_PROT_##I, \
+               I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+               { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros makes the table fit but are terse */
+#define I40E_RX_PTYPE_NOF              I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG              I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS    I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
+       /* L2 Packet types */
+       I40E_PTT_UNUSED_ENTRY(0),
+       I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
+       I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT_UNUSED_ENTRY(4),
+       I40E_PTT_UNUSED_ENTRY(5),
+       I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT_UNUSED_ENTRY(8),
+       I40E_PTT_UNUSED_ENTRY(9),
+       I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+       I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+       I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+       /* Non Tunneled IPv4 */
+       I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(25),
+       I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
+       I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+       I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+       /* IPv4 --> IPv4 */
+       I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(32),
+       I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> IPv6 */
+       I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(39),
+       I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT */
+       I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> IPv4 */
+       I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(47),
+       I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> IPv6 */
+       I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(54),
+       I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> MAC */
+       I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+       I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(62),
+       I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+       I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(69),
+       I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv4 --> GRE/NAT --> MAC/VLAN */
+       I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+       /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+       I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(77),
+       I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+       I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(84),
+       I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+       /* Non Tunneled IPv6 */
+       I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+       I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+       I40E_PTT_UNUSED_ENTRY(91),
+       I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+       I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+       I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+       /* IPv6 --> IPv4 */
+       I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(98),
+       I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> IPv6 */
+       I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(105),
+       I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT */
+       I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> IPv4 */
+       I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(113),
+       I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> IPv6 */
+       I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(120),
+       I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC */
+       I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+       I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(128),
+       I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+       I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(135),
+       I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN */
+       I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+       I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+       I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+       I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(143),
+       I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+       I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+       I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+       /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+       I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+       I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+       I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+       I40E_PTT_UNUSED_ENTRY(150),
+       I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+       I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+       I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+       /* unused entries */
+       I40E_PTT_UNUSED_ENTRY(154),
+       I40E_PTT_UNUSED_ENTRY(155),
+       I40E_PTT_UNUSED_ENTRY(156),
+       I40E_PTT_UNUSED_ENTRY(157),
+       I40E_PTT_UNUSED_ENTRY(158),
+       I40E_PTT_UNUSED_ENTRY(159),
+
+       I40E_PTT_UNUSED_ENTRY(160),
+       I40E_PTT_UNUSED_ENTRY(161),
+       I40E_PTT_UNUSED_ENTRY(162),
+       I40E_PTT_UNUSED_ENTRY(163),
+       I40E_PTT_UNUSED_ENTRY(164),
+       I40E_PTT_UNUSED_ENTRY(165),
+       I40E_PTT_UNUSED_ENTRY(166),
+       I40E_PTT_UNUSED_ENTRY(167),
+       I40E_PTT_UNUSED_ENTRY(168),
+       I40E_PTT_UNUSED_ENTRY(169),
+
+       I40E_PTT_UNUSED_ENTRY(170),
+       I40E_PTT_UNUSED_ENTRY(171),
+       I40E_PTT_UNUSED_ENTRY(172),
+       I40E_PTT_UNUSED_ENTRY(173),
+       I40E_PTT_UNUSED_ENTRY(174),
+       I40E_PTT_UNUSED_ENTRY(175),
+       I40E_PTT_UNUSED_ENTRY(176),
+       I40E_PTT_UNUSED_ENTRY(177),
+       I40E_PTT_UNUSED_ENTRY(178),
+       I40E_PTT_UNUSED_ENTRY(179),
+
+       I40E_PTT_UNUSED_ENTRY(180),
+       I40E_PTT_UNUSED_ENTRY(181),
+       I40E_PTT_UNUSED_ENTRY(182),
+       I40E_PTT_UNUSED_ENTRY(183),
+       I40E_PTT_UNUSED_ENTRY(184),
+       I40E_PTT_UNUSED_ENTRY(185),
+       I40E_PTT_UNUSED_ENTRY(186),
+       I40E_PTT_UNUSED_ENTRY(187),
+       I40E_PTT_UNUSED_ENTRY(188),
+       I40E_PTT_UNUSED_ENTRY(189),
+
+       I40E_PTT_UNUSED_ENTRY(190),
+       I40E_PTT_UNUSED_ENTRY(191),
+       I40E_PTT_UNUSED_ENTRY(192),
+       I40E_PTT_UNUSED_ENTRY(193),
+       I40E_PTT_UNUSED_ENTRY(194),
+       I40E_PTT_UNUSED_ENTRY(195),
+       I40E_PTT_UNUSED_ENTRY(196),
+       I40E_PTT_UNUSED_ENTRY(197),
+       I40E_PTT_UNUSED_ENTRY(198),
+       I40E_PTT_UNUSED_ENTRY(199),
+
+       I40E_PTT_UNUSED_ENTRY(200),
+       I40E_PTT_UNUSED_ENTRY(201),
+       I40E_PTT_UNUSED_ENTRY(202),
+       I40E_PTT_UNUSED_ENTRY(203),
+       I40E_PTT_UNUSED_ENTRY(204),
+       I40E_PTT_UNUSED_ENTRY(205),
+       I40E_PTT_UNUSED_ENTRY(206),
+       I40E_PTT_UNUSED_ENTRY(207),
+       I40E_PTT_UNUSED_ENTRY(208),
+       I40E_PTT_UNUSED_ENTRY(209),
+
+       I40E_PTT_UNUSED_ENTRY(210),
+       I40E_PTT_UNUSED_ENTRY(211),
+       I40E_PTT_UNUSED_ENTRY(212),
+       I40E_PTT_UNUSED_ENTRY(213),
+       I40E_PTT_UNUSED_ENTRY(214),
+       I40E_PTT_UNUSED_ENTRY(215),
+       I40E_PTT_UNUSED_ENTRY(216),
+       I40E_PTT_UNUSED_ENTRY(217),
+       I40E_PTT_UNUSED_ENTRY(218),
+       I40E_PTT_UNUSED_ENTRY(219),
+
+       I40E_PTT_UNUSED_ENTRY(220),
+       I40E_PTT_UNUSED_ENTRY(221),
+       I40E_PTT_UNUSED_ENTRY(222),
+       I40E_PTT_UNUSED_ENTRY(223),
+       I40E_PTT_UNUSED_ENTRY(224),
+       I40E_PTT_UNUSED_ENTRY(225),
+       I40E_PTT_UNUSED_ENTRY(226),
+       I40E_PTT_UNUSED_ENTRY(227),
+       I40E_PTT_UNUSED_ENTRY(228),
+       I40E_PTT_UNUSED_ENTRY(229),
+
+       I40E_PTT_UNUSED_ENTRY(230),
+       I40E_PTT_UNUSED_ENTRY(231),
+       I40E_PTT_UNUSED_ENTRY(232),
+       I40E_PTT_UNUSED_ENTRY(233),
+       I40E_PTT_UNUSED_ENTRY(234),
+       I40E_PTT_UNUSED_ENTRY(235),
+       I40E_PTT_UNUSED_ENTRY(236),
+       I40E_PTT_UNUSED_ENTRY(237),
+       I40E_PTT_UNUSED_ENTRY(238),
+       I40E_PTT_UNUSED_ENTRY(239),
+
+       I40E_PTT_UNUSED_ENTRY(240),
+       I40E_PTT_UNUSED_ENTRY(241),
+       I40E_PTT_UNUSED_ENTRY(242),
+       I40E_PTT_UNUSED_ENTRY(243),
+       I40E_PTT_UNUSED_ENTRY(244),
+       I40E_PTT_UNUSED_ENTRY(245),
+       I40E_PTT_UNUSED_ENTRY(246),
+       I40E_PTT_UNUSED_ENTRY(247),
+       I40E_PTT_UNUSED_ENTRY(248),
+       I40E_PTT_UNUSED_ENTRY(249),
+
+       I40E_PTT_UNUSED_ENTRY(250),
+       I40E_PTT_UNUSED_ENTRY(251),
+       I40E_PTT_UNUSED_ENTRY(252),
+       I40E_PTT_UNUSED_ENTRY(253),
+       I40E_PTT_UNUSED_ENTRY(254),
+       I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
 /**
  * i40e_aq_send_msg_to_pf
  * @hw: pointer to the hardware structure
index 7841573a58c943304af1aa671699af1756fb61ea..97ab8c2b76f8f0e6cf1c6c485eaeafee0e42a5a1 100644 (file)
@@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
+extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
+
+static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+       return i40evf_ptype_lookup[ptype];
+}
+
 /* prototype for functions used for SW locks */
 
 /* i40e_common for VF drivers*/
index ffdb01d853dbdf3500524f335d352cb932b4af85..53be5f44d0158eca869fe0a9a818d3fa597bcd53 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
 #include <linux/prefetch.h>
 
 #include "i40evf.h"
+#include "i40e_prototype.h"
 
 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                                u32 td_tag)
@@ -168,6 +169,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
        return ret;
 }
 
+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring:  tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+       void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+       return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_clean_tx_irq - Reclaim resources after transmit completes
  * @tx_ring:  tx ring to clean
@@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 {
        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf;
+       struct i40e_tx_desc *tx_head;
        struct i40e_tx_desc *tx_desc;
        unsigned int total_packets = 0;
        unsigned int total_bytes = 0;
@@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_desc = I40E_TX_DESC(tx_ring, i);
        i -= tx_ring->count;
 
+       tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
        do {
                struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 
@@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();
 
-               /* if the descriptor isn't done, no work yet to do */
-               if (!(eop_desc->cmd_type_offset_bsz &
-                     cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+               /* we have caught up to head, no work left to do */
+               if (tx_head == tx_desc)
                        break;
 
                /* clear next_to_watch to prevent false hangs */
@@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
 
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+       /* add a u32 for the head writeback location; the 4K alignment
+        * below guarantees this is at least one cache line in size
+        */
+       tx_ring->size += sizeof(u32);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
@@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
              rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
-       /* likely incorrect csum if alternate IP extention headers found */
+       /* likely incorrect csum if alternate IP extension headers found */
        if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                return;
 
@@ -785,6 +806,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
                return 0;
 }
 
+/**
+ * i40e_ptype_to_hash - get a hash type
+ * @ptype: the ptype value from the descriptor
+ *
+ * Returns a hash type to be used by skb_set_hash
+ **/
+static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+{
+       struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
+
+       if (!decoded.known)
+               return PKT_HASH_TYPE_NONE;
+
+       if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+           decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
+               return PKT_HASH_TYPE_L4;
+       else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+                decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
+               return PKT_HASH_TYPE_L3;
+       else
+               return PKT_HASH_TYPE_L2;
+}
+
 /**
  * i40e_clean_rx_irq - Reclaim resources after receive completes
  * @rx_ring:  rx ring to clean
@@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        u16 i = rx_ring->next_to_clean;
        union i40e_rx_desc *rx_desc;
        u32 rx_error, rx_status;
+       u8 rx_ptype;
        u64 qword;
-       u16 rx_ptype;
 
        rx_desc = I40E_RX_DESC(rx_ring, i);
        qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
-                               >> I40E_RXD_QW1_STATUS_SHIFT;
+       rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+                   I40E_RXD_QW1_STATUS_SHIFT;
 
        while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
                union i40e_rx_desc *next_rxd;
@@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        goto next_desc;
                }
 
-               skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+               skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
+                            i40e_ptype_to_hash(rx_ptype));
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;
@@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        struct i40e_tx_context_desc *context_desc;
        int i = tx_ring->next_to_use;
 
-       if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+       if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
+           !cd_tunneling && !cd_l2tag2)
                return;
 
        /* grab the next descriptor */
@@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                tx_bi = &tx_ring->tx_bi[i];
        }
 
-       tx_desc->cmd_type_offset_bsz =
-               build_ctob(td_cmd, td_offset, size, td_tag) |
-               cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+       /* Set the RS bit only on the last descriptor of packets that cross
+        * or end on a 4-descriptor write-back stride boundary (WB_STRIDE aka
+        * 0x3, i.e. one 64B cacheline); other packets get EOP alone.
+        */
+#define WB_STRIDE 0x3
+       if (((i & WB_STRIDE) != WB_STRIDE) &&
+           (first <= &tx_ring->tx_bi[i]) &&
+           (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
+               tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
+                                        I40E_TXD_QW1_CMD_SHIFT);
+       } else {
+               tx_desc->cmd_type_offset_bsz =
+                       build_ctob(td_cmd, td_offset, size, td_tag) |
+                       cpu_to_le64((u64)I40E_TXD_CMD <<
+                                        I40E_TXD_QW1_CMD_SHIFT);
+       }
 
        netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                 tx_ring->queue_index),
@@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 
        /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
-        *       + 2 desc gap to keep tail from touching head,
+        *       + 4 desc gap to avoid the cache line where head is,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
@@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
        count += skb_shinfo(skb)->nr_frags;
 #endif
        count += TXD_USE_COUNT(skb_headlen(skb));
-       if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
index 3bffac06592f58557ac9028cf60fc12614c74a03..4673b3381eddaa1672edca1f60b85e89d33ab247 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
 struct i40e_hw;
 typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
 
-#define ETH_ALEN       6
-
 /* Data type manipulation macros. */
 
 #define I40E_DESC_UNUSED(R)    \
@@ -90,6 +88,7 @@ enum i40e_debug_mask {
        I40E_DEBUG_FLOW                 = 0x00000200,
        I40E_DEBUG_DCB                  = 0x00000400,
        I40E_DEBUG_DIAG                 = 0x00000800,
+       I40E_DEBUG_FD                   = 0x00001000,
 
        I40E_DEBUG_AQ_MESSAGE           = 0x01000000,
        I40E_DEBUG_AQ_DESCRIPTOR        = 0x02000000,
@@ -466,6 +465,10 @@ union i40e_32byte_rx_desc {
                        union {
                                __le32 rss; /* RSS Hash */
                                __le32 fcoe_param; /* FCoE DDP Context id */
+                               /* Flow director filter id in case of
+                                * Programming status desc WB
+                                */
+                               __le32 fd_id;
                        } hi_dword;
                } qword0;
                struct {
@@ -706,7 +709,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
 enum i40e_rx_prog_status_desc_error_bits {
        /* Note: These are predefined bit offsets */
        I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT      = 0,
-       I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT      = 1,
+       I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT      = 1,
        I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT    = 2,
        I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT    = 3
 };
@@ -1018,6 +1021,11 @@ struct i40e_hw_port_stats {
        u64 tx_size_big;                /* ptc9522 */
        u64 mac_short_packet_dropped;   /* mspdc */
        u64 checksum_error;             /* xec */
+       /* EEE LPI */
+       bool tx_lpi_status;
+       bool rx_lpi_status;
+       u64 tx_lpi_count;               /* etlpic */
+       u64 rx_lpi_count;               /* erlpic */
 };
 
 /* Checksum and Shadow RAM pointers */
index ff6529b288a137190e35fe3d86ea17ad1929db51..807807d6238738c0e111739e9dc96d1f2200d77a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
 #include <linux/ipv6.h>
 #include <net/ip6_checksum.h>
 #include <net/udp.h>
-#include <linux/sctp.h>
-
 
 #include "i40e_type.h"
 #include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
 /* Driver state. The order of these is important! */
 enum i40evf_state_t {
        __I40EVF_STARTUP,               /* driver loaded, probe complete */
-       __I40EVF_FAILED,                /* PF communication failed. Fatal. */
        __I40EVF_REMOVE,                /* driver is being unloaded */
        __I40EVF_INIT_VERSION_CHECK,    /* aq msg sent, awaiting reply */
        __I40EVF_INIT_GET_RESOURCES,    /* aq msg sent, awaiting reply */
        __I40EVF_INIT_SW,               /* got resources, setting up structs */
+       __I40EVF_RESETTING,             /* in reset */
        /* Below here, watchdog is running */
        __I40EVF_DOWN,                  /* ready, can be opened */
        __I40EVF_TESTING,               /* in ethtool self-test */
-       __I40EVF_RESETTING,             /* in reset */
        __I40EVF_RUNNING,               /* opened, working */
 };
 
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
 /* board specific private data structure */
 struct i40evf_adapter {
        struct timer_list watchdog_timer;
-       struct vlan_group *vlgrp;
        struct work_struct reset_task;
        struct work_struct adminq_task;
        struct delayed_work init_task;
        struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
        struct list_head vlan_filter_list;
-       char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
-
-       /* Interrupt Throttle Rate */
-       u32 itr_setting;
-       u16 eitr_low;
-       u16 eitr_high;
+       char misc_vector_name[IFNAMSIZ + 9];
 
        /* TX */
        struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
-       u64 restart_queue;
-       u64 hw_csum_tx_good;
-       u64 lsc_int;
-       u64 hw_tso_ctxt;
-       u64 hw_tso6_ctxt;
        u32 tx_timeout_count;
        struct list_head mac_filter_list;
-#ifdef DEBUG
-       bool detect_tx_hung;
-#endif /* DEBUG */
 
        /* RX */
        struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
-       int txd_count;
-       int rxd_count;
        u64 hw_csum_rx_error;
-       u64 hw_rx_no_dma_resources;
-       u64 hw_csum_rx_good;
-       u64 non_eop_descs;
        int num_msix_vectors;
        struct msix_entry *msix_entries;
 
-       u64 rx_hdr_split;
-
-       u32 init_state;
-       volatile unsigned long flags;
+       u32 flags;
 #define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
 #define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
 #define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
@@ -234,6 +209,9 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
 #define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
 #define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED              (u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING                (u32)(1 << 9)
+#define I40EVF_FLAG_RESET_NEEDED                 (u32)(1 << 10)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
@@ -251,21 +229,19 @@ struct i40evf_adapter {
 #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                (u32)(1 << 6)
 #define I40EVF_FLAG_AQ_MAP_VECTORS             (u32)(1 << 7)
 #define I40EVF_FLAG_AQ_HANDLE_RESET            (u32)(1 << 8)
+
        /* OS defined structs */
        struct net_device *netdev;
        struct pci_dev *pdev;
        struct net_device_stats net_stats;
 
-       /* structs defined in i40e_vf.h */
-       struct i40e_hw hw;
+       struct i40e_hw hw; /* defined in i40e_type.h */
 
        enum i40evf_state_t state;
        volatile unsigned long crit_section;
-       u64 tx_busy;
 
        struct work_struct watchdog_task;
        bool netdev_registered;
-       bool dev_closed;
        bool link_up;
        enum i40e_virtchnl_ops current_op;
        struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +252,6 @@ struct i40evf_adapter {
        u32 aq_wait_count;
 };
 
-struct i40evf_info {
-       enum i40e_mac_type      mac;
-       unsigned int            flags;
-};
-
 
 /* needed by i40evf_ethtool.c */
 extern char i40evf_driver_name[];
@@ -315,6 +286,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
 void i40evf_del_vlans(struct i40evf_adapter *adapter);
 void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
 void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
 void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                                enum i40e_virtchnl_ops v_opcode,
                                i40e_status v_retval, u8 *msg, u16 msglen);
index b0b1f4bf5ac08afb7c6cf62ac76419f36cfbd6c1..8b0db1ce179c5447ce83240098e6c076d6782ed6 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        u32 new_rx_count, new_tx_count;
+       int i;
 
        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
        new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
 
        /* if nothing to do return success */
-       if ((new_tx_count == adapter->txd_count) &&
-           (new_rx_count == adapter->rxd_count))
+       if ((new_tx_count == adapter->tx_rings[0]->count) &&
+           (new_rx_count == adapter->rx_rings[0]->count))
                return 0;
 
-       adapter->txd_count = new_tx_count;
-       adapter->rxd_count = new_rx_count;
+       for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+               adapter->tx_rings[0]->count = new_tx_count;
+               adapter->rx_rings[0]->count = new_rx_count;
+       }
 
        if (netif_running(netdev))
                i40evf_reinit_locked(adapter);
index f5caf441924370145b0fb3a5e409033893c40aff..51c84c19d2bee9d834615e967c1dc92ec082ee75 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710 X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.11"
+#define DRV_VERSION "0.9.16"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
-       "Copyright (c) 2013 Intel Corporation.";
+       "Copyright (c) 2013 - 2014 Intel Corporation.";
 
 /* i40evf_pci_tbl - PCI Device ID Table
  *
@@ -167,9 +167,11 @@ static void i40evf_tx_timeout(struct net_device *netdev)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
        adapter->tx_timeout_count++;
-
-       /* Do the reset outside of interrupt context */
-       schedule_work(&adapter->reset_task);
+       dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
+       if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+               adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+               schedule_work(&adapter->reset_task);
+       }
 }
 
 /**
@@ -211,6 +213,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
        int i;
        struct i40e_hw *hw = &adapter->hw;
 
+       if (!adapter->msix_entries)
+               return;
+
        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
                synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +516,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int err;
 
-       sprintf(adapter->name[0], "i40evf:mbx");
+       sprintf(adapter->misc_vector_name, "i40evf:mbx");
        err = request_irq(adapter->msix_entries[0].vector,
-                         &i40evf_msix_aq, 0, adapter->name[0], netdev);
+                         &i40evf_msix_aq, 0,
+                         adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
-                       "request_irq for msix_aq failed: %d\n", err);
+                       "request_irq for %s failed: %d\n",
+                       adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
@@ -963,16 +970,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct i40evf_mac_filter *f;
 
-       /* remove all MAC filters from the VSI */
+       /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
        }
-       adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-       /* disable receives */
-       adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
-       mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
-       msleep(20);
-
+       /* remove all VLAN filters */
+       list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+               f->remove = true;
+       }
+       if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+           adapter->state != __I40EVF_RESETTING) {
+               adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+               adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+               /* disable receives */
+               adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+               mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+               msleep(20);
+       }
        netif_tx_disable(netdev);
 
        netif_tx_stop_all_queues(netdev);
@@ -1124,8 +1138,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
         * than CPU's.  So let's be conservative and only ask for
         * (roughly) twice the number of vectors as there are CPU's.
         */
-       v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
-       v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1);
+       v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
+       v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
 
        /* A failure in MSI-X entry allocation isn't fatal, but it does
         * mean we disable MSI-X capabilities of the adapter.
@@ -1291,19 +1305,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
                                          watchdog_task);
        struct i40e_hw *hw = &adapter->hw;
 
-       if (adapter->state < __I40EVF_DOWN)
+       if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+               goto restart_watchdog;
+
+       if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+               dev_info(&adapter->pdev->dev, "Checking for redemption\n");
+               if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+                       /* A chance for redemption! */
+                       dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+                       adapter->state = __I40EVF_STARTUP;
+                       adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+                       schedule_delayed_work(&adapter->init_task, 10);
+                       clear_bit(__I40EVF_IN_CRITICAL_TASK,
+                                 &adapter->crit_section);
+                       /* Don't reschedule the watchdog, since we've restarted
+                        * the init task. When init_task contacts the PF and
+                        * gets everything set up again, it'll restart the
+                        * watchdog for us. Down, boy. Sit. Stay. Woof.
+                        */
+                       return;
+               }
+               adapter->aq_pending = 0;
+               adapter->aq_required = 0;
+               adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
                goto watchdog_done;
+       }
 
-       if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+       if ((adapter->state < __I40EVF_DOWN) ||
+           (adapter->flags & I40EVF_FLAG_RESET_PENDING))
                goto watchdog_done;
 
-       /* check for unannounced reset */
-       if ((adapter->state != __I40EVF_RESETTING) &&
+       /* check for reset */
+       if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
            (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
                adapter->state = __I40EVF_RESETTING;
+               adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+               dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
+               dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
                schedule_work(&adapter->reset_task);
-               dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
-                        __func__);
+               adapter->aq_pending = 0;
+               adapter->aq_required = 0;
+               adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
                goto watchdog_done;
        }
 
@@ -1358,16 +1400,25 @@ static void i40evf_watchdog_task(struct work_struct *work)
 
        i40evf_irq_enable(adapter, true);
        i40evf_fire_sw_int(adapter, 0xFF);
+
 watchdog_done:
+       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
        if (adapter->aq_required)
                mod_timer(&adapter->watchdog_timer,
                          jiffies + msecs_to_jiffies(20));
        else
                mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
-       clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
        schedule_work(&adapter->adminq_task);
 }
 
+static int next_queue(struct i40evf_adapter *adapter, int j)
+{
+       j += 1;
+
+       return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
+}
+
 /**
  * i40evf_configure_rss - Prepare for RSS if used
  * @adapter: board private structure
@@ -1398,19 +1449,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
        wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
        /* Populate the LUT with max no. of queues in round robin fashion */
-       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
-               if (j == adapter->vsi_res->num_queue_pairs)
-                       j = 0;
-               /* lut = 4-byte sliding window of 4 lut entries */
-               lut = (lut << 8) | (j &
-                        ((0x1 << 8) - 1));
-               /* On i = 3, we have 4 entries in lut; write to the register */
-               if ((i & 3) == 3)
-                       wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+       j = adapter->vsi_res->num_queue_pairs;
+       for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+               lut = next_queue(adapter, j);
+               lut |= next_queue(adapter, j) << 8;
+               lut |= next_queue(adapter, j) << 16;
+               lut |= next_queue(adapter, j) << 24;
+               wr32(hw, I40E_VFQF_HLUT(i), lut);
        }
        i40e_flush(hw);
 }
 
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
 /**
  * i40evf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
@@ -1421,8 +1472,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
  **/
 static void i40evf_reset_task(struct work_struct *work)
 {
-       struct i40evf_adapter *adapter =
-                       container_of(work, struct i40evf_adapter, reset_task);
+       struct i40evf_adapter *adapter = container_of(work,
+                                                     struct i40evf_adapter,
+                                                     reset_task);
        struct i40e_hw *hw = &adapter->hw;
        int i = 0, err;
        uint32_t rstat_val;
@@ -1431,21 +1483,61 @@ static void i40evf_reset_task(struct work_struct *work)
                                &adapter->crit_section))
                udelay(500);
 
-       /* wait until the reset is complete */
-       for (i = 0; i < 20; i++) {
+       if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
+               dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+               i40evf_request_reset(adapter);
+       }
+
+       /* poll until we see the reset actually happen */
+       for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                            I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-               if (rstat_val == I40E_VFR_COMPLETED)
+               if (rstat_val != I40E_VFR_VFACTIVE) {
+                       dev_info(&adapter->pdev->dev, "Reset now occurring\n");
                        break;
-               else
-                       mdelay(100);
+               } else {
+                       msleep(I40EVF_RESET_WAIT_MS);
+               }
+       }
+       if (i == I40EVF_RESET_WAIT_COUNT) {
+               dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+               adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+               goto continue_reset; /* act like the reset happened */
+       }
+
+       /* wait until the reset is complete and the PF is responding to us */
+       for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+               rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+                           I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+               if (rstat_val == I40E_VFR_VFACTIVE) {
+                       dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+                       break;
+               } else {
+                       msleep(I40EVF_RESET_WAIT_MS);
+               }
        }
-       if (i == 20) {
+       if (i == I40EVF_RESET_WAIT_COUNT) {
                /* reset never finished */
-               dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
-                       __func__, rstat_val);
-               /* carry on anyway */
+               dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+                       rstat_val);
+               adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+               if (netif_running(adapter->netdev))
+                       i40evf_close(adapter->netdev);
+
+               i40evf_free_misc_irq(adapter);
+               i40evf_reset_interrupt_capability(adapter);
+               i40evf_free_queues(adapter);
+               kfree(adapter->vf_res);
+               i40evf_shutdown_adminq(hw);
+               adapter->netdev->flags &= ~IFF_UP;
+               clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+               return; /* Do not attempt to reinit. It's dead, Jim. */
        }
+
+continue_reset:
+       adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
        i40evf_down(adapter);
        adapter->state = __I40EVF_RESETTING;
 
@@ -1505,6 +1597,9 @@ static void i40evf_adminq_task(struct work_struct *work)
        i40e_status ret;
        u16 pending;
 
+       if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+               return;
+
        event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
        event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
        if (!event.msg_buf) {
@@ -1636,6 +1731,10 @@ static int i40evf_open(struct net_device *netdev)
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        int err;
 
+       if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+               dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+               return -EIO;
+       }
        if (adapter->state != __I40EVF_DOWN)
                return -EBUSY;
 
@@ -1690,8 +1789,12 @@ static int i40evf_close(struct net_device *netdev)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
+       if (adapter->state <= __I40EVF_DOWN)
+               return 0;
+
        /* signal that we are down to the interrupt handler */
        adapter->state = __I40EVF_DOWN;
+
        set_bit(__I40E_DOWN, &adapter->vsi.state);
 
        i40evf_down(adapter);
@@ -1842,16 +1945,18 @@ static void i40evf_init_task(struct work_struct *work)
        switch (adapter->state) {
        case __I40EVF_STARTUP:
                /* driver loaded, probe complete */
+               adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+               adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
                err = i40e_set_mac_type(hw);
                if (err) {
-                       dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
-                               __func__, err);
+                       dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+                               err);
                goto err;
                }
                err = i40evf_check_reset_complete(hw);
                if (err) {
-                       dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
-                               __func__, err);
+                       dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+                               err);
                        goto err;
                }
                hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1966,13 @@ static void i40evf_init_task(struct work_struct *work)
 
                err = i40evf_init_adminq(hw);
                if (err) {
-                       dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
-                               __func__, err);
+                       dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+                               err);
                        goto err;
                }
                err = i40evf_send_api_ver(adapter);
                if (err) {
-                       dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
-                               __func__, err);
+                       dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
                        i40evf_shutdown_adminq(hw);
                        goto err;
                }
@@ -1876,19 +1980,21 @@ static void i40evf_init_task(struct work_struct *work)
                goto restart;
                break;
        case __I40EVF_INIT_VERSION_CHECK:
-               if (!i40evf_asq_done(hw))
+               if (!i40evf_asq_done(hw)) {
+                       dev_err(&pdev->dev, "Admin queue command never completed.\n");
                        goto err;
+               }
 
                /* aq msg sent, awaiting reply */
                err = i40evf_verify_api_ver(adapter);
                if (err) {
-                       dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+                       dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
                                err);
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
                if (err) {
-                       dev_err(&pdev->dev, "Unable send config request, error %d\n",
+                       dev_err(&pdev->dev, "Unable to send config request (%d)\n",
                                err);
                        goto err;
                }
@@ -1902,18 +2008,15 @@ static void i40evf_init_task(struct work_struct *work)
                                (I40E_MAX_VF_VSI *
                                 sizeof(struct i40e_virtchnl_vsi_resource));
                        adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
-                       if (!adapter->vf_res) {
-                               dev_err(&pdev->dev, "%s: unable to allocate memory\n",
-                                       __func__);
+                       if (!adapter->vf_res)
                                goto err;
-                       }
                }
                err = i40evf_get_vf_config(adapter);
                if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                        goto restart;
                if (err) {
-                       dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
-                               __func__, err);
+                       dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+                               err);
                        goto err_alloc;
                }
                adapter->state = __I40EVF_INIT_SW;
@@ -1927,25 +2030,23 @@ static void i40evf_init_task(struct work_struct *work)
                        adapter->vsi_res = &adapter->vf_res->vsi_res[i];
        }
        if (!adapter->vsi_res) {
-               dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+               dev_err(&pdev->dev, "No LAN VSI found\n");
                goto err_alloc;
        }
 
        adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
-       adapter->txd_count = I40EVF_DEFAULT_TXD;
-       adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
        netdev->netdev_ops = &i40evf_netdev_ops;
        i40evf_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
-
-       netdev->features |= NETIF_F_SG |
+       netdev->features |= NETIF_F_HIGHDMA |
+                           NETIF_F_SG |
                            NETIF_F_IP_CSUM |
                            NETIF_F_SCTP_CSUM |
                            NETIF_F_IPV6_CSUM |
                            NETIF_F_TSO |
                            NETIF_F_TSO6 |
+                           NETIF_F_RXCSUM |
                            NETIF_F_GRO;
 
        if (adapter->vf_res->vf_offload_flags
@@ -1956,11 +2057,13 @@ static void i40evf_init_task(struct work_struct *work)
                                    NETIF_F_HW_VLAN_CTAG_FILTER;
        }
 
-       /* The HW MAC address was set and/or determined in sw_init */
+       /* copy netdev features into list of user selectable features */
+       netdev->hw_features |= netdev->features;
+       netdev->hw_features &= ~NETIF_F_RXCSUM;
+
        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-               dev_info(&pdev->dev,
-                       "Invalid MAC address %pMAC, using random\n",
-                       adapter->hw.mac.addr);
+               dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+                        adapter->hw.mac.addr);
                random_ether_addr(adapter->hw.mac.addr);
        }
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2097,6 @@ static void i40evf_init_task(struct work_struct *work)
 
        netif_carrier_off(netdev);
 
-       strcpy(netdev->name, "eth%d");
-
        adapter->vsi.id = adapter->vsi_res->vsi_id;
        adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
        adapter->vsi.back = adapter;
@@ -2005,9 +2106,11 @@ static void i40evf_init_task(struct work_struct *work)
        adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
        adapter->vsi.netdev = adapter->netdev;
 
-       err = register_netdev(netdev);
-       if (err)
-               goto err_register;
+       if (!adapter->netdev_registered) {
+               err = register_netdev(netdev);
+               if (err)
+                       goto err_register;
+       }
 
        adapter->netdev_registered = true;
 
@@ -2031,7 +2134,6 @@ err_register:
        i40evf_free_misc_irq(adapter);
 err_sw_init:
        i40evf_reset_interrupt_capability(adapter);
-       adapter->state = __I40EVF_FAILED;
 err_alloc:
        kfree(adapter->vf_res);
        adapter->vf_res = NULL;
@@ -2039,9 +2141,7 @@ err:
        /* Things went into the weeds, so try again later */
        if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
                dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
-               if (hw->aq.asq.count)
-                       i40evf_shutdown_adminq(hw); /* ignore error */
-               adapter->state = __I40EVF_FAILED;
+               adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
                return; /* do not reschedule */
        }
        schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,25 +2184,18 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct net_device *netdev;
        struct i40evf_adapter *adapter = NULL;
        struct i40e_hw *hw = NULL;
-       int err, pci_using_dac;
+       int err;
 
        err = pci_enable_device(pdev);
        if (err)
                return err;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               pci_using_dac = true;
-               /* coherent mask for the same size will always succeed if
-                * dma_set_mask does
-                */
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-               pci_using_dac = false;
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-       } else {
-               dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
-                        __func__, err);
-               err = -EIO;
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err)
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err) {
+               dev_err(&pdev->dev,
+                       "DMA configuration failed: 0x%x\n", err);
                goto err_dma;
        }
 
@@ -2128,8 +2221,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
-       if (pci_using_dac)
-               netdev->features |= NETIF_F_HIGHDMA;
 
        adapter->netdev = netdev;
        adapter->pdev = pdev;
@@ -2271,6 +2362,7 @@ static void i40evf_remove(struct pci_dev *pdev)
        struct i40e_hw *hw = &adapter->hw;
 
        cancel_delayed_work_sync(&adapter->init_task);
+       cancel_work_sync(&adapter->reset_task);
 
        if (adapter->netdev_registered) {
                unregister_netdev(netdev);
@@ -2278,17 +2370,15 @@ static void i40evf_remove(struct pci_dev *pdev)
        }
        adapter->state = __I40EVF_REMOVE;
 
-       if (adapter->num_msix_vectors) {
+       if (adapter->msix_entries) {
                i40evf_misc_irq_disable(adapter);
-               del_timer_sync(&adapter->watchdog_timer);
-
-               flush_scheduled_work();
-
                i40evf_free_misc_irq(adapter);
-
                i40evf_reset_interrupt_capability(adapter);
        }
 
+       del_timer_sync(&adapter->watchdog_timer);
+       flush_scheduled_work();
+
        if (hw->aq.asq.count)
                i40evf_shutdown_adminq(hw);
 
index e6978d79e62bb691a0434b3935b58d870f98f2f4..e294f012647d801417af4ca1f68d0629cbaf08cc 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
        struct i40e_hw *hw = &adapter->hw;
        i40e_status err;
 
+       if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+               return 0; /* nothing to see here, move along */
+
        err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
        if (err)
                dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
                /* if the request failed, don't lock out others */
                adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 }
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+       /* Don't check CURRENT_OP - this is always higher priority */
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+       adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
 
 /**
  * i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                        }
                        break;
                case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
-                       adapter->state = __I40EVF_RESETTING;
-                       schedule_work(&adapter->reset_task);
-                       dev_info(&adapter->pdev->dev,
-                                "%s: hardware reset pending\n", __func__);
+                       dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+                       if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+                               adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+                               dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+                               schedule_work(&adapter->reset_task);
+                       }
                        break;
                default:
                        dev_err(&adapter->pdev->dev,
index f19700e285bb5f5397b3ace776453974f4e8a9d5..5bcb2de75933ecad8866b6988f6c5d4cf6ad34c8 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
 # more details.
 #
 # You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# this program; if not, see <http://www.gnu.org/licenses/>.
 #
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
index 06df6928f44c2942f3e9b51647d047d790a2d6af..fa36fe12e77502658cfe864780849d6f00e93c2e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -77,8 +76,6 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
 static const u16 e1000_82580_rxpbs_table[] =
        { 36, 72, 144, 1, 2, 4, 8, 16,
          35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
-       (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
 
 /**
  *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -2308,7 +2305,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
 {
        u16 ret_val = 0;
 
-       if (data < E1000_82580_RXPBS_TABLE_SIZE)
+       if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
                ret_val = e1000_82580_rxpbs_table[data];
 
        return ret_val;
@@ -2714,13 +2711,14 @@ static const u8 e1000_emc_therm_limit[4] = {
        E1000_EMC_DIODE3_THERM_LIMIT
 };
 
+#ifdef CONFIG_IGB_HWMON
 /**
  *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
  *  @hw: pointer to hardware structure
  *
  *  Updates the temperatures in mac.thermal_sensor_data
  **/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
 {
        s32 status = E1000_SUCCESS;
        u16 ets_offset;
@@ -2774,7 +2772,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
  *  Sets the thermal sensor thresholds according to the NVM map
  *  and save off the threshold and location values into mac.thermal_sensor_data
  **/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
 {
        s32 status = E1000_SUCCESS;
        u16 ets_offset;
@@ -2836,6 +2834,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
        return status;
 }
 
+#endif
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
        .init_hw              = igb_init_hw_82575,
        .check_for_link       = igb_check_for_link_82575,
index 8c2437722aad2b32fec48bfc3f17bdf71db5c164..09d78be72416563beeda5e3cb72a7338b2aa7060 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -231,6 +230,10 @@ struct e1000_adv_tx_context_desc {
 #define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
 #define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
 
+#define E1000_DVMOLR_HIDEVLAN  0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN   0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC    0x80000000 /* CRC stripping enable */
+
 #define E1000_VLVF_ARRAY_SIZE     32
 #define E1000_VLVF_VLANID_MASK    0x00000FFF
 #define E1000_VLVF_POOLSEL_SHIFT  12
@@ -266,8 +269,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
 s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
 s32 igb_set_eee_i350(struct e1000_hw *);
 s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
 
 #define E1000_I2C_THERMAL_SENSOR_ADDR  0xF8
 #define E1000_EMC_INTERNAL_DATA                0x00
index 0571b973be80d357bbbe4fec67c0fef0b3859546..b05bf925ac721982d8ded6d3aa647d64236890f4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
 
 /* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
 #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR  0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* SDP3 Data direction */
+
 /* Physical Func Reset Done Indication */
 #define E1000_CTRL_EXT_PFRSTD    0x00004000
 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
 #define E1000_CTRL_RST      0x04000000  /* Global reset */
 #define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
 #define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
 
 #define E1000_TIMINCA_16NS_SHIFT 24
 
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP  (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS      (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS      (1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0       (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1       (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0     (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1     (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ      (1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0    (1 << 0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1    (1 << 1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0   (1 << 2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0       (1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1   (1 << 5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1       (1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0    (1 << 8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0     (1 << 9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1    (1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1     (1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG      (1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE   (1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0    (0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1    (1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2    (2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3    (3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN   (1 << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0    (0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1    (1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2    (2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3    (3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN   (1 << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0  (0 << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1  (1 << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0  (2 << 6)  /* Freq clock  0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1  (3 << 6)  /* Freq clock  1 is output on SDP0. */
+#define TS_SDP0_EN       (1 << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0  (0 << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1  (1 << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0  (2 << 9)  /* Freq clock  0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1  (3 << 9)  /* Freq clock  1 is output on SDP1. */
+#define TS_SDP1_EN       (1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0  (0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1  (1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0  (2 << 12) /* Freq clock  0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1  (3 << 12) /* Freq clock  1 is output on SDP2. */
+#define TS_SDP2_EN       (1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0  (0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1  (1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0  (2 << 15) /* Freq clock  0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1  (3 << 15) /* Freq clock  1 is output on SDP3. */
+#define TS_SDP3_EN       (1 << 17) /* SDP3 is assigned to Tsync. */
 
 #define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
 #define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
index ab99e2b582a85c7b445d54ec85f867c431cf1e29..10741d170f2ddad46b2fad14d362998bdc19d639 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index 0c0393316a3a4eb0b3784e72003cf6367bba8061..db963397cc27f42fd15829ec6dc540e19af5f562 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -35,6 +34,8 @@
 #include "e1000_hw.h"
 #include "e1000_i210.h"
 
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
 /**
  * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
  *  @hw: pointer to the HW structure
@@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
  *  Return successful if access grant bit set, else clear the request for
  *  EEPROM access and return -E1000_ERR_NVM (-1).
  **/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
 {
        return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
 }
@@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
  *  then release the semaphores acquired.
  **/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void igb_release_nvm_i210(struct e1000_hw *hw)
 {
        igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
 }
@@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
  *  Uses necessary synchronization semaphores.
  **/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
-                            u16 *data)
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                                 u16 *data)
 {
        s32 status = E1000_SUCCESS;
        u16 i, count;
@@ -306,8 +307,8 @@ out:
  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
  *  partially written.
  **/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
-                             u16 *data)
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                                  u16 *data)
 {
        s32 status = E1000_SUCCESS;
        u16 i, count;
@@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
  **/
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
 {
        s32 status = E1000_SUCCESS;
        s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
@@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
  *  value to the EEPROM. Next commit EEPROM data onto the Flash.
  **/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
 {
        s32 ret_val = E1000_SUCCESS;
        u16 checksum = 0;
@@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
  *  @hw: pointer to the HW structure
  *
  **/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
 {
        s32 ret_val = E1000_SUCCESS;
        u32 flup;
index 2d913716573a29a830610de841164e30e505cdfb..907fe99a9813130e45a3dddf0d5d48c6dfdc492d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
 
-s32 igb_update_flash_i210(struct e1000_hw *hw);
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
-                           u16 *data);
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
-                          u16 *data);
 s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
 void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-void igb_release_nvm_i210(struct e1000_hw *hw);
 s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
 s32 igb_read_invm_version(struct e1000_hw *hw,
                          struct e1000_fw_version *invm_ver);
index 298f0ed50670c0388a53e82d3509beca6aaf351e..5910a932ea7c92cb67223a7c900f7c3b3e36a990 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index e4cbe8ef67b3094b32b73cbccda9be5673243d1f..99299ba8ee3a2def53ab2e6fcc8c5c039aca0265 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index dac1447fabf74620e562ebf38feb4f367db2ec4f..d5b121771c313716543b16f5a8464866afd4f8e2 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index de9bba41acf3c01b2f744e6cb30511ca97a514ac..f52f5515e5a8a8aedcc1568f90fdf1b986b2947e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index a7db7f3db914daafb957d11372b885614152020c..9abf82919c65535d7b3fd7f7d7200f80fffd1f0a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index 433b7419cb98ad15bb3da0532fb120ff25ff4596..5b101170b17e4bbc9af310c8aacd5e0b891344a0 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index ad2b74d95138c1542bb78a55b7d620e5695f2abb..4009bbab7407d21945e7c1af20df5deedba564a3 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -393,77 +392,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
        return 0;
 }
 
-/**
- *  e1000_write_sfp_data_byte - Writes SFP module data.
- *  @hw: pointer to the HW structure
- *  @offset: byte location offset to write to
- *  @data: data to write
- *
- *  Writes one byte to SFP module data stored
- *  in SFP resided EEPROM memory or SFP diagnostic area.
- *  Function should be called with
- *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- *  E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- *  access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
-       u32 i = 0;
-       u32 i2ccmd = 0;
-       u32 data_local = 0;
-
-       if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
-               hw_dbg("I2CCMD command address exceeds upper limit\n");
-               return -E1000_ERR_PHY;
-       }
-       /* The programming interface is 16 bits wide
-        * so we need to read the whole word first
-        * then update appropriate byte lane and write
-        * the updated word back.
-        */
-       /* Set up Op-code, EEPROM Address,in the I2CCMD
-        * register. The MAC will take care of interfacing
-        * with an EEPROM to write the data given.
-        */
-       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
-                 E1000_I2CCMD_OPCODE_READ);
-       /* Set a command to read single word */
-       wr32(E1000_I2CCMD, i2ccmd);
-       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
-               udelay(50);
-               /* Poll the ready bit to see if lastly
-                * launched I2C operation completed
-                */
-               i2ccmd = rd32(E1000_I2CCMD);
-               if (i2ccmd & E1000_I2CCMD_READY) {
-                       /* Check if this is READ or WRITE phase */
-                       if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
-                           E1000_I2CCMD_OPCODE_READ) {
-                               /* Write the selected byte
-                                * lane and update whole word
-                                */
-                               data_local = i2ccmd & 0xFF00;
-                               data_local |= data;
-                               i2ccmd = ((offset <<
-                                       E1000_I2CCMD_REG_ADDR_SHIFT) |
-                                       E1000_I2CCMD_OPCODE_WRITE | data_local);
-                               wr32(E1000_I2CCMD, i2ccmd);
-                       } else {
-                               break;
-                       }
-               }
-       }
-       if (!(i2ccmd & E1000_I2CCMD_READY)) {
-               hw_dbg("I2CCMD Write did not complete\n");
-               return -E1000_ERR_PHY;
-       }
-       if (i2ccmd & E1000_I2CCMD_ERROR) {
-               hw_dbg("I2CCMD Error bit set\n");
-               return -E1000_ERR_PHY;
-       }
-       return 0;
-}
-
 /**
  *  igb_read_phy_reg_igp - Read igp PHY register
  *  @hw: pointer to the HW structure
index 6a0873f2095a49affef31c68ced1dd0d785aa265..4c2c36c46a7398d217c1418b3966b4cde5813812 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -70,7 +69,6 @@ s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32  e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
 s32  igb_copper_link_setup_82580(struct e1000_hw *hw);
 s32  igb_get_phy_info_82580(struct e1000_hw *hw);
 s32  igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
index 82632c6c53afcb37239f2898eea9a01f5f95edfd..bdb246e848e13bb5e569f279336dbb5a2c5bfe86 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -41,6 +40,7 @@
 #define E1000_FCT      0x00030  /* Flow Control Type - RW */
 #define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
 #define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_TSSDP    0x0003C  /* Time Sync SDP Configuration Register - RW */
 #define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
 #define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
 #define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
 #define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
 #define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
 #define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_TRGTTIML0  0x0B644 /* Target Time Register 0 Low  - RW */
+#define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
+#define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
+#define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
+#define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
+#define E1000_AUXSTMPH1  0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
 #define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
 #define E1000_TSICR      0x0B66C /* Interrupt Cause Register */
 #define E1000_TSIM       0x0B674 /* Interrupt Mask Register */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
 #define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n)       (0x0C038 + (64 * (_n)))
 #define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
                                                        * Filter - RW */
 #define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
 
-#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
-#define rd32(reg) (readl(hw->hw_addr + reg))
+struct e1000_hw;
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+       u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
+       if (!E1000_REMOVED(hw_addr)) \
+               writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igb_rd32(hw, reg))
+
 #define wrfl() ((void)rd32(E1000_STATUS))
 
 #define array_wr32(reg, offset, value) \
-       (writel(value, hw->hw_addr + reg + ((offset) << 2)))
+       wr32((reg) + ((offset) << 2), (value))
+
 #define array_rd32(reg, offset) \
        (readl(hw->hw_addr + reg + ((offset) << 2)))
 
 #define E1000_INVM_DATA_REG(_n)        (0x12120 + 4*(_n))
 #define E1000_INVM_SIZE                64 /* Number of INVM Data Registers */
 
+#define E1000_REMOVED(h) unlikely(!(h))
+
 #endif
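
For context, the new wr32/rd32 definitions above implement a surprise-removal guard: once hw->hw_addr has been cleared, writes are silently dropped and reads return all F's, and igb_rd32() (added in igb_main.c further below) clears the mapping when an all-F's read indicates the PCIe link is gone. A minimal standalone sketch of that pattern, using illustrative fake_* names rather than the driver's real MMIO helpers:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins for the MMIO helpers; a real driver uses readl()/writel(). */
static uint32_t fake_mmio[0x100];

struct fake_hw {
	volatile uint32_t *hw_addr;	/* NULL once the device is detected as removed */
};

#define FAKE_REMOVED(addr) ((addr) == NULL)

static uint32_t fake_rd32(struct fake_hw *hw, size_t reg)
{
	volatile uint32_t *hw_addr = hw->hw_addr;
	uint32_t value = 0;

	if (FAKE_REMOVED(hw_addr))
		return ~value;			/* detached: reads return all F's */

	value = hw_addr[reg];
	/* all F's from a non-zero register: confirm against register 0, then mark removed */
	if (!(~value) && (!reg || !(~hw_addr[0]))) {
		hw->hw_addr = NULL;
		fprintf(stderr, "link lost, device treated as detached\n");
	}
	return value;
}

static void fake_wr32(struct fake_hw *hw, size_t reg, uint32_t val)
{
	volatile uint32_t *hw_addr = hw->hw_addr;

	if (!FAKE_REMOVED(hw_addr))		/* writes to a detached device are dropped */
		hw_addr[reg] = val;
}

int main(void)
{
	struct fake_hw hw = { .hw_addr = fake_mmio };

	fake_wr32(&hw, 4, 0x1234);
	printf("reg4 = 0x%x\n", fake_rd32(&hw, 4));

	hw.hw_addr = NULL;			/* simulate surprise removal */
	fake_wr32(&hw, 4, 0x5678);		/* silently ignored */
	printf("reg4 after removal = 0x%x\n", fake_rd32(&hw, 4));	/* 0xffffffff */
	return 0;
}
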
index ccf472f073ddde8865b1733ce1c88aae53225801..7fbe1e925143efb7b4af4b9f06c29fa3027d2e7e 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -42,6 +41,7 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/pci.h>
+#include <linux/mdio.h>
 
 struct igb_adapter;
 
@@ -434,6 +434,7 @@ struct igb_adapter {
        struct delayed_work ptp_overflow_work;
        struct work_struct ptp_tx_work;
        struct sk_buff *ptp_tx_skb;
+       struct hwtstamp_config tstamp_config;
        unsigned long ptp_tx_start;
        unsigned long last_rx_ptp_check;
        spinlock_t tmreg_lock;
@@ -456,6 +457,7 @@ struct igb_adapter {
        unsigned long link_check_timeout;
        int copper_tries;
        struct e1000_info ei;
+       u16 eee_advert;
 };
 
 #define IGB_FLAG_HAS_MSI               (1 << 0)
@@ -472,6 +474,7 @@ struct igb_adapter {
 #define IGB_FLAG_MAS_CAPABLE           (1 << 11)
 #define IGB_FLAG_MAS_ENABLE            (1 << 12)
 #define IGB_FLAG_HAS_MSIX              (1 << 13)
+#define IGB_FLAG_EEE                   (1 << 14)
 
 /* Media Auto Sense */
 #define IGB_MAS_ENABLE_0               0X0001
@@ -489,7 +492,8 @@ struct igb_adapter {
 enum e1000_state_t {
        __IGB_TESTING,
        __IGB_RESETTING,
-       __IGB_DOWN
+       __IGB_DOWN,
+       __IGB_PTP_TX_IN_PROGRESS,
 };
 
 enum igb_boards {
@@ -525,9 +529,7 @@ void igb_set_fw_version(struct igb_adapter *);
 void igb_ptp_init(struct igb_adapter *adapter);
 void igb_ptp_stop(struct igb_adapter *adapter);
 void igb_ptp_reset(struct igb_adapter *adapter);
-void igb_ptp_tx_work(struct work_struct *work);
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
 void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
                         struct sk_buff *skb);
@@ -545,8 +547,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
-                          int cmd);
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 #ifdef CONFIG_IGB_HWMON
 void igb_sysfs_exit(struct igb_adapter *adapter);
 int igb_sysfs_init(struct igb_adapter *adapter);
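
The new __IGB_PTP_TX_IN_PROGRESS state bit above lets the Tx timestamp path claim the single outstanding-timestamp slot with test_and_set_bit_lock() and release it with clear_bit_unlock(), instead of testing ptp_tx_skb for NULL. A hedged standalone sketch of that claim/release pattern, with C11 atomics standing in for the kernel bit helpers (which likewise give acquire/release ordering):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative analogue of the __IGB_PTP_TX_IN_PROGRESS bit:
 * only one Tx skb may be waiting for a hardware timestamp at a time. */
static atomic_flag ptp_tx_in_progress = ATOMIC_FLAG_INIT;

/* Mirrors test_and_set_bit_lock(): returns false if we claimed the slot. */
static bool ptp_tx_claim(void)
{
	return atomic_flag_test_and_set_explicit(&ptp_tx_in_progress,
						 memory_order_acquire);
}

/* Mirrors clear_bit_unlock(): releases the slot once the timestamp is
 * delivered or has timed out. */
static void ptp_tx_release(void)
{
	atomic_flag_clear_explicit(&ptp_tx_in_progress, memory_order_release);
}

int main(void)
{
	if (!ptp_tx_claim())
		printf("first packet: timestamp slot claimed\n");
	if (ptp_tx_claim())
		printf("second packet: slot busy, no HW timestamp requested\n");
	ptp_tx_release();
	printf("slot released\n");
	return 0;
}
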
index 1df02378de6949ddf5fd11aff02548ba2fba6625..e5570acbeea84509855a98383ab128876e7447c9 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -2274,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 
                ring = adapter->tx_ring[j];
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
                        data[i]   = ring->tx_stats.packets;
                        data[i+1] = ring->tx_stats.bytes;
                        data[i+2] = ring->tx_stats.restart_queue;
-               } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+                       start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
                        restart2  = ring->tx_stats.restart_queue2;
-               } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+               } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
                data[i+2] += restart2;
 
                i += IGB_TX_QUEUE_STATS_LEN;
@@ -2290,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
        for (j = 0; j < adapter->num_rx_queues; j++) {
                ring = adapter->rx_ring[j];
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
                        data[i]   = ring->rx_stats.packets;
                        data[i+1] = ring->rx_stats.bytes;
                        data[i+2] = ring->rx_stats.drops;
                        data[i+3] = ring->rx_stats.csum_err;
                        data[i+4] = ring->rx_stats.alloc_failed;
-               } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
                i += IGB_RX_QUEUE_STATS_LEN;
        }
        spin_unlock(&adapter->stats64_lock);
@@ -2354,6 +2353,11 @@ static int igb_get_ts_info(struct net_device *dev,
 {
        struct igb_adapter *adapter = netdev_priv(dev);
 
+       if (adapter->ptp_clock)
+               info->phc_index = ptp_clock_index(adapter->ptp_clock);
+       else
+               info->phc_index = -1;
+
        switch (adapter->hw.mac.type) {
        case e1000_82575:
                info->so_timestamping =
@@ -2375,11 +2379,6 @@ static int igb_get_ts_info(struct net_device *dev,
                        SOF_TIMESTAMPING_RX_HARDWARE |
                        SOF_TIMESTAMPING_RAW_HARDWARE;
 
-               if (adapter->ptp_clock)
-                       info->phc_index = ptp_clock_index(adapter->ptp_clock);
-               else
-                       info->phc_index = -1;
-
                info->tx_types =
                        (1 << HWTSTAMP_TX_OFF) |
                        (1 << HWTSTAMP_TX_ON);
@@ -2588,7 +2587,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 ipcnfg, eeer, ret_val;
+       u32 ret_val;
        u16 phy_data;
 
        if ((hw->mac.type < e1000_i350) ||
@@ -2597,16 +2596,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 
        edata->supported = (SUPPORTED_1000baseT_Full |
                            SUPPORTED_100baseT_Full);
+       if (!hw->dev_spec._82575.eee_disable)
+               edata->advertised =
+                       mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+       /* The IPCNFG and EEER registers are not supported on I354. */
+       if (hw->mac.type == e1000_i354) {
+               igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
+       } else {
+               u32 eeer;
 
-       ipcnfg = rd32(E1000_IPCNFG);
-       eeer = rd32(E1000_EEER);
+               eeer = rd32(E1000_EEER);
 
-       /* EEE status on negotiated link */
-       if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
-               edata->advertised = ADVERTISED_1000baseT_Full;
+               /* EEE status on negotiated link */
+               if (eeer & E1000_EEER_EEE_NEG)
+                       edata->eee_active = true;
 
-       if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
-               edata->advertised |= ADVERTISED_100baseT_Full;
+               if (eeer & E1000_EEER_TX_LPI_EN)
+                       edata->tx_lpi_enabled = true;
+       }
 
        /* EEE Link Partner Advertised */
        switch (hw->mac.type) {
@@ -2617,8 +2625,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
                        return -ENODATA;
 
                edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
-
                break;
+       case e1000_i354:
        case e1000_i210:
        case e1000_i211:
                ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
@@ -2634,12 +2642,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
                break;
        }
 
-       if (eeer & E1000_EEER_EEE_NEG)
-               edata->eee_active = true;
-
        edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
 
-       if (eeer & E1000_EEER_TX_LPI_EN)
+       if ((hw->mac.type == e1000_i354) &&
+           (edata->eee_enabled))
                edata->tx_lpi_enabled = true;
 
        /* Report correct negotiated EEE status for devices that
@@ -2687,9 +2693,10 @@ static int igb_set_eee(struct net_device *netdev,
                        return -EINVAL;
                }
 
-               if (eee_curr.advertised != edata->advertised) {
+               if (edata->advertised &
+                   ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
                        dev_err(&adapter->pdev->dev,
-                               "Setting EEE Advertisement is not supported\n");
+                               "EEE Advertisement supports only 100Tx and/or 1000T full duplex\n");
                        return -EINVAL;
                }
 
@@ -2699,9 +2706,14 @@ static int igb_set_eee(struct net_device *netdev,
                        return -EINVAL;
                }
 
+       adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
        if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
                hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
-               igb_set_eee_i350(hw);
+               adapter->flags |= IGB_FLAG_EEE;
+               if (hw->mac.type == e1000_i350)
+                       igb_set_eee_i350(hw);
+               else
+                       igb_set_eee_i354(hw);
 
                /* reset link */
                if (netif_running(netdev))
@@ -2779,9 +2791,11 @@ static int igb_get_module_eeprom(struct net_device *netdev,
        /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
        for (i = 0; i < last_word - first_word + 1; i++) {
                status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
-               if (status != E1000_SUCCESS)
+               if (status != E1000_SUCCESS) {
                        /* Error occurred while reading module */
+                       kfree(dataword);
                        return -EIO;
+               }
 
                be16_to_cpus(&dataword[i]);
        }
index e0af5bc616139bc6540577952175345053399dd1..8333f67acf96b3a6e83746c1cedd1f0eff7636ec 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
index 46d31a49f5ea677d27a2162c672bca6bf27cc0c6..55fc5596e2d045042a3f33251440fdc137214d13 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
 static const char igb_copyright[] =
-                               "Copyright (c) 2007-2013 Intel Corporation.";
+                               "Copyright (c) 2007-2014 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
@@ -752,6 +751,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
        }
 }
 
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+       struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+       u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
+       u32 value = 0;
+
+       if (E1000_REMOVED(hw_addr))
+               return ~value;
+
+       value = readl(&hw_addr[reg]);
+
+       /* reads should not return all F's */
+       if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+               struct net_device *netdev = igb->netdev;
+               hw->hw_addr = NULL;
+               netif_device_detach(netdev);
+               netdev_err(netdev, "PCIe link lost, device now detached\n");
+       }
+
+       return value;
+}
+
 /**
  *  igb_write_ivar - configure ivar for given MSI-X vector
  *  @hw: pointer to the HW structure
@@ -1014,6 +1035,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
 {
        struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
+       /* Coming from igb_set_interrupt_capability, the vectors are not yet
+        * allocated. So, q_vector is NULL so we should stop here.
+        * allocated, so q_vector is NULL and we should stop here.
+       if (!q_vector)
+               return;
+
        if (q_vector->tx.ring)
                adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
 
@@ -1111,16 +1138,18 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
        for (i = 0; i < numvecs; i++)
                adapter->msix_entries[i].entry = i;
 
-       err = pci_enable_msix(adapter->pdev,
-                             adapter->msix_entries,
-                             numvecs);
-       if (err == 0)
+       err = pci_enable_msix_range(adapter->pdev,
+                                   adapter->msix_entries,
+                                   numvecs,
+                                   numvecs);
+       if (err > 0)
                return;
 
        igb_reset_interrupt_capability(adapter);
 
        /* If we can't do MSI-X, try MSI */
 msi_only:
+       adapter->flags &= ~IGB_FLAG_HAS_MSIX;
 #ifdef CONFIG_PCI_IOV
        /* disable SR-IOV for non MSI-X configurations */
        if (adapter->vf_data) {
@@ -1726,6 +1755,10 @@ int igb_up(struct igb_adapter *adapter)
        hw->mac.get_link_status = 1;
        schedule_work(&adapter->watchdog_task);
 
+       if ((adapter->flags & IGB_FLAG_EEE) &&
+           (!hw->dev_spec._82575.eee_disable))
+               adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
        return 0;
 }
 
@@ -1974,6 +2007,21 @@ void igb_reset(struct igb_adapter *adapter)
                }
        }
 #endif
+       /* Re-establish EEE setting */
+       if (hw->phy.media_type == e1000_media_type_copper) {
+               switch (mac->type) {
+               case e1000_i350:
+               case e1000_i210:
+               case e1000_i211:
+                       igb_set_eee_i350(hw);
+                       break;
+               case e1000_i354:
+                       igb_set_eee_i354(hw);
+                       break;
+               default:
+                       break;
+               }
+       }
        if (!netif_running(adapter->netdev))
                igb_power_down_link(adapter);
 
@@ -2560,23 +2608,36 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
                (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
                adapter->num_rx_queues, adapter->num_tx_queues);
-       switch (hw->mac.type) {
-       case e1000_i350:
-       case e1000_i210:
-       case e1000_i211:
-               igb_set_eee_i350(hw);
-               break;
-       case e1000_i354:
-               if (hw->phy.media_type == e1000_media_type_copper) {
+       if (hw->phy.media_type == e1000_media_type_copper) {
+               switch (hw->mac.type) {
+               case e1000_i350:
+               case e1000_i210:
+               case e1000_i211:
+                       /* Enable EEE for internal copper PHY devices */
+                       err = igb_set_eee_i350(hw);
+                       if ((!err) &&
+                           (!hw->dev_spec._82575.eee_disable)) {
+                               adapter->eee_advert =
+                                       MDIO_EEE_100TX | MDIO_EEE_1000T;
+                               adapter->flags |= IGB_FLAG_EEE;
+                       }
+                       break;
+               case e1000_i354:
                        if ((rd32(E1000_CTRL_EXT) &
-                           E1000_CTRL_EXT_LINK_MODE_SGMII))
-                               igb_set_eee_i354(hw);
+                           E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+                               err = igb_set_eee_i354(hw);
+                               if ((!err) &&
+                                       (!hw->dev_spec._82575.eee_disable)) {
+                                       adapter->eee_advert =
+                                          MDIO_EEE_100TX | MDIO_EEE_1000T;
+                                       adapter->flags |= IGB_FLAG_EEE;
+                               }
+                       }
+                       break;
+               default:
+                       break;
                }
-               break;
-       default:
-               break;
        }
-
        pm_runtime_put_noidle(&pdev->dev);
        return 0;
 
@@ -3510,6 +3571,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
 
        vmolr = rd32(E1000_VMOLR(vfn));
        vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+       if (hw->mac.type == e1000_i350) {
+               u32 dvmolr;
+
+               dvmolr = rd32(E1000_DVMOLR(vfn));
+               dvmolr |= E1000_DVMOLR_STRVLAN;
+               wr32(E1000_DVMOLR(vfn), dvmolr);
+       }
        if (aupe)
                vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
        else
@@ -4158,6 +4226,15 @@ static void igb_watchdog_task(struct work_struct *work)
                               (ctrl & E1000_CTRL_RFCE) ?  "RX" :
                               (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
 
+                       /* disable EEE if enabled */
+                       if ((adapter->flags & IGB_FLAG_EEE) &&
+                               (adapter->link_duplex == HALF_DUPLEX)) {
+                               dev_info(&adapter->pdev->dev,
+                               "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+                               adapter->hw.dev_spec._82575.eee_disable = true;
+                               adapter->flags &= ~IGB_FLAG_EEE;
+                       }
+
                        /* check if SmartSpeed worked */
                        igb_check_downshift(hw);
                        if (phy->speed_downgraded)
@@ -4306,8 +4383,7 @@ enum latency_range {
  *  were determined based on theoretical maximum wire speed and testing
  *  data, in order to minimize response time while increasing bulk
  *  throughput.
- *  This functionality is controlled by the InterruptThrottleRate module
- *  parameter (see igb_param.c)
+ *  This functionality is controlled by ethtool's coalescing settings.
  *  NOTE:  This function is called only when operating in a multiqueue
  *         receive environment.
  **/
@@ -4381,8 +4457,7 @@ clear_counts:
  *  based on theoretical maximum wire speed and thresholds were set based
  *  on testing data as well as attempting to minimize response time
  *  while increasing bulk throughput.
- *  this functionality is controlled by the InterruptThrottleRate module
- *  parameter (see igb_param.c)
+ *  This functionality is controlled by ethtool's coalescing settings.
  *  NOTE:  These calculations are only valid when operating in a single-
  *         queue environment.
  **/
@@ -4546,7 +4621,7 @@ static int igb_tso(struct igb_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -4602,12 +4677,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
-               case __constant_htons(ETH_P_IP):
+               case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
                        l4_hdr = ip_hdr(skb)->protocol;
                        break;
-               case __constant_htons(ETH_P_IPV6):
+               case htons(ETH_P_IPV6):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        l4_hdr = ipv6_hdr(skb)->nexthdr;
                        break;
@@ -4905,12 +4980,11 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-       skb_tx_timestamp(skb);
-
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 
-               if (!(adapter->ptp_tx_skb)) {
+               if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
+                                          &adapter->state)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        tx_flags |= IGB_TX_FLAGS_TSTAMP;
 
@@ -4921,6 +4995,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                }
        }
 
+       skb_tx_timestamp(skb);
+
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
@@ -5127,10 +5203,10 @@ void igb_update_stats(struct igb_adapter *adapter,
                }
 
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
                        _bytes = ring->rx_stats.bytes;
                        _packets = ring->rx_stats.packets;
-               } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
                bytes += _bytes;
                packets += _packets;
        }
@@ -5143,10 +5219,10 @@ void igb_update_stats(struct igb_adapter *adapter,
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct igb_ring *ring = adapter->tx_ring[i];
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
                        _bytes = ring->tx_stats.bytes;
                        _packets = ring->tx_stats.packets;
-               } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
                bytes += _bytes;
                packets += _packets;
        }
@@ -6620,7 +6696,9 @@ static inline void igb_rx_hash(struct igb_ring *ring,
                               struct sk_buff *skb)
 {
        if (ring->netdev->features & NETIF_F_RXHASH)
-               skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+               skb_set_hash(skb,
+                            le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+                            PKT_HASH_TYPE_L3);
 }
 
 /**
@@ -6690,7 +6768,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
        hdr.network += ETH_HLEN;
 
        /* handle any vlan tag if present */
-       if (protocol == __constant_htons(ETH_P_8021Q)) {
+       if (protocol == htons(ETH_P_8021Q)) {
                if ((hdr.network - data) > (max_len - VLAN_HLEN))
                        return max_len;
 
@@ -6699,7 +6777,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
        }
 
        /* handle L3 protocols */
-       if (protocol == __constant_htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
                        return max_len;
 
@@ -6713,7 +6791,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
                /* record next protocol if header is present */
                if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
                        nexthdr = hdr.ipv4->protocol;
-       } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+       } else if (protocol == htons(ETH_P_IPV6)) {
                if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
                        return max_len;
 
@@ -6903,7 +6981,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        unsigned int total_bytes = 0, total_packets = 0;
        u16 cleaned_count = igb_desc_unused(rx_ring);
 
-       do {
+       while (likely(total_packets < budget)) {
                union e1000_adv_rx_desc *rx_desc;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -6955,7 +7033,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 
                /* update budget accounting */
                total_packets++;
-       } while (likely(total_packets < budget));
+       }
 
        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;
@@ -7114,8 +7192,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
+       case SIOCGHWTSTAMP:
+               return igb_ptp_get_ts_config(netdev, ifr);
        case SIOCSHWTSTAMP:
-               return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
+               return igb_ptp_set_ts_config(netdev, ifr);
        default:
                return -EOPNOTSUPP;
        }
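
The ioctl hunk above routes SIOCGHWTSTAMP to igb_ptp_get_ts_config() alongside the existing SIOCSHWTSTAMP path. As a usage illustration (not part of the patch), a small userspace program that reads back the driver's stored hwtstamp_config; the interface name here is only an example:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface name */
	ifr.ifr_data = (void *)&cfg;

	/* Read the current config; the driver returns its shadow tstamp_config. */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0)
		perror("SIOCGHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}
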
index 5a54e3dc535de95e9525ccf4bc24ae6f8dcdbcaf..2cca8fd5e574feb779be5cbc05f0e176574267da 100644 (file)
@@ -12,9 +12,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/module.h>
 #include <linux/device.h>
@@ -75,6 +74,8 @@
 #define INCVALUE_82576                 (16 << IGB_82576_TSYNC_SHIFT)
 #define IGB_NBITS_82580                        40
 
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+
 /* SYSTIM read access for the 82576 */
 static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
 {
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
  * This work function polls the TSYNCTXCTL valid bit to determine when a
  * timestamp has been taken for the current stored skb.
  **/
-void igb_ptp_tx_work(struct work_struct *work)
+static void igb_ptp_tx_work(struct work_struct *work)
 {
        struct igb_adapter *adapter = container_of(work, struct igb_adapter,
                                                   ptp_tx_work);
@@ -386,6 +387,7 @@ void igb_ptp_tx_work(struct work_struct *work)
                                   IGB_PTP_TX_TIMEOUT)) {
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
+               clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
                adapter->tx_hwtstamp_timeouts++;
                dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
                return;
@@ -466,7 +468,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
  * available, then it must have been for this skb here because we only
  * allow only one such packet into the queue.
  **/
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
        struct skb_shared_hwtstamps shhwtstamps;
@@ -479,6 +481,7 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(adapter->ptp_tx_skb);
        adapter->ptp_tx_skb = NULL;
+       clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
 }
 
 /**
@@ -540,10 +543,26 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 }
 
 /**
- * igb_ptp_hwtstamp_ioctl - control hardware time stamping
+ * igb_ptp_get_ts_config - get hardware time stamping config
+ * @netdev:
+ * @ifreq:
+ *
+ * Get the hwtstamp_config settings to return to the user. Rather than attempt
+ * to deconstruct the settings from the registers, just return a shadow copy
+ * of the last known settings.
+ **/
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       struct hwtstamp_config *config = &adapter->tstamp_config;
+
+       return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+               -EFAULT : 0;
+}
+/**
+ * igb_ptp_set_ts_config - control hardware time stamping
  * @netdev:
  * @ifreq:
- * @cmd:
  *
  * Outgoing time stamping can be enabled and disabled. Play nice and
  * disable it when requested, although it shouldn't cause any overhead
@@ -557,12 +576,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
  * not supported, with the exception of "all V2 events regardless of
  * level 2 or 4".
  **/
-int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
-                          struct ifreq *ifr, int cmd)
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       struct hwtstamp_config config;
+       struct hwtstamp_config *config = &adapter->tstamp_config;
        u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
        u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
        u32 tsync_rx_cfg = 0;
@@ -570,14 +588,14 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
        bool is_l2 = false;
        u32 regval;
 
-       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+       if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
                return -EFAULT;
 
        /* reserved for future extensions */
-       if (config.flags)
+       if (config->flags)
                return -EINVAL;
 
-       switch (config.tx_type) {
+       switch (config->tx_type) {
        case HWTSTAMP_TX_OFF:
                tsync_tx_ctl = 0;
        case HWTSTAMP_TX_ON:
@@ -586,7 +604,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
                return -ERANGE;
        }
 
-       switch (config.rx_filter) {
+       switch (config->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                tsync_rx_ctl = 0;
                break;
@@ -610,7 +628,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
-               config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                is_l2 = true;
                is_l4 = true;
                break;
@@ -621,12 +639,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
                 */
                if (hw->mac.type != e1000_82576) {
                        tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-                       config.rx_filter = HWTSTAMP_FILTER_ALL;
+                       config->rx_filter = HWTSTAMP_FILTER_ALL;
                        break;
                }
                /* fall through */
        default:
-               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               config->rx_filter = HWTSTAMP_FILTER_NONE;
                return -ERANGE;
        }
 
@@ -643,7 +661,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
        if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
                tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
-               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               config->rx_filter = HWTSTAMP_FILTER_ALL;
                is_l2 = true;
                is_l4 = true;
 
@@ -707,7 +725,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
        regval = rd32(E1000_RXSTMPL);
        regval = rd32(E1000_RXSTMPH);
 
-       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+       return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
                -EFAULT : 0;
 }
 
@@ -798,7 +816,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 
        /* Initialize the time sync interrupts for devices that support it. */
        if (hw->mac.type >= e1000_82580) {
-               wr32(E1000_TSIM, E1000_TSIM_TXTS);
+               wr32(E1000_TSIM, TSYNC_INTERRUPTS);
                wr32(E1000_IMS, E1000_IMS_TS);
        }
 
@@ -841,6 +859,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
        if (adapter->ptp_tx_skb) {
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
+               clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
        }
 
        if (adapter->ptp_clock) {
@@ -864,6 +883,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
        if (!(adapter->flags & IGB_FLAG_PTP))
                return;
 
+       /* reset the tstamp_config */
+       memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
        switch (adapter->hw.mac.type) {
        case e1000_82576:
                /* Dial the nominal frequency. */
@@ -876,7 +898,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
        case e1000_i211:
                /* Enable the timer functions and interrupts. */
                wr32(E1000_TSAUXC, 0x0);
-               wr32(E1000_TSIM, E1000_TSIM_TXTS);
+               wr32(E1000_TSIM, TSYNC_INTERRUPTS);
                wr32(E1000_IMS, E1000_IMS_TS);
                break;
        default:
index 675435fc2e53c8b30c84571a02184e50088a242f..b7ab03a2f28f702f604e9f5accd9fb4529b85e70 100644 (file)
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
                for (i = 0; i < 3; i++)
                        adapter->msix_entries[i].entry = i;
 
-               err = pci_enable_msix(adapter->pdev,
-                                     adapter->msix_entries, 3);
+               err = pci_enable_msix_range(adapter->pdev,
+                                           adapter->msix_entries, 3, 3);
        }
 
-       if (err) {
+       if (err < 0) {
                /* MSI-X failed */
                dev_err(&adapter->pdev->dev,
                        "Failed to initialize MSI-X interrupts.\n");
@@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        switch (skb->protocol) {
-                       case __constant_htons(ETH_P_IP):
+                       case htons(ETH_P_IP):
                                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
                                        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
                                break;
-                       case __constant_htons(ETH_P_IPV6):
+                       case htons(ETH_P_IPV6):
                                if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
                                        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
                                break;
index 57e390cbe6d0d21630f6bf6904a0c8a663601931..f42c201f727fc7fdb46062808cd0435927081256 100644 (file)
@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        int tso;
 
        if (test_bit(__IXGB_DOWN, &adapter->flags)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
        if (skb->len <= 0) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        tso = ixgb_tso(adapter, skb);
        if (tso < 0) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index 0186ea2969fe9cb0d9ed53ad122c95739caec78e..2fff0fc4e6e8a0b76a1ed880da906d08397c7ed2 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -765,6 +766,7 @@ struct ixgbe_adapter {
        struct ptp_clock_info ptp_caps;
        struct work_struct ptp_tx_work;
        struct sk_buff *ptp_tx_skb;
+       struct hwtstamp_config tstamp_config;
        unsigned long ptp_tx_start;
        unsigned long last_overflow_check;
        unsigned long last_rx_ptp_check;
@@ -884,7 +886,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
                                          u16 soft_id);
 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
                                          union ixgbe_atr_input *mask);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -958,8 +959,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
-                            int cmd);
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
index a26f3fee4f359be56b4346b9a5d871efedc8ca1d..4c78ea8946c1b48838db6d7e9ce890089ff6ab59 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
  **/
 static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
        u16 pcie_devctl2;
 
+       if (ixgbe_removed(hw->hw_addr))
+               return;
+
        /* only take action if timeout value is defaulted to 0 */
        if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
                goto out;
@@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
         * directly in order to set the completion timeout value for
         * 16ms to 55ms
         */
-       pci_read_config_word(adapter->pdev,
-                            IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+       pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
        pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
-       pci_write_config_word(adapter->pdev,
-                             IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+       ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
 out:
        /* disable completion timeout resend */
        gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
        mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
        mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
        mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
+       mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
        mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
        mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }
 
-       hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
-
        /* set the completion timeout for interface */
        if (ret_val == 0)
                ixgbe_set_pcie_completion_timeout(hw);
@@ -1237,14 +1237,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
 }
 
 /**
- * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers.
- */
-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
-                                 int strategy)
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+                                 u32 headroom, int strategy)
 {
        u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
        u8  i = 0;
@@ -1315,7 +1315,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
        .release_swfw_sync      = &ixgbe_release_swfw_sync,
        .get_thermal_sensor_data = NULL,
        .init_thermal_sensor_thresh = NULL,
-       .mng_fw_enabled         = NULL,
+       .prot_autoc_read        = &prot_autoc_read_generic,
+       .prot_autoc_write       = &prot_autoc_write_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
index edda6814108c9d120fe994740edfcc04f710943d..f32b3dd1ba8e18911fd58876ed20e494d92ae389 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -63,8 +64,10 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
                                     u8 dev_addr, u8 *data);
 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
                                      u8 dev_addr, u8 data);
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 
-static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
 {
        u32 fwsm, manc, factps;
 
@@ -91,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
         * and MNG not enabled
         */
        if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
-           !hw->mng_fw_enabled) {
+           !ixgbe_mng_enabled(hw)) {
                mac->ops.disable_tx_laser =
                                       &ixgbe_disable_tx_laser_multispeed_fiber;
                mac->ops.enable_tx_laser =
@@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val = 0;
        u16 list_offset, data_offset, data_value;
-       bool got_lock = false;
 
        if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
                ixgbe_init_mac_link_ops_82599(hw);
@@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
                usleep_range(hw->eeprom.semaphore_delay * 1000,
                             hw->eeprom.semaphore_delay * 2000);
 
-               /* Need SW/FW semaphore around AUTOC writes if LESM on,
-                * likewise reset_pipeline requires lock as it also writes
-                * AUTOC.
-                */
-               if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
-                       if (ret_val)
-                               goto setup_sfp_out;
-
-                       got_lock = true;
-               }
-
                /* Restart DSP and set SFI mode */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
-                               IXGBE_AUTOC_LMS_10G_SERIAL));
-               hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-               ret_val = ixgbe_reset_pipeline_82599(hw);
-
-               if (got_lock) {
-                       hw->mac.ops.release_swfw_sync(hw,
-                                                     IXGBE_GSSR_MAC_CSR_SM);
-                       got_lock = false;
-               }
+               ret_val = hw->mac.ops.prot_autoc_write(hw,
+                       hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+                       false);
 
                if (ret_val) {
                        hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +189,81 @@ setup_sfp_err:
        return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 }
 
+/**
+ *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ *  @hw: pointer to hardware structure
+ *  @locked: Return whether we locked for this read.
+ *  @reg_val: Value we read from AUTOC
+ *
+ *  For this part (82599) we need to wrap read-modify-writes with a possible
+ *  FW/SW lock.  It is assumed this lock will be freed with the next
+ *  prot_autoc_write_82599().  Note that locked can only be true in cases
+ *  where this function doesn't return an error.
+ **/
+static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+                                u32 *reg_val)
+{
+       s32 ret_val;
+
+       *locked = false;
+       /* If LESM is on then we need to hold the SW/FW semaphore. */
+       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                       IXGBE_GSSR_MAC_CSR_SM);
+               if (ret_val)
+                       return IXGBE_ERR_SWFW_SYNC;
+
+               *locked = true;
+       }
+
+       *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       return 0;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ *          previous prot_autoc_read_82599().
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ **/
+static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+       s32 ret_val = 0;
+
+       /* Blocked by MNG FW so bail */
+       if (ixgbe_check_reset_blocked(hw))
+               goto out;
+
+       /* We only need to get the lock if:
+        *  - We didn't do it already (in the read part of a read-modify-write)
+        *  - LESM is enabled.
+        */
+       if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                       IXGBE_GSSR_MAC_CSR_SM);
+               if (ret_val)
+                       return IXGBE_ERR_SWFW_SYNC;
+
+               locked = true;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+       ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+       /* Free the SW/FW semaphore as we either grabbed it here or
+        * already had it when this function was called.
+        */
+       if (locked)
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+       return ret_val;
+}
+
 static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
@@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
        mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
        mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
        mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
+       mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
        mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
        mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -456,12 +514,20 @@ out:
  *
  * Disables link, should be called during D3 power down sequence.
  *
- */
+ **/
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
 {
-       u32 autoc2_reg;
+       u32 autoc2_reg, fwsm;
+       u16 ee_ctrl_2 = 0;
+
+       hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
 
-       if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+       /* Check to see if MNG FW could be enabled */
+       fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+
+       if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
+           !hw->wol_enabled &&
+           ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
                autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
                autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
                IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
        u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 
+       /* Blocked by MNG FW so bail */
+       if (ixgbe_check_reset_blocked(hw))
+               return;
+
        /* Disable tx laser; allow 100us to go dark per spec */
        esdp_reg |= IXGBE_ESDP_SDP3;
        IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
  **/
 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
 {
+       /* Blocked by MNG FW so bail */
+       if (ixgbe_check_reset_blocked(hw))
+               return;
+
        if (hw->mac.autotry_restart) {
                ixgbe_disable_tx_laser_multispeed_fiber(hw);
                ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -589,75 +663,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
        }
 }
 
-/**
- *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
- *  @hw: pointer to hardware structure
- *  @speed: link speed to set
- *
- *  We set the module speed differently for fixed fiber.  For other
- *  multi-speed devices we don't have an error value so here if we
- *  detect an error we just log it and exit.
- */
-static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
-                                       ixgbe_link_speed speed)
-{
-       s32 status;
-       u8 rs, eeprom_data;
-
-       switch (speed) {
-       case IXGBE_LINK_SPEED_10GB_FULL:
-               /* one bit mask same as setting on */
-               rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
-               break;
-       case IXGBE_LINK_SPEED_1GB_FULL:
-               rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
-               break;
-       default:
-               hw_dbg(hw, "Invalid fixed module speed\n");
-               return;
-       }
-
-       /* Set RS0 */
-       status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
-                                          IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                          &eeprom_data);
-       if (status) {
-               hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
-               goto out;
-       }
-
-       eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
-
-       status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
-                                           IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                           eeprom_data);
-       if (status) {
-               hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
-               goto out;
-       }
-
-       /* Set RS1 */
-       status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
-                                          IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                          &eeprom_data);
-       if (status) {
-               hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
-               goto out;
-       }
-
-       eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
-
-       status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
-                                           IXGBE_I2C_EEPROM_DEV_ADDR2,
-                                           eeprom_data);
-       if (status) {
-               hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
-               goto out;
-       }
-out:
-       return;
-}
-
 /**
  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
@@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
 
                /* Set the module link speed */
                switch (hw->phy.media_type) {
-               case ixgbe_media_type_fiber_fixed:
-                       ixgbe_set_fiber_fixed_speed(hw,
-                                               IXGBE_LINK_SPEED_1GB_FULL);
-                       break;
                case ixgbe_media_type_fiber:
                        esdp_reg &= ~IXGBE_ESDP_SDP5;
                        esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
 
 out:
        if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
-               hw_dbg(hw, "Smartspeed has downgraded the link speed from "
-                      "the maximum advertised\n");
+               hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
        return status;
 }
 
@@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                      ixgbe_link_speed speed,
                                      bool autoneg_wait_to_complete)
 {
+       bool autoneg = false;
        s32 status = 0;
-       u32 autoc, pma_pmd_1g, link_mode, start_autoc;
+       u32 pma_pmd_1g, link_mode, links_reg, i;
        u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-       u32 orig_autoc = 0;
        u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-       u32 links_reg;
-       u32 i;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
-       bool got_lock = false;
-       bool autoneg = false;
+
+       /* holds the value of AUTOC register at this current point in time */
+       u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       /* holds the cached value of AUTOC register */
+       u32 orig_autoc = 0;
+       /* temporary variable used for comparison purposes */
+       u32 autoc = current_autoc;
 
        /* Check to see if speed passed in is supported. */
        status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
 
        /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
        if (hw->mac.orig_link_settings_stored)
-               autoc = hw->mac.orig_autoc;
+               orig_autoc = hw->mac.orig_autoc;
        else
-               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               orig_autoc = autoc;
 
-       orig_autoc = autoc;
-       start_autoc = hw->mac.cached_autoc;
        link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
        pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 
@@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                }
        }
 
-       if (autoc != start_autoc) {
-               /* Need SW/FW semaphore around AUTOC writes if LESM is on,
-                * likewise reset_pipeline requires us to hold this lock as
-                * it also writes to AUTOC.
-                */
-               if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-                       status = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
-                       if (status != 0)
-                               goto out;
-
-                       got_lock = true;
-               }
-
+       if (autoc != current_autoc) {
                /* Restart link */
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
-               hw->mac.cached_autoc = autoc;
-               ixgbe_reset_pipeline_82599(hw);
-
-               if (got_lock)
-                       hw->mac.ops.release_swfw_sync(hw,
-                                                     IXGBE_GSSR_MAC_CSR_SM);
+               status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+               if (status)
+                       goto out;
 
                /* Only poll for autoneg to complete if specified to do so */
                if (autoneg_wait_to_complete) {
@@ -1068,8 +1052,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
                                        status =
                                                IXGBE_ERR_AUTONEG_NOT_COMPLETE;
-                                       hw_dbg(hw, "Autoneg did not "
-                                              "complete.\n");
+                                       hw_dbg(hw, "Autoneg did not complete.\n");
                                }
                        }
                }
@@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 {
        ixgbe_link_speed link_speed;
        s32 status;
-       u32 ctrl, i, autoc2;
+       u32 ctrl, i, autoc, autoc2;
        u32 curr_lms;
        bool link_up = false;
 
@@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
                hw->phy.ops.reset(hw);
 
        /* remember AUTOC from before we reset */
-       if (hw->mac.cached_autoc)
-               curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
-       else
-               curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
-                          IXGBE_AUTOC_LMS_MASK;
+       curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
 
 mac_reset_top:
        /*
@@ -1205,7 +1184,7 @@ mac_reset_top:
         * stored off yet.  Otherwise restore the stored original
         * values since the reset operation sets back to defaults.
         */
-       hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 
        /* Enable link if disabled in NVM */
@@ -1216,7 +1195,7 @@ mac_reset_top:
        }
 
        if (hw->mac.orig_link_settings_stored == false) {
-               hw->mac.orig_autoc = hw->mac.cached_autoc;
+               hw->mac.orig_autoc = autoc;
                hw->mac.orig_autoc2 = autoc2;
                hw->mac.orig_link_settings_stored = true;
        } else {
@@ -1227,34 +1206,18 @@ mac_reset_top:
                 * Likewise if we support WoL we don't want change the
                 * LMS state either.
                 */
-               if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+               if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
                    hw->wol_enabled)
                        hw->mac.orig_autoc =
                                (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
                                curr_lms;
 
-               if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
-                       /* Need SW/FW semaphore around AUTOC writes if LESM is
-                        * on, likewise reset_pipeline requires us to hold
-                        * this lock as it also writes to AUTOC.
-                        */
-                       bool got_lock = false;
-                       if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-                               status = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
-                               if (status)
-                                       goto reset_hw_out;
-
-                               got_lock = true;
-                       }
-
-                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
-                       hw->mac.cached_autoc = hw->mac.orig_autoc;
-                       ixgbe_reset_pipeline_82599(hw);
-
-                       if (got_lock)
-                               hw->mac.ops.release_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
+               if (autoc != hw->mac.orig_autoc) {
+                       status = hw->mac.ops.prot_autoc_write(hw,
+                                                       hw->mac.orig_autoc,
+                                                       false);
+                       if (status)
+                               goto reset_hw_out;
                }
 
                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
 {
 
        u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
-       u32 bucket_hash = 0;
+       u32 bucket_hash = 0, hi_dword = 0;
+       int i;
 
        /* Apply masks to input data */
-       input->dword_stream[0]  &= input_mask->dword_stream[0];
-       input->dword_stream[1]  &= input_mask->dword_stream[1];
-       input->dword_stream[2]  &= input_mask->dword_stream[2];
-       input->dword_stream[3]  &= input_mask->dword_stream[3];
-       input->dword_stream[4]  &= input_mask->dword_stream[4];
-       input->dword_stream[5]  &= input_mask->dword_stream[5];
-       input->dword_stream[6]  &= input_mask->dword_stream[6];
-       input->dword_stream[7]  &= input_mask->dword_stream[7];
-       input->dword_stream[8]  &= input_mask->dword_stream[8];
-       input->dword_stream[9]  &= input_mask->dword_stream[9];
-       input->dword_stream[10] &= input_mask->dword_stream[10];
+       for (i = 0; i <= 10; i++)
+               input->dword_stream[i] &= input_mask->dword_stream[i];
 
        /* record the flow_vm_vlan bits as they are a key part to the hash */
        flow_vm_vlan = ntohl(input->dword_stream[0]);
 
        /* generate common hash dword */
-       hi_hash_dword = ntohl(input->dword_stream[1] ^
-                                   input->dword_stream[2] ^
-                                   input->dword_stream[3] ^
-                                   input->dword_stream[4] ^
-                                   input->dword_stream[5] ^
-                                   input->dword_stream[6] ^
-                                   input->dword_stream[7] ^
-                                   input->dword_stream[8] ^
-                                   input->dword_stream[9] ^
-                                   input->dword_stream[10]);
+       for (i = 1; i <= 10; i++)
+               hi_dword ^= input->dword_stream[i];
+       hi_hash_dword = ntohl(hi_dword);
 
        /* low dword is word swapped version of common */
        lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
        lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
        /* Process remaining 30 bit of the key */
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
-       IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+       for (i = 1; i <= 15; i++)
+               IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
 
        /*
         * Limit hash to 13 bits since max bucket count is 8K.
@@ -2001,7 +1936,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
 
        /* We need to run link autotry after the driver loads */
        hw->mac.autotry_restart = true;
-       hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
 
        if (ret_val == 0)
                ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -2260,7 +2194,7 @@ fw_version_err:
  *  Returns true if the LESM FW module is present and enabled. Otherwise
  *  returns false. Smart Speed must be disabled if LESM FW module is enabled.
  **/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
 {
        bool lesm_enabled = false;
        u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2300,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
  * full pipeline reset.  Note - We must hold the SW/FW semaphore before writing
  * to AUTOC, so this function assumes the semaphore is held.
  **/
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
 {
        s32 ret_val;
        u32 anlp1_reg = 0;
@@ -2380,11 +2314,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
                IXGBE_WRITE_FLUSH(hw);
        }
 
-       autoc_reg = hw->mac.cached_autoc;
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 
        /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+                       autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
 
        /* Wait for AN to leave state 0 */
        for (i = 0; i < 10; i++) {
@@ -2565,7 +2500,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .release_swfw_sync      = &ixgbe_release_swfw_sync,
        .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
        .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
-       .mng_fw_enabled         = &ixgbe_mng_enabled,
+       .prot_autoc_read        = &prot_autoc_read_82599,
+       .prot_autoc_write       = &prot_autoc_write_82599,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
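[Editorial sketch, not part of the patch] The hunks above replace the open-coded SW/FW semaphore handling around AUTOC with the new prot_autoc_read/prot_autoc_write mac ops. A minimal caller-side read-modify-write sketch, assuming the ops tables are populated as in this patch (the helper name example_autoc_rmw is illustrative only):

	static s32 example_autoc_rmw(struct ixgbe_hw *hw, u32 set_bits)
	{
		bool locked = false;
		u32 autoc;
		s32 ret;

		/* the read may take the SW/FW semaphore (82599 with LESM on) */
		ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
		if (ret)
			return ret;

		autoc |= set_bits;

		/* the write performs any required pipeline reset and releases
		 * the semaphore if the read took it
		 */
		return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
	}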
index b5c434b617b129411abe676efef64f259ffc6518..24fba39e194e682640391e42e5e618907ff230d9 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
        bool link_up;
 
        switch (hw->phy.media_type) {
-       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber:
                hw->mac.ops.check_link(hw, &speed, &link_up, false);
                /* if link is down, assume supported */
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
        s32 ret_val = 0;
        u32 reg = 0, reg_bp = 0;
        u16 reg_cu = 0;
-       bool got_lock = false;
+       bool locked = false;
 
        /*
         * Validate the requested mode.  Strict IEEE mode does not allow
@@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         * we link at 10G, the 1G advertisement is harmless and vice versa.
         */
        switch (hw->phy.media_type) {
-       case ixgbe_media_type_fiber_fixed:
-       case ixgbe_media_type_fiber:
        case ixgbe_media_type_backplane:
+               /* some MACs need RMW protection on AUTOC */
+               ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+               if (ret_val)
+                       goto out;
+
+               /* only backplane uses autoc so fall through */
+       case ixgbe_media_type_fiber:
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
-               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
                break;
        case ixgbe_media_type_copper:
                hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                * LESM is on, likewise reset_pipeline requires the lock as
                 * it also writes AUTOC.
                 */
-               if ((hw->mac.type == ixgbe_mac_82599EB) &&
-                   ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
-                       if (ret_val)
-                               goto out;
-
-                       got_lock = true;
-               }
-
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-
-               if (hw->mac.type == ixgbe_mac_82599EB)
-                       ixgbe_reset_pipeline_82599(hw);
-
-               if (got_lock)
-                       hw->mac.ops.release_swfw_sync(hw,
-                                                     IXGBE_GSSR_MAC_CSR_SM);
+               ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+               if (ret_val)
+                       goto out;
 
        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-                   ixgbe_device_supports_autoneg_fc(hw)) {
+                  ixgbe_device_supports_autoneg_fc(hw)) {
                hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
                                      MDIO_MMD_AN, reg_cu);
        }
@@ -656,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
  **/
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
-       struct ixgbe_mac_info *mac = &hw->mac;
        u16 link_status;
 
        hw->bus.type = ixgbe_bus_type_pci_express;
 
        /* Get the negotiated link width and speed from PCI config space */
-       pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
-                            &link_status);
+       link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
 
        hw->bus.width = ixgbe_convert_bus_width(link_status);
        hw->bus.speed = ixgbe_convert_bus_speed(link_status);
 
-       mac->ops.set_lan_id(hw);
+       hw->mac.ops.set_lan_id(hw);
 
        return 0;
 }
@@ -2406,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
 
        switch (hw->phy.media_type) {
        /* Autoneg flow control on fiber adapters */
-       case ixgbe_media_type_fiber_fixed:
        case ixgbe_media_type_fiber:
                if (speed == IXGBE_LINK_SPEED_1GB_FULL)
                        ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2436,6 +2422,53 @@ out:
        }
 }
 
+/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ *  Add 10% to specified maximum and return the number of times to poll for
+ *  completion timeout, in units of 100 microsec.  Never return less than
+ *  800 = 80 millisec.
+ **/
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+       s16 devctl2;
+       u32 pollcnt;
+
+       devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
+       devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+       switch (devctl2) {
+       case IXGBE_PCIDEVCTRL2_65_130ms:
+               pollcnt = 1300;         /* 130 millisec */
+               break;
+       case IXGBE_PCIDEVCTRL2_260_520ms:
+               pollcnt = 5200;         /* 520 millisec */
+               break;
+       case IXGBE_PCIDEVCTRL2_1_2s:
+               pollcnt = 20000;        /* 2 sec */
+               break;
+       case IXGBE_PCIDEVCTRL2_4_8s:
+               pollcnt = 80000;        /* 8 sec */
+               break;
+       case IXGBE_PCIDEVCTRL2_17_34s:
+               pollcnt = 34000;        /* 34 sec */
+               break;
+       case IXGBE_PCIDEVCTRL2_50_100us:        /* 100 microsecs */
+       case IXGBE_PCIDEVCTRL2_1_2ms:           /* 2 millisecs */
+       case IXGBE_PCIDEVCTRL2_16_32ms:         /* 32 millisec */
+       case IXGBE_PCIDEVCTRL2_16_32ms_def:     /* 32 millisec default */
+       default:
+               pollcnt = 800;          /* 80 millisec minimum */
+               break;
+       }
+
+       /* add 10% to spec maximum */
+       return (pollcnt * 11) / 10;
+}
+
 /**
  *  ixgbe_disable_pcie_master - Disable PCI-express master access
  *  @hw: pointer to hardware structure
@@ -2447,16 +2480,16 @@ out:
  **/
 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        s32 status = 0;
-       u32 i;
+       u32 i, poll;
        u16 value;
 
        /* Always set this bit to ensure any future transactions are blocked */
        IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
 
        /* Exit if master requests are blocked */
-       if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+       if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+           ixgbe_removed(hw->hw_addr))
                goto out;
 
        /* Poll for master request bit to clear */
@@ -2481,10 +2514,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
         * Before proceeding, make sure that the PCIe block does not have
         * transactions pending.
         */
-       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+       poll = ixgbe_pcie_timeout_poll(hw);
+       for (i = 0; i < poll; i++) {
                udelay(100);
-               pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
-                                                        &value);
+               value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+               if (ixgbe_removed(hw->hw_addr))
+                       goto out;
                if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
                        goto out;
        }
@@ -2563,6 +2598,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
        ixgbe_release_eeprom_semaphore(hw);
 }
 
+/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @reg_val: Value we read from AUTOC
+ * @locked: bool to indicate whether the SW/FW lock should be taken.  Never
+ *         true in the generic case.
+ *
+ * The default case requires no protection, so just do the register read.
+ **/
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+       *locked = false;
+       *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       return 0;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ *         previous read.
+ **/
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+       return 0;
+}
+
 /**
  *  ixgbe_disable_rx_buff_generic - Stops the receive data path
  *  @hw: pointer to hardware structure
@@ -2641,6 +2705,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        s32 ret_val = 0;
+       bool locked = false;
 
        /*
         * Link must be up to auto-blink the LEDs;
@@ -2649,28 +2714,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
        if (!link_up) {
-               /* Need the SW/FW semaphore around AUTOC writes if 82599 and
-                * LESM is on.
-                */
-               bool got_lock = false;
-
-               if ((hw->mac.type == ixgbe_mac_82599EB) &&
-                   ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-                       ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                                       IXGBE_GSSR_MAC_CSR_SM);
-                       if (ret_val)
-                               goto out;
+               ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+               if (ret_val)
+                       goto out;
 
-                       got_lock = true;
-               }
                autoc_reg |= IXGBE_AUTOC_AN_RESTART;
                autoc_reg |= IXGBE_AUTOC_FLU;
-               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+               ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+               if (ret_val)
+                       goto out;
+
                IXGBE_WRITE_FLUSH(hw);
 
-               if (got_lock)
-                       hw->mac.ops.release_swfw_sync(hw,
-                                                     IXGBE_GSSR_MAC_CSR_SM);
                usleep_range(10000, 20000);
        }
 
@@ -2690,33 +2746,21 @@ out:
  **/
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
 {
-       u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc_reg = 0;
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        s32 ret_val = 0;
-       bool got_lock = false;
+       bool locked = false;
 
-       /* Need the SW/FW semaphore around AUTOC writes if 82599 and
-        * LESM is on.
-        */
-       if ((hw->mac.type == ixgbe_mac_82599EB) &&
-           ixgbe_verify_lesm_fw_enabled_82599(hw)) {
-               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
-                                               IXGBE_GSSR_MAC_CSR_SM);
-               if (ret_val)
-                       goto out;
-
-               got_lock = true;
-       }
+       ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+       if (ret_val)
+               goto out;
 
        autoc_reg &= ~IXGBE_AUTOC_FLU;
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
-       if (hw->mac.type == ixgbe_mac_82599EB)
-               ixgbe_reset_pipeline_82599(hw);
 
-       if (got_lock)
-               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+       ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+       if (ret_val)
+               goto out;
 
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2817,7 +2861,6 @@ san_mac_addr_clr:
  **/
 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
-       struct ixgbe_adapter *adapter = hw->back;
        u16 msix_count = 1;
        u16 max_msix_count;
        u16 pcie_offset;
@@ -2836,7 +2879,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
                return msix_count;
        }
 
-       pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
+       msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
+       if (ixgbe_removed(hw->hw_addr))
+               msix_count = 0;
        msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
 
        /* MSI-X count is zero-based in HW */
@@ -2868,6 +2913,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
        mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
+       if (ixgbe_removed(hw->hw_addr))
+               goto done;
+
        if (!mpsar_lo && !mpsar_hi)
                goto done;
 
index f2e3919750ec8ca0d7d1b6ac27d63550ab4724a3..f12c40fb5537a18604ff030f4adc4287946dbff4 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -98,6 +99,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
                                  bool *link_up, bool link_up_wait_to_complete);
 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
                                  u16 *wwpn_prefix);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
                                 u8 build, u8 ver);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
 
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
                             u32 headroom, int strategy);
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
 
 #define IXGBE_I2C_THERMAL_SENSOR_ADDR  0xF8
 #define IXGBE_EMC_INTERNAL_DATA                0x00
@@ -125,6 +130,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
 
 #define IXGBE_FAILED_READ_REG 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
 
 static inline bool ixgbe_removed(void __iomem *addr)
 {
index 05e23b80b5e37b70f1f0e18a0b3be826915623c3..bdb99b3b0f30f4ee2253e81bc72e84fb6a0d1e30 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index d71d9ce3e394b4199f297ebbcb493fa268092dd2..d5a1e3db0774a5580b8191126e682c33191f7fa6 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index c5933f6dceee928be6c225cb6eba42c48284b011..472b0f450bf90d4dc23fe5995d28d0d80c2d940e 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index 043307024c4abf542b6014eb6ccbfc739e2d54c5..6c55c14d082aa6285f43941e5d96ae86acdbb21b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -1127,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                }
 
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i]   = ring->stats.packets;
                        data[i+1] = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
 #ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
@@ -1155,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                }
 
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i]   = ring->stats.packets;
                        data[i+1] = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
 #ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
@@ -1247,6 +1248,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
        struct ixgbe_hw *hw = &adapter->hw;
        bool link_up;
        u32 link_speed = 0;
+
+       if (ixgbe_removed(hw->hw_addr)) {
+               *data = 1;
+               return 1;
+       }
        *data = 0;
 
        hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1969,6 +1975,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
                data[1] = 1;
                data[2] = 1;
                data[3] = 1;
+               data[4] = 1;
                eth_test->flags |= ETH_TEST_FL_FAILED;
                return;
        }
@@ -1988,6 +1995,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
                                        data[1] = 1;
                                        data[2] = 1;
                                        data[3] = 1;
+                                       data[4] = 1;
                                        eth_test->flags |= ETH_TEST_FL_FAILED;
                                        clear_bit(__IXGBE_TESTING,
                                                  &adapter->state);
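[Editorial sketch, not part of the patch] The stats hunks above switch the per-ring snapshot loops from the _bh to the _irq fetch variants; the reader pattern itself is unchanged. For reference, a minimal sketch of that retry loop over one ring (example_ring_snapshot is an illustrative helper, assuming the driver's ring/syncp definitions):

	static void example_ring_snapshot(struct ixgbe_ring *ring,
					  u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			*packets = ring->stats.packets;
			*bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}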
index f58db453a97edd8d6d6f7dfbccb2c6f8edd1f3d3..25a3dfef33e8946bfb4c8d511560a1df660f0d4d 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 
        switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
        /* return 0 to bypass going to ULD for DDPed data */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                rc = 0;
                break;
        /* unmap the sg list when FCPRSP is received */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                             ddp->sgc, DMA_FROM_DEVICE);
                ddp->err = ddp_err;
@@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                ddp->sgc = 0;
                /* fall through */
        /* if DDP length is present pass it through to ULD */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                if (ddp->len)
                        rc = ddp->len;
                break;
        /* no match will return as an error */
-       case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+       case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
        default:
                break;
        }
@@ -585,7 +586,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
        struct dma_pool *pool;
        char pool_name[32];
 
-       snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+       snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
 
        pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
                               IXGBE_FCPTR_ALIGN, PAGE_SIZE);
index 3a02759b5e95bccb7d7101a70067531ac5f43a93..b16cc786750dec8bb7bf29749db331a31f028ec9 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index 32e3eaaa160ac4c4864cae2e4e9025f56f3604ef..2067d392cc3d33850254e9a2d719a2db6491b6f5 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -698,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                                       int vectors)
 {
-       int err, vector_threshold;
+       int vector_threshold;
 
        /* We'll want at least 2 (vector_threshold):
         * 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
-       while (vectors >= vector_threshold) {
-               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                     vectors);
-               if (!err) /* Success in acquiring all requested vectors. */
-                       break;
-               else if (err < 0)
-                       vectors = 0; /* Nasty failure, quit now */
-               else /* err == number of vectors we should try again with */
-                       vectors = err;
-       }
+       vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                       vector_threshold, vectors);
 
-       if (vectors < vector_threshold) {
+       if (vectors < 0) {
                /* Can't allocate enough MSI-X interrupts?  Oh well.
                 * This just means we'll go with either a single MSI
                 * vector or fall back to legacy interrupts.
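[Editorial sketch, not part of the patch] The MSI-X hunk above replaces the open-coded retry loop with pci_enable_msix_range(), which returns either a vector count in [minvec, maxvec] or a negative errno, so a single sign check replaces the old while loop. A small sketch of that contract (example_enable_msix is an illustrative wrapper):

	static int example_enable_msix(struct ixgbe_adapter *adapter,
				       int min_vecs, int want_vecs)
	{
		int nvec = pci_enable_msix_range(adapter->pdev,
						 adapter->msix_entries,
						 min_vecs, want_vecs);

		/* nvec < 0: not even min_vecs available, fall back to
		 * MSI or legacy interrupts; otherwise min_vecs <= nvec
		 * <= want_vecs vectors were granted.
		 */
		return nvec;
	}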
index 18076c4178b4ff7762a117ef00595ab71173a10d..9e5a366124321afee3663fb06b1165b6e41f2649 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -67,7 +68,7 @@ static char ixgbe_default_device_descr[] =
 #define DRV_VERSION "3.19.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-                               "Copyright (c) 1999-2013 Intel Corporation.";
+                               "Copyright (c) 1999-2014 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
@@ -151,6 +152,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
 {
@@ -169,6 +172,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                return -1;
 
        pcie_capability_read_word(parent_dev, reg, value);
+       if (*value == IXGBE_FAILED_READ_CFG_WORD &&
+           ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
+               return -1;
        return 0;
 }
 
@@ -313,6 +319,57 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
                ixgbe_remove_adapter(hw);
 }
 
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
+{
+       u16 value;
+
+       pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+       if (value == IXGBE_FAILED_READ_CFG_WORD) {
+               ixgbe_remove_adapter(hw);
+               return true;
+       }
+       return false;
+}
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+       struct ixgbe_adapter *adapter = hw->back;
+       u16 value;
+
+       if (ixgbe_removed(hw->hw_addr))
+               return IXGBE_FAILED_READ_CFG_WORD;
+       pci_read_config_word(adapter->pdev, reg, &value);
+       if (value == IXGBE_FAILED_READ_CFG_WORD &&
+           ixgbe_check_cfg_remove(hw, adapter->pdev))
+               return IXGBE_FAILED_READ_CFG_WORD;
+       return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
+{
+       struct ixgbe_adapter *adapter = hw->back;
+       u32 value;
+
+       if (ixgbe_removed(hw->hw_addr))
+               return IXGBE_FAILED_READ_CFG_DWORD;
+       pci_read_config_dword(adapter->pdev, reg, &value);
+       if (value == IXGBE_FAILED_READ_CFG_DWORD &&
+           ixgbe_check_cfg_remove(hw, adapter->pdev))
+               return IXGBE_FAILED_READ_CFG_DWORD;
+       return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+       struct ixgbe_adapter *adapter = hw->back;
+
+       if (ixgbe_removed(hw->hw_addr))
+               return;
+       pci_write_config_word(adapter->pdev, reg, value);
+}
+
 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 {
        BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -1264,7 +1321,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
                                 struct sk_buff *skb)
 {
        if (ring->netdev->features & NETIF_F_RXHASH)
-               skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+               skb_set_hash(skb,
+                            le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+                            PKT_HASH_TYPE_L3);
 }
 
 #ifdef IXGBE_FCOE
@@ -1480,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
        hdr.network += ETH_HLEN;
 
        /* handle any vlan tag if present */
-       if (protocol == __constant_htons(ETH_P_8021Q)) {
+       if (protocol == htons(ETH_P_8021Q)) {
                if ((hdr.network - data) > (max_len - VLAN_HLEN))
                        return max_len;
 
@@ -1489,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
        }
 
        /* handle L3 protocols */
-       if (protocol == __constant_htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
                        return max_len;
 
@@ -1503,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                /* record next protocol if header is present */
                if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
                        nexthdr = hdr.ipv4->protocol;
-       } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+       } else if (protocol == htons(ETH_P_IPV6)) {
                if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
                        return max_len;
 
@@ -1511,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
                nexthdr = hdr.ipv6->nexthdr;
                hlen = sizeof(struct ipv6hdr);
 #ifdef IXGBE_FCOE
-       } else if (protocol == __constant_htons(ETH_P_FCOE)) {
+       } else if (protocol == htons(ETH_P_FCOE)) {
                if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
                        return max_len;
                hlen = FCOE_HEADER_LEN;
@@ -2026,7 +2085,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
-       do {
+       while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;
                struct sk_buff *skb;
 
@@ -2101,7 +2160,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                /* update budget accounting */
                total_rx_packets++;
-       } while (likely(total_rx_packets < budget));
+       }
 
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
@@ -2630,9 +2689,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               if (eicr & IXGBE_EICR_ECC)
-                       e_info(link, "Received unrecoverable ECC Err, please "
-                              "reboot\n");
+               if (eicr & IXGBE_EICR_ECC) {
+                       e_info(link, "Received ECC Err, initiating reset\n");
+                       adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+                       ixgbe_service_event_schedule(adapter);
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               }
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int reinit_count = 0;
@@ -2846,9 +2908,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
                ixgbe_check_sfp_event(adapter, eicr);
                /* Fall through */
        case ixgbe_mac_X540:
-               if (eicr & IXGBE_EICR_ECC)
-                       e_info(link, "Received unrecoverable ECC err, please "
-                                    "reboot\n");
+               if (eicr & IXGBE_EICR_ECC) {
+                       e_info(link, "Received ECC Err, initiating reset\n");
+                       adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+                       ixgbe_service_event_schedule(adapter);
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               }
                ixgbe_check_overtemp_event(adapter, eicr);
                break;
        default:
@@ -4590,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       struct net_device *upper;
-       struct list_head *iter;
        int err;
        u32 ctrl_ext;
 
@@ -4633,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
                        e_crit(drv, "Fan has stopped, replace the adapter\n");
        }
 
-       /* enable transmits */
-       netif_tx_start_all_queues(adapter->netdev);
-
-       /* enable any upper devices */
-       netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
-               if (netif_is_macvlan(upper)) {
-                       struct macvlan_dev *vlan = netdev_priv(upper);
-
-                       if (vlan->fwd_priv)
-                               netif_tx_start_all_queues(upper);
-               }
-       }
-
        /* bring the link up in the watchdog, this could race with our first
         * link up interrupt but shouldn't be a problem */
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5502,6 +5552,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
        struct net_device *netdev = adapter->netdev;
        u32 err;
 
+       adapter->hw.hw_addr = adapter->io_addr;
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        /*
@@ -6016,6 +6067,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
+       struct net_device *upper;
+       struct list_head *iter;
        u32 link_speed = adapter->link_speed;
        bool flow_rx, flow_tx;
 
@@ -6067,6 +6120,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
        netif_carrier_on(netdev);
        ixgbe_check_vf_rate_limit(adapter);
 
+       /* enable transmits */
+       netif_tx_wake_all_queues(adapter->netdev);
+
+       /* enable any upper devices */
+       rtnl_lock();
+       netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+               if (netif_is_macvlan(upper)) {
+                       struct macvlan_dev *vlan = netdev_priv(upper);
+
+                       if (vlan->fwd_priv)
+                               netif_tx_wake_all_queues(upper);
+               }
+       }
+       rtnl_unlock();
+
        /* update the default user priority for VFs */
        ixgbe_update_default_up(adapter);
 
@@ -6454,7 +6522,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (first->protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -6514,12 +6582,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
        } else {
                u8 l4_hdr = 0;
                switch (first->protocol) {
-               case __constant_htons(ETH_P_IP):
+               case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
                        l4_hdr = ip_hdr(skb)->protocol;
                        break;
-               case __constant_htons(ETH_P_IPV6):
+               case htons(ETH_P_IPV6):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        l4_hdr = ipv6_hdr(skb)->nexthdr;
                        break;
@@ -6794,9 +6862,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
        hdr.network = skb_network_header(first->skb);
 
        /* Currently only IPv4/IPv6 with TCP is supported */
-       if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+       if ((first->protocol != htons(ETH_P_IPV6) ||
             hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-           (first->protocol != __constant_htons(ETH_P_IP) ||
+           (first->protocol != htons(ETH_P_IP) ||
             hdr.ipv4->protocol != IPPROTO_TCP))
                return;
 
@@ -6829,12 +6897,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
         * and write the value to source port portion of compressed dword
         */
        if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
-               common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+               common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
        else
                common.port.src ^= th->dest ^ first->protocol;
        common.port.dst ^= th->source;
 
-       if (first->protocol == __constant_htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
                common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
        } else {
@@ -6900,8 +6968,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
         * or FIP and we have FCoE enabled on the adapter
         */
        switch (vlan_get_protocol(skb)) {
-       case __constant_htons(ETH_P_FCOE):
-       case __constant_htons(ETH_P_FIP):
+       case htons(ETH_P_FCOE):
+       case htons(ETH_P_FIP):
                adapter = netdev_priv(dev);
 
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
@@ -6962,7 +7030,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN check the next protocol and store the tag */
-       } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+       } else if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_hdr *vhdr, _vhdr;
                vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
                if (!vhdr)
@@ -7021,7 +7089,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 
 #ifdef IXGBE_FCOE
        /* setup tx offload for FCoE */
-       if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+       if ((protocol == htons(ETH_P_FCOE)) &&
            (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
                tso = ixgbe_fso(tx_ring, first, &hdr_len);
                if (tso < 0)
@@ -7143,7 +7211,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 
        switch (cmd) {
        case SIOCSHWTSTAMP:
-               return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+               return ixgbe_ptp_set_ts_config(adapter, req);
+       case SIOCGHWTSTAMP:
+               return ixgbe_ptp_get_ts_config(adapter, req);
        default:
                return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
        }
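
For context, the SIOCGHWTSTAMP branch added here exposes the saved timestamping configuration through the standard Linux net_tstamp ABI. A self-contained user-space sketch of querying it (the program and its interface argument are illustrative, not part of this patch):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* Ask the driver for its current hardware timestamping config */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) < 0) {
		perror("SIOCGHWTSTAMP");
		return 1;
	}

	printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}
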
@@ -7234,10 +7304,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
                if (ring) {
                        do {
-                               start = u64_stats_fetch_begin_bh(&ring->syncp);
+                               start = u64_stats_fetch_begin_irq(&ring->syncp);
                                packets = ring->stats.packets;
                                bytes   = ring->stats.bytes;
-                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+                       } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                        stats->rx_packets += packets;
                        stats->rx_bytes   += bytes;
                }
@@ -7250,10 +7320,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
                if (ring) {
                        do {
-                               start = u64_stats_fetch_begin_bh(&ring->syncp);
+                               start = u64_stats_fetch_begin_irq(&ring->syncp);
                                packets = ring->stats.packets;
                                bytes   = ring->stats.bytes;
-                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+                       } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                        stats->tx_packets += packets;
                        stats->tx_bytes   += bytes;
                }
@@ -7792,6 +7862,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
        case IXGBE_DEV_ID_82599_SFP:
                /* Only these subdevices could support WOL */
                switch (subdevice_id) {
+               case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
                case IXGBE_SUBDEV_ID_82599_560FLR:
                        /* only support first port */
                        if (hw->bus.func != 0)
@@ -7969,10 +8040,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
-       /* Cache if MNG FW is up so we don't have to read the REG later */
-       if (hw->mac.ops.mng_fw_enabled)
-               hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
-
        /* Make it possible for the adapter to be woken up via WOL */
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82599EB:
@@ -8223,7 +8290,7 @@ skip_sriov:
        ixgbe_dbg_adapter_init(adapter);
 
        /* Need link setup for MNG FW, else wait for IXGBE_UP */
-       if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+       if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
                hw->mac.ops.setup_link(hw,
                        IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
                        true);
@@ -8331,6 +8398,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
        struct net_device *netdev = adapter->netdev;
 
 #ifdef CONFIG_PCI_IOV
+       struct ixgbe_hw *hw = &adapter->hw;
        struct pci_dev *bdev, *vfdev;
        u32 dw0, dw1, dw2, dw3;
        int vf, pos;
@@ -8351,10 +8419,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
        if (!pos)
                goto skip_bad_vf_detection;
 
-       pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
-       pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
-       pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
-       pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+       dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+       dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+       dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+       dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+       if (ixgbe_removed(hw->hw_addr))
+               goto skip_bad_vf_detection;
 
        req_id = dw1 >> 16;
        /* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8446,6 +8516,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
                e_err(probe, "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
+               adapter->hw.hw_addr = adapter->io_addr;
                pci_set_master(pdev);
                pci_restore_state(pdev);
                pci_save_state(pdev);
index cc3101afd29fe8745d0cd870271b3f5246ee88fa..f5c6af2b891bd3dc797e7c4f5640b006eac78810 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index e44ff47659b50b733cce7a17ce7a06ba010d7ce8..a9b9ad69ed0ec315d22d38c92eb1885c2900c9c3 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index 132557c318f810b5f8bce65b4b0f2ebac613fdfd..ad51c12cb26a63fc3a8975e7527450e675f4133c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -97,6 +98,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
        return status;
 }
 
+/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability.  For MACs that don't
+ * have this bit, just return false since the link cannot be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+       u32 mmngc;
+
+       /* If we don't have this bit, it can't be blocking */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               return false;
+
+       mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+       if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+               hw_dbg(hw, "MNG_VETO bit detected.\n");
+               return true;
+       }
+
+       return false;
+}
+
 /**
  *  ixgbe_get_phy_id - Get the phy type
  *  @hw: pointer to hardware structure
@@ -172,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
            (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
                goto out;
 
+       /* Blocked by MNG FW so bail */
+       if (ixgbe_check_reset_blocked(hw))
+               goto out;
+
        /*
         * Perform soft PHY reset to the PHY_XS.
         * This will cause a soft reset to the PHY
@@ -476,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
                                      autoneg_reg);
        }
 
+       /* Blocked by MNG FW so don't reset PHY */
+       if (ixgbe_check_reset_blocked(hw))
+               return status;
+
        /* Restart PHY autonegotiation and wait for completion */
        hw->phy.ops.read_reg(hw, MDIO_CTRL1,
                             MDIO_MMD_AN, &autoneg_reg);
@@ -682,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
                                      autoneg_reg);
        }
 
+       /* Blocked by MNG FW so don't reset PHY */
+       if (ixgbe_check_reset_blocked(hw))
+               return status;
+
        /* Restart PHY autonegotiation and wait for completion */
        hw->phy.ops.read_reg(hw, MDIO_CTRL1,
                             MDIO_MMD_AN, &autoneg_reg);
@@ -759,6 +798,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
        s32 ret_val = 0;
        u32 i;
 
+       /* Blocked by MNG FW so bail */
+       if (ixgbe_check_reset_blocked(hw))
+               goto out;
+
        hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
 
        /* reset the PHY and poll for completion */
index fffcbdd2bf0e49ab129abf04cd16a650a4d5cb91..4a456c974ef2c9d371f1528a5cde1d92c4bd9cfb 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -65,9 +66,6 @@
 #define IXGBE_SFF_1GBASET_CAPABLE              0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE            0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE            0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK          0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G           0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G            0x0
 #define IXGBE_SFF_ADDRESSING_MODE              0x4
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE         0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE                0x8
@@ -79,7 +77,6 @@
 #define IXGBE_I2C_EEPROM_STATUS_PASS           0x1
 #define IXGBE_I2C_EEPROM_STATUS_FAIL           0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS    0x3
-
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
 #define IXGBE_TAF_ASM_PAUSE                  0x800
@@ -131,6 +128,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
                                                ixgbe_link_speed *speed,
                                                bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
index 5184e2a1a7d8249bc746ce6496e55f5789a0e949..44ac9aef6a8d51d0371368525c4038e5faa0b2e1 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -576,14 +577,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
        shhwtstamps->hwtstamp = ns_to_ktime(ns);
 }
 
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+       struct hwtstamp_config *config = &adapter->tstamp_config;
+
+       return copy_to_user(ifr->ifr_data, config,
+                           sizeof(*config)) ? -EFAULT : 0;
+}
+
 /**
- * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * ixgbe_ptp_set_ts_config - control hardware time stamping
  * @adapter: pointer to adapter struct
  * @ifreq: ioctl data
- * @cmd: particular ioctl requested
  *
  * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
+ * disable it when requested, although it shouldn't cause any overhead
  * when no packet needs it. At most one packet in the queue may be
  * marked for time stamping, otherwise it would be impossible to tell
  * for sure to which packet the hardware time stamp belongs.
@@ -599,8 +607,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
  * Event mode. This more accurately tells the user what the hardware is going
  * to do anyways.
  */
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
-                            struct ifreq *ifr, int cmd)
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct hwtstamp_config config;
@@ -702,6 +709,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
        regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
        regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
 
+       /* save these settings for future reference */
+       memcpy(&adapter->tstamp_config, &config,
+              sizeof(adapter->tstamp_config));
+
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
 }
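
On the set side, a matching user-space sketch using SIOCSHWTSTAMP; the "eth0" name is a placeholder, and per the comment above the driver may write back a broader filter (for example V2 Event mode) than the one requested:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}

	/* The kernel writes back the config it actually programmed */
	printf("granted rx_filter=%d\n", cfg.rx_filter);
	return 0;
}
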
@@ -809,6 +820,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
        IXGBE_WRITE_FLUSH(hw);
 
+       /* Reset the saved tstamp_config */
+       memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
        ixgbe_ptp_start_cyclecounter(adapter);
 
        spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -840,7 +854,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_X540:
-               snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+               snprintf(adapter->ptp_caps.name,
+                        sizeof(adapter->ptp_caps.name),
+                        "%s", netdev->name);
                adapter->ptp_caps.owner = THIS_MODULE;
                adapter->ptp_caps.max_adj = 250000000;
                adapter->ptp_caps.n_alarm = 0;
@@ -854,7 +870,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
                adapter->ptp_caps.enable = ixgbe_ptp_enable;
                break;
        case ixgbe_mac_82599EB:
-               snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+               snprintf(adapter->ptp_caps.name,
+                        sizeof(adapter->ptp_caps.name),
+                        "%s", netdev->name);
                adapter->ptp_caps.owner = THIS_MODULE;
                adapter->ptp_caps.max_adj = 250000000;
                adapter->ptp_caps.n_alarm = 0;
index dff0977876f75448ee67ef6e1305bdc8b5e8be22..e6c68d396c992fffb329a1cca4daadb47169faab 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index 8bd29190514e4a7be7fd498cb28a0bef1336d082..139eaddfb2ed5c8c14a3891137a313b4fcaa3a6a 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index e74ae3682733df328b8b9f85631da133778bc9ee..ef6df3d6437e3102224adef7033a3b26f058ef7e 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
index 0d39cfc4a3bf2aa84294b4a12cf91b4a3a5353da..8a6ff2423f076974d1c3c408b97c497d00bdc277 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -54,6 +55,7 @@
 #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a
 #define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
 #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0   0x1071
 #define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
 #define IXGBE_SUBDEV_ID_82599_SP_560FLR  0x211B
@@ -1609,6 +1611,9 @@ enum {
 #define IXGBE_MACC_FS        0x00040000
 #define IXGBE_MAC_RX2TX_LPBK 0x00000002
 
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO  0x00000001
+
 /* LINKS Bit Masks */
 #define IXGBE_LINKS_KX_AN_COMP  0x80000000
 #define IXGBE_LINKS_UP          0x40000000
@@ -1788,6 +1793,9 @@ enum {
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
 
+#define IXGBE_EEPROM_CTRL_2    1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT   2 /* EEPROM Core Clock Disable bit */
+
 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
 #define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
 #endif
@@ -1853,8 +1861,19 @@ enum {
 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
 #define IXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005
 
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK   0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def  0x0
+#define IXGBE_PCIDEVCTRL2_50_100us     0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms                0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms      0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms     0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms    0x9
+#define IXGBE_PCIDEVCTRL2_1_2s         0xa
+#define IXGBE_PCIDEVCTRL2_4_8s         0xd
+#define IXGBE_PCIDEVCTRL2_17_34s       0xe
+
 /* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT       800
 
 /* RAH */
 #define IXGBE_RAH_VIND_MASK     0x003C0000
@@ -2645,7 +2664,6 @@ enum ixgbe_sfp_type {
 enum ixgbe_media_type {
        ixgbe_media_type_unknown = 0,
        ixgbe_media_type_fiber,
-       ixgbe_media_type_fiber_fixed,
        ixgbe_media_type_fiber_qsfp,
        ixgbe_media_type_fiber_lco,
        ixgbe_media_type_copper,
@@ -2858,6 +2876,8 @@ struct ixgbe_mac_operations {
        s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
        s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
        void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+       s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+       s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
 
        /* Link */
        void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2901,7 +2921,6 @@ struct ixgbe_mac_operations {
        s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
        s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
        s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
-       bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_phy_operations {
@@ -2957,7 +2976,6 @@ struct ixgbe_mac_info {
        u32                             max_tx_queues;
        u32                             max_rx_queues;
        u32                             orig_autoc;
-       u32                             cached_autoc;
        u32                             orig_autoc2;
        bool                            orig_link_settings_stored;
        bool                            autotry_restart;
@@ -3033,7 +3051,6 @@ struct ixgbe_hw {
        bool                            adapter_stopped;
        bool                            force_full_reset;
        bool                            allow_unsupported_sfp;
-       bool                            mng_fw_enabled;
        bool                            wol_enabled;
 };
 
index 24b80a6cfca4ec593f7bed4bf2e1423213125d90..188a5974b85c41f7b8279b6128f72f749e845f76 100644 (file)
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
        mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
        mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
        mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+       mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
        mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
        mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
        mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
                goto out;
 
        ret_val = ixgbe_start_hw_gen2(hw);
-       hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
 out:
        return ret_val;
 }
@@ -854,7 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .enable_rx_buff         = &ixgbe_enable_rx_buff_generic,
        .get_thermal_sensor_data = NULL,
        .init_thermal_sensor_thresh = NULL,
-       .mng_fw_enabled         = NULL,
+       .prot_autoc_read        = &prot_autoc_read_generic,
+       .prot_autoc_write       = &prot_autoc_write_generic,
 };
 
 static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
index f68b78c732a8626c3c011a6d83329c3779fccc51..b2d002394e5d6a8eaa5a78bf9ffdab5c0a86083f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -530,41 +530,55 @@ static const u32 register_test_patterns[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
 };
 
-#define REG_PATTERN_TEST(R, M, W)                                             \
-{                                                                             \
-       u32 pat, val, before;                                                 \
-       for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {      \
-               before = readl(adapter->hw.hw_addr + R);                      \
-               writel((register_test_patterns[pat] & W),                     \
-                      (adapter->hw.hw_addr + R));                            \
-               val = readl(adapter->hw.hw_addr + R);                         \
-               if (val != (register_test_patterns[pat] & W & M)) {           \
-                       hw_dbg(&adapter->hw,                                  \
-                       "pattern test reg %04X failed: got "                  \
-                       "0x%08X expected 0x%08X\n",                           \
-                       R, val, (register_test_patterns[pat] & W & M));       \
-                       *data = R;                                            \
-                       writel(before, adapter->hw.hw_addr + R);              \
-                       return 1;                                             \
-               }                                                             \
-               writel(before, adapter->hw.hw_addr + R);                      \
-       }                                                                     \
+static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
+                            int reg, u32 mask, u32 write)
+{
+       u32 pat, val, before;
+
+       if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+               *data = 1;
+               return true;
+       }
+       for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
+               before = ixgbe_read_reg(&adapter->hw, reg);
+               ixgbe_write_reg(&adapter->hw, reg,
+                               register_test_patterns[pat] & write);
+               val = ixgbe_read_reg(&adapter->hw, reg);
+               if (val != (register_test_patterns[pat] & write & mask)) {
+                       hw_dbg(&adapter->hw,
+                              "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
+                              reg, val,
+                              register_test_patterns[pat] & write & mask);
+                       *data = reg;
+                       ixgbe_write_reg(&adapter->hw, reg, before);
+                       return true;
+               }
+               ixgbe_write_reg(&adapter->hw, reg, before);
+       }
+       return false;
 }
 
-#define REG_SET_AND_CHECK(R, M, W)                                            \
-{                                                                             \
-       u32 val, before;                                                      \
-       before = readl(adapter->hw.hw_addr + R);                              \
-       writel((W & M), (adapter->hw.hw_addr + R));                           \
-       val = readl(adapter->hw.hw_addr + R);                                 \
-       if ((W & M) != (val & M)) {                                           \
-               pr_err("set/check reg %04X test failed: got 0x%08X expected " \
-                      "0x%08X\n", R, (val & M), (W & M));                    \
-               *data = R;                                                    \
-               writel(before, (adapter->hw.hw_addr + R));                    \
-               return 1;                                                     \
-       }                                                                     \
-       writel(before, (adapter->hw.hw_addr + R));                            \
+static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
+                             int reg, u32 mask, u32 write)
+{
+       u32 val, before;
+
+       if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+               *data = 1;
+               return true;
+       }
+       before = ixgbe_read_reg(&adapter->hw, reg);
+       ixgbe_write_reg(&adapter->hw, reg, write & mask);
+       val = ixgbe_read_reg(&adapter->hw, reg);
+       if ((write & mask) != (val & mask)) {
+               pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+                      reg, (val & mask), write & mask);
+               *data = reg;
+               ixgbe_write_reg(&adapter->hw, reg, before);
+               return true;
+       }
+       ixgbe_write_reg(&adapter->hw, reg, before);
+       return false;
 }
 
 static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
@@ -572,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
        const struct ixgbevf_reg_test *test;
        u32 i;
 
+       if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+               dev_err(&adapter->pdev->dev,
+                       "Adapter removed - register test blocked\n");
+               *data = 1;
+               return 1;
+       }
        test = reg_test_vf;
 
        /*
@@ -580,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
         */
        while (test->reg) {
                for (i = 0; i < test->array_len; i++) {
+                       bool b = false;
+
                        switch (test->test_type) {
                        case PATTERN_TEST:
-                               REG_PATTERN_TEST(test->reg + (i * 0x40),
-                                               test->mask,
-                                               test->write);
+                               b = reg_pattern_test(adapter, data,
+                                                    test->reg + (i * 0x40),
+                                                    test->mask,
+                                                    test->write);
                                break;
                        case SET_READ_TEST:
-                               REG_SET_AND_CHECK(test->reg + (i * 0x40),
-                                               test->mask,
-                                               test->write);
+                               b = reg_set_and_check(adapter, data,
+                                                     test->reg + (i * 0x40),
+                                                     test->mask,
+                                                     test->write);
                                break;
                        case WRITE_NO_TEST:
-                               writel(test->write,
-                                      (adapter->hw.hw_addr + test->reg)
-                                      + (i * 0x40));
+                               ixgbe_write_reg(&adapter->hw,
+                                                 test->reg + (i * 0x40),
+                                                 test->write);
                                break;
                        case TABLE32_TEST:
-                               REG_PATTERN_TEST(test->reg + (i * 4),
-                                               test->mask,
-                                               test->write);
+                               b = reg_pattern_test(adapter, data,
+                                                    test->reg + (i * 4),
+                                                    test->mask,
+                                                    test->write);
                                break;
                        case TABLE64_TEST_LO:
-                               REG_PATTERN_TEST(test->reg + (i * 8),
-                                               test->mask,
-                                               test->write);
+                               b = reg_pattern_test(adapter, data,
+                                                    test->reg + (i * 8),
+                                                    test->mask,
+                                                    test->write);
                                break;
                        case TABLE64_TEST_HI:
-                               REG_PATTERN_TEST((test->reg + 4) + (i * 8),
-                                               test->mask,
-                                               test->write);
+                               b = reg_pattern_test(adapter, data,
+                                                    test->reg + 4 + (i * 8),
+                                                    test->mask,
+                                                    test->write);
                                break;
                        }
+                       if (b)
+                               return 1;
                }
                test++;
        }
@@ -626,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev,
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);
 
+       if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
+               dev_err(&adapter->pdev->dev,
+                       "Adapter removed - test blocked\n");
+               data[0] = 1;
+               data[1] = 1;
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+               return;
+       }
        set_bit(__IXGBEVF_TESTING, &adapter->state);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                /* Offline tests */
index 54829326bb09fc7196f78a4b5d9758b65c45f1e6..a08bd7c46766dd84253a116748b29086bda6a1d8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -315,6 +315,11 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
+static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
+{
+       writel(value, ring->tail);
+}
+
 #define IXGBEVF_RX_DESC(R, i)      \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
 #define IXGBEVF_TX_DESC(R, i)      \
@@ -401,6 +406,7 @@ struct ixgbevf_adapter {
        u64 bp_tx_missed;
 #endif
 
+       u8 __iomem *io_addr; /* Mainly for iounmap use */
        u32 link_speed;
        bool link_up;
 
@@ -412,7 +418,8 @@ struct ixgbevf_adapter {
 enum ixbgevf_state_t {
        __IXGBEVF_TESTING,
        __IXGBEVF_RESETTING,
-       __IXGBEVF_DOWN
+       __IXGBEVF_DOWN,
+       __IXGBEVF_REMOVING,
 };
 
 struct ixgbevf_cb {
index 9df28985eba7cf45acfcfc232520915854279151..a50e892a5d21734de8d8f733d19693d98c99a257 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -99,6 +99,49 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
 
+static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
+{
+       struct ixgbevf_adapter *adapter = hw->back;
+
+       if (!hw->hw_addr)
+               return;
+       hw->hw_addr = NULL;
+       dev_err(&adapter->pdev->dev, "Adapter removed\n");
+       schedule_work(&adapter->watchdog_task);
+}
+
+static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
+{
+       u32 value;
+
+       /* The following check not only optimizes a bit by not
+        * performing a read on the status register when the
+        * register just read was a status register read that
+        * returned IXGBE_FAILED_READ_REG. It also blocks any
+        * returned IXGBE_FAILED_READ_REG; it also blocks any
+        */
+       if (reg == IXGBE_VFSTATUS) {
+               ixgbevf_remove_adapter(hw);
+               return;
+       }
+       value = ixgbe_read_reg(hw, IXGBE_VFSTATUS);
+       if (value == IXGBE_FAILED_READ_REG)
+               ixgbevf_remove_adapter(hw);
+}
+
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+       u32 value;
+
+       if (IXGBE_REMOVED(reg_addr))
+               return IXGBE_FAILED_READ_REG;
+       value = readl(reg_addr + reg);
+       if (unlikely(value == IXGBE_FAILED_READ_REG))
+               ixgbevf_check_remove(hw, reg);
+       return value;
+}
+
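
A minimal caller-side sketch of the convention this helper establishes, using the register macros and defines added later in this patch; the surrounding function is hypothetical:

static void example_check_ring(struct ixgbe_hw *hw, u8 reg_idx)
{
	u32 rxdctl;

	/* A prior failed read may already have cleared hw_addr */
	if (IXGBE_REMOVED(hw->hw_addr))
		return;

	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	if (rxdctl == IXGBE_FAILED_READ_REG)
		return;	/* all-ones read: the adapter is likely gone */

	/* ... otherwise act on rxdctl as usual ... */
}
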
 static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
                                           u32 val)
 {
@@ -111,7 +154,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
         * such as IA-64).
         */
        wmb();
-       writel(val, rx_ring->tail);
+       ixgbevf_write_tail(rx_ring, val);
 }
 
 /**
@@ -516,7 +559,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                /* Workaround hardware that can't do proper VEPA multicast
                 * source pruning.
                 */
-               if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+               if ((skb->pkt_type == PACKET_BROADCAST ||
+                   skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
@@ -607,7 +651,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
-       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+           !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);
 
@@ -832,7 +877,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 
        hw->mac.get_link_status = 1;
 
-       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+           !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1136,7 +1182,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
-       ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx);
+       ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
 
        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
@@ -1256,6 +1302,8 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;
 
+       if (IXGBE_REMOVED(hw->hw_addr))
+               return;
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;
 
@@ -1281,6 +1329,8 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;
 
+       if (IXGBE_REMOVED(hw->hw_addr))
+               return;
        do {
                usleep_range(1000, 2000);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1315,7 +1365,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
-       ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx);
+       ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
 
        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
@@ -1617,6 +1667,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
        spin_unlock_bh(&adapter->mbx_lock);
 
+       smp_mb__before_clear_bit();
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);
 
@@ -1741,7 +1792,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
        int i;
 
        /* signal that we are down to the interrupt handler */
-       set_bit(__IXGBEVF_DOWN, &adapter->state);
+       if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
+               return; /* do nothing if already down */
 
        /* disable all enabled rx queues */
        for (i = 0; i < adapter->num_rx_queues; i++)
@@ -1817,7 +1869,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                                        int vectors)
 {
-       int err = 0;
        int vector_threshold;
 
        /* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1882,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
-       while (vectors >= vector_threshold) {
-               err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-                                     vectors);
-               if (!err || err < 0) /* Success or a nasty failure. */
-                       break;
-               else /* err == number of vectors we should try again with */
-                       vectors = err;
-       }
-
-       if (vectors < vector_threshold)
-               err = -ENOMEM;
+       vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+                                       vector_threshold, vectors);
 
-       if (err) {
+       if (vectors < 0) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-       } else {
-               /*
-                * Adjust for only the vectors we'll use, which is minimum
-                * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
-                * vectors we were allocated.
-                */
-               adapter->num_msix_vectors = vectors;
+               return vectors;
        }
 
-       return err;
+       /* Adjust for only the vectors we'll use, which is minimum
+        * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+        * vectors we were allocated.
+        */
+       adapter->num_msix_vectors = vectors;
+
+       return 0;
 }
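
For reference, a minimal sketch of the pci_enable_msix_range() calling convention relied on above; this is a fragment in driver context, and the array size and variable names are purely illustrative:

	struct msix_entry entries[8];	/* size chosen for illustration */
	int i, vectors;

	for (i = 0; i < 8; i++)
		entries[i].entry = i;

	/* Request up to 8 vectors but accept no fewer than 2; a positive
	 * return is the number actually enabled, a negative one an errno.
	 */
	vectors = pci_enable_msix_range(pdev, entries, 2, 8);
	if (vectors < 0)
		return vectors;

	/* entries[0..vectors-1].vector now hold the allocated IRQ numbers */
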
 
 /**
@@ -2338,6 +2380,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
 
        /* If we're already down or resetting, just bail */
        if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+           test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
            test_bit(__IXGBEVF_RESETTING, &adapter->state))
                return;
 
@@ -2361,6 +2404,14 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
        bool link_up = adapter->link_up;
        s32 need_reset;
 
+       if (IXGBE_REMOVED(hw->hw_addr)) {
+               if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+                       rtnl_lock();
+                       ixgbevf_down(adapter);
+                       rtnl_unlock();
+               }
+               return;
+       }
        ixgbevf_queue_reset_subtask(adapter);
 
        adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2422,7 +2473,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 
 pf_has_reset:
        /* Reset the timer */
-       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+           !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer,
                          round_jiffies(jiffies + (2 * HZ)));
 
@@ -2787,6 +2839,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        u32 vlan_macip_lens, type_tucmd;
        u32 mss_l4len_idx, l4len;
 
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
        if (!skb_is_gso(skb))
                return 0;
 
@@ -2857,12 +2912,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;
                switch (skb->protocol) {
-               case __constant_htons(ETH_P_IP):
+               case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
                        l4_hdr = ip_hdr(skb)->protocol;
                        break;
-               case __constant_htons(ETH_P_IPV6):
+               case htons(ETH_P_IPV6):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        l4_hdr = ipv6_hdr(skb)->nexthdr;
                        break;
@@ -3060,7 +3115,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
        tx_ring->next_to_use = i;
 
        /* notify HW of packet */
-       writel(i, tx_ring->tail);
+       ixgbevf_write_tail(tx_ring, i);
 
        return;
 dma_error:
@@ -3165,7 +3220,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        tso = ixgbevf_tso(tx_ring, first, &hdr_len);
        if (tso < 0)
                goto out_drop;
-       else
+       else if (!tso)
                ixgbevf_tx_csum(tx_ring, first);
 
        ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -3286,7 +3341,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 err;
 
-       pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        /*
         * pci_restore_state clears dev->state_saved so call
@@ -3344,10 +3398,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        for (i = 0; i < adapter->num_rx_queues; i++) {
                ring = adapter->rx_ring[i];
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
                        bytes = ring->stats.bytes;
                        packets = ring->stats.packets;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                stats->rx_bytes += bytes;
                stats->rx_packets += packets;
        }
@@ -3355,10 +3409,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
        for (i = 0; i < adapter->num_tx_queues; i++) {
                ring = adapter->tx_ring[i];
                do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
+                       start = u64_stats_fetch_begin_irq(&ring->syncp);
                        bytes = ring->stats.bytes;
                        packets = ring->stats.packets;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                stats->tx_bytes += bytes;
                stats->tx_packets += packets;
        }
@@ -3460,6 +3514,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
+       adapter->io_addr = hw->hw_addr;
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
@@ -3545,7 +3600,7 @@ err_register:
        ixgbevf_clear_interrupt_scheme(adapter);
 err_sw_init:
        ixgbevf_reset_interrupt_capability(adapter);
-       iounmap(hw->hw_addr);
+       iounmap(adapter->io_addr);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
@@ -3570,7 +3625,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       set_bit(__IXGBEVF_DOWN, &adapter->state);
+       set_bit(__IXGBEVF_REMOVING, &adapter->state);
 
        del_timer_sync(&adapter->watchdog_timer);
 
@@ -3583,7 +3638,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
        ixgbevf_clear_interrupt_scheme(adapter);
        ixgbevf_reset_interrupt_capability(adapter);
 
-       iounmap(adapter->hw.hw_addr);
+       iounmap(adapter->io_addr);
        pci_release_regions(pdev);
 
        hw_dbg(&adapter->hw, "Remove complete\n");
index debd8c0e1f28df74db51c04809beeff3f2a9855c..09dd8f698beacc4b91979f149354352b586ffe74 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 #define IXGBE_VFGOTC_MSB       0x02024
 #define IXGBE_VFMPRC           0x01034
 
-#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-
-#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
-
-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
-    writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
-
-#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
-    readl((a)->hw_addr + (reg) + ((offset) << 2)))
-
 #define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
 
 #endif /* _IXGBEVF_REGS_H_ */
index 7b1f502d171606366c06c767a65eb7ead6add2aa..096d33a59def24fce47028314dd406c8ba97436f 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2012 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -172,6 +172,37 @@ struct ixgbevf_info {
        const struct ixgbe_mac_operations *mac_ops;
 };
 
+#define IXGBE_FAILED_READ_REG 0xffffffffU
+
+#define IXGBE_REMOVED(a) unlikely(!(a))
+
+static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
+{
+       u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+
+       if (IXGBE_REMOVED(reg_addr))
+               return;
+       writel(value, reg_addr + reg);
+}
+#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
+
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
+#define IXGBE_READ_REG(h, r) ixgbe_read_reg(h, r)
+
+static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
+                                         u32 offset, u32 value)
+{
+       ixgbe_write_reg(hw, reg + (offset << 2), value);
+}
+#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
+
+static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
+                                       u32 offset)
+{
+       return ixgbe_read_reg(hw, reg + (offset << 2));
+}
+#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
+
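
The guarded accessors above can be modelled outside the kernel; a self-contained, runnable user-space sketch of the same pattern, where a NULL base pointer stands in for a surprise-removed device (all names below are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define FAILED_READ 0xffffffffU

struct fake_hw {
	volatile uint32_t *base;	/* NULL once the device is "removed" */
};

static uint32_t guarded_read(struct fake_hw *hw, unsigned int reg)
{
	volatile uint32_t *base = hw->base;

	if (!base)			/* removed: fail the read */
		return FAILED_READ;
	return base[reg / 4];
}

int main(void)
{
	uint32_t regs[4] = { 0x1234, 0, 0, 0 };
	struct fake_hw hw = { .base = regs };

	printf("reg0 = 0x%08x\n", (unsigned)guarded_read(&hw, 0));
	hw.base = NULL;			/* simulate surprise removal */
	printf("reg0 = 0x%08x\n", (unsigned)guarded_read(&hw, 0));
	return 0;
}
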
 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
 int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
index f5685c0d057911c6c2f59dd982f21f869d1b978c..b0c6050479eb460ae306cccaa93926f738e64c2e 100644 (file)
@@ -2053,19 +2053,6 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
 
 }
 
-static int
-jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
-{
-       if (unlikely(skb_shinfo(skb)->gso_size &&
-                       skb_header_cloned(skb) &&
-                       pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
-               dev_kfree_skb(skb);
-               return -1;
-       }
-
-       return 0;
-}
-
 static int
 jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
 {
@@ -2225,7 +2212,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        struct jme_adapter *jme = netdev_priv(netdev);
        int idx;
 
-       if (unlikely(jme_expand_header(jme, skb))) {
+       if (unlikely(skb_is_gso(skb) && skb_cow_head(skb, 0))) {
+               dev_kfree_skb_any(skb);
                ++(NET_STAT(jme).tx_dropped);
                return NETDEV_TX_OK;
        }
index a2565ce22b7c9af32d6c34ac011f468e36153296..b7b8d74c22d9c6f7e7f9aaa7c9211722ec5929fd 100644 (file)
@@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
                    unlikely(tag_bytes & ~12)) {
                        if (skb_checksum_help(skb) == 0)
                                goto no_csum;
-                       kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        return 1;
                }
 
@@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
                if (net_ratelimit())
                        netdev_err(dev, "tx queue full?!\n");
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index fd409d76b811487673c1cf038d9572c5f507f62a..b161a525fc5bd8accb44b002b64776f05d8d0319 100644 (file)
@@ -167,11 +167,6 @@ out:
        return ret;
 }
 
-static int orion_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
 {
        struct orion_mdio_dev *dev = dev_id;
@@ -209,7 +204,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
        bus->name = "orion_mdio_bus";
        bus->read = orion_mdio_read;
        bus->write = orion_mdio_write;
-       bus->reset = orion_mdio_reset;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
                 dev_name(&pdev->dev));
        bus->parent = &pdev->dev;
index 8d76fca7fde75085da8364a621e97e332f1e23bc..d04b1c3c9b85ba8ce23e52cf3fb85b2a9b1a897d 100644 (file)
@@ -510,12 +510,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
 
                cpu_stats = per_cpu_ptr(pp->stats, cpu);
                do {
-                       start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes   = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes   = cpu_stats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
                stats->rx_packets += rx_packets;
                stats->rx_bytes   += rx_bytes;
@@ -2761,7 +2761,6 @@ static int mvneta_probe(struct platform_device *pdev)
        const char *mac_from;
        int phy_mode;
        int err;
-       int cpu;
 
        /* Our multiqueue support is not complete, so for now, only
         * allow the usage of the first RX queue
@@ -2816,30 +2815,19 @@ static int mvneta_probe(struct platform_device *pdev)
        clk_prepare_enable(pp->clk);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               err = -ENODEV;
-               goto err_clk;
-       }
-
        pp->base = devm_ioremap_resource(&pdev->dev, res);
-       if (pp->base == NULL) {
+       if (IS_ERR(pp->base)) {
                err = PTR_ERR(pp->base);
                goto err_clk;
        }
 
        /* Alloc per-cpu stats */
-       pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+       pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
        if (!pp->stats) {
                err = -ENOMEM;
                goto err_clk;
        }
 
-       for_each_possible_cpu(cpu) {
-               struct mvneta_pcpu_stats *stats;
-               stats = per_cpu_ptr(pp->stats, cpu);
-               u64_stats_init(&stats->syncp);
-       }
-
        dt_mac_addr = of_get_mac_address(dn);
        if (dt_mac_addr) {
                mac_from = "device tree";
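
The mvneta hunks above switch the stats reader to u64_stats_fetch_begin_irq()/_retry_irq() and let netdev_alloc_pcpu_stats() do the per-cpu allocation and syncp init. A user-space sketch of the seqcount-style retry loop those helpers implement; pcpu_stats, fetch_begin and fetch_retry are illustrative stand-ins, and the real kernel primitives additionally handle irq/bh context:

/* A writer would bump "seq" around updates; readers retry if it was
 * odd (write in progress) or changed while they were sampling.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
	atomic_uint seq;
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

static unsigned fetch_begin(struct pcpu_stats *s)
{
	unsigned start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
	} while (start & 1);	/* writer active, spin */
	return start;
}

static int fetch_retry(struct pcpu_stats *s, unsigned start)
{
	return atomic_load_explicit(&s->seq, memory_order_acquire) != start;
}

int main(void)
{
	struct pcpu_stats s = { .seq = 0, .rx_packets = 42, .rx_bytes = 4096 };
	uint64_t pkts, bytes;
	unsigned start;

	do {
		start = fetch_begin(&s);
		pkts  = s.rx_packets;
		bytes = s.rx_bytes;
	} while (fetch_retry(&s, start));

	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}
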
index 5978461938699f098f7927c7d439cd9134fbc6a7..7f81ae66cc892d664fc007feaa9fd6f447baea42 100644 (file)
@@ -2845,7 +2845,7 @@ mapping_unwind:
 mapping_error:
        if (net_ratelimit())
                dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
@@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev)
                        pkts_compl++;
                        bytes_compl += e->skb->len;
 
-                       dev_kfree_skb(e->skb);
+                       dev_consume_skb_any(e->skb);
                }
        }
        netdev_completed_queue(dev, pkts_compl, bytes_compl);
index 55a37ae11440791d78e5dd69bf486cb5b4f976f3..b81106451a0a4d2d46d831d9e629d3a24212aada 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/prefetch.h>
 #include <linux/debugfs.h>
 #include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
 
 #include <asm/irq.h>
 
@@ -2000,7 +2002,7 @@ mapping_unwind:
 mapping_error:
        if (net_ratelimit())
                dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
@@ -2733,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
        unsigned int total_bytes[2] = { 0 };
        unsigned int total_packets[2] = { 0 };
 
+       if (to_do <= 0)
+               return work_done;
+
        rmb();
        do {
                struct sky2_port *sky2;
@@ -3906,19 +3911,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
        u64 _bytes, _packets;
 
        do {
-               start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
                _bytes = sky2->rx_stats.bytes;
                _packets = sky2->rx_stats.packets;
-       } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
 
        stats->rx_packets = _packets;
        stats->rx_bytes = _bytes;
 
        do {
-               start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
                _bytes = sky2->tx_stats.bytes;
                _packets = sky2->tx_stats.packets;
-       } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
 
        stats->tx_packets = _packets;
        stats->tx_bytes = _bytes;
@@ -4748,6 +4753,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 {
        struct sky2_port *sky2;
        struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+       const void *iap;
 
        if (!dev)
                return NULL;
@@ -4805,8 +4811,16 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 
        dev->features |= dev->hw_features;
 
-       /* read the mac address */
-       memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+       /* try to get mac address in the following order:
+        * 1) from device tree data
+        * 2) from internal registers set by bootloader
+        */
+       iap = of_get_mac_address(hw->pdev->dev.of_node);
+       if (iap)
+               memcpy(dev->dev_addr, iap, ETH_ALEN);
+       else
+               memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
+                             ETH_ALEN);
 
        return dev;
 }
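
The sky2 hunk above prefers a MAC address from device tree and only then falls back to the one the bootloader left in the chip registers. A tiny sketch of that fallback order with both sources stubbed; dt_mac/hw_mac are invented names, not kernel APIs:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static const unsigned char *dt_mac(void)	/* stands in for of_get_mac_address() */
{
	return NULL;				/* no "local-mac-address" property */
}

static void hw_mac(unsigned char *out)		/* stands in for memcpy_fromio(B2_MAC_1) */
{
	static const unsigned char burned[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	memcpy(out, burned, ETH_ALEN);
}

int main(void)
{
	unsigned char addr[ETH_ALEN];
	const unsigned char *iap = dt_mac();

	if (iap)
		memcpy(addr, iap, ETH_ALEN);	/* 1) device tree wins */
	else
		hw_mac(addr);			/* 2) fall back to bootloader-set registers */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}
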
index 563495d8975a5399c44140f94e096f199ca1a350..1a6e1887a17174ca0318271f8ed0286cfe54adfc 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config MLX4_EN
-       tristate "Mellanox Technologies 10Gbit Ethernet support"
+       tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
        depends on PCI
        select MLX4_CORE
        select PTP_1588_CLOCK
index 0d02fba9453657588b5f3db7b2e90e7e5410b010..78099eab767374319c7e258bfa1f0d6df4c64fa3 100644 (file)
@@ -800,16 +800,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                                    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 }
 
-static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
-                    struct mlx4_vhcr *vhcr,
-                    struct mlx4_cmd_mailbox *inbox,
-                    struct mlx4_cmd_mailbox *outbox,
-                    struct mlx4_cmd_info *cmd)
-{
-       return -EPERM;
-}
-
-static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
+static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
                     struct mlx4_vhcr *vhcr,
                     struct mlx4_cmd_mailbox *inbox,
                     struct mlx4_cmd_mailbox *outbox,
@@ -963,6 +954,15 @@ static struct mlx4_cmd_info cmd_info[] = {
                .verify = NULL,
                .wrapper = NULL
        },
+       {
+               .opcode = MLX4_CMD_CONFIG_DEV,
+               .has_inbox = false,
+               .has_outbox = false,
+               .out_is_imm = false,
+               .encode_slave_id = false,
+               .verify = NULL,
+               .wrapper = mlx4_CMD_EPERM_wrapper
+       },
        {
                .opcode = MLX4_CMD_ALLOC_RES,
                .has_inbox = false,
@@ -1258,7 +1258,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = MLX4_CMD_UPDATE_QP_wrapper
+               .wrapper = mlx4_CMD_EPERM_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
@@ -1267,7 +1267,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+               .wrapper = mlx4_CMD_EPERM_wrapper,
        },
        {
                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
@@ -1378,7 +1378,7 @@ static struct mlx4_cmd_info cmd_info[] = {
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
-               .wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+               .wrapper = mlx4_CMD_EPERM_wrapper
        },
 };
 
@@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
        int port, err;
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vport_oper_state *vp_oper;
-
-       for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+                       &priv->dev, slave);
+       int min_port = find_first_bit(actv_ports.ports,
+                                     priv->dev.caps.num_ports) + 1;
+       int max_port = min_port - 1 +
+               bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
+
+       for (port = min_port; port <= max_port; port++) {
+               if (!test_bit(port - 1, actv_ports.ports))
+                       continue;
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
                vp_oper->state = *vp_admin;
@@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
 {
        int port;
        struct mlx4_vport_oper_state *vp_oper;
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+                       &priv->dev, slave);
+       int min_port = find_first_bit(actv_ports.ports,
+                                     priv->dev.caps.num_ports) + 1;
+       int max_port = min_port - 1 +
+               bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
+
 
-       for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+       for (port = min_port; port <= max_port; port++) {
+               if (!test_bit(port - 1, actv_ports.ports))
+                       continue;
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                if (NO_INDX != vp_oper->vlan_idx) {
                        __mlx4_unregister_vlan(&priv->dev,
@@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
        return vf+1;
 }
 
+int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
+{
+       if (slave < 1 || slave > dev->num_vfs) {
+               mlx4_err(dev,
+                        "Bad slave number:%d (number of activated slaves: %lu)\n",
+                        slave, dev->num_slaves);
+               return -EINVAL;
+       }
+       return slave - 1;
+}
+
+struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
+{
+       struct mlx4_active_ports actv_ports;
+       int vf;
+
+       bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
+
+       if (slave == 0) {
+               bitmap_fill(actv_ports.ports, dev->caps.num_ports);
+               return actv_ports;
+       }
+
+       vf = mlx4_get_vf_indx(dev, slave);
+       if (vf < 0)
+               return actv_ports;
+
+       bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
+                  min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
+                  dev->caps.num_ports));
+
+       return actv_ports;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
+
+int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
+{
+       unsigned n;
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+       unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+       if (port <= 0 || port > m)
+               return -EINVAL;
+
+       n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+       if (port <= n)
+               port = n + 1;
+
+       return port;
+}
+EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
+
+int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
+{
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+       if (test_bit(port - 1, actv_ports.ports))
+               return port -
+                       find_first_bit(actv_ports.ports, dev->caps.num_ports);
+
+       return -1;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
+
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
+                                                  int port)
+{
+       unsigned i;
+       struct mlx4_slaves_pport slaves_pport;
+
+       bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
+
+       if (port <= 0 || port > dev->caps.num_ports)
+               return slaves_pport;
+
+       for (i = 0; i < dev->num_vfs + 1; i++) {
+               struct mlx4_active_ports actv_ports =
+                       mlx4_get_active_ports(dev, i);
+               if (test_bit(port - 1, actv_ports.ports))
+                       set_bit(i, slaves_pport.slaves);
+       }
+
+       return slaves_pport;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
+
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
+               struct mlx4_dev *dev,
+               const struct mlx4_active_ports *crit_ports)
+{
+       unsigned i;
+       struct mlx4_slaves_pport slaves_pport;
+
+       bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
+
+       for (i = 0; i < dev->num_vfs + 1; i++) {
+               struct mlx4_active_ports actv_ports =
+                       mlx4_get_active_ports(dev, i);
+               if (bitmap_equal(crit_ports->ports, actv_ports.ports,
+                                dev->caps.num_ports))
+                       set_bit(i, slaves_pport.slaves);
+       }
+
+       return slaves_pport;
+}
+EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2289,6 +2412,30 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 }
 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
 
+ /* mlx4_get_slave_default_vlan -
+ * return true if the slave is in VST mode (i.e. has a default vlan);
+ * if so, also report the vlan & qos through the pointers (if not NULL)
+ */
+bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
+                                u16 *vlan, u8 *qos)
+{
+       struct mlx4_vport_oper_state *vp_oper;
+       struct mlx4_priv *priv;
+
+       priv = mlx4_priv(dev);
+       vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+
+       if (MLX4_VGT != vp_oper->state.default_vlan) {
+               if (vlan)
+                       *vlan = vp_oper->state.default_vlan;
+               if (qos)
+                       *qos = vp_oper->state.default_qos;
+               return true;
+       }
+       return false;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
+
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
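
The mlx4 changes above introduce per-slave active-port bitmaps and helpers such as mlx4_slave_convert_port() that translate a slave-relative port number into a physical one. A simplified user-space sketch of that bitmap arithmetic, assuming two physical ports and a contiguous per-VF port range; all names are illustrative only:

#include <stdio.h>

#define MAX_PORTS 2

struct active_ports { unsigned long ports; };	/* bit i set => phys port i+1 active */

static int first_active(struct active_ports a)
{
	int i;

	for (i = 0; i < MAX_PORTS; i++)
		if (a.ports & (1UL << i))
			return i;
	return MAX_PORTS;
}

static int weight(struct active_ports a)
{
	int i, n = 0;

	for (i = 0; i < MAX_PORTS; i++)
		if (a.ports & (1UL << i))
			n++;
	return n;
}

/* mirrors the idea of mlx4_slave_convert_port(): slave-relative -> physical */
static int convert_port(struct active_ports a, int port)
{
	if (port <= 0 || port > weight(a))
		return -1;			/* kernel returns -EINVAL */
	if (port <= first_active(a))
		port = first_active(a) + 1;
	return port;
}

int main(void)
{
	struct active_ports a = { .ports = 0x2 };	/* only phys port 2 active */

	printf("slave port 1 -> phys port %d\n", convert_port(a, 1));	/* 2 */
	printf("slave port 2 -> phys port %d\n", convert_port(a, 2));	/* -1 */
	return 0;
}
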
index abaf6bb22416232fc055f3ef8b6c414be701734d..57dda95b67d8d4325e19f03b6d01f982cfc54cb7 100644 (file)
@@ -276,6 +276,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = mlx4_en_phc_adjfreq,
        .adjtime        = mlx4_en_phc_adjtime,
index b4881b6861590c16c55644fd2752abfcf569ad8e..c95ca252187c333719fe4141225cea020909c723 100644 (file)
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
        int has_ets_tc = 0;
 
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+               if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
                        en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
                                        i, ets->prio_tc[i]);
                        return -EINVAL;
index d357bf5a46860314a4c74ae10c630bae8f61f096..0c59d4fe7e3aae56afee09b7e279601192c9e26e 100644 (file)
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
                           " Per priority bit mask");
 
+MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
+                "Threshold for using inline data (range: 17-104, default: 104)");
+
+#define MAX_PFC_TX     0xff
+#define MAX_PFC_RX     0xff
+
 int en_print(const char *level, const struct mlx4_en_priv *priv,
             const char *format, ...)
 {
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
                params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
                        MLX4_EN_NUM_UP;
                params->prof[i].rss_rings = 0;
+               params->prof[i].inline_thold = inline_thold;
        }
 
        return 0;
@@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
        if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
                mlx4_en_init_timestamp(mdev);
 
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-               if (!dev->caps.comp_pool) {
-                       mdev->profile.prof[i].rx_ring_num =
-                               rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
-                                                          min_t(int,
-                                                                dev->caps.num_comp_vectors,
-                                                                DEF_RX_RINGS)));
-               } else {
-                       mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
-                               min_t(int, dev->caps.comp_pool/
-                                     dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
-               }
-       }
+       /* Set default number of RX rings */
+       mlx4_en_set_num_rx_rings(mdev);
 
        /* Create our own workqueue for reset/multicast tasks
         * Note: we cannot use the shared workqueue because of deadlocks caused
@@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
        .protocol       = MLX4_PROT_ETH,
 };
 
+static void mlx4_en_verify_params(void)
+{
+       if (pfctx > MAX_PFC_TX) {
+               pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+                       pfctx, MAX_PFC_TX);
+               pfctx = 0;
+       }
+
+       if (pfcrx > MAX_PFC_RX) {
+               pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+                       pfcrx, MAX_PFC_RX);
+               pfcrx = 0;
+       }
+
+       if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
+               pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
+                       inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
+               inline_thold = MAX_INLINE;
+       }
+}
+
 static int __init mlx4_en_init(void)
 {
+       mlx4_en_verify_params();
+
        return mlx4_register_interface(&mlx4_en_interface);
 }
 
index 84a96f70dfb51ea7cecf3c5f0e03daa90f52a880..82d7eb5b79cc167c1f1a726ba4b15b1074f69ec0 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/hash.h>
 #include <net/ip.h>
 #include <net/busy_poll.h>
+#include <net/vxlan.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/device.h>
@@ -603,7 +604,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
        int err = 0;
        u64 reg_id;
        int *qpn = &priv->base_qpn;
-       u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+       u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
 
        en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
               priv->dev->dev_addr);
@@ -672,7 +673,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
        u64 mac;
 
        if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
-               mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+               mac = mlx4_mac_to_u64(priv->dev->dev_addr);
                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                       priv->dev->dev_addr);
                mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +686,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
                for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
                        bucket = &priv->mac_hash[i];
                        hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
-                               mac = mlx4_en_mac_to_u64(entry->mac);
+                               mac = mlx4_mac_to_u64(entry->mac);
                                en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
                                       entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +716,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
        struct mlx4_en_dev *mdev = priv->mdev;
        struct mlx4_dev *dev = mdev->dev;
        int err = 0;
-       u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+       u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
 
        if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
                struct hlist_head *bucket;
                unsigned int mac_hash;
                struct mlx4_mac_entry *entry;
                struct hlist_node *tmp;
-               u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+               u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
 
                bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
                hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -759,18 +760,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
        return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
 }
 
-u64 mlx4_en_mac_to_u64(u8 *addr)
-{
-       u64 mac = 0;
-       int i;
-
-       for (i = 0; i < ETH_ALEN; i++) {
-               mac <<= 8;
-               mac |= addr[i];
-       }
-       return mac;
-}
-
 static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
 {
        int err = 0;
@@ -1089,7 +1078,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
                mlx4_en_cache_mclist(dev);
                netif_addr_unlock_bh(dev);
                list_for_each_entry(mclist, &priv->mc_list, list) {
-                       mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
+                       mcast_addr = mlx4_mac_to_u64(mclist->addr);
                        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
                                            mcast_addr, 0, MLX4_MCAST_CONFIG);
                }
@@ -1181,7 +1170,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                found = true;
 
                        if (!found) {
-                               mac = mlx4_en_mac_to_u64(entry->mac);
+                               mac = mlx4_mac_to_u64(entry->mac);
                                mlx4_en_uc_steer_release(priv, entry->mac,
                                                         priv->base_qpn,
                                                         entry->reg_id);
@@ -1224,7 +1213,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                                priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
                                break;
                        }
-                       mac = mlx4_en_mac_to_u64(ha->addr);
+                       mac = mlx4_mac_to_u64(ha->addr);
                        memcpy(entry->mac, ha->addr, ETH_ALEN);
                        err = mlx4_register_mac(mdev->dev, priv->port, mac);
                        if (err < 0) {
@@ -1677,7 +1666,7 @@ int mlx4_en_start_port(struct net_device *dev)
        }
 
        if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-               err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+               err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
                if (err) {
                        en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
                               err);
@@ -1709,6 +1698,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
        mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
 
+       if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+               vxlan_get_rx_port(dev);
        priv->port_up = true;
        netif_tx_start_all_queues(dev);
        netif_device_attach(dev);
@@ -2216,7 +2207,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
 {
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;
-       u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+       u64 mac_u64 = mlx4_mac_to_u64(mac);
 
        if (!is_valid_ether_addr(mac))
                return -EINVAL;
@@ -2276,6 +2267,81 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
        return 0;
 }
 
+static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+{
+       int ret;
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                vxlan_add_task);
+
+       ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
+       if (ret)
+               goto out;
+
+       ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+                                 VXLAN_STEER_BY_OUTER_MAC, 1);
+out:
+       if (ret)
+               en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+}
+
+static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+{
+       int ret;
+       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+                                                vxlan_del_task);
+
+       ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+                                 VXLAN_STEER_BY_OUTER_MAC, 0);
+       if (ret)
+               en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+
+       priv->vxlan_port = 0;
+}
+
+static void mlx4_en_add_vxlan_port(struct  net_device *dev,
+                                  sa_family_t sa_family, __be16 port)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       __be16 current_port;
+
+       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+               return;
+
+       if (sa_family == AF_INET6)
+               return;
+
+       current_port = priv->vxlan_port;
+       if (current_port && current_port != port) {
+               en_warn(priv, "vxlan port %d configured, can't add port %d\n",
+                       ntohs(current_port), ntohs(port));
+               return;
+       }
+
+       priv->vxlan_port = port;
+       queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
+}
+
+static void mlx4_en_del_vxlan_port(struct  net_device *dev,
+                                  sa_family_t sa_family, __be16 port)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       __be16 current_port;
+
+       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+               return;
+
+       if (sa_family == AF_INET6)
+               return;
+
+       current_port = priv->vxlan_port;
+       if (current_port != port) {
+               en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
+               return;
+       }
+
+       queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
+}
+
 static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_open               = mlx4_en_open,
        .ndo_stop               = mlx4_en_close,
@@ -2302,6 +2368,8 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_busy_poll          = mlx4_en_low_latency_recv,
 #endif
        .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
+       .ndo_add_vxlan_port     = mlx4_en_add_vxlan_port,
+       .ndo_del_vxlan_port     = mlx4_en_del_vxlan_port,
 };
 
 static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2351,7 +2419,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
 
        SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
-       dev->dev_id =  port - 1;
+       dev->dev_port = port - 1;
 
        /*
         * Initialize driver private data
@@ -2393,6 +2461,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
        INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
+       INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
+       INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
 #ifdef CONFIG_MLX4_EN_DCB
        if (!mlx4_is_slave(priv->mdev->dev)) {
                if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
@@ -2417,7 +2487,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                if (mlx4_is_slave(priv->mdev->dev)) {
                        eth_hw_addr_random(dev);
                        en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
-                       mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+                       mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
                        mdev->dev->caps.def_mac[priv->port] = mac_u64;
                } else {
                        en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
@@ -2526,7 +2596,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        }
 
        if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
-               err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+               err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
                if (err) {
                        en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
                               err);
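
The new ndo_add_vxlan_port/ndo_del_vxlan_port callbacks above track a single VXLAN UDP port per device, reject a conflicting second port, and defer the hardware update to a workqueue. A stripped-down sketch of just that port bookkeeping (the workqueue side is omitted and the names are made up):

#include <stdint.h>
#include <stdio.h>

struct vxlan_state { uint16_t port; };		/* 0 means "none configured" */

static int add_port(struct vxlan_state *s, uint16_t port)
{
	if (s->port && s->port != port) {
		fprintf(stderr, "port %u configured, can't add %u\n",
			(unsigned)s->port, (unsigned)port);
		return -1;
	}
	s->port = port;			/* the driver queues the HW update here */
	return 0;
}

static void del_port(struct vxlan_state *s, uint16_t port)
{
	if (s->port != port)		/* not the configured port, ignore */
		return;
	s->port = 0;			/* the driver queues the HW teardown here */
}

int main(void)
{
	struct vxlan_state s = { 0 };

	add_port(&s, 4789);
	add_port(&s, 8472);		/* rejected: a different port is in use */
	del_port(&s, 4789);
	printf("port now %u\n", (unsigned)s.port);
	return 0;
}
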
index dae1a1f4ae55e38287e6bcb5ff1bc5cb73435808..c2cfb05e72905cc6961ddc773547e1d09ae26bbf 100644 (file)
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        stats->tx_packets = 0;
        stats->tx_bytes = 0;
        priv->port_stats.tx_chksum_offload = 0;
+       priv->port_stats.queue_stopped = 0;
+       priv->port_stats.wake_queue = 0;
+
        for (i = 0; i < priv->tx_ring_num; i++) {
                stats->tx_packets += priv->tx_ring[i]->packets;
                stats->tx_bytes += priv->tx_ring[i]->bytes;
                priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+               priv->port_stats.queue_stopped +=
+                       priv->tx_ring[i]->queue_stopped;
+               priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
        }
 
        stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
index 890922c1c8eea11d4d737d81aefffde4e157e3a2..ba049ae88749dac986a0712d281bbd649152acdd 100644 (file)
@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
        }
 }
 
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
+{
+       int i;
+       int num_of_eqs;
+       int num_rx_rings;
+       struct mlx4_dev *dev = mdev->dev;
+
+       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+               if (!dev->caps.comp_pool)
+                       num_of_eqs = max_t(int, MIN_RX_RINGS,
+                                          min_t(int,
+                                                dev->caps.num_comp_vectors,
+                                                DEF_RX_RINGS));
+               else
+                       num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
+                                          dev->caps.comp_pool/
+                                          dev->caps.num_ports) - 1;
+
+               num_rx_rings = min_t(int, num_of_eqs,
+                                    netif_get_num_default_rss_queues());
+               mdev->profile.prof[i].rx_ring_num =
+                       rounddown_pow_of_two(num_rx_rings);
+       }
+}
+
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring **pring,
                           u32 size, u16 stride, int node)
@@ -636,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        if (!priv->port_up)
                return 0;
 
+       if (budget <= 0)
+               return polled;
+
        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
         * descriptor offset can be deduced from the CQE index instead of
         * reading 'cqe->index' */
index c11d063473e5f9aaa162aacd32b4cac386d53464..03e5f6ac67e7660dbc68c6fe69e5123247588bf0 100644 (file)
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
        if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
                return -ENOMEM;
 
-       /* The device currently only supports 10G speed */
-       if (priv->port_state.link_speed != SPEED_10000)
+       /* The device supports 1G, 10G and 40G speeds */
+       if (priv->port_state.link_speed != 1000 &&
+           priv->port_state.link_speed != 10000 &&
+           priv->port_state.link_speed != 40000)
                return priv->port_state.link_speed;
        return 0;
 }
index 13457032d15ff09489cff354b3322f06011e1c2b..dd1f6d346459808dfe95690ce5fcf0af31e99231 100644 (file)
 
 #include "mlx4_en.h"
 
-enum {
-       MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
-       MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_tx_ring **pring, int qpn, u32 size,
                           u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->size = size;
        ring->size_mask = size - 1;
        ring->stride = stride;
-
-       inline_thold = min(inline_thold, MAX_INLINE);
+       ring->inline_thold = priv->prof->inline_thold;
 
        tmp = size * sizeof(struct mlx4_en_tx_info);
        ring->tx_info = vmalloc_node(tmp, node);
@@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                        }
                }
        }
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return tx_info->nr_txbb;
 }
 
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
         */
        if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
                netif_tx_wake_queue(ring->tx_queue);
-               priv->port_stats.wake_queue++;
+               ring->wake_queue++;
        }
        return done;
 }
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
        return ring->buf + index * TXBB_SIZE;
 }
 
-static int is_inline(struct sk_buff *skb, void **pfrag)
+static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
 {
        void *ptr;
 
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
                }
        } else {
                *lso_header_size = 0;
-               if (!is_inline(skb, NULL))
+               if (!is_inline(priv->prof->inline_thold, skb, NULL))
                        real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
                else
                        real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
        int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
 
        if (skb->len <= spc) {
-               inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+               if (likely(skb->len >= MIN_PKT_LEN)) {
+                       inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+               } else {
+                       inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+                       memset(((void *)(inl + 1)) + skb->len, 0,
+                              MIN_PKT_LEN - skb->len);
+               }
                skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
                if (skb_shinfo(skb)->nr_frags)
                        memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                /* every full Tx ring stops queue */
                netif_tx_stop_queue(ring->tx_queue);
-               priv->port_stats.queue_stopped++;
+               ring->queue_stopped++;
 
                /* If queue was emptied after the if, and before the
                 * stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                if (unlikely(((int)(ring->prod - ring->cons)) <=
                             ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                        netif_tx_wake_queue(ring->tx_queue);
-                       priv->port_stats.wake_queue++;
+                       ring->wake_queue++;
                } else {
                        return NETDEV_TX_BUSY;
                }
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_info->data_offset = (void *)data - (void *)tx_desc;
 
        tx_info->linear = (lso_header_size < skb_headlen(skb) &&
-                          !is_inline(skb, NULL)) ? 1 : 0;
+                          !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
 
        data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
 
-       if (is_inline(skb, &fragptr)) {
+       if (is_inline(ring->inline_thold, skb, &fragptr)) {
                tx_info->inl = 1;
        } else {
                /* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
 
        if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
-               *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+               tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
+
                op_own |= htonl((bf_index & 0xffff) << 8);
                /* Ensure new descirptor hits memory
                * before setting ownership of this descriptor to HW */
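
Among the en_tx.c changes above, build_inline_wqe() now zero-pads inlined frames shorter than MIN_PKT_LEN so the reported byte count never drops below that floor. A small sketch of that padding step, assuming MIN_PKT_LEN is 17 as the new module-parameter range (17-104) suggests:

#include <stdio.h>
#include <string.h>

#define MIN_PKT_LEN 17

static size_t fill_inline(unsigned char *dst, const unsigned char *pkt, size_t len)
{
	memcpy(dst, pkt, len);
	if (len < MIN_PKT_LEN) {
		memset(dst + len, 0, MIN_PKT_LEN - len);	/* pad with zeros */
		return MIN_PKT_LEN;				/* advertised byte count */
	}
	return len;
}

int main(void)
{
	unsigned char wqe[64];
	unsigned char tiny[4] = { 1, 2, 3, 4 };

	printf("byte_count = %zu\n", fill_inline(wqe, tiny, sizeof(tiny)));	/* 17 */
	return 0;
}
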
index 8992b38578d5898ecaa9de19ee62e5079b1e8010..d501a2b0fb79f18e560fd0cd067aa4c19b83b447 100644 (file)
@@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
-       if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+
+       if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+           port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
                pr_err("%s: Error: asking for slave:%d, port:%d\n",
                       __func__, slave, port);
                return SLAVE_PORT_DOWN;
@@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
 
-       if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+       if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+           port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
                pr_err("%s: Error: asking for slave:%d, port:%d\n",
                       __func__, slave, port);
                return -1;
@@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
 {
        int i;
        enum slave_port_gen_event gen_event;
+       struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
+                                                                         port);
 
-       for (i = 0; i < dev->num_slaves; i++)
-               set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
+       for (i = 0; i < dev->num_vfs + 1; i++)
+               if (test_bit(i, slaves_pport.slaves))
+                       set_and_calc_slave_port_state(dev, i, port,
+                                                     event, &gen_event);
 }
 /**************************************************************************
        The function get as input the new event to that port,
@@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
        struct mlx4_slave_state *ctx = NULL;
        unsigned long flags;
        int ret = -1;
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
        enum slave_port_state cur_state =
                mlx4_get_slave_port_state(dev, slave, port);
 
        *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
 
-       if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
+       if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
+           port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
                pr_err("%s: Error: asking for slave:%d, port:%d\n",
                       __func__, slave, port);
                return ret;
@@ -542,15 +553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;
 
-               case MLX4_EVENT_TYPE_PORT_CHANGE:
+               case MLX4_EVENT_TYPE_PORT_CHANGE: {
+                       struct mlx4_slaves_pport slaves_port;
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
+                       slaves_port = mlx4_phys_to_slaves_pport(dev, port);
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                                if (!mlx4_is_master(dev))
                                        break;
-                               for (i = 0; i < dev->num_slaves; i++) {
+                               for (i = 0; i < dev->num_vfs + 1; i++) {
+                                       if (!test_bit(i, slaves_port.slaves))
+                                               continue;
                                        if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
@@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                         " to slave: %d, port:%d\n",
                                                         __func__, i, port);
                                                s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
-                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+                                                       eqe->event.port_change.port =
+                                                               cpu_to_be32(
+                                                               (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
                                                        mlx4_slave_event(dev, i, eqe);
+                                               }
                                        } else {  /* IB port */
                                                set_and_calc_slave_port_state(dev, i, port,
                                                                              MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                if (!mlx4_is_master(dev))
                                        break;
                                if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
-                                       for (i = 0; i < dev->num_slaves; i++) {
+                                       for (i = 0; i < dev->num_vfs + 1; i++) {
+                                               if (!test_bit(i, slaves_port.slaves))
+                                                       continue;
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
                                                s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
-                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state)
+                                               if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+                                                       eqe->event.port_change.port =
+                                                               cpu_to_be32(
+                                                               (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
                                                        mlx4_slave_event(dev, i, eqe);
+                                               }
                                        }
                                else /* IB port */
                                        /* port-up event will be sent to a slave when the
@@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                        set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
                        }
                        break;
+               }
 
                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
index 7e2995ecea6f4b3e6d4a2fbadbdfb6876a73e5c8..d16a4d11890342167a2f2c8605e3b5e4e9d25198 100644 (file)
@@ -225,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 
        if (vhcr->op_modifier == 1) {
+               struct mlx4_active_ports actv_ports =
+                       mlx4_get_active_ports(dev, slave);
+               int converted_port = mlx4_slave_convert_port(
+                               dev, slave, vhcr->in_modifier);
+
+               if (converted_port < 0)
+                       return -EINVAL;
+
+               vhcr->in_modifier = converted_port;
                /* Set nic_info bit to mark new fields support */
                field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
 
-               field = vhcr->in_modifier; /* phys-port = logical-port */
+               /* phys-port = logical-port */
+               field = vhcr->in_modifier -
+                       find_first_bit(actv_ports.ports, dev->caps.num_ports);
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
 
+               field = vhcr->in_modifier;
                /* size is now the QP number */
                size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
                MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
@@ -249,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                         QUERY_FUNC_CAP_PHYS_PORT_ID);
 
        } else if (vhcr->op_modifier == 0) {
+               struct mlx4_active_ports actv_ports =
+                       mlx4_get_active_ports(dev, slave);
                /* enable rdma and ethernet interfaces, and new quota locations */
                field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
                         QUERY_FUNC_CAP_FLAG_QUOTAS);
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
 
-               field = dev->caps.num_ports;
+               field = min(
+                       bitmap_weight(actv_ports.ports, dev->caps.num_ports),
+                       dev->caps.num_ports);
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 
                size = dev->caps.function_caps; /* set PF behaviours */
@@ -840,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        int     err = 0;
        u8      field;
        u32     bmme_flags;
+       int     real_port;
+       int     slave_port;
+       int     first_port;
+       struct mlx4_active_ports actv_ports;
 
        err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -852,8 +872,26 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
        MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
        flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
        flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
+       actv_ports = mlx4_get_active_ports(dev, slave);
+       first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
+       for (slave_port = 0, real_port = first_port;
+            real_port < first_port +
+            bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+            ++real_port, ++slave_port) {
+               if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
+                       flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
+               else
+                       flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
+       }
+       for (; slave_port < dev->caps.num_ports; ++slave_port)
+               flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
        MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
 
+       MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
+       field &= ~0x0F;
+       field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
+       MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
+
        /* For guests, disable timestamp */
        MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
        field &= 0x7f;
@@ -903,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
        u16 short_field;
        int err;
        int admin_link_state;
+       int port = mlx4_slave_convert_port(dev, slave,
+                                          vhcr->in_modifier & 0xFF);
 
 #define MLX4_VF_PORT_NO_LINK_SENSE_MASK        0xE0
 #define MLX4_PORT_LINK_UP_MASK         0x80
 #define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
 #define QUERY_PORT_CUR_MAX_GID_OFFSET  0x0e
 
+       if (port < 0)
+               return -EINVAL;
+
+       vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
+                           (port & 0xFF);
+
        err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_NATIVE);
@@ -935,7 +981,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
                MLX4_PUT(outbox->buf, port_type,
                         QUERY_PORT_SUPPORTED_TYPE_OFFSET);
 
-               short_field = 1; /* slave max gids */
+               if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
+                       short_field = mlx4_get_slave_num_gids(dev, slave, port);
+               else
+                       short_field = 1; /* slave max gids */
                MLX4_PUT(outbox->buf, short_field,
                         QUERY_PORT_CUR_MAX_GID_OFFSET);
 
@@ -1585,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_cmd_info *cmd)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       int port = vhcr->in_modifier;
+       int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
        int err;
 
+       if (port < 0)
+               return -EINVAL;
+
        if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
                return 0;
 
@@ -1677,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
                            struct mlx4_cmd_info *cmd)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       int port = vhcr->in_modifier;
+       int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
        int err;
 
+       if (port < 0)
+               return -EINVAL;
+
        if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
            (1 << port)))
                return 0;
@@ -1724,6 +1779,46 @@ int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
                        MLX4_CMD_NATIVE);
 }
 
+struct mlx4_config_dev {
+       __be32  update_flags;
+       __be32  rsdv1[3];
+       __be16  vxlan_udp_dport;
+       __be16  rsvd2;
+};
+
+#define MLX4_VXLAN_UDP_DPORT (1 << 0)
+
+static int mlx4_CONFIG_DEV(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
+{
+       int err;
+       struct mlx4_cmd_mailbox *mailbox;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+
+       memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
+
+       err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
+                      MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
+{
+       struct mlx4_config_dev config_dev;
+
+       memset(&config_dev, 0, sizeof(config_dev));
+       config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
+       config_dev.vxlan_udp_dport = udp_port;
+
+       return mlx4_CONFIG_DEV(dev, &config_dev);
+}
+EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
+
+
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
        int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
@@ -1891,7 +1986,8 @@ void mlx4_opreq_action(struct work_struct *work)
                        err = EINVAL;
                        break;
                }
-               err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+               err = mlx4_cmd(dev, 0, ((u32) err |
+                                       (__force u32)cpu_to_be32(token) << 16),
                               1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
                               MLX4_CMD_NATIVE);
                if (err) {
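
The fw.c hunk above adds a CONFIG_DEV mailbox whose update_flags word says which fields are valid, currently only the VXLAN UDP destination port. A sketch of building that layout on a little-endian host; the cpu_to_be*_stub helpers and the GCC/Clang bswap builtins are illustration-only stand-ins for the kernel's byte-order macros:

#include <stdint.h>
#include <stdio.h>

#define MLX4_VXLAN_UDP_DPORT (1u << 0)

struct config_dev {
	uint32_t update_flags;		/* big-endian on the wire */
	uint32_t rsvd1[3];
	uint16_t vxlan_udp_dport;	/* big-endian on the wire */
	uint16_t rsvd2;
};

static uint32_t cpu_to_be32_stub(uint32_t x)
{
	return __builtin_bswap32(x);	/* assumes a little-endian GCC/Clang build */
}

static uint16_t cpu_to_be16_stub(uint16_t x)
{
	return __builtin_bswap16(x);
}

int main(void)
{
	struct config_dev cfg = { 0 };

	cfg.update_flags    = cpu_to_be32_stub(MLX4_VXLAN_UDP_DPORT);
	cfg.vxlan_udp_dport = cpu_to_be16_stub(4789);	/* IANA VXLAN port */

	printf("flags=%#x dport(be)=%#x\n",
	       (unsigned)cfg.update_flags, (unsigned)cfg.vxlan_udp_dport);
	return 0;
}
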
index d413e60071d47cd7e74d86652f5a0fb813cd5eb0..f0ae95f66cebe27bd306d64001cc516ff41b4554 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
 #include <linux/kmod.h>
 
 #include <linux/mlx4/device.h>
@@ -78,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 
 #endif /* CONFIG_PCI_MSI */
 
-static int num_vfs;
-module_param(num_vfs, int, 0444);
-MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+static uint8_t num_vfs[3] = {0, 0, 0};
+static int num_vfs_argc = 3;
+module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
+                         "num_vfs=port1,port2,port1+2");
 
-static int probe_vf;
-module_param(probe_vf, int, 0644);
-MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+static uint8_t probe_vf[3] = {0, 0, 0};
+static int probe_vfs_argc = 3;
+module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
+                          "probe_vf=port1,port2,port1+2");
 
 int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
 module_param_named(log_num_mgm_entry_size,
@@ -1470,7 +1473,11 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
        int i;
 
        for (i = 1; i <= dev->caps.num_ports; i++) {
-               dev->caps.gid_table_len[i] = 1;
+               if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
+                       dev->caps.gid_table_len[i] =
+                               mlx4_get_slave_num_gids(dev, 0, i);
+               else
+                       dev->caps.gid_table_len[i] = 1;
                dev->caps.pkey_table_len[i] =
                        dev->phys_caps.pkey_phys_table_len[i] - 1;
        }
@@ -1495,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
        if (mlx4_log_num_mgm_entry_size == -1 &&
            dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
            (!mlx4_is_mfunc(dev) ||
-            (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) &&
+            (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
            choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
                MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
                dev->oper_log_mgm_entry_size =
@@ -1981,9 +1988,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct msix_entry *entries;
        int nreq = min_t(int, dev->caps.num_ports *
-                        min_t(int, netif_get_num_default_rss_queues() + 1,
+                        min_t(int, num_online_cpus() + 1,
                               MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
-       int err;
        int i;
 
        if (msi_x) {
@@ -1997,23 +2003,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                for (i = 0; i < nreq; ++i)
                        entries[i].entry = i;
 
-       retry:
-               err = pci_enable_msix(dev->pdev, entries, nreq);
-               if (err) {
-                       /* Try again if at least 2 vectors are available */
-                       if (err > 1) {
-                               mlx4_info(dev, "Requested %d vectors, "
-                                         "but only %d MSI-X vectors available, "
-                                         "trying again\n", nreq, err);
-                               nreq = err;
-                               goto retry;
-                       }
+               nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+
+               if (nreq < 0) {
                        kfree(entries);
                        goto no_msi;
-               }
-
-               if (nreq <
-                   MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+               } else if (nreq < MSIX_LEGACY_SZ +
+                                 dev->caps.num_ports * MIN_MSIX_P_PORT) {
                        /*Working in legacy mode , all EQ's shared*/
                        dev->caps.comp_pool           = 0;
                        dev->caps.num_comp_vectors = nreq - 1;
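
The open-coded request/shrink/retry loop is gone in favour of pci_enable_msix_range(); a minimal sketch of the new contract with caller-chosen bounds (the same conversion shows up again for mlx5 and myri10ge further down in this merge):

static int example_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
                               int minvec, int maxvec)
{
        /* One call replaces the old loop: the return value is the number of
         * vectors actually granted, somewhere in [minvec, maxvec], or a
         * negative errno if even minvec vectors cannot be allocated. */
        return pci_enable_msix_range(pdev, entries, minvec, maxvec);
}
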
@@ -2201,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
        struct mlx4_dev *dev;
        int err;
        int port;
+       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
+               {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
+       unsigned total_vfs = 0;
+       int sriov_initialized = 0;
+       unsigned int i;
 
        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
@@ -2215,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
         * per port, we must limit the number of VFs to 63 (since their are
         * 128 MACs)
         */
-       if (num_vfs >= MLX4_MAX_NUM_VF) {
+       for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
+            total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
+               nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
+               if (nvfs[i] < 0) {
+                       dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
+                       return -EINVAL;
+               }
+       }
+       for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
+            i++) {
+               prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
+               if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
+                       dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
+                       return -EINVAL;
+               }
+       }
+       if (total_vfs >= MLX4_MAX_NUM_VF) {
                dev_err(&pdev->dev,
                        "Requested more VF's (%d) than allowed (%d)\n",
-                       num_vfs, MLX4_MAX_NUM_VF - 1);
+                       total_vfs, MLX4_MAX_NUM_VF - 1);
                return -EINVAL;
        }
 
-       if (num_vfs < 0) {
-               pr_err("num_vfs module parameter cannot be negative\n");
-               return -EINVAL;
+       for (i = 0; i < MLX4_MAX_PORTS; i++) {
+               if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+                       dev_err(&pdev->dev,
+                               "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+                               nvfs[i] + nvfs[2], i + 1,
+                               MLX4_MAX_NUM_VF_P_PORT - 1);
+                       return -EINVAL;
+               }
        }
+
+
        /*
         * Check for BARs.
         */
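
A worked example of the new per-port syntax (values are illustrative): num_vfs=2,1,3 goes through param_map[2] = {0, 1, 2}, so nvfs = {2, 1, 3} and total_vfs = 6, meaning two VFs on port 1 only, one on port 2 only and three dual-port VFs; the legacy single-argument form num_vfs=5 goes through param_map[0] = {2, 0, 0}, so nvfs = {0, 0, 5}, i.e. five dual-port VFs, which preserves the old behaviour.
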
@@ -2300,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
        if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
                /* When acting as pf, we normally skip vfs unless explicitly
                 * requested to probe them. */
-               if (num_vfs && extended_func_num(pdev) > probe_vf) {
-                       mlx4_warn(dev, "Skipping virtual function:%d\n",
-                                               extended_func_num(pdev));
-                       err = -ENODEV;
-                       goto err_free_dev;
+               if (total_vfs) {
+                       unsigned vfs_offset = 0;
+                       for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
+                            vfs_offset + nvfs[i] < extended_func_num(pdev);
+                            vfs_offset += nvfs[i], i++)
+                               ;
+                       if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
+                               err = -ENODEV;
+                               goto err_free_dev;
+                       }
+                       if ((extended_func_num(pdev) - vfs_offset)
+                           > prb_vf[i]) {
+                               mlx4_warn(dev, "Skipping virtual function:%d\n",
+                                         extended_func_num(pdev));
+                               err = -ENODEV;
+                               goto err_free_dev;
+                       }
                }
                mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
                dev->flags |= MLX4_FLAG_SLAVE;
@@ -2324,22 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
                        }
                }
 
-               if (num_vfs) {
-                       mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
-
-                       atomic_inc(&pf_loading);
-                       err = pci_enable_sriov(pdev, num_vfs);
-                       atomic_dec(&pf_loading);
-
-                       if (err) {
-                               mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
-                                        err);
+               if (total_vfs) {
+                       mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
+                                 total_vfs);
+                       dev->dev_vfs = kzalloc(
+                                       total_vfs * sizeof(*dev->dev_vfs),
+                                       GFP_KERNEL);
+                       if (NULL == dev->dev_vfs) {
+                               mlx4_err(dev, "Failed to allocate memory for VFs\n");
                                err = 0;
                        } else {
-                               mlx4_warn(dev, "Running in master mode\n");
-                               dev->flags |= MLX4_FLAG_SRIOV |
-                                             MLX4_FLAG_MASTER;
-                               dev->num_vfs = num_vfs;
+                               atomic_inc(&pf_loading);
+                               err = pci_enable_sriov(pdev, total_vfs);
+                               atomic_dec(&pf_loading);
+                               if (err) {
+                                       mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+                                                err);
+                                       err = 0;
+                               } else {
+                                       mlx4_warn(dev, "Running in master mode\n");
+                                       dev->flags |= MLX4_FLAG_SRIOV |
+                                                     MLX4_FLAG_MASTER;
+                                       dev->num_vfs = total_vfs;
+                                       sriov_initialized = 1;
+                               }
                        }
                }
 
@@ -2404,12 +2450,37 @@ slave_start:
        /* In master functions, the communication channel must be initialized
         * after obtaining its address from fw */
        if (mlx4_is_master(dev)) {
+               unsigned sum = 0;
                err = mlx4_multi_func_init(dev);
                if (err) {
                        mlx4_err(dev, "Failed to init master mfunc"
                                 "interface, aborting.\n");
                        goto err_close;
                }
+               if (sriov_initialized) {
+                       int ib_ports = 0;
+                       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+                               ib_ports++;
+
+                       if (ib_ports &&
+                           (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
+                               mlx4_err(dev,
+                                        "Invalid syntax of num_vfs/probe_vfs "
+                                        "with IB port. Single port VFs syntax"
+                                        " is only supported when all ports "
+                                        "are configured as ethernet\n");
+                               goto err_close;
+                       }
+                       for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
+                               unsigned j;
+                               for (j = 0; j < nvfs[i]; ++sum, ++j) {
+                                       dev->dev_vfs[sum].min_port =
+                                               i < 2 ? i + 1 : 1;
+                                       dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
+                                               dev->caps.num_ports;
+                               }
+                       }
+               }
        }
 
        err = mlx4_alloc_eq_table(dev);
@@ -2517,6 +2588,8 @@ err_rel_own:
        if (!mlx4_is_slave(dev))
                mlx4_free_ownership(dev);
 
+       kfree(priv->dev.dev_vfs);
+
 err_free_dev:
        kfree(priv);
 
@@ -2603,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                kfree(dev->caps.qp0_proxy);
                kfree(dev->caps.qp1_tunnel);
                kfree(dev->caps.qp1_proxy);
+               kfree(dev->dev_vfs);
 
                kfree(priv);
                pci_release_regions(pdev);
index db7dc0b6667d6f1ecb57076db2e855bd7a9cb448..80ccb4edf825f8888c6487626f2f380c7b27479b 100644 (file)
@@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_cmd_info *cmd)
 {
        u32 qpn = (u32) vhcr->in_param & 0xffffffff;
-       u8 port = vhcr->in_param >> 62;
+       int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
        enum mlx4_steer_type steer = vhcr->in_modifier;
 
+       if (port < 0)
+               return -EINVAL;
+
        /* Promiscuous unicast is not allowed in mfunc */
        if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
                return 0;
index 7aec6c833973c3c84c13bc3d02ca34c5a6dfef4e..cf8be41abb36f9e0b5324abce28d24e81f4963e8 100644 (file)
@@ -788,6 +788,10 @@ enum {
        MLX4_USE_RR     = 1,
 };
 
+struct mlx4_roce_gid_entry {
+       u8 raw[16];
+};
+
 struct mlx4_priv {
        struct mlx4_dev         dev;
 
@@ -834,6 +838,7 @@ struct mlx4_priv {
        int                     fs_hash_mode;
        u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
        __be64                  slave_node_guids[MLX4_MFUNC_MAX];
+       struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
 
        atomic_t                opreq_count;
        struct work_struct      opreq_task;
@@ -1242,11 +1247,6 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_cmd_mailbox *inbox,
                                         struct mlx4_cmd_mailbox *outbox,
                                         struct mlx4_cmd_info *cmd);
-int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
-                                             struct mlx4_vhcr *vhcr,
-                                             struct mlx4_cmd_mailbox *inbox,
-                                             struct mlx4_cmd_mailbox *outbox,
-                                             struct mlx4_cmd_info *cmd);
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
@@ -1282,4 +1282,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
 
 void mlx4_init_quotas(struct mlx4_dev *dev);
 
+int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
+/* Returns the VF index of slave */
+int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
+
 #endif /* MLX4_H */
index b57e8c87a34ea8723ae9316747a69a32386429ea..36fc2a2b24c3156cbcd29ae97df50592cdc15fec 100644 (file)
@@ -187,6 +187,13 @@ enum {
 #define GET_AVG_PERF_COUNTER(cnt)      (0)
 #endif /* MLX4_EN_PERF_STAT */
 
+/* Constants for TX flow */
+enum {
+       MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+       MAX_BF = 256,
+       MIN_PKT_LEN = 17,
+};
+
 /*
  * Configurables
  */
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
        unsigned long bytes;
        unsigned long packets;
        unsigned long tx_csum;
+       unsigned long queue_stopped;
+       unsigned long wake_queue;
        struct mlx4_bf bf;
        bool bf_enabled;
        struct netdev_queue *tx_queue;
        int hwtstamp_tx_type;
+       int inline_thold;
 };
 
 struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
        u8 tx_pause;
        u8 tx_ppp;
        int rss_rings;
+       int inline_thold;
 };
 
 struct mlx4_en_profile {
@@ -548,6 +559,8 @@ struct mlx4_en_priv {
        struct work_struct linkstate_task;
        struct delayed_work stats_task;
        struct delayed_work service_task;
+       struct work_struct vxlan_add_task;
+       struct work_struct vxlan_del_task;
        struct mlx4_en_perf_stats pstats;
        struct mlx4_en_pkt_stats pkstats;
        struct mlx4_en_port_stats port_stats;
@@ -574,6 +587,7 @@ struct mlx4_en_priv {
        struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
 #endif
        u64 tunnel_reg_id;
+       __be16 vxlan_port;
 };
 
 enum mlx4_en_wol {
@@ -737,7 +751,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
                             int cq, int user_prio);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring);
-
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                           struct mlx4_en_rx_ring **pring,
                           u32 size, u16 stride, int node);
@@ -786,7 +800,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 
 #define MLX4_EN_NUM_SELF_TEST  5
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
 
 /*
index a58bcbf1b8067e88e5e153f25cd285ef57b90bb3..cfcad26ed40f60b0e5b992195339d8c12c0e68d7 100644 (file)
@@ -505,6 +505,84 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
        mlx4_free_cmd_mailbox(dev, outmailbox);
        return err;
 }
+static struct mlx4_roce_gid_entry zgid_entry;
+
+int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
+{
+       int vfs;
+       int slave_gid = slave;
+       unsigned i;
+       struct mlx4_slaves_pport slaves_pport;
+       struct mlx4_active_ports actv_ports;
+       unsigned max_port_p_one;
+
+       if (slave == 0)
+               return MLX4_ROCE_PF_GIDS;
+
+       /* Slave is a VF */
+       slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+       actv_ports = mlx4_get_active_ports(dev, slave);
+       max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
+               bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
+
+       for (i = 1; i < max_port_p_one; i++) {
+               struct mlx4_active_ports exclusive_ports;
+               struct mlx4_slaves_pport slaves_pport_actv;
+               bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+               set_bit(i - 1, exclusive_ports.ports);
+               if (i == port)
+                       continue;
+               slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
+                                   dev, &exclusive_ports);
+               slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
+                                          dev->num_vfs + 1);
+       }
+       vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+       if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
+               return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
+       return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
+}
+
+int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
+{
+       int gids;
+       unsigned i;
+       int slave_gid = slave;
+       int vfs;
+
+       struct mlx4_slaves_pport slaves_pport;
+       struct mlx4_active_ports actv_ports;
+       unsigned max_port_p_one;
+
+       if (slave == 0)
+               return 0;
+
+       slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+       actv_ports = mlx4_get_active_ports(dev, slave);
+       max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
+               bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
+
+       for (i = 1; i < max_port_p_one; i++) {
+               struct mlx4_active_ports exclusive_ports;
+               struct mlx4_slaves_pport slaves_pport_actv;
+               bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+               set_bit(i - 1, exclusive_ports.ports);
+               if (i == port)
+                       continue;
+               slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
+                                   dev, &exclusive_ports);
+               slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
+                                          dev->num_vfs + 1);
+       }
+       gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
+       vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+       if (slave_gid <= gids % vfs)
+               return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
+
+       return MLX4_ROCE_PF_GIDS + (gids % vfs) +
+               ((gids / vfs) * (slave_gid - 1));
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
 
 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                                u8 op_mod, struct mlx4_cmd_mailbox *inbox)
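
A worked example of the split implemented by the two helpers above, assuming the usual constants MLX4_ROCE_PF_GIDS = 16 and MLX4_ROCE_MAX_GIDS = 128 (leaving 112 GIDs for VFs): with five VFs active on a port, 112 % 5 = 2 and 112 / 5 = 22, so the first two VFs get 23 GIDs each and the remaining three get 22; mlx4_get_base_gid_ix() then places VF 1 at index 16, VF 2 at index 39 and VF 3 at index 62, directly after the PF block of 16.
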
@@ -515,14 +593,18 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
        struct mlx4_slave_state *slave_st = &master->slave_state[slave];
        struct mlx4_set_port_rqp_calc_context *qpn_context;
        struct mlx4_set_port_general_context *gen_context;
+       struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
        int reset_qkey_viols;
        int port;
        int is_eth;
+       int num_gids;
+       int base;
        u32 in_modifier;
        u32 promisc;
        u16 mtu, prev_mtu;
        int err;
-       int i;
+       int i, j;
+       int offset;
        __be32 agg_cap_mask;
        __be32 slave_cap_mask;
        __be32 new_cap_mask;
@@ -535,7 +617,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
        /* Slaves cannot perform SET_PORT operations except changing MTU */
        if (is_eth) {
                if (slave != dev->caps.function &&
-                   in_modifier != MLX4_SET_PORT_GENERAL) {
+                   in_modifier != MLX4_SET_PORT_GENERAL &&
+                   in_modifier != MLX4_SET_PORT_GID_TABLE) {
                        mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
                                        slave);
                        return -EINVAL;
@@ -581,6 +664,67 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 
                        gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
                        break;
+               case MLX4_SET_PORT_GID_TABLE:
+                       /* change to MULTIPLE entries: number of guest's gids
+                        * need a FOR-loop here over number of gids the guest has.
+                        * 1. Check no duplicates in gids passed by slave
+                        */
+                       num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+                       base = mlx4_get_base_gid_ix(dev, slave, port);
+                       gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+                       for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
+                               if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
+                                           sizeof(zgid_entry)))
+                                       continue;
+                               gid_entry_mb1 = gid_entry_mbox + 1;
+                               for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
+                                       if (!memcmp(gid_entry_mb1->raw,
+                                                   zgid_entry.raw, sizeof(zgid_entry)))
+                                               continue;
+                                       if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
+                                                   sizeof(gid_entry_mbox->raw))) {
+                                               /* found duplicate */
+                                               return -EINVAL;
+                                       }
+                               }
+                       }
+
+                       /* 2. Check that do not have duplicates in OTHER
+                        *    entries in the port GID table
+                        */
+                       for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
+                               if (i >= base && i < base + num_gids)
+                                       continue; /* don't compare to slave's current gids */
+                               gid_entry_tbl = &priv->roce_gids[port - 1][i];
+                               if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
+                                       continue;
+                               gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+                               for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
+                                       if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
+                                                   sizeof(zgid_entry)))
+                                               continue;
+                                       if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
+                                                   sizeof(gid_entry_tbl->raw))) {
+                                               /* found duplicate */
+                                               mlx4_warn(dev, "requested gid entry for slave:%d "
+                                                         "is a duplicate of gid at index %d\n",
+                                                         slave, i);
+                                               return -EINVAL;
+                                       }
+                               }
+                       }
+
+                       /* insert slave GIDs with memcpy, starting at slave's base index */
+                       gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+                       for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
+                               memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
+
+                       /* Now, copy roce port gids table to current mailbox for passing to FW */
+                       gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
+                       for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
+                               memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
+
+                       break;
                }
                return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
                                MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -646,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_cmd_mailbox *outbox,
                          struct mlx4_cmd_info *cmd)
 {
+       int port = mlx4_slave_convert_port(
+                       dev, slave, vhcr->in_modifier & 0xFF);
+
+       if (port < 0)
+               return -EINVAL;
+
+       vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
+                           (port & 0xFF);
+
        return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
                                    vhcr->op_modifier, inbox);
 }
@@ -835,7 +988,7 @@ struct mlx4_set_port_vxlan_context {
        u8      steering;
 };
 
-int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
 {
        int err;
        u32 in_mod;
@@ -849,7 +1002,8 @@ int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
        memset(context, 0, sizeof(*context));
 
        context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
-       context->enable_flags = VXLAN_ENABLE;
+       if (enable)
+               context->enable_flags = VXLAN_ENABLE;
        context->steering  = steering;
 
        in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
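
With the extra enable argument the helper can now both arm and disarm the VXLAN offload; a hedged sketch of a caller, assuming the outer-MAC steering flag used on the mlx4_en side:

static int example_toggle_vxlan_offload(struct mlx4_dev *dev, u8 port, int on)
{
        /* enable == 0 leaves VXLAN_ENABLE clear, so firmware switches the
         * offload off while the steering mode is still written explicitly */
        return mlx4_SET_PORT_VXLAN(dev, port, VXLAN_STEER_BY_OUTER_MAC, on);
}
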
@@ -927,3 +1081,108 @@ void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
                *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
 }
 EXPORT_SYMBOL(mlx4_set_stats_bitmap);
+
+int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
+                                int *slave_id)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int i, found_ix = -1;
+       int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
+       struct mlx4_slaves_pport slaves_pport;
+       unsigned num_vfs;
+       int slave_gid;
+
+       if (!mlx4_is_mfunc(dev))
+               return -EINVAL;
+
+       slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
+       num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+
+       for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
+               if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
+                       found_ix = i;
+                       break;
+               }
+       }
+
+       if (found_ix >= 0) {
+               if (found_ix < MLX4_ROCE_PF_GIDS)
+                       slave_gid = 0;
+               else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
+                        (vf_gids / num_vfs + 1))
+                       slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
+                                    (vf_gids / num_vfs + 1)) + 1;
+               else
+                       slave_gid =
+                       ((found_ix - MLX4_ROCE_PF_GIDS -
+                         ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
+                        (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
+
+               if (slave_gid) {
+                       struct mlx4_active_ports exclusive_ports;
+                       struct mlx4_active_ports actv_ports;
+                       struct mlx4_slaves_pport slaves_pport_actv;
+                       unsigned max_port_p_one;
+                       int num_slaves_before = 1;
+
+                       for (i = 1; i < port; i++) {
+                               bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+                               set_bit(i, exclusive_ports.ports);
+                               slaves_pport_actv =
+                                       mlx4_phys_to_slaves_pport_actv(
+                                                       dev, &exclusive_ports);
+                               num_slaves_before += bitmap_weight(
+                                               slaves_pport_actv.slaves,
+                                               dev->num_vfs + 1);
+                       }
+
+                       if (slave_gid < num_slaves_before) {
+                               bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
+                               set_bit(port - 1, exclusive_ports.ports);
+                               slaves_pport_actv =
+                                       mlx4_phys_to_slaves_pport_actv(
+                                                       dev, &exclusive_ports);
+                               slave_gid += bitmap_weight(
+                                               slaves_pport_actv.slaves,
+                                               dev->num_vfs + 1) -
+                                               num_slaves_before;
+                       }
+                       actv_ports = mlx4_get_active_ports(dev, slave_gid);
+                       max_port_p_one = find_first_bit(
+                               actv_ports.ports, dev->caps.num_ports) +
+                               bitmap_weight(actv_ports.ports,
+                                             dev->caps.num_ports) + 1;
+
+                       for (i = 1; i < max_port_p_one; i++) {
+                               if (i == port)
+                                       continue;
+                               bitmap_zero(exclusive_ports.ports,
+                                           dev->caps.num_ports);
+                               set_bit(i - 1, exclusive_ports.ports);
+                               slaves_pport_actv =
+                                       mlx4_phys_to_slaves_pport_actv(
+                                               dev, &exclusive_ports);
+                               slave_gid += bitmap_weight(
+                                               slaves_pport_actv.slaves,
+                                               dev->num_vfs + 1);
+                       }
+               }
+               *slave_id = slave_gid;
+       }
+
+       return (found_ix >= 0) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
+
+int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
+                                u8 *gid)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       if (!mlx4_is_master(dev))
+               return -EINVAL;
+
+       memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
+       return 0;
+}
+EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
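
The two exported helpers let the IB side translate between RoCE GIDs and functions; a minimal sketch of a lookup, with a made-up caller name:

static void example_report_gid_owner(struct mlx4_dev *dev, int port, u8 *gid)
{
        int slave;

        /* returns 0 and fills 'slave' (0 means the PF, otherwise the VF's
         * slave id) when the GID is found in the port table, -EINVAL if not */
        if (!mlx4_get_slave_from_roce_gid(dev, port, gid, &slave))
                mlx4_info(dev, "GID on port %d belongs to function %d\n",
                          port, slave);
}
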
index 57428a0cb9ddc37fd226cb22305323185eda22fd..3b5f53ef29b292d6edcb027f6b64b9c108a3a03b 100644 (file)
@@ -52,6 +52,8 @@
 struct mac_res {
        struct list_head list;
        u64 mac;
+       int ref_count;
+       u8 smac_index;
        u8 port;
 };
 
@@ -219,6 +221,11 @@ struct res_fs_rule {
        int                     qpn;
 };
 
+static int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+       return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
 {
        struct rb_node *node = root->rb_node;
@@ -461,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 
                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->num_vfs + 1; t++) {
+                       struct mlx4_active_ports actv_ports =
+                               mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
@@ -490,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
-                                       res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
+                                       int max_vfs_pport = 0;
+                                       /* Calculate the max vfs per port for */
+                                       /* both ports.                        */
+                                       for (j = 0; j < dev->caps.num_ports;
+                                            j++) {
+                                               struct mlx4_slaves_pport slaves_pport =
+                                                       mlx4_phys_to_slaves_pport(dev, j + 1);
+                                               unsigned current_slaves =
+                                                       bitmap_weight(slaves_pport.slaves,
+                                                                     dev->caps.num_ports) - 1;
+                                               if (max_vfs_pport < current_slaves)
+                                                       max_vfs_pport =
+                                                               current_slaves;
+                                       }
+                                       res_alloc->quota[t] =
+                                               MLX4_MAX_MAC_NUM -
+                                               2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
-                                               res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
+                                               res_alloc->res_port_free[j] =
+                                                       MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
@@ -521,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
-                               for (j = 0; j < MLX4_MAX_PORTS; j++)
-                                       res_alloc->res_port_rsvd[j] +=
-                                               res_alloc->guaranteed[t];
+                               for (j = 0; j < dev->caps.num_ports; j++)
+                                       if (test_bit(j, actv_ports.ports))
+                                               res_alloc->res_port_rsvd[j] +=
+                                                       res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
@@ -600,15 +627,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+       int port;
 
-       if (MLX4_QP_ST_UD == ts)
-               qp_ctx->pri_path.mgid_index = 0x80 | slave;
-
-       if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
-               if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
-                       qp_ctx->pri_path.mgid_index = slave & 0x7F;
-               if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
-                       qp_ctx->alt_path.mgid_index = slave & 0x7F;
+       if (MLX4_QP_ST_UD == ts) {
+               port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+               if (mlx4_is_eth(dev, port))
+                       qp_ctx->pri_path.mgid_index =
+                               mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
+               else
+                       qp_ctx->pri_path.mgid_index = slave | 0x80;
+
+       } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
+               if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+                       port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+                       if (mlx4_is_eth(dev, port)) {
+                               qp_ctx->pri_path.mgid_index +=
+                                       mlx4_get_base_gid_ix(dev, slave, port);
+                               qp_ctx->pri_path.mgid_index &= 0x7f;
+                       } else {
+                               qp_ctx->pri_path.mgid_index = slave & 0x7F;
+                       }
+               }
+               if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+                       port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+                       if (mlx4_is_eth(dev, port)) {
+                               qp_ctx->alt_path.mgid_index +=
+                                       mlx4_get_base_gid_ix(dev, slave, port);
+                               qp_ctx->alt_path.mgid_index &= 0x7f;
+                       } else {
+                               qp_ctx->alt_path.mgid_index = slave & 0x7F;
+                       }
+               }
        }
 }
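
Continuing the worked GID example: on an Ethernet (RoCE) port, a UD QP owned by the VF whose base GID index is 62 now gets pri_path.mgid_index = 62 | 0x80, and an RC/UC/XRC QP that asked for mgid_index 3 is rebased to 62 + 3 = 65; IB ports keep the old behaviour of indexing by slave number.
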
 
@@ -619,7 +668,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
-       u32 qp_type;
        int port;
 
        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
@@ -627,12 +675,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
        if (MLX4_VGT != vp_oper->state.default_vlan) {
-               qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
-               if (MLX4_QP_ST_RC == qp_type ||
-                   (MLX4_QP_ST_UD == qp_type &&
-                    !mlx4_is_qp_reserved(dev, qpn)))
-                       return -EINVAL;
-
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
@@ -1659,11 +1701,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
        return err;
 }
 
-static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
+                                    u8 smac_index, u64 *mac)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               if (res->smac_index == smac_index && res->port == (u8) port) {
+                       *mac = res->mac;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
-       struct mac_res *res;
+       struct list_head *mac_list =
+               &tracker->slave_list[slave].res_list[RES_MAC];
+       struct mac_res *res, *tmp;
+
+       list_for_each_entry_safe(res, tmp, mac_list, list) {
+               if (res->mac == mac && res->port == (u8) port) {
+                       /* mac found. update ref count */
+                       ++res->ref_count;
+                       return 0;
+               }
+       }
 
        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
                return -EINVAL;
@@ -1674,6 +1744,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
        }
        res->mac = mac;
        res->port = (u8) port;
+       res->smac_index = smac_index;
+       res->ref_count = 1;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
@@ -1690,9 +1762,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
 
        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
-                       list_del(&res->list);
-                       mlx4_release_resource(dev, slave, RES_MAC, 1, port);
-                       kfree(res);
+                       if (!--res->ref_count) {
+                               list_del(&res->list);
+                               mlx4_release_resource(dev, slave, RES_MAC, 1, port);
+                               kfree(res);
+                       }
                        break;
                }
        }
@@ -1705,10 +1779,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;
+       int i;
 
        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
-               __mlx4_unregister_mac(dev, res->port, res->mac);
+               /* dereference the mac the num times the slave referenced it */
+               for (i = 0; i < res->ref_count; i++)
+                       __mlx4_unregister_mac(dev, res->port, res->mac);
                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
                kfree(res);
        }
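
The reference count added to struct mac_res above means a slave that registers the same MAC more than once on a port (for instance once per QP using it) keeps a single tracker entry whose ref_count is bumped, and rem_slave_macs() now calls __mlx4_unregister_mac() once per reference so the PF-side usage count stays balanced when the slave is cleaned up.
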
@@ -1720,21 +1797,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
        int err = -EINVAL;
        int port;
        u64 mac;
+       u8 smac_index;
 
        if (op != RES_OP_RESERVE_AND_MAP)
                return err;
 
        port = !in_port ? get_param_l(out_param) : in_port;
+       port = mlx4_slave_convert_port(
+                       dev, slave, port);
+
+       if (port < 0)
+               return -EINVAL;
        mac = in_param;
 
        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
+               smac_index = err;
                set_param_l(out_param, err);
                err = 0;
        }
 
        if (!err) {
-               err = mac_add_to_slave(dev, slave, mac, port);
+               err = mac_add_to_slave(dev, slave, mac, port, smac_index);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
@@ -1831,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
        if (!port || op != RES_OP_RESERVE_AND_MAP)
                return -EINVAL;
 
+       port = mlx4_slave_convert_port(
+                       dev, slave, port);
+
+       if (port < 0)
+               return -EINVAL;
        /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
                slave_state[slave].old_vlan_api = true;
@@ -2128,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                port = !in_port ? get_param_l(out_param) : in_port;
+               port = mlx4_slave_convert_port(
+                               dev, slave, port);
+
+               if (port < 0)
+                       return -EINVAL;
                mac_del_from_slave(dev, slave, in_param, port);
                __mlx4_unregister_mac(dev, port, in_param);
                break;
@@ -2147,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int err = 0;
 
+       port = mlx4_slave_convert_port(
+                       dev, slave, port);
+
+       if (port < 0)
+               return -EINVAL;
        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                if (slave_state[slave].old_vlan_api)
@@ -2734,6 +2833,8 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
        u32                     qp_type;
        struct mlx4_qp_context  *qp_ctx;
        enum mlx4_qp_optpar     optpar;
+       int port;
+       int num_gids;
 
        qp_ctx  = inbox->buf + 8;
        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
@@ -2741,6 +2842,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
 
        switch (qp_type) {
        case MLX4_QP_ST_RC:
+       case MLX4_QP_ST_XRC:
        case MLX4_QP_ST_UC:
                switch (transition) {
                case QP_TRANS_INIT2RTR:
@@ -2749,13 +2851,24 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
                case QP_TRANS_SQD2SQD:
                case QP_TRANS_SQD2RTS:
                        if (slave != mlx4_master_func_num(dev))
-                               /* slaves have only gid index 0 */
-                               if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
-                                       if (qp_ctx->pri_path.mgid_index)
+                               if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+                                       port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+                                       if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+                                               num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+                                       else
+                                               num_gids = 1;
+                                       if (qp_ctx->pri_path.mgid_index >= num_gids)
                                                return -EINVAL;
-                               if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
-                                       if (qp_ctx->alt_path.mgid_index)
+                               }
+                               if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+                                       port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
+                                       if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+                                               num_gids = mlx4_get_slave_num_gids(dev, slave, port);
+                                       else
+                                               num_gids = 1;
+                                       if (qp_ctx->alt_path.mgid_index >= num_gids)
                                                return -EINVAL;
+                               }
                        break;
                default:
                        break;
@@ -3268,6 +3381,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 }
 
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+                                 struct mlx4_qp_context *qpc,
+                                 struct mlx4_cmd_mailbox *inbox)
+{
+       enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
+       u8 pri_sched_queue;
+       int port = mlx4_slave_convert_port(
+                  dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
+
+       if (port < 0)
+               return -EINVAL;
+
+       pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
+                         ((port & 1) << 6);
+
+       if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
+           mlx4_is_eth(dev, port + 1)) {
+               qpc->pri_path.sched_queue = pri_sched_queue;
+       }
+
+       if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
+               port = mlx4_slave_convert_port(
+                               dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
+                               + 1) - 1;
+               if (port < 0)
+                       return -EINVAL;
+               qpc->alt_path.sched_queue =
+                       (qpc->alt_path.sched_queue & ~(1 << 6)) |
+                       (port & 1) << 6;
+       }
+       return 0;
+}
+
+static int roce_verify_mac(struct mlx4_dev *dev, int slave,
+                               struct mlx4_qp_context *qpc,
+                               struct mlx4_cmd_mailbox *inbox)
+{
+       u64 mac;
+       int port;
+       u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+       u8 sched = *(u8 *)(inbox->buf + 64);
+       u8 smac_ix;
+
+       port = (sched >> 6 & 1) + 1;
+       if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
+               smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
+               if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
+                       return -ENOENT;
+       }
+       return 0;
+}
+
 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
@@ -3286,10 +3451,16 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
        u8 orig_vlan_index = qpc->pri_path.vlan_index;
        u8 orig_feup = qpc->pri_path.feup;
 
+       err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+       if (err)
+               return err;
        err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
        if (err)
                return err;
 
+       if (roce_verify_mac(dev, slave, qpc, inbox))
+               return -EINVAL;
+
        update_pkey_index(dev, slave, inbox);
        update_gid(dev, inbox, (u8)slave);
        adjust_proxy_tun_qkey(dev, vhcr, qpc);
@@ -3334,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
        int err;
        struct mlx4_qp_context *context = inbox->buf + 8;
 
+       err = adjust_qp_sched_queue(dev, slave, context, inbox);
+       if (err)
+               return err;
        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
        if (err)
                return err;
@@ -3353,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
        int err;
        struct mlx4_qp_context *context = inbox->buf + 8;
 
+       err = adjust_qp_sched_queue(dev, slave, context, inbox);
+       if (err)
+               return err;
        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
        if (err)
                return err;
@@ -3371,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_info *cmd)
 {
        struct mlx4_qp_context *context = inbox->buf + 8;
+       int err = adjust_qp_sched_queue(dev, slave, context, inbox);
+       if (err)
+               return err;
        adjust_proxy_tun_qkey(dev, vhcr, context);
        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 }
@@ -3384,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
        int err;
        struct mlx4_qp_context *context = inbox->buf + 8;
 
+       err = adjust_qp_sched_queue(dev, slave, context, inbox);
+       if (err)
+               return err;
        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
        if (err)
                return err;
@@ -3403,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
        int err;
        struct mlx4_qp_context *context = inbox->buf + 8;
 
+       err = adjust_qp_sched_queue(dev, slave, context, inbox);
+       if (err)
+               return err;
        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
        if (err)
                return err;
@@ -3506,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
        return err;
 }
 
-static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                    int block_loopback, enum mlx4_protocol prot,
+static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
+                    u8 gid[16], int block_loopback, enum mlx4_protocol prot,
                     enum mlx4_steer_type type, u64 *reg_id)
 {
        switch (dev->caps.steering_mode) {
-       case MLX4_STEERING_MODE_DEVICE_MANAGED:
-               return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
+       case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+               int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+               if (port < 0)
+                       return port;
+               return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
                                                block_loopback, prot,
                                                reg_id);
+       }
        case MLX4_STEERING_MODE_B0:
+               if (prot == MLX4_PROT_ETH) {
+                       int port = mlx4_slave_convert_port(dev, slave, gid[5]);
+                       if (port < 0)
+                               return port;
+                       gid[5] = port;
+               }
                return mlx4_qp_attach_common(dev, qp, gid,
                                            block_loopback, prot, type);
        default:
@@ -3523,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        }
 }
 
-static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
-                    enum mlx4_protocol prot, enum mlx4_steer_type type,
-                    u64 reg_id)
+static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+                    u8 gid[16], enum mlx4_protocol prot,
+                    enum mlx4_steer_type type, u64 reg_id)
 {
        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3562,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
        qp.qpn = qpn;
        if (attach) {
-               err = qp_attach(dev, &qp, gid, block_loopback, prot,
+               err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
                                type, &reg_id);
                if (err) {
                        pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3698,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                return -EOPNOTSUPP;
 
        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
+       ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
+       if (ctrl->port <= 0)
+               return -EINVAL;
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
        err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
@@ -3816,16 +4015,6 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
        return err;
 }
 
-int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
-                                             struct mlx4_vhcr *vhcr,
-                                             struct mlx4_cmd_mailbox *inbox,
-                                             struct mlx4_cmd_mailbox *outbox,
-                                             struct mlx4_cmd_info *cmd)
-{
-       return -EPERM;
-}
-
-
 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
 {
        struct res_gid *rgid;
index 23b7e2d35a93bb0598ac76b71d75cfc3ea7afff8..77ac95f052da81e31608891129141e43f9e61659 100644 (file)
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int num_eqs = 1 << dev->caps.log_max_eq;
        int nvec;
-       int err;
        int i;
 
        nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
        for (i = 0; i < nvec; i++)
                table->msix_arr[i].entry = i;
 
-retry:
-       table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
-       err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
-       if (err <= 0) {
-               return err;
-       } else if (err > 2) {
-               nvec = err;
-               goto retry;
-       }
+       nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+                                    MLX5_EQ_VEC_COMP_BASE, nvec);
+       if (nvec < 0)
+               return nvec;
 
-       mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
+       table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
        return 0;
 }
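
The mlx5_enable_msix() hunk above is one instance of a tree-wide conversion in this merge: pci_enable_msix(), whose positive return value meant "retry with fewer vectors", is replaced by pci_enable_msix_range(), which either enables a vector count within [minvec, maxvec] or fails with a negative errno. A minimal sketch of the new calling convention; the example_* names are illustrative and not taken from any driver in this diff.

#include <linux/pci.h>

/* Sketch only: ask for anything between min_vecs and max_vecs MSI-X
 * vectors.  pci_enable_msix_range() returns the number actually enabled
 * (>= min_vecs) or a negative errno, so no retry loop is needed.
 */
static int example_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
                               int min_vecs, int max_vecs)
{
        int i, nvec;

        for (i = 0; i < max_vecs; i++)
                entries[i].entry = i;

        nvec = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
        if (nvec < 0)
                return nvec;            /* could not meet min_vecs */

        return nvec;                    /* somewhere in [min_vecs, max_vecs] */
}
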
@@ -537,7 +531,6 @@ static int __init init(void)
 
        return 0;
 
-       mlx5_health_cleanup();
 err_debug:
        mlx5_unregister_debugfs();
        return err;
index ce84dc289c8fe785ed62a948d6bbf1f37983d3c8..14ac0e2bc09fcbc50f65ceead949ecd7d15d6130 100644 (file)
@@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
        skb->csum = old->csum;
        skb_set_network_header(skb, ETH_HLEN);
 
-       dev_kfree_skb(old);
+       dev_consume_skb_any(old);
 }
 
 /**
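
The copy_old_skb() change above, like the dev_kfree_skb() -> dev_kfree_skb_any() conversions in the s2io_xmit(), vxge_xmit() and nv_start_xmit() hunks that follow, is about freeing skbs from arbitrary contexts: the *_any() helpers are safe even in hard-IRQ context, and dev_consume_skb_any() additionally records the skb as consumed rather than dropped, so drop monitors do not flag a successful path as packet loss. A hedged sketch of the distinction, with an invented helper name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative helper: pick the free routine by outcome.  Both variants
 * are safe in any context, unlike plain dev_kfree_skb()/kfree_skb().
 */
static void example_free_skb(struct sk_buff *skb, bool transmitted)
{
        if (transmitted)
                dev_consume_skb_any(skb);       /* normal completion, not a drop */
        else
                dev_kfree_skb_any(skb);         /* error path, counted as a drop */
}
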
index 68026f7e8ba308d62c66570fb5ac7c19eb9f8994..130f6b204efa29cb9c97c98b4e3b0f52b569cd35 100644 (file)
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
        status = 0;
        if (myri10ge_msi) {
                if (mgp->num_slices > 1) {
-                       status =
-                           pci_enable_msix(pdev, mgp->msix_vectors,
-                                           mgp->num_slices);
-                       if (status == 0) {
-                               mgp->msix_enabled = 1;
-                       } else {
+                       status = pci_enable_msix_range(pdev, mgp->msix_vectors,
+                                       mgp->num_slices, mgp->num_slices);
+                       if (status < 0) {
                                dev_err(&pdev->dev,
                                        "Error %d setting up MSI-X\n", status);
                                return status;
                        }
+                       mgp->msix_enabled = 1;
                }
                if (mgp->msix_enabled == 0) {
                        status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
        mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
                                    GFP_KERNEL);
        if (mgp->msix_vectors == NULL)
-               goto disable_msix;
+               goto no_msix;
        for (i = 0; i < mgp->num_slices; i++) {
                mgp->msix_vectors[i].entry = i;
        }
 
        while (mgp->num_slices > 1) {
-               /* make sure it is a power of two */
-               while (!is_power_of_2(mgp->num_slices))
-                       mgp->num_slices--;
+               mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
                if (mgp->num_slices == 1)
-                       goto disable_msix;
-               status = pci_enable_msix(pdev, mgp->msix_vectors,
-                                        mgp->num_slices);
-               if (status == 0) {
-                       pci_disable_msix(pdev);
+                       goto no_msix;
+               status = pci_enable_msix_range(pdev,
+                                              mgp->msix_vectors,
+                                              mgp->num_slices,
+                                              mgp->num_slices);
+               if (status < 0)
+                       goto no_msix;
+
+               pci_disable_msix(pdev);
+
+               if (status == mgp->num_slices) {
                        if (old_allocated)
                                kfree(old_fw);
                        return;
-               }
-               if (status > 0)
+               } else {
                        mgp->num_slices = status;
-               else
-                       goto disable_msix;
+               }
        }
 
-disable_msix:
+no_msix:
        if (mgp->msix_vectors != NULL) {
                kfree(mgp->msix_vectors);
                mgp->msix_vectors = NULL;
index 9eeddbd0b2c7c749de1c863e4b142c25f35c4fb9..a2844ff322c4c62bed8957a7f3797ad321359cbf 100644 (file)
@@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
        struct RxD1 *rxdp1;
        struct RxD3 *rxdp3;
 
+       if (budget <= 0)
+               return napi_pkts;
+
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
@@ -3792,9 +3795,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
        writeq(rx_mat, &bar0->rx_mat);
        readq(&bar0->rx_mat);
 
-       ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+       ret = pci_enable_msix_range(nic->pdev, nic->entries,
+                                   nic->num_entries, nic->num_entries);
        /* We fail init if error or we get less vectors than min required */
-       if (ret) {
+       if (ret < 0) {
                DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
                kfree(nic->entries);
                swstats->mem_freed += nic->num_entries *
@@ -4045,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        if (!is_s2io_card_up(sp)) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -4118,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
            ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                s2io_stop_tx_queue(sp, fifo->fifo_no);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                spin_unlock_irqrestore(&fifo->tx_lock, flags);
                return NETDEV_TX_OK;
        }
@@ -4240,7 +4244,7 @@ pci_map_failed:
        swstats->pci_map_fail_cnt++;
        s2io_stop_tx_queue(sp, fifo->fifo_no);
        swstats->mem_freed += skb->truesize;
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
        return NETDEV_TX_OK;
 }
index e46e8698e6309a67945121ad86cb0ee4a61ff2b6..d107bcbb8543035110a98a82a21a7e72c8ec1303 100644 (file)
@@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
        vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
                ring->ndev->name, __func__, __LINE__);
 
+       if (ring->budget <= 0)
+               goto out;
+
        do {
                prefetch((char *)dtr + L1_CACHE_BYTES);
                rx_priv = vxge_hw_ring_rxd_private_get(dtr);
@@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
        if (first_dtr)
                vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
 
+out:
        vxge_debug_entryexit(VXGE_TRACE,
                                "%s:%d  Exiting...",
                                __func__, __LINE__);
@@ -820,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb->len <= 0)) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: Buffer has no data..", dev->name);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -829,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(!is_vxge_card_up(vdev))) {
                vxge_debug_tx(VXGE_ERR,
                        "%s: vdev not initialized", dev->name);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -839,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
                        vxge_debug_tx(VXGE_ERR,
                                "%s: Failed to store the mac address",
                                dev->name);
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
        }
@@ -986,7 +990,7 @@ _exit1:
        vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 _exit0:
        netif_tx_stop_queue(fifo->txq);
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
@@ -2349,12 +2353,18 @@ start:
        vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
        vdev->vxge_entries[j].in_use = 0;
 
-       ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
-       if (ret > 0) {
+       ret = pci_enable_msix_range(vdev->pdev,
+                                   vdev->entries, 3, vdev->intr_cnt);
+       if (ret < 0) {
+               ret = -ENODEV;
+               goto enable_msix_failed;
+       } else if (ret < vdev->intr_cnt) {
+               pci_disable_msix(vdev->pdev);
+
                vxge_debug_init(VXGE_ERR,
                        "%s: MSI-X enable failed for %d vectors, ret: %d",
                        VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
-               if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
+               if (max_config_vpath != VXGE_USE_DEFAULT) {
                        ret = -ENODEV;
                        goto enable_msix_failed;
                }
@@ -2368,9 +2378,6 @@ start:
                vxge_close_vpaths(vdev, temp);
                vdev->no_of_vpath = temp;
                goto start;
-       } else if (ret < 0) {
-               ret = -ENODEV;
-               goto enable_msix_failed;
        }
        return 0;
 
@@ -3131,12 +3138,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
                u64 packets, bytes, multicast;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&rxstats->syncp);
+                       start = u64_stats_fetch_begin_irq(&rxstats->syncp);
 
                        packets   = rxstats->rx_frms;
                        multicast = rxstats->rx_mcast;
                        bytes     = rxstats->rx_bytes;
-               } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
 
                net_stats->rx_packets += packets;
                net_stats->rx_bytes += bytes;
@@ -3146,11 +3153,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
                net_stats->rx_dropped += rxstats->rx_dropped;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&txstats->syncp);
+                       start = u64_stats_fetch_begin_irq(&txstats->syncp);
 
                        packets = txstats->tx_frms;
                        bytes   = txstats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
 
                net_stats->tx_packets += packets;
                net_stats->tx_bytes += bytes;
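
The _bh to _irq renames in vxge_get_stats64() above and nv_get_stats64() below are mechanical: the fetch/retry pair still brackets a lockless read of 64-bit counters that the writer updates under u64_stats_update_begin()/end(). A minimal reader-side sketch with hypothetical counter names:

#include <linux/u64_stats_sync.h>

struct example_stats {
        u64 packets;
        u64 bytes;
        struct u64_stats_sync syncp;    /* writer side uses update_begin/end */
};

/* Re-read the counters until no writer raced with us. */
static void example_read_stats(struct example_stats *s, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *packets = s->packets;
                *bytes   = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
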
index 70cf97fe67f2d0e63439d8d684f0244a212caa22..fddb464aeab3a517c362d12ad4891eb3e2529cae 100644 (file)
@@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 
        /* software stats */
        do {
-               syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+               syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
                storage->rx_packets       = np->stat_rx_packets;
                storage->rx_bytes         = np->stat_rx_bytes;
                storage->rx_dropped       = np->stat_rx_dropped;
                storage->rx_missed_errors = np->stat_rx_missed_errors;
-       } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+       } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
 
        do {
-               syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+               syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
                storage->tx_packets = np->stat_tx_packets;
                storage->tx_bytes   = np->stat_tx_bytes;
                storage->tx_dropped = np->stat_tx_dropped;
-       } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
+       } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
 
        /* If the nic supports hw counters then retrieve latest values */
        if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                if (pci_dma_mapping_error(np->pci_dev,
                                          np->put_tx_ctx->dma)) {
                        /* on DMA mapping error - drop the packet */
-                       kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        u64_stats_update_begin(&np->swstats_tx_syncp);
                        np->stat_tx_dropped++;
                        u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                        if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
                                                tmp_tx_ctx = np->first_tx_ctx;
                                } while (tmp_tx_ctx != np->put_tx_ctx);
-                               kfree_skb(skb);
+                               dev_kfree_skb_any(skb);
                                np->put_tx_ctx = start_tx_ctx;
                                u64_stats_update_begin(&np->swstats_tx_syncp);
                                np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                if (pci_dma_mapping_error(np->pci_dev,
                                          np->put_tx_ctx->dma)) {
                        /* on DMA mapping error - drop the packet */
-                       kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        u64_stats_update_begin(&np->swstats_tx_syncp);
                        np->stat_tx_dropped++;
                        u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                                        if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
                                                tmp_tx_ctx = np->first_tx_ctx;
                                } while (tmp_tx_ctx != np->put_tx_ctx);
-                               kfree_skb(skb);
+                               dev_kfree_skb_any(skb);
                                np->put_tx_ctx = start_tx_ctx;
                                u64_stats_update_begin(&np->swstats_tx_syncp);
                                np->stat_tx_dropped++;
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
 {
        struct fe_priv *np = get_nvpriv(dev);
        u8 __iomem *base = get_hwbase(dev);
-       int ret = 1;
+       int ret;
        int i;
        irqreturn_t (*handler)(int foo, void *data);
 
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
        if (np->msi_flags & NV_MSI_X_CAPABLE) {
                for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
                        np->msi_x_entry[i].entry = i;
-               ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
-               if (ret == 0) {
+               ret = pci_enable_msix_range(np->pci_dev,
+                                           np->msi_x_entry,
+                                           np->msi_flags & NV_MSI_X_VECTORS_MASK,
+                                           np->msi_flags & NV_MSI_X_VECTORS_MASK);
+               if (ret > 0) {
                        np->msi_flags |= NV_MSI_X_ENABLED;
                        if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
                                /* Request irq for rx handling */
                                sprintf(np->name_rx, "%s-rx", dev->name);
-                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
-                                               nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+                               ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+                                                 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
+                               if (ret) {
                                        netdev_info(dev,
                                                    "request_irq failed for rx %d\n",
                                                    ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                }
                                /* Request irq for tx handling */
                                sprintf(np->name_tx, "%s-tx", dev->name);
-                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
-                                               nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+                               ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+                                                 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
+                               if (ret) {
                                        netdev_info(dev,
                                                    "request_irq failed for tx %d\n",
                                                    ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                }
                                /* Request irq for link and timer handling */
                                sprintf(np->name_other, "%s-other", dev->name);
-                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
-                                               nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+                               ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+                                                 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
+                               if (ret) {
                                        netdev_info(dev,
                                                    "request_irq failed for link %d\n",
                                                    ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
                        } else {
                                /* Request irq for all interrupts */
-                               if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+                               ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
+                                                 handler, IRQF_SHARED, dev->name, dev);
+                               if (ret) {
                                        netdev_info(dev,
                                                    "request_irq failed %d\n",
                                                    ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                                writel(0, base + NvRegMSIXMap1);
                        }
                        netdev_info(dev, "MSI-X enabled\n");
+                       return 0;
                }
        }
-       if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+       if (np->msi_flags & NV_MSI_CAPABLE) {
                ret = pci_enable_msi(np->pci_dev);
                if (ret == 0) {
                        np->msi_flags |= NV_MSI_ENABLED;
-                       if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+                       ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
+                       if (ret) {
                                netdev_info(dev, "request_irq failed %d\n",
                                            ret);
                                pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
                        /* enable msi vector 0 */
                        writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
                        netdev_info(dev, "MSI enabled\n");
+                       return 0;
                }
        }
-       if (ret != 0) {
-               if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
-                       goto out_err;
 
-       }
+       if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+               goto out_err;
 
        return 0;
 out_free_tx:
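
After this rework nv_request_irq() reads as a plain fallback chain: try MSI-X and return early on success, then MSI, then legacy INTx, with each request_irq() failure reported through ret instead of being discarded. A condensed sketch of that control flow under assumed names (example_request_irq, a single "example" IRQ label), not the driver's exact code:

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Sketch of the MSI-X -> MSI -> INTx fallback. */
static int example_request_irq(struct pci_dev *pdev, struct msix_entry *entries,
                               int nvec, irq_handler_t handler, void *data)
{
        int ret;

        ret = pci_enable_msix_range(pdev, entries, nvec, nvec);
        if (ret > 0) {
                ret = request_irq(entries[0].vector, handler, IRQF_SHARED,
                                  "example", data);
                if (!ret)
                        return 0;
                pci_disable_msix(pdev);         /* fall back to MSI */
        }

        if (!pci_enable_msi(pdev)) {
                ret = request_irq(pdev->irq, handler, IRQF_SHARED, "example", data);
                if (!ret)
                        return 0;
                pci_disable_msi(pdev);          /* fall back to INTx */
        }

        return request_irq(pdev->irq, handler, IRQF_SHARED, "example", data);
}
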
index 464e91058c81157da7fa8d3c25a03b266ed0ffe6..73e66838cfef901e276803b2a6d136fdbd20aa02 100644 (file)
@@ -120,10 +120,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);
 static void pch_gbe_set_multi(struct net_device *netdev);
 
-static struct sock_filter ptp_filter[] = {
-       PTP_FILTER
-};
-
 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 {
        u8 *data = skb->data;
@@ -131,7 +127,7 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
        u16 *hi, *id;
        u32 lo;
 
-       if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
+       if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
                return 0;
 
        offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -2635,11 +2631,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 
        adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
                                               PCI_DEVFN(12, 4));
-       if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
-               dev_err(&pdev->dev, "Bad ptp filter\n");
-               ret = -EINVAL;
-               goto err_free_netdev;
-       }
 
        netdev->netdev_ops = &pch_gbe_netdev_ops;
        netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
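
The pch_gbe hunks above are one of several cases in this series where a driver drops its private PTP_FILTER/sk_run_filter() classifier in favour of the shared ptp_classify_raw() helper, which returns a PTP_CLASS_* value straight from the skb. A minimal usage sketch, with an invented wrapper name:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* Invented wrapper: true if the frame classifies as any PTP message. */
static bool example_is_ptp_frame(struct sk_buff *skb)
{
        return ptp_classify_raw(skb) != PTP_CLASS_NONE;
}
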
index 70849dea32b1306400822919ca0bd547339cbd4a..f09c35d669b3ec7d8898f96f0ffd9ce7362ddb13 100644 (file)
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
 
        if (adapter->msix_supported) {
                netxen_init_msix_entries(adapter, num_msix);
-               err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-               if (err == 0) {
+               err = pci_enable_msix_range(pdev, adapter->msix_entries,
+                                           num_msix, num_msix);
+               if (err > 0) {
                        adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
                        netxen_set_msix_bit(pdev, 1);
 
index f19f81cde134ba1ed37807b460dc7ad4015b7707..b9039b569bebf54bb2d82904c0c33f00f138abd1 100644 (file)
@@ -38,8 +38,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 55
-#define QLCNIC_LINUX_VERSIONID  "5.3.55"
+#define _QLCNIC_LINUX_SUBVERSION 57
+#define QLCNIC_LINUX_VERSIONID  "5.3.57"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -169,11 +169,20 @@ struct cmd_desc_type0 {
 
        __le64 addr_buffer2;
 
-       __le16 reference_handle;
+       __le16 encap_descr;     /* 15:10 offset of outer L3 header,
+                                * 9:6 number of 32bit words in outer L3 header,
+                                * 5 offload outer L4 checksum,
+                                * 4 offload outer L3 checksum,
+                                * 3 Inner L4 type, TCP=0, UDP=1,
+                                * 2 Inner L3 type, IPv4=0, IPv6=1,
+                                * 1 Outer L3 type, IPv4=0, IPv6=1,
+                                * 0 type of encapsulation, GRE=0, VXLAN=1
+                                */
        __le16 mss;
        u8 port_ctxid;          /* 7:4 ctxid 3:0 port */
-       u8 total_hdr_length;    /* LSO only : MAC+IP+TCP Hdr size */
-       __le16 conn_id;         /* IPSec offoad only */
+       u8 hdr_length;          /* LSO only : MAC+IP+TCP Hdr size */
+       u8 outer_hdr_length;    /* Encapsulation only */
+       u8 rsvd1;
 
        __le64 addr_buffer3;
        __le64 addr_buffer1;
@@ -183,7 +192,9 @@ struct cmd_desc_type0 {
        __le64 addr_buffer4;
 
        u8 eth_addr[ETH_ALEN];
-       __le16 vlan_TCI;
+       __le16 vlan_TCI;        /* In case of  encapsulation,
+                                * this is for outer VLAN
+                                */
 
 } __attribute__ ((aligned(64)));
 
@@ -394,7 +405,7 @@ struct qlcnic_nic_intr_coalesce {
        u32     timer_out;
 };
 
-struct qlcnic_dump_template_hdr {
+struct qlcnic_83xx_dump_template_hdr {
        u32     type;
        u32     offset;
        u32     size;
@@ -411,15 +422,42 @@ struct qlcnic_dump_template_hdr {
        u32     rsvd[0];
 };
 
+struct qlcnic_82xx_dump_template_hdr {
+       u32     type;
+       u32     offset;
+       u32     size;
+       u32     cap_mask;
+       u32     num_entries;
+       u32     version;
+       u32     timestamp;
+       u32     checksum;
+       u32     drv_cap_mask;
+       u32     sys_info[3];
+       u32     saved_state[16];
+       u32     cap_sizes[8];
+       u32     rsvd[7];
+       u32     capabilities;
+       u32     rsvd1[0];
+};
+
 struct qlcnic_fw_dump {
        u8      clr;    /* flag to indicate if dump is cleared */
        bool    enable; /* enable/disable dump */
        u32     size;   /* total size of the dump */
+       u32     cap_mask; /* Current capture mask */
        void    *data;  /* dump data area */
-       struct  qlcnic_dump_template_hdr *tmpl_hdr;
+       void    *tmpl_hdr;
        dma_addr_t phys_addr;
        void    *dma_buffer;
        bool    use_pex_dma;
+       /* Read only elements which are common between 82xx and 83xx
+        * template header. Update these values immediately after we read
+        * template header from Firmware
+        */
+       u32     tmpl_hdr_size;
+       u32     version;
+       u32     num_entries;
+       u32     offset;
 };
 
 /*
@@ -497,6 +535,7 @@ struct qlcnic_hardware_context {
        u8 extend_lb_time;
        u8 phys_port_id[ETH_ALEN];
        u8 lb_mode;
+       u16 vxlan_port;
 };
 
 struct qlcnic_adapter_stats {
@@ -511,6 +550,9 @@ struct qlcnic_adapter_stats {
        u64  txbytes;
        u64  lrobytes;
        u64  lso_frames;
+       u64  encap_lso_frames;
+       u64  encap_tx_csummed;
+       u64  encap_rx_csummed;
        u64  xmit_on;
        u64  xmit_off;
        u64  skb_alloc_failure;
@@ -872,6 +914,10 @@ struct qlcnic_mac_vlan_list {
 #define QLCNIC_FW_CAPABILITY_2_BEACON          BIT_7
 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG    BIT_9
 
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD  BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD  BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
+
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT                   1
 #define LINKEVENT_MODULE_OPTICAL_UNKNOWN               2
@@ -965,6 +1011,8 @@ struct qlcnic_ipaddr {
 #define QLCNIC_APP_CHANGED_FLAGS       0x20000
 #define QLCNIC_HAS_PHYS_PORT_ID                0x40000
 #define QLCNIC_TSS_RSS                 0x80000
+#define QLCNIC_ADD_VXLAN_PORT          0x100000
+#define QLCNIC_DEL_VXLAN_PORT          0x200000
 
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
        ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -1769,10 +1817,28 @@ struct qlcnic_hardware_ops {
                                struct qlcnic_host_tx_ring *);
        void (*disable_tx_intr) (struct qlcnic_adapter *,
                                 struct qlcnic_host_tx_ring *);
+       u32 (*get_saved_state)(void *, u32);
+       void (*set_saved_state)(void *, u32, u32);
+       void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+       u32 (*get_cap_size)(void *, int);
+       void (*set_sys_info)(void *, int, u32);
+       void (*store_cap_mask)(void *, u32);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
 
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->extra_capability[0] &
+              QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+       return adapter->ahw->extra_capability[0] &
+              QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 {
        return adapter->nic_ops->start_firmware(adapter);
@@ -2007,6 +2073,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
                adapter->ahw->hw_ops->read_phys_port_id(adapter);
 }
 
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+                                        void *t_hdr, u32 index)
+{
+       return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+                                         void *t_hdr, u32 index, u32 value)
+{
+       adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+                                               struct qlcnic_fw_dump *fw_dump)
+{
+       adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+                                     void *tmpl_hdr, int index)
+{
+       return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+                                      void *tmpl_hdr, int idx, u32 value)
+{
+       adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+                                        void *tmpl_hdr, u32 mask)
+{
+       adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
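
These new inline wrappers are what keeps the common qlcnic dump code generation-agnostic: the 82xx and 83xx template headers have different layouts, so tmpl_hdr is now a void * and every field access is routed through ahw->hw_ops. A simplified sketch of that dispatch idea, using invented types (dump_hdr_ops, example_fw_dump) rather than the driver's real ones:

#include <linux/types.h>

/* Per-generation accessors behind an ops table chosen at probe time. */
struct dump_hdr_ops {
        u32  (*get_cap_size)(void *tmpl_hdr, int index);
        void (*store_cap_mask)(void *tmpl_hdr, u32 mask);
};

struct example_fw_dump {
        void *tmpl_hdr;                 /* 82xx or 83xx layout */
        const struct dump_hdr_ops *ops;
};

static u32 example_cap_size(struct example_fw_dump *dump, int index)
{
        return dump->ops->get_cap_size(dump->tmpl_hdr, index);
}
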
 static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
                                            u32 key)
 {
index 27c4f131863bc30618ae5996411122f91685144b..b7cffb46a75dbd8f215752218a6f1f4239c1cc98 100644 (file)
@@ -77,7 +77,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
        {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
        {QLCNIC_CMD_IDC_ACK, 5, 1},
-       {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+       {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
        {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
        {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
        {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
@@ -87,6 +87,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
        {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
        {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
        {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+       {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
 };
 
 const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -203,7 +204,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
        .enable_tx_intr                 = qlcnic_83xx_enable_tx_intr,
        .disable_tx_intr                = qlcnic_83xx_disable_tx_intr,
-
+       .get_saved_state                = qlcnic_83xx_get_saved_state,
+       .set_saved_state                = qlcnic_83xx_set_saved_state,
+       .cache_tmpl_hdr_values          = qlcnic_83xx_cache_tmpl_hdr_values,
+       .get_cap_size                   = qlcnic_83xx_get_cap_size,
+       .set_sys_info                   = qlcnic_83xx_set_sys_info,
+       .store_cap_mask                 = qlcnic_83xx_store_cap_mask,
 };
 
 static struct qlcnic_nic_template qlcnic_83xx_ops = {
index f92485ca21d1131d989890f16f91a470847051c1..88d809c356334675026fb1a71e37107ded60c709 100644 (file)
@@ -308,6 +308,8 @@ struct qlc_83xx_reset {
 #define QLC_83XX_IDC_FLASH_PARAM_ADDR                  0x3e8020
 
 struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
 struct qlc_83xx_idc {
        int (*state_entry) (struct qlcnic_adapter *);
        u64             sec_counter;
@@ -526,8 +528,9 @@ enum qlc_83xx_ext_regs {
 };
 
 /* Initialize/Stop NIC command bit definitions */
-#define QLC_REGISTER_DCB_AEN           BIT_1
 #define QLC_REGISTER_LB_IDC            BIT_0
+#define QLC_REGISTER_DCB_AEN           BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO    BIT_29
 #define QLC_INIT_FW_RESOURCES          BIT_31
 
 /* 83xx functions */
@@ -650,4 +653,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
 void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
 int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
 void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
 #endif
index 90a2dda351ec0eeba96930533402d5e6d098a2a8..2d91975d21f77323064b5b68fcc231981f33e6ad 100644 (file)
@@ -1020,10 +1020,97 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
        return 0;
 }
 
+#define QLC_83XX_ENCAP_TYPE_VXLAN      BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID                BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT   BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+       u16 port = adapter->ahw->vxlan_port;
+       struct qlcnic_cmd_args cmd;
+       int ret = 0;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_INIT_NIC_FUNC);
+       if (ret)
+               return ret;
+
+       cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+       cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+                        QLC_83XX_SET_VXLAN_UDP_DPORT |
+                        QLC_83XX_VXLAN_UDP_DPORT(port);
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret)
+               netdev_err(adapter->netdev,
+                          "Failed to set VXLAN port %d in adapter\n",
+                          port);
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+                                   bool state)
+{
+       u16 vxlan_port = adapter->ahw->vxlan_port;
+       struct qlcnic_cmd_args cmd;
+       int ret = 0;
+
+       memset(&cmd, 0, sizeof(cmd));
+
+       ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+                                   QLCNIC_CMD_SET_INGRESS_ENCAP);
+       if (ret)
+               return ret;
+
+       cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+                                QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+       ret = qlcnic_issue_cmd(adapter, &cmd);
+       if (ret)
+               netdev_err(adapter->netdev,
+                          "Failed to %s VXLAN parsing for port %d\n",
+                          state ? "enable" : "disable", vxlan_port);
+       else
+               netdev_info(adapter->netdev,
+                           "%s VXLAN parsing for port %d\n",
+                           state ? "Enabled" : "Disabled", vxlan_port);
+
+       qlcnic_free_mbx_args(&cmd);
+
+       return ret;
+}
+
 static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
        if (adapter->fhash.fnum)
                qlcnic_prune_lb_filters(adapter);
+
+       if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+               if (qlcnic_set_vxlan_port(adapter))
+                       return;
+
+               if (qlcnic_set_vxlan_parsing(adapter, true))
+                       return;
+
+               adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+       } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+               if (qlcnic_set_vxlan_parsing(adapter, false))
+                       return;
+
+               ahw->vxlan_port = 0;
+               adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+       }
 }
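
qlcnic_add_vxlan_port()/qlcnic_del_vxlan_port() (added further below) only record the port and raise QLCNIC_ADD_VXLAN_PORT or QLCNIC_DEL_VXLAN_PORT; the firmware mailbox commands are issued here, from the periodic task, where sleeping is allowed. A generic sketch of that defer-the-work pattern, using an invented example_priv with a workqueue rather than qlcnic's poll task:

#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#define EXAMPLE_ADD_VXLAN_PORT  0       /* bit in priv->flags */

struct example_priv {
        struct work_struct encap_work;  /* runs in process context */
        unsigned long flags;
        __be16 pending_port;
};

/* The ndo callback may run in atomic context, so only record the request
 * here and let deferred work issue the (sleeping) firmware command.
 */
static void example_add_vxlan_port(struct net_device *dev,
                                   sa_family_t family, __be16 port)
{
        struct example_priv *p = netdev_priv(dev);

        p->pending_port = port;
        set_bit(EXAMPLE_ADD_VXLAN_PORT, &p->flags);
        schedule_work(&p->encap_work);
}
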
 
 /**
@@ -1301,7 +1388,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        addr = (u64)dest;
 
        ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
-                                         (u32 *)p_cache, size / 16);
+                                         p_cache, size / 16);
        if (ret) {
                dev_err(&adapter->pdev->dev, "MS memory write failed\n");
                release_firmware(fw);
index acee1a5d80c6521095c755418eb8c46197d9cec7..5bacf5210aed658abc12c639be38f0649bf96cbf 100644 (file)
@@ -47,6 +47,12 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
        {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
        {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
        {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+       {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+        QLC_OFF(stats.encap_lso_frames)},
+       {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+        QLC_OFF(stats.encap_tx_csummed)},
+       {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+        QLC_OFF(stats.encap_rx_csummed)},
        {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
         QLC_OFF(stats.skb_alloc_failure)},
        {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
@@ -1639,14 +1645,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
        }
 
        if (fw_dump->clr)
-               dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+               dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
        else
                dump->len = 0;
 
        if (!qlcnic_check_fw_dump_state(adapter))
                dump->flag = ETH_FW_DUMP_DISABLE;
        else
-               dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+               dump->flag = fw_dump->cap_mask;
 
        dump->version = adapter->fw_version;
        return 0;
@@ -1671,9 +1677,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
                netdev_info(netdev, "Dump not available\n");
                return -EINVAL;
        }
+
        /* Copy template header first */
-       copy_sz = fw_dump->tmpl_hdr->size;
-       hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+       copy_sz = fw_dump->tmpl_hdr_size;
+       hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
        data = buffer;
        for (i = 0; i < copy_sz/sizeof(u32); i++)
                *data++ = cpu_to_le32(*hdr_ptr++);
@@ -1681,7 +1688,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
        /* Copy captured dump data */
        memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
        dump->len = copy_sz + fw_dump->size;
-       dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+       dump->flag = fw_dump->cap_mask;
 
        /* Free dump area once data has been captured */
        vfree(fw_dump->data);
@@ -1703,7 +1710,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
                return -EOPNOTSUPP;
        }
 
-       fw_dump->tmpl_hdr->drv_cap_mask = mask;
+       fw_dump->cap_mask = mask;
+
+       /* Store new capture mask in template header as well */
+       qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
        netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
        return 0;
 }
index 03d18a0be6ce98cd8ab517d9623983de22a99672..9f3adf4e70b5f31a2d143c8d946ce6d88201a096 100644 (file)
@@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
 int
 qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 {
-       int timeout = 0;
-       int err = 0;
-       u32 done = 0;
+       int timeout = 0, err = 0, done = 0;
 
        while (!done) {
                done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
@@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
                if (done == 1)
                        break;
                if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
-                       dev_err(&adapter->pdev->dev,
-                               "Failed to acquire sem=%d lock; holdby=%d\n",
-                               sem,
-                               id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
+                       if (id_reg) {
+                               done = QLCRD32(adapter, id_reg, &err);
+                               if (done != -1)
+                                       dev_err(&adapter->pdev->dev,
+                                               "Failed to acquire sem=%d lock held by=%d\n",
+                                               sem, done);
+                               else
+                                       dev_err(&adapter->pdev->dev,
+                                               "Failed to acquire sem=%d lock",
+                                               sem);
+                       } else {
+                               dev_err(&adapter->pdev->dev,
+                                       "Failed to acquire sem=%d lock", sem);
+                       }
                        return -EIO;
                }
                msleep(1);
index 63d75617d445a2de9c6e9d998cc58d1a105f7b16..cbe2399c30a0d11e9621bed426b9b45318187901 100644 (file)
@@ -98,6 +98,7 @@ enum qlcnic_regs {
 #define QLCNIC_CMD_GET_LINK_EVENT              0x48
 #define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE       0x49
 #define QLCNIC_CMD_CONFIGURE_HW_LRO            0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP           0x4E
 #define QLCNIC_CMD_INIT_NIC_FUNC               0x60
 #define QLCNIC_CMD_STOP_NIC_FUNC               0x61
 #define QLCNIC_CMD_IDC_ACK                     0x63
@@ -161,6 +162,7 @@ struct qlcnic_host_sds_ring;
 struct qlcnic_host_tx_ring;
 struct qlcnic_hardware_context;
 struct qlcnic_adapter;
+struct qlcnic_fw_dump;
 
 int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
 int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
@@ -213,4 +215,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *);
 int qlcnic_82xx_resume(struct qlcnic_adapter *);
 void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
 void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
 #endif                         /* __QLCNIC_HW_H_ */
index 54ebf300332a353246066e5cf62f1d097355e1e5..173b3d12991f55a62751d5e6a213d20ee02c3174 100644 (file)
 
 #include "qlcnic.h"
 
-#define TX_ETHER_PKT   0x01
-#define TX_TCP_PKT     0x02
-#define TX_UDP_PKT     0x03
-#define TX_IP_PKT      0x04
-#define TX_TCP_LSO     0x05
-#define TX_TCP_LSO6    0x06
-#define TX_TCPV6_PKT   0x0b
-#define TX_UDPV6_PKT   0x0c
-#define FLAGS_VLAN_TAGGED      0x10
-#define FLAGS_VLAN_OOB         0x40
+#define QLCNIC_TX_ETHER_PKT            0x01
+#define QLCNIC_TX_TCP_PKT              0x02
+#define QLCNIC_TX_UDP_PKT              0x03
+#define QLCNIC_TX_IP_PKT               0x04
+#define QLCNIC_TX_TCP_LSO              0x05
+#define QLCNIC_TX_TCP_LSO6             0x06
+#define QLCNIC_TX_ENCAP_PKT            0x07
+#define QLCNIC_TX_ENCAP_LSO            0x08
+#define QLCNIC_TX_TCPV6_PKT            0x0b
+#define QLCNIC_TX_UDPV6_PKT            0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED       0x10
+#define QLCNIC_FLAGS_VLAN_OOB          0x40
 
 #define qlcnic_set_tx_vlan_tci(cmd_desc, v)    \
        (cmd_desc)->vlan_TCI = cpu_to_le16(v);
@@ -364,6 +367,101 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        spin_unlock(&adapter->mac_learn_lock);
 }
 
+#define QLCNIC_ENCAP_VXLAN_PKT         BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6      BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6      BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP      BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM                BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM                BIT_5
+
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+                              struct cmd_desc_type0 *first_desc,
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
+{
+       u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+       int copied, copy_len, descr_size;
+       u32 producer = tx_ring->producer;
+       struct cmd_desc_type0 *hwdesc;
+       u16 flags = 0, encap_descr = 0;
+
+       opcode = QLCNIC_TX_ETHER_PKT;
+       encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+       if (skb_is_gso(skb)) {
+               inner_hdr_len = skb_inner_transport_header(skb) +
+                               inner_tcp_hdrlen(skb) -
+                               skb_inner_mac_header(skb);
+
+               /* VXLAN header size = 8 */
+               outer_hdr_len = skb_transport_offset(skb) + 8 +
+                               sizeof(struct udphdr);
+               first_desc->outer_hdr_length = outer_hdr_len;
+               total_hdr_len = inner_hdr_len + outer_hdr_len;
+               encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+                              QLCNIC_ENCAP_DO_L4_CSUM;
+               first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+               first_desc->hdr_length = inner_hdr_len;
+
+               /* Copy inner and outer headers in Tx descriptor(s)
+                * If total_hdr_len > sizeof(struct cmd_desc_type0), use multiple
+                * descriptors
+                */
+               copied = 0;
+               descr_size = (int)sizeof(struct cmd_desc_type0);
+               while (copied < total_hdr_len) {
+                       copy_len = min(descr_size, (total_hdr_len - copied));
+                       hwdesc = &tx_ring->desc_head[producer];
+                       tx_ring->cmd_buf_arr[producer].skb = NULL;
+                       skb_copy_from_linear_data_offset(skb, copied,
+                                                        (char *)hwdesc,
+                                                        copy_len);
+                       copied += copy_len;
+                       producer = get_next_index(producer, tx_ring->num_desc);
+               }
+
+               tx_ring->producer = producer;
+
+               /* Make sure updated tx_ring->producer is visible
+                * for qlcnic_tx_avail()
+                */
+               smp_mb();
+               adapter->stats.encap_lso_frames++;
+
+               opcode = QLCNIC_TX_ENCAP_LSO;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (inner_ip_hdr(skb)->version == 6) {
+                       if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+                               encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+               } else {
+                       if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+                               encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+               }
+
+               adapter->stats.encap_tx_csummed++;
+               opcode = QLCNIC_TX_ENCAP_PKT;
+       }
+
+       /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+       if (ip_hdr(skb)->version == 6)
+               encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+       /* outer IP header's size in 32-bit words */
+       encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+       /* outer IP header offset */
+       encap_descr |= skb_network_offset(skb) << 10;
+       first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+       first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+                                    skb->data;
+       first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+       qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+       return 0;
+}
+
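
The encap_descr word assembled above follows the bit layout documented in the cmd_desc_type0 comment earlier in this diff: bit 0 encapsulation type, bit 1 outer L3 type, bit 2 inner L3 type, bit 3 inner L4 type, bits 4-5 outer checksum offloads, bits 9:6 outer L3 header length in 32-bit words, bits 15:10 outer L3 header offset. A standalone sketch of the same packing with illustrative inputs instead of a real skb; the names and macros below are invented, not the driver's.

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_ENCAP_VXLAN             BIT(0)  /* bit 0: VXLAN encapsulation */
#define EXAMPLE_ENCAP_OUTER_L3_IP6      BIT(1)  /* bit 1: outer L3 is IPv6 */

/* Pack the 16-bit control word: outer L3 header length (in 32-bit words)
 * goes in bits 9:6, the outer L3 header offset in bits 15:10.
 */
static u16 example_encap_descr(bool outer_ipv6, unsigned int l3_hdr_bytes,
                               unsigned int l3_offset)
{
        u32 descr = EXAMPLE_ENCAP_VXLAN;

        if (outer_ipv6)
                descr |= EXAMPLE_ENCAP_OUTER_L3_IP6;
        descr |= (l3_hdr_bytes >> 2) << 6;      /* bytes -> 32-bit words */
        descr |= l3_offset << 10;

        return (u16)descr;
}
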
 static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                         struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
                         struct qlcnic_host_tx_ring *tx_ring)
@@ -378,11 +476,11 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
 
        if (protocol == ETH_P_8021Q) {
                vh = (struct vlan_ethhdr *)skb->data;
-               flags = FLAGS_VLAN_TAGGED;
+               flags = QLCNIC_FLAGS_VLAN_TAGGED;
                vlan_tci = ntohs(vh->h_vlan_TCI);
                protocol = ntohs(vh->h_vlan_encapsulated_proto);
        } else if (vlan_tx_tag_present(skb)) {
-               flags = FLAGS_VLAN_OOB;
+               flags = QLCNIC_FLAGS_VLAN_OOB;
                vlan_tci = vlan_tx_tag_get(skb);
        }
        if (unlikely(adapter->tx_pvid)) {
@@ -391,7 +489,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
                        goto set_flags;
 
-               flags = FLAGS_VLAN_OOB;
+               flags = QLCNIC_FLAGS_VLAN_OOB;
                vlan_tci = adapter->tx_pvid;
        }
 set_flags:
@@ -402,25 +500,26 @@ set_flags:
                flags |= BIT_0;
                memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
        }
-       opcode = TX_ETHER_PKT;
+       opcode = QLCNIC_TX_ETHER_PKT;
        if (skb_is_gso(skb)) {
                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
                first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-               first_desc->total_hdr_length = hdr_len;
-               opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+               first_desc->hdr_length = hdr_len;
+               opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+                                                   QLCNIC_TX_TCP_LSO;
 
                /* For LSO, we need to copy the MAC/IP/TCP headers into
                * the descriptor ring */
                copied = 0;
                offset = 2;
 
-               if (flags & FLAGS_VLAN_OOB) {
-                       first_desc->total_hdr_length += VLAN_HLEN;
+               if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+                       first_desc->hdr_length += VLAN_HLEN;
                        first_desc->tcp_hdr_offset = VLAN_HLEN;
                        first_desc->ip_hdr_offset = VLAN_HLEN;
 
                        /* Only in case of TSO on vlan device */
-                       flags |= FLAGS_VLAN_TAGGED;
+                       flags |= QLCNIC_FLAGS_VLAN_TAGGED;
 
                        /* Create a TSO vlan header template for firmware */
                        hwdesc = &tx_ring->desc_head[producer];
@@ -464,16 +563,16 @@ set_flags:
                        l4proto = ip_hdr(skb)->protocol;
 
                        if (l4proto == IPPROTO_TCP)
-                               opcode = TX_TCP_PKT;
+                               opcode = QLCNIC_TX_TCP_PKT;
                        else if (l4proto == IPPROTO_UDP)
-                               opcode = TX_UDP_PKT;
+                               opcode = QLCNIC_TX_UDP_PKT;
                } else if (protocol == ETH_P_IPV6) {
                        l4proto = ipv6_hdr(skb)->nexthdr;
 
                        if (l4proto == IPPROTO_TCP)
-                               opcode = TX_TCPV6_PKT;
+                               opcode = QLCNIC_TX_TCPV6_PKT;
                        else if (l4proto == IPPROTO_UDP)
-                               opcode = TX_UDPV6_PKT;
+                               opcode = QLCNIC_TX_UDPV6_PKT;
                }
        }
        first_desc->tcp_hdr_offset += skb_transport_offset(skb);
@@ -563,6 +662,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        struct ethhdr *phdr;
        int i, k, frag_count, delta = 0;
        u32 producer, num_txd;
+       u16 protocol;
+       bool l4_is_udp = false;
 
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                netif_tx_stop_all_queues(netdev);
@@ -653,8 +754,23 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        tx_ring->producer = get_next_index(producer, num_txd);
        smp_mb();
 
-       if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
-               goto unwind_buff;
+       protocol = ntohs(skb->protocol);
+       if (protocol == ETH_P_IP)
+               l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+       else if (protocol == ETH_P_IPV6)
+               l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+       /* Check if it is a VXLAN packet */
+       if (!skb->encapsulation || !l4_is_udp ||
+           !qlcnic_encap_tx_offload(adapter)) {
+               if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+                                          tx_ring)))
+                       goto unwind_buff;
+       } else {
+               if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+                                                skb, tx_ring)))
+                       goto unwind_buff;
+       }
 
        if (adapter->drv_mac_learn)
                qlcnic_send_filter(adapter, first_desc, skb);
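
The dispatch above is easier to read in its positive form: only a packet the stack marked as encapsulated, whose outer L4 protocol is UDP, and for which the firmware advertises Tx encapsulation offload, goes through qlcnic_tx_encap_pkt(); everything else stays on the existing qlcnic_tx_pkt() path. A trivial restatement with an invented helper:

#include <linux/skbuff.h>

/* Invented helper: the condition under which the encapsulated Tx path
 * is taken, stated positively.
 */
static bool example_use_encap_path(const struct sk_buff *skb,
                                   bool outer_l4_is_udp, bool hw_encap_tx)
{
        return skb->encapsulation && outer_l4_is_udp && hw_encap_tx;
}
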
@@ -1587,6 +1703,13 @@ static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
                return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
 }
 
+#define QLCNIC_ENCAP_LENGTH_MASK       0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+       return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
 static struct qlcnic_rx_buffer *
 qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
                        struct qlcnic_host_sds_ring *sds_ring,
@@ -1637,6 +1760,12 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
 
        skb->protocol = eth_type_trans(skb, netdev);
 
+       if (qlcnic_encap_length(sts_data[1]) &&
+           skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               skb->encapsulation = 1;
+               adapter->stats.encap_rx_csummed++;
+       }
+
        if (vid != 0xffff)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 
index 1222865cfb7319b4ec085c035c00c862cff6dcd2..79be451a3ffc4ef0b820d91a86e403ead3b27d6a 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/aer.h>
 #include <linux/log2.h>
 #include <linux/pci.h>
+#include <net/vxlan.h>
 
 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
@@ -90,7 +91,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *);
 static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
 static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
                                                      pci_channel_state_t);
-
 static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -462,6 +462,35 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
        return 0;
 }
 
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+                                 sa_family_t sa_family, __be16 port)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       /* The adapter supports only one VXLAN port. Use the very first
+        * port notified for enabling the offload.
+        */
+       if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+               return;
+
+       ahw->vxlan_port = ntohs(port);
+       adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+                                 sa_family_t sa_family, __be16 port)
+{
+       struct qlcnic_adapter *adapter = netdev_priv(netdev);
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+       if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+           (ahw->vxlan_port != ntohs(port)))
+               return;
+
+       adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+
 static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_open          = qlcnic_open,
        .ndo_stop          = qlcnic_close,
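
The two callbacks above keep exactly one offloaded VXLAN port: the first notification is stored, later adds are ignored, and a delete only takes effect when it matches the stored port; the ADD/DEL flags defer the actual firmware reconfiguration. A small userland model of that bookkeeping (names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-adapter state; the flags mirror QLCNIC_ADD/DEL_VXLAN_PORT. */
struct vxlan_state {
	uint16_t port;      /* 0 means "no offloaded port" */
	int      add_flag;
	int      del_flag;
};

static void add_port(struct vxlan_state *s, uint16_t port)
{
	if (s->port)                /* only the very first port is offloaded */
		return;
	s->port = port;
	s->add_flag = 1;
}

static void del_port(struct vxlan_state *s, uint16_t port)
{
	if (!s->port || s->port != port)
		return;
	s->del_flag = 1;
}

int main(void)
{
	struct vxlan_state s = { 0, 0, 0 };

	add_port(&s, 4789);   /* stored */
	add_port(&s, 8472);   /* ignored: a port is already offloaded */
	del_port(&s, 8472);   /* ignored: does not match the stored port */
	del_port(&s, 4789);   /* schedules deletion */
	printf("port=%u add=%d del=%d\n", s.port, s.add_flag, s.del_flag);
	return 0;
}
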
@@ -480,6 +509,8 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_fdb_del            = qlcnic_fdb_del,
        .ndo_fdb_dump           = qlcnic_fdb_dump,
        .ndo_get_phys_port_id   = qlcnic_get_phys_port_id,
+       .ndo_add_vxlan_port     = qlcnic_add_vxlan_port,
+       .ndo_del_vxlan_port     = qlcnic_del_vxlan_port,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = qlcnic_poll_controller,
 #endif
@@ -561,6 +592,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
        .disable_sds_intr               = qlcnic_82xx_disable_sds_intr,
        .enable_tx_intr                 = qlcnic_82xx_enable_tx_intr,
        .disable_tx_intr                = qlcnic_82xx_disable_tx_intr,
+       .get_saved_state                = qlcnic_82xx_get_saved_state,
+       .set_saved_state                = qlcnic_82xx_set_saved_state,
+       .cache_tmpl_hdr_values          = qlcnic_82xx_cache_tmpl_hdr_values,
+       .get_cap_size                   = qlcnic_82xx_get_cap_size,
+       .set_sys_info                   = qlcnic_82xx_set_sys_info,
+       .store_cap_mask                 = qlcnic_82xx_store_cap_mask,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -684,7 +721,7 @@ restore:
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 {
        struct pci_dev *pdev = adapter->pdev;
-       int err = -1, vector;
+       int err, vector;
 
        if (!adapter->msix_entries) {
                adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +738,17 @@ enable_msix:
                for (vector = 0; vector < num_msix; vector++)
                        adapter->msix_entries[vector].entry = vector;
 
-               err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-               if (err == 0) {
+               err = pci_enable_msix_range(pdev,
+                                           adapter->msix_entries, 1, num_msix);
+
+               if (err == num_msix) {
                        adapter->flags |= QLCNIC_MSIX_ENABLED;
                        adapter->ahw->num_msix = num_msix;
                        dev_info(&pdev->dev, "using msi-x interrupts\n");
-                       return err;
+                       return 0;
                } else if (err > 0) {
+                       pci_disable_msix(pdev);
+
                        dev_info(&pdev->dev,
                                 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
                                 num_msix, err);
@@ -715,12 +756,12 @@ enable_msix:
                        if (qlcnic_82xx_check(adapter)) {
                                num_msix = rounddown_pow_of_two(err);
                                if (err < QLCNIC_82XX_MINIMUM_VECTOR)
-                                       return -EIO;
+                                       return -ENOSPC;
                        } else {
                                num_msix = rounddown_pow_of_two(err - 1);
                                num_msix += 1;
                                if (err < QLCNIC_83XX_MINIMUM_VECTOR)
-                                       return -EIO;
+                                       return -ENOSPC;
                        }
 
                        if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +788,7 @@ enable_msix:
                }
        }
 
-       return err;
+       return -EIO;
 }
 
 static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
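
pci_enable_msix() could report a smaller available vector count without enabling anything, so drivers looped on it; pci_enable_msix_range() allocates within a [min, max] window and returns either the number of vectors actually enabled or a negative error, and the driver keeps its own policy of retrying with a power-of-two count. A self-contained sketch of that retry policy, with the allocator faked and all names hypothetical:

#include <stdio.h>

/* Round down to the nearest power of two (like rounddown_pow_of_two()). */
static unsigned int rounddown_pow2(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* Fake allocator: pretend only 'avail' vectors can ever be granted. */
static int fake_enable_msix_range(unsigned int want, unsigned int avail)
{
	return want <= avail ? (int)want : (int)avail;
}

int main(void)
{
	unsigned int want = 16, avail = 5, min_vec = 2;
	int got = fake_enable_msix_range(want, avail);

	while (got > 0 && (unsigned int)got < want) {
		want = rounddown_pow2((unsigned int)got);  /* 5 -> 4 */
		if (want < min_vec)
			return -1;                         /* give up, like -ENOSPC */
		got = fake_enable_msix_range(want, avail);
	}
	printf("enabled %d vectors\n", got);   /* prints 4 */
	return 0;
}

As in the hunk above, the partially granted vectors are released with pci_disable_msix() before the smaller request is retried.
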
@@ -1934,6 +1975,9 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
 
        qlcnic_create_sysfs_entries(adapter);
 
+       if (qlcnic_encap_rx_offload(adapter))
+               vxlan_get_rx_port(netdev);
+
        adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
        return 0;
 
@@ -2196,6 +2240,19 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
        if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
                netdev->features |= NETIF_F_LRO;
 
+       if (qlcnic_encap_tx_offload(adapter)) {
+               netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+               /* Encapsulation Tx offloads supported by the adapter */
+               netdev->hw_enc_features = NETIF_F_IP_CSUM        |
+                                         NETIF_F_GSO_UDP_TUNNEL |
+                                         NETIF_F_TSO            |
+                                         NETIF_F_TSO6;
+       }
+
+       if (qlcnic_encap_rx_offload(adapter))
+               netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
        netdev->hw_features = netdev->features;
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
@@ -2442,8 +2499,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (err) {
                        switch (err) {
                        case -ENOTRECOVERABLE:
-                               dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
-                               dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+                               dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+                               dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
                                goto err_out_free_hw;
                        case -ENOMEM:
                                dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
index 7763962e2ec4e9128ae22cf7b8ee694248d8058d..37b979b1266bc2e0aa552f84dd8a14a149c82665 100644 (file)
@@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode {
        QLCNIC_DUMP_RDEND       = 255
 };
 
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+                                       u32 value)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr;
+
+       hdr = fw_dump->tmpl_hdr;
+       fw_dump->tmpl_hdr_size = hdr->size;
+       fw_dump->version = hdr->version;
+       fw_dump->num_entries = hdr->num_entries;
+       fw_dump->offset = hdr->offset;
+
+       hdr->drv_cap_mask = hdr->cap_mask;
+       fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+       struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+       hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+                                       u32 value)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->saved_state[index] = value;
+}
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr;
+
+       hdr = fw_dump->tmpl_hdr;
+       fw_dump->tmpl_hdr_size = hdr->size;
+       fw_dump->version = hdr->version;
+       fw_dump->num_entries = hdr->num_entries;
+       fw_dump->offset = hdr->offset;
+
+       hdr->drv_cap_mask = hdr->cap_mask;
+       fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+       hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+       struct qlcnic_83xx_dump_template_hdr *hdr;
+
+       hdr = tmpl_hdr;
+       hdr->drv_cap_mask = mask;
+}
+
 struct qlcnic_dump_operations {
        enum qlcnic_minidump_opcode opcode;
        u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
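
The 82xx and 83xx firmware use different minidump template header layouts, so the common minidump code stops dereferencing a single structure and instead caches the fields it needs and goes through per-generation get/set callbacks. A compact illustration of the same one-interface/two-layouts pattern (structure and function names here are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Two incompatible header layouts behind one accessor interface. */
struct hdr_a { uint32_t saved[8];  uint32_t cap_mask; };
struct hdr_b { uint32_t cap_mask;  uint32_t saved[16]; };

struct hdr_ops {
	uint32_t (*get_saved)(void *hdr, uint32_t idx);
	void     (*set_saved)(void *hdr, uint32_t idx, uint32_t val);
};

static uint32_t a_get(void *h, uint32_t i) { return ((struct hdr_a *)h)->saved[i]; }
static void     a_set(void *h, uint32_t i, uint32_t v) { ((struct hdr_a *)h)->saved[i] = v; }
static uint32_t b_get(void *h, uint32_t i) { return ((struct hdr_b *)h)->saved[i]; }
static void     b_set(void *h, uint32_t i, uint32_t v) { ((struct hdr_b *)h)->saved[i] = v; }

static const struct hdr_ops ops_a = { a_get, a_set };
static const struct hdr_ops ops_b = { b_get, b_set };

int main(void)
{
	struct hdr_b hdr = { 0 };
	const struct hdr_ops *ops = &ops_b;   /* picked per adapter generation */

	ops->set_saved(&hdr, 3, 0xdead);
	printf("saved[3] = 0x%x\n", ops->get_saved(&hdr, 3));
	(void)ops_a;
	return 0;
}
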
@@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
                            struct qlcnic_dump_entry *entry, __le32 *buffer)
 {
+       void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+       struct __ctrl *ctr = &entry->region.ctrl;
        int i, k, timeout = 0;
-       u32 addr, data;
+       u32 addr, data, temp;
        u8 no_ops;
-       struct __ctrl *ctr = &entry->region.ctrl;
-       struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
 
        addr = ctr->addr;
        no_ops = ctr->no_ops;
@@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
                                }
                                break;
                        case QLCNIC_DUMP_RD_SAVE:
-                               if (ctr->index_a)
-                                       addr = t_hdr->saved_state[ctr->index_a];
+                               temp = ctr->index_a;
+                               if (temp)
+                                       addr = qlcnic_get_saved_state(adapter,
+                                                                     hdr,
+                                                                     temp);
                                data = qlcnic_ind_rd(adapter, addr);
-                               t_hdr->saved_state[ctr->index_v] = data;
+                               qlcnic_set_saved_state(adapter, hdr,
+                                                      ctr->index_v, data);
                                break;
                        case QLCNIC_DUMP_WRT_SAVED:
-                               if (ctr->index_v)
-                                       data = t_hdr->saved_state[ctr->index_v];
+                               temp = ctr->index_v;
+                               if (temp)
+                                       data = qlcnic_get_saved_state(adapter,
+                                                                     hdr,
+                                                                     temp);
                                else
                                        data = ctr->val1;
-                               if (ctr->index_a)
-                                       addr = t_hdr->saved_state[ctr->index_a];
+
+                               temp = ctr->index_a;
+                               if (temp)
+                                       addr = qlcnic_get_saved_state(adapter,
+                                                                     hdr,
+                                                                     temp);
                                qlcnic_ind_wr(adapter, addr, data);
                                break;
                        case QLCNIC_DUMP_MOD_SAVE_ST:
-                               data = t_hdr->saved_state[ctr->index_v];
+                               data = qlcnic_get_saved_state(adapter, hdr,
+                                                             ctr->index_v);
                                data <<= ctr->shl_val;
                                data >>= ctr->shr_val;
                                if (ctr->val2)
                                        data &= ctr->val2;
                                data |= ctr->val3;
                                data += ctr->val1;
-                               t_hdr->saved_state[ctr->index_v] = data;
+                               qlcnic_set_saved_state(adapter, hdr,
+                                                      ctr->index_v, data);
                                break;
                        default:
                                dev_info(&adapter->pdev->dev,
@@ -544,7 +658,7 @@ out:
 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
                                struct __mem *mem)
 {
-       struct qlcnic_dump_template_hdr *tmpl_hdr;
+       struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
        struct device *dev = &adapter->pdev->dev;
        u32 dma_no, dma_base_addr, temp_addr;
        int i, ret, dma_sts;
@@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
        u32 temp, dma_base_addr, size = 0, read_size = 0;
        struct qlcnic_pex_dma_descriptor *dma_descr;
-       struct qlcnic_dump_template_hdr *tmpl_hdr;
+       struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
        struct device *dev = &adapter->pdev->dev;
        dma_addr_t dma_phys_addr;
        void *dma_buffer;
@@ -938,8 +1052,8 @@ static int
 qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
                                       struct qlcnic_cmd_args *cmd)
 {
-       struct qlcnic_dump_template_hdr tmp_hdr;
-       u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+       struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+       u32 size = sizeof(tmp_hdr) / sizeof(u32);
        int ret = 0;
 
        if (qlcnic_82xx_check(adapter))
@@ -1027,17 +1141,19 @@ free_mem:
        return err;
 }
 
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 {
-       int err;
-       u32 temp_size = 0;
-       u32 version, csum, *tmp_buf;
        struct qlcnic_hardware_context *ahw;
-       struct qlcnic_dump_template_hdr *tmpl_hdr;
+       struct qlcnic_fw_dump *fw_dump;
+       u32 version, csum, *tmp_buf;
        u8 use_flash_temp = 0;
+       u32 temp_size = 0;
+       int err;
 
        ahw = adapter->ahw;
-
+       fw_dump = &ahw->fw_dump;
        err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
                                               &use_flash_temp);
        if (err) {
@@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
                return -EIO;
        }
 
-       ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
-       if (!ahw->fw_dump.tmpl_hdr)
+       fw_dump->tmpl_hdr = vzalloc(temp_size);
+       if (!fw_dump->tmpl_hdr)
                return -ENOMEM;
 
-       tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+       tmp_buf = (u32 *)fw_dump->tmpl_hdr;
        if (use_flash_temp)
                goto flash_temp;
 
@@ -1065,8 +1181,8 @@ flash_temp:
                        dev_err(&adapter->pdev->dev,
                                "Failed to get minidump template header %d\n",
                                err);
-                       vfree(ahw->fw_dump.tmpl_hdr);
-                       ahw->fw_dump.tmpl_hdr = NULL;
+                       vfree(fw_dump->tmpl_hdr);
+                       fw_dump->tmpl_hdr = NULL;
                        return -EIO;
                }
        }
@@ -1076,21 +1192,22 @@ flash_temp:
        if (csum) {
                dev_err(&adapter->pdev->dev,
                        "Template header checksum validation failed\n");
-               vfree(ahw->fw_dump.tmpl_hdr);
-               ahw->fw_dump.tmpl_hdr = NULL;
+               vfree(fw_dump->tmpl_hdr);
+               fw_dump->tmpl_hdr = NULL;
                return -EIO;
        }
 
-       tmpl_hdr = ahw->fw_dump.tmpl_hdr;
-       tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+       qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
        dev_info(&adapter->pdev->dev,
                 "Default minidump capture mask 0x%x\n",
-                tmpl_hdr->cap_mask);
+                fw_dump->cap_mask);
 
-       if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
-               ahw->fw_dump.use_pex_dma = true;
+       if (qlcnic_83xx_check(adapter) &&
+           (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
+               fw_dump->use_pex_dma = true;
        else
-               ahw->fw_dump.use_pex_dma = false;
+               fw_dump->use_pex_dma = false;
 
        qlcnic_enable_fw_dump_state(adapter);
 
@@ -1099,21 +1216,22 @@ flash_temp:
 
 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 {
-       __le32 *buffer;
-       u32 ocm_window;
-       char mesg[64];
-       char *msg[] = {mesg, NULL};
-       int i, k, ops_cnt, ops_index, dump_size = 0;
-       u32 entry_offset, dump, no_entries, buf_offset = 0;
-       struct qlcnic_dump_entry *entry;
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-       struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
        static const struct qlcnic_dump_operations *fw_dump_ops;
+       struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+       u32 entry_offset, dump, no_entries, buf_offset = 0;
+       int i, k, ops_cnt, ops_index, dump_size = 0;
        struct device *dev = &adapter->pdev->dev;
        struct qlcnic_hardware_context *ahw;
-       void *temp_buffer;
+       struct qlcnic_dump_entry *entry;
+       void *temp_buffer, *tmpl_hdr;
+       u32 ocm_window;
+       __le32 *buffer;
+       char mesg[64];
+       char *msg[] = {mesg, NULL};
 
        ahw = adapter->ahw;
+       tmpl_hdr = fw_dump->tmpl_hdr;
 
        /* Return if we don't have firmware dump template header */
        if (!tmpl_hdr)
@@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
        netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
        /* Calculate the size for dump data area only */
        for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
-               if (i & tmpl_hdr->drv_cap_mask)
-                       dump_size += tmpl_hdr->cap_sizes[k];
+               if (i & fw_dump->cap_mask)
+                       dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
        if (!dump_size)
                return -EIO;
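
The size calculation above walks the capture mask bit by bit starting at bit 1 and, for every level enabled in the driver's capture mask, adds the per-level size reported by the template header. The same bit walk in standalone form, with made-up sizes and a mask width that only plays the role of QLCNIC_DUMP_MASK_MAX:

#include <stdio.h>

#define DUMP_MASK_MAX 0xff   /* illustrative stand-in for QLCNIC_DUMP_MASK_MAX */

int main(void)
{
	/* cap_sizes[k] is the dump size for capture level k (values invented). */
	unsigned int cap_sizes[] = { 0, 100, 200, 400, 800, 1600, 3200, 6400 };
	unsigned int cap_mask = 0x1e;   /* bits 1..4 enabled */
	unsigned int dump_size = 0;
	unsigned int i, k;

	for (i = 2, k = 1; i & DUMP_MASK_MAX; i <<= 1, k++)
		if (i & cap_mask)
			dump_size += cap_sizes[k];

	printf("dump size = %u bytes\n", dump_size);   /* 100+200+400+800 */
	return 0;
}
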
 
@@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 
        buffer = fw_dump->data;
        fw_dump->size = dump_size;
-       no_entries = tmpl_hdr->num_entries;
-       entry_offset = tmpl_hdr->offset;
-       tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
-       tmpl_hdr->sys_info[1] = adapter->fw_version;
+       no_entries = fw_dump->num_entries;
+       entry_offset = fw_dump->offset;
+       qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+       qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
 
        if (fw_dump->use_pex_dma) {
                temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
@@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
                fw_dump_ops = qlcnic_fw_dump_ops;
        } else {
+               hdr_83xx = tmpl_hdr;
                ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
                fw_dump_ops = qlcnic_83xx_fw_dump_ops;
-               ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
-               tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
-               tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+               ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+               hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+               hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
        }
 
        for (i = 0; i < no_entries; i++) {
-               entry = (void *)tmpl_hdr + entry_offset;
-               if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+               entry = tmpl_hdr + entry_offset;
+               if (!(entry->hdr.mask & fw_dump->cap_mask)) {
                        entry->hdr.flags |= QLCNIC_DUMP_SKIP;
                        entry_offset += entry->hdr.offset;
                        continue;
@@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 
        fw_dump->clr = 1;
        snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
-       dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
-                adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+       netdev_info(adapter->netdev,
+                   "Dump data %d bytes captured, template header size %d bytes\n",
+                   fw_dump->size, fw_dump->tmpl_hdr_size);
        /* Send a udev event to notify availability of FW dump */
        kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
 
index e5277a632671a1ca23877fb5ca46ab1397ca3cc2..14f748cbf0deeb8cd4c6ccfa3039d08764ee1142 100644 (file)
@@ -15,6 +15,7 @@
 #define QLC_MAC_OPCODE_MASK    0x7
 #define QLC_VF_FLOOD_BIT       BIT_16
 #define QLC_FLOOD_MODE         0x5
+#define QLC_SRIOV_ALLOW_VLAN0  BIT_19
 
 static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
 
@@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
                return err;
 
        cmd.req.arg[1] = 0x4;
-       if (enable)
+       if (enable) {
                cmd.req.arg[1] |= BIT_16;
+               if (qlcnic_84xx_check(adapter))
+                       cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+       }
 
        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err)
index 3d64113a35af60428a0abfc0d02a0f8bd0f7a918..448d156c3d0804da56c2633113d3a7ef04b8e683 100644 (file)
@@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
        return size;
 }
 
-static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_hardware_context *ahw = adapter->ahw;
-       u32 count = 0;
-
-       if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
-               return ahw->total_nic_func;
-
-       if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
-               count = QLC_DEFAULT_VNIC_COUNT;
-       else
-               count = ahw->max_vnic_func;
-
-       return count;
-}
-
 int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
 {
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        int i;
 
-       for (i = 0; i < pci_func_count; i++) {
+       for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
                if (adapter->npars[i].pci_func == pci_func)
                        return i;
        }
-
-       return -1;
+       return -EINVAL;
 }
 
 static int validate_pm_config(struct qlcnic_adapter *adapter,
@@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        struct qlcnic_pm_func_cfg *pm_cfg;
-       int i, pm_cfg_size;
        u8 pci_func;
+       u32 count;
+       int i;
 
-       pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
-       if (size != pm_cfg_size)
-               return QL_STATUS_INVALID_PARAM;
-
-       memset(buf, 0, pm_cfg_size);
+       memset(buf, 0, size);
        pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
-       for (i = 0; i < pci_func_count; i++) {
+       count = size / sizeof(struct qlcnic_pm_func_cfg);
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
                pci_func = adapter->npars[i].pci_func;
-               if (!adapter->npars[i].active)
+               if (pci_func >= count) {
+                       dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+                               __func__, adapter->ahw->total_nic_func, count);
                        continue;
-
+               }
                if (!adapter->npars[i].eswitch_status)
                        continue;
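
Rather than rejecting any read whose buffer is not exactly one entry per PCI function, the sysfs handlers above now derive the entry count from the size the caller passed in and skip NIC functions whose index would not fit, logging a debug hint. A short sketch of that bounds handling, with stand-in structure names:

#include <stdio.h>
#include <string.h>

struct pm_cfg { unsigned char pci_func; unsigned char enabled; };

/* Fill at most size/sizeof(*cfg) entries; skip functions that don't fit. */
static void fill_pm_config(struct pm_cfg *cfg, size_t size,
			   const unsigned char *funcs, int nfuncs)
{
	size_t count = size / sizeof(*cfg);
	int i;

	memset(cfg, 0, size);
	for (i = 0; i < nfuncs; i++) {
		if (funcs[i] >= count) {
			fprintf(stderr, "func %d does not fit in buffer\n",
				funcs[i]);
			continue;
		}
		cfg[funcs[i]].pci_func = funcs[i];
		cfg[funcs[i]].enabled = 1;
	}
}

int main(void)
{
	struct pm_cfg buf[4];
	unsigned char funcs[] = { 0, 2, 6 };   /* function 6 will be skipped */

	fill_pm_config(buf, sizeof(buf), funcs, 3);
	printf("func2 enabled: %d\n", buf[2].enabled);
	return 0;
}
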
 
@@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
 static int validate_esw_config(struct qlcnic_adapter *adapter,
                               struct qlcnic_esw_func_cfg *esw_cfg, int count)
 {
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        int i, ret;
        u32 op_mode;
@@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
 
        for (i = 0; i < count; i++) {
                pci_func = esw_cfg[i].pci_func;
-               if (pci_func >= pci_func_count)
+               if (pci_func >= ahw->max_vnic_func)
                        return QL_STATUS_INVALID_PARAM;
 
                if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        struct qlcnic_esw_func_cfg *esw_cfg;
-       size_t esw_cfg_size;
-       u8 i, pci_func;
-
-       esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
-       if (size != esw_cfg_size)
-               return QL_STATUS_INVALID_PARAM;
+       u8 pci_func;
+       u32 count;
+       int i;
 
-       memset(buf, 0, esw_cfg_size);
+       memset(buf, 0, size);
        esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
-
-       for (i = 0; i < pci_func_count; i++) {
+       count = size / sizeof(struct qlcnic_esw_func_cfg);
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
                pci_func = adapter->npars[i].pci_func;
-               if (!adapter->npars[i].active)
+               if (pci_func >= count) {
+                       dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+                               __func__, adapter->ahw->total_nic_func, count);
                        continue;
-
+               }
                if (!adapter->npars[i].eswitch_status)
                        continue;
 
@@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        struct qlcnic_npar_func_cfg *np_cfg;
        struct qlcnic_info nic_info;
-       size_t np_cfg_size;
        int i, ret;
-
-       np_cfg_size = pci_func_count * sizeof(*np_cfg);
-       if (size != np_cfg_size)
-               return QL_STATUS_INVALID_PARAM;
+       u32 count;
 
        memset(&nic_info, 0, sizeof(struct qlcnic_info));
-       memset(buf, 0, np_cfg_size);
+       memset(buf, 0, size);
        np_cfg = (struct qlcnic_npar_func_cfg *)buf;
 
-       for (i = 0; i < pci_func_count; i++) {
+       count = size / sizeof(struct qlcnic_npar_func_cfg);
+       for (i = 0; i < adapter->ahw->total_nic_func; i++) {
                if (qlcnic_is_valid_nic_func(adapter, i) < 0)
                        continue;
+               if (adapter->npars[i].pci_func >= count) {
+                       dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+                               __func__, adapter->ahw->total_nic_func, count);
+                       continue;
+               }
                ret = qlcnic_get_nic_info(adapter, &nic_info, i);
                if (ret)
                        return ret;
@@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        struct qlcnic_esw_statistics port_stats;
        int ret;
 
@@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
        if (size != sizeof(struct qlcnic_esw_statistics))
                return QL_STATUS_INVALID_PARAM;
 
-       if (offset >= pci_func_count)
+       if (offset >= adapter->ahw->max_vnic_func)
                return QL_STATUS_INVALID_PARAM;
 
        memset(&port_stats, 0, size);
@@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
 
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        int ret;
 
        if (qlcnic_83xx_check(adapter))
                return QLC_STATUS_UNSUPPORTED_CMD;
 
-       if (offset >= pci_func_count)
+       if (offset >= adapter->ahw->max_vnic_func)
                return QL_STATUS_INVALID_PARAM;
 
        ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-       u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
        struct qlcnic_pci_func_cfg *pci_cfg;
        struct qlcnic_pci_info *pci_info;
-       size_t pci_cfg_sz;
        int i, ret;
+       u32 count;
 
-       pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
-       if (size != pci_cfg_sz)
-               return QL_STATUS_INVALID_PARAM;
-
-       pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
+       pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
        if (!pci_info)
                return -ENOMEM;
 
@@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
        }
 
        pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
-       for (i = 0; i < pci_func_count; i++) {
+       count = size / sizeof(struct qlcnic_pci_func_cfg);
+       for (i = 0; i < count; i++) {
                pci_cfg[i].pci_func = pci_info[i].id;
                pci_cfg[i].func_type = pci_info[i].type;
                pci_cfg[i].func_state = 0;
index 656c65ddadb4af03ff032f8f2310ebb91a7f0189..0a1d76acab8171929e3c6f9ad6139b48484ebbcc 100644 (file)
@@ -2556,11 +2556,10 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 
        if (skb_is_gso(skb)) {
                int err;
-               if (skb_header_cloned(skb)) {
-                       err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-                       if (err)
-                               return err;
-               }
+
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
+                       return err;
 
                mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
                mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
@@ -3331,24 +3330,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
                for (i = 0; i < qdev->intr_count; i++)
                        qdev->msi_x_entry[i].entry = i;
 
-               /* Loop to get our vectors.  We start with
-                * what we want and settle for what we get.
-                */
-               do {
-                       err = pci_enable_msix(qdev->pdev,
-                               qdev->msi_x_entry, qdev->intr_count);
-                       if (err > 0)
-                               qdev->intr_count = err;
-               } while (err > 0);
-
+               err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+                                           1, qdev->intr_count);
                if (err < 0) {
                        kfree(qdev->msi_x_entry);
                        qdev->msi_x_entry = NULL;
                        netif_warn(qdev, ifup, qdev->ndev,
                                   "MSI-X Enable failed, trying MSI.\n");
-                       qdev->intr_count = 1;
                        qlge_irq_type = MSI_IRQ;
-               } else if (err == 0) {
+               } else {
+                       qdev->intr_count = err;
                        set_bit(QL_MSIX_ENABLED, &qdev->flags);
                        netif_info(qdev, ifup, qdev->ndev,
                                   "MSI-X Enabled, got %d vectors.\n",
index 819b74cefd64653d4bad2dfe18971dfd64744f59..cd045ecb9816d90e32cb086d5ef36f5a67749548 100644 (file)
@@ -270,11 +270,6 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
        return r6040_phy_write(ioaddr, phy_addr, reg, value);
 }
 
-static int r6040_mdiobus_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static void r6040_free_txbufs(struct net_device *dev)
 {
        struct r6040_private *lp = netdev_priv(dev);
@@ -1191,7 +1186,6 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        lp->mii_bus->priv = dev;
        lp->mii_bus->read = r6040_mdiobus_read;
        lp->mii_bus->write = r6040_mdiobus_write;
-       lp->mii_bus->reset = r6040_mdiobus_reset;
        lp->mii_bus->name = "r6040_eth_mii";
        snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                dev_name(&pdev->dev), card_idx);
index 737c1a881f781917061d8b39ad43762f62ee7a6e..2bc728e65e245424e34f69797d841f5b60af4d5c 100644 (file)
@@ -476,7 +476,7 @@ rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);
 
-       while (1) {
+       while (rx < budget) {
                u32 status, len;
                dma_addr_t mapping, new_mapping;
                struct sk_buff *skb, *new_skb;
@@ -554,9 +554,6 @@ rx_next:
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);
-
-               if (rx >= budget)
-                       break;
        }
 
        cp->rx_tail = rx_tail;
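
The loop restructuring above moves the NAPI budget test into the while condition, so no packet is started once the budget is exhausted and the count handed back to NAPI matches the work actually done. A minimal model of the reworked loop:

#include <stdio.h>

/* Pretend there are 'avail' packets waiting in the ring. */
static int packets_available(int processed, int avail)
{
	return processed < avail;
}

int main(void)
{
	int budget = 16, avail = 40;
	int rx = 0;

	while (rx < budget) {           /* bound the work up front */
		if (!packets_available(rx, avail))
			break;
		/* ... receive one packet, refill the descriptor ... */
		rx++;
	}
	printf("processed %d of %d (budget %d)\n", rx, avail, budget);
	return 0;
}
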
@@ -899,7 +896,7 @@ out_unlock:
 
        return NETDEV_TX_OK;
 out_dma_error:
-       kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        cp->dev->stats.tx_dropped++;
        goto out_unlock;
 }
index da5972eefdd2bfc5d702fd553cf68b91c5485fb5..2e5df148af4c7d6fa36c33f7543ef5c758db9368 100644 (file)
@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
                if (len < ETH_ZLEN)
                        memset(tp->tx_buf[entry], 0, ETH_ZLEN);
                skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
        } else {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
@@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        netdev_stats_to_stats64(stats, &dev->stats);
 
        do {
-               start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
                stats->rx_packets = tp->rx_stats.packets;
                stats->rx_bytes = tp->rx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 
        do {
-               start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
                stats->tx_packets = tp->tx_stats.packets;
                stats->tx_bytes = tp->tx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
 
        return stats;
 }
index 3ff7bc3e7a23ba419c9957917a9471a939db54f2..aa1c079f231dc6f2cfc017cd4950f9ebb8e1e9da 100644 (file)
@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
                                             tp->TxDescArray + entry);
                        if (skb) {
                                tp->dev->stats.tx_dropped++;
-                               dev_kfree_skb(skb);
+                               dev_kfree_skb_any(skb);
                                tx_skb->skb = NULL;
                        }
                }
@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 err_dma_1:
        rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 err_dma_0:
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
 err_update_stats:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
                        tp->tx_stats.packets++;
                        tp->tx_stats.bytes += tx_skb->skb->len;
                        u64_stats_update_end(&tp->tx_stats.syncp);
-                       dev_kfree_skb(tx_skb->skb);
+                       dev_kfree_skb_any(tx_skb->skb);
                        tx_skb->skb = NULL;
                }
                dirty_tx++;
@@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                rtl8169_rx_missed(dev, ioaddr);
 
        do {
-               start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
                stats->rx_packets = tp->rx_stats.packets;
                stats->rx_bytes = tp->rx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 
 
        do {
-               start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
                stats->tx_packets = tp->tx_stats.packets;
                stats->tx_bytes = tp->tx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
 
        stats->rx_dropped       = dev->stats.rx_dropped;
        stats->tx_dropped       = dev->stats.tx_dropped;
index 040cb94e8219cf8438dd2017164fca8387be45db..6a9509ccd33b29dd84ddcd0b48269f1c24b8da63 100644 (file)
@@ -1,8 +1,9 @@
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- *  Copyright (C) 2008-2013 Renesas Solutions Corp.
- *  Copyright (C) 2013 Cogent Embedded, Inc.
+ *  Copyright (C) 2008-2014 Renesas Solutions Corp.
+ *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
+ *  Copyright (C) 2014 Codethink Limited
  *
  *  This program is free software; you can redistribute it and/or modify it
  *  under the terms and conditions of the GNU General Public License,
 #include <linux/platform_device.h>
 #include <linux/mdio-bitbang.h>
 #include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/cache.h>
 #include <linux/io.h>
@@ -36,6 +41,7 @@
 #include <linux/if_vlan.h>
 #include <linux/clk.h>
 #include <linux/sh_eth.h>
+#include <linux/of_mdio.h>
 
 #include "sh_eth.h"
 
@@ -394,7 +400,8 @@ static void sh_eth_select_mii(struct net_device *ndev)
                value = 0x0;
                break;
        default:
-               pr_warn("PHY interface mode was not setup. Set to MII.\n");
+               netdev_warn(ndev,
+                           "PHY interface mode was not set up. Defaulting to MII.\n");
                value = 0x1;
                break;
        }
@@ -848,7 +855,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
                cnt--;
        }
        if (cnt <= 0) {
-               pr_err("Device reset failed\n");
+               netdev_err(ndev, "Device reset failed\n");
                ret = -ETIMEDOUT;
        }
        return ret;
@@ -866,7 +873,7 @@ static int sh_eth_reset(struct net_device *ndev)
 
                ret = sh_eth_check_reset(ndev);
                if (ret)
-                       goto out;
+                       return ret;
 
                /* Table Init */
                sh_eth_write(ndev, 0x0, TDLAR);
@@ -893,7 +900,6 @@ static int sh_eth_reset(struct net_device *ndev)
                             EDMR);
        }
 
-out:
        return ret;
 }
 
@@ -1257,7 +1263,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        /* Soft Reset */
        ret = sh_eth_reset(ndev);
        if (ret)
-               goto out;
+               return ret;
 
        if (mdp->cd->rmiimode)
                sh_eth_write(ndev, 0x1, RMIIMODE);
@@ -1336,7 +1342,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
                netif_start_queue(ndev);
        }
 
-out:
        return ret;
 }
 
@@ -1550,8 +1555,7 @@ ignore_link:
                /* Unused write back interrupt */
                if (intr_status & EESR_TABT) {  /* Transmit Abort int */
                        ndev->stats.tx_aborted_errors++;
-                       if (netif_msg_tx_err(mdp))
-                               dev_err(&ndev->dev, "Transmit Abort\n");
+                       netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
                }
        }
 
@@ -1560,45 +1564,38 @@ ignore_link:
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        ndev->stats.rx_frame_errors++;
-                       if (netif_msg_rx_err(mdp))
-                               dev_err(&ndev->dev, "Receive Abort\n");
+                       netif_err(mdp, rx_err, ndev, "Receive Abort\n");
                }
        }
 
        if (intr_status & EESR_TDE) {
                /* Transmit Descriptor Empty int */
                ndev->stats.tx_fifo_errors++;
-               if (netif_msg_tx_err(mdp))
-                       dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+               netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
        }
 
        if (intr_status & EESR_TFE) {
                /* FIFO under flow */
                ndev->stats.tx_fifo_errors++;
-               if (netif_msg_tx_err(mdp))
-                       dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
+               netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
        }
 
        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
-
-               if (netif_msg_rx_err(mdp))
-                       dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+               netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
        }
 
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                ndev->stats.rx_fifo_errors++;
-               if (netif_msg_rx_err(mdp))
-                       dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+               netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
        }
 
        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
                /* Address Error */
                ndev->stats.tx_fifo_errors++;
-               if (netif_msg_tx_err(mdp))
-                       dev_err(&ndev->dev, "Address Error\n");
+               netif_err(mdp, tx_err, ndev, "Address Error\n");
        }
 
        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1609,9 +1606,9 @@ ignore_link:
                u32 edtrr = sh_eth_read(ndev, EDTRR);
 
                /* dmesg */
-               dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
-                       intr_status, mdp->cur_tx, mdp->dirty_tx,
-                       (u32)ndev->state, edtrr);
+               netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
+                          intr_status, mdp->cur_tx, mdp->dirty_tx,
+                          (u32)ndev->state, edtrr);
                /* dirty buffer free */
                sh_eth_txfree(ndev);
 
@@ -1656,9 +1653,9 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                                     EESIPR);
                        __napi_schedule(&mdp->napi);
                } else {
-                       dev_warn(&ndev->dev,
-                                "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
-                                intr_status, intr_enable);
+                       netdev_warn(ndev,
+                                   "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
+                                   intr_status, intr_enable);
                }
        }
 
@@ -1757,27 +1754,42 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 /* PHY init function */
 static int sh_eth_phy_init(struct net_device *ndev)
 {
+       struct device_node *np = ndev->dev.parent->of_node;
        struct sh_eth_private *mdp = netdev_priv(ndev);
-       char phy_id[MII_BUS_ID_SIZE + 3];
        struct phy_device *phydev = NULL;
 
-       snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
-                mdp->mii_bus->id, mdp->phy_id);
-
        mdp->link = 0;
        mdp->speed = 0;
        mdp->duplex = -1;
 
        /* Try connect to PHY */
-       phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
-                            mdp->phy_interface);
+       if (np) {
+               struct device_node *pn;
+
+               pn = of_parse_phandle(np, "phy-handle", 0);
+               phydev = of_phy_connect(ndev, pn,
+                                       sh_eth_adjust_link, 0,
+                                       mdp->phy_interface);
+
+               if (!phydev)
+                       phydev = ERR_PTR(-ENOENT);
+       } else {
+               char phy_id[MII_BUS_ID_SIZE + 3];
+
+               snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+                        mdp->mii_bus->id, mdp->phy_id);
+
+               phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
+                                    mdp->phy_interface);
+       }
+
        if (IS_ERR(phydev)) {
-               dev_err(&ndev->dev, "phy_connect failed\n");
+               netdev_err(ndev, "failed to connect PHY\n");
                return PTR_ERR(phydev);
        }
 
-       dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n",
-                phydev->addr, phydev->irq, phydev->drv->name);
+       netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
+                   phydev->addr, phydev->irq, phydev->drv->name);
 
        mdp->phydev = phydev;
 
@@ -1958,12 +1970,12 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
 
        ret = sh_eth_ring_init(ndev);
        if (ret < 0) {
-               dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+               netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
                return ret;
        }
        ret = sh_eth_dev_init(ndev, false);
        if (ret < 0) {
-               dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+               netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
                return ret;
        }
 
@@ -2004,7 +2016,7 @@ static int sh_eth_open(struct net_device *ndev)
        ret = request_irq(ndev->irq, sh_eth_interrupt,
                          mdp->cd->irq_flags, ndev->name, ndev);
        if (ret) {
-               dev_err(&ndev->dev, "Can not assign IRQ number\n");
+               netdev_err(ndev, "Can not assign IRQ number\n");
                goto out_napi_off;
        }
 
@@ -2042,10 +2054,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 
        netif_stop_queue(ndev);
 
-       if (netif_msg_timer(mdp)) {
-               dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n",
-                       ndev->name, (int)sh_eth_read(ndev, EESR));
-       }
+       netif_err(mdp, timer, ndev,
+                 "transmit timed out, status %8.8x, resetting...\n",
+                 (int)sh_eth_read(ndev, EESR));
 
        /* tx_errors count up */
        ndev->stats.tx_errors++;
@@ -2080,8 +2091,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
                if (!sh_eth_txfree(ndev)) {
-                       if (netif_msg_tx_queued(mdp))
-                               dev_warn(&ndev->dev, "TxFD exhausted.\n");
+                       netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
@@ -2098,8 +2108,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                 skb->len + 2);
        txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
-       if (skb->len < ETHERSMALL)
-               txdesc->buffer_length = ETHERSMALL;
+       if (skb->len < ETH_ZLEN)
+               txdesc->buffer_length = ETH_ZLEN;
        else
                txdesc->buffer_length = skb->len;
 
@@ -2251,7 +2261,7 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
                udelay(10);
                timeout--;
                if (timeout <= 0) {
-                       dev_err(&ndev->dev, "%s: timeout\n", __func__);
+                       netdev_err(ndev, "%s: timeout\n", __func__);
                        return -ETIMEDOUT;
                }
        }
@@ -2571,37 +2581,30 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 }
 
 /* MDIO bus release function */
-static int sh_mdio_release(struct net_device *ndev)
+static int sh_mdio_release(struct sh_eth_private *mdp)
 {
-       struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
-
        /* unregister mdio bus */
-       mdiobus_unregister(bus);
-
-       /* remove mdio bus info from net_device */
-       dev_set_drvdata(&ndev->dev, NULL);
+       mdiobus_unregister(mdp->mii_bus);
 
        /* free bitbang info */
-       free_mdio_bitbang(bus);
+       free_mdio_bitbang(mdp->mii_bus);
 
        return 0;
 }
 
 /* MDIO bus init function */
-static int sh_mdio_init(struct net_device *ndev, int id,
+static int sh_mdio_init(struct sh_eth_private *mdp,
                        struct sh_eth_plat_data *pd)
 {
        int ret, i;
        struct bb_info *bitbang;
-       struct sh_eth_private *mdp = netdev_priv(ndev);
+       struct platform_device *pdev = mdp->pdev;
+       struct device *dev = &mdp->pdev->dev;
 
        /* create bit control struct for PHY */
-       bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
-                              GFP_KERNEL);
-       if (!bitbang) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
+       if (!bitbang)
+               return -ENOMEM;
 
        /* bitbang init */
        bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
@@ -2614,44 +2617,42 @@ static int sh_mdio_init(struct net_device *ndev, int id,
 
        /* MII controller setting */
        mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
-       if (!mdp->mii_bus) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (!mdp->mii_bus)
+               return -ENOMEM;
 
        /* Hook up MII support for ethtool */
        mdp->mii_bus->name = "sh_mii";
-       mdp->mii_bus->parent = &ndev->dev;
+       mdp->mii_bus->parent = dev;
        snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-                mdp->pdev->name, id);
+                pdev->name, pdev->id);
 
        /* PHY IRQ */
-       mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
-                                        sizeof(int) * PHY_MAX_ADDR,
+       mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
                                         GFP_KERNEL);
        if (!mdp->mii_bus->irq) {
                ret = -ENOMEM;
                goto out_free_bus;
        }
 
-       for (i = 0; i < PHY_MAX_ADDR; i++)
-               mdp->mii_bus->irq[i] = PHY_POLL;
-       if (pd->phy_irq > 0)
-               mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+       /* register MDIO bus */
+       if (dev->of_node) {
+               ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
+       } else {
+               for (i = 0; i < PHY_MAX_ADDR; i++)
+                       mdp->mii_bus->irq[i] = PHY_POLL;
+               if (pd->phy_irq > 0)
+                       mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
+
+               ret = mdiobus_register(mdp->mii_bus);
+       }
 
-       /* register mdio bus */
-       ret = mdiobus_register(mdp->mii_bus);
        if (ret)
                goto out_free_bus;
 
-       dev_set_drvdata(&ndev->dev, mdp->mii_bus);
-
        return 0;
 
 out_free_bus:
        free_mdio_bitbang(mdp->mii_bus);
-
-out:
        return ret;
 }
 
@@ -2676,7 +2677,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
                reg_offset = sh_eth_offset_fast_sh3_sh2;
                break;
        default:
-               pr_err("Unknown register type (%d)\n", register_type);
                break;
        }
 
@@ -2710,6 +2710,48 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
        .ndo_change_mtu         = eth_change_mtu,
 };
 
+#ifdef CONFIG_OF
+static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       struct sh_eth_plat_data *pdata;
+       const char *mac_addr;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       pdata->phy_interface = of_get_phy_mode(np);
+
+       mac_addr = of_get_mac_address(np);
+       if (mac_addr)
+               memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+
+       pdata->no_ether_link =
+               of_property_read_bool(np, "renesas,no-ether-link");
+       pdata->ether_link_active_low =
+               of_property_read_bool(np, "renesas,ether-link-active-low");
+
+       return pdata;
+}
+
+static const struct of_device_id sh_eth_match_table[] = {
+       { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+       { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
+       { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
+       { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
+       { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+       { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+       { }
+};
+MODULE_DEVICE_TABLE(of, sh_eth_match_table);
+#else
+static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+       return NULL;
+}
+#endif
+
 static int sh_eth_drv_probe(struct platform_device *pdev)
 {
        int ret, devno = 0;
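
With device-tree support, probe() no longer depends only on the platform_device_id: sh_eth_parse_dt() builds the platform data from OF properties, and the of_device_id table maps each compatible string to per-SoC cpu data. A small userland sketch of that compatible-string lookup; the table contents and names below are illustrative only:

#include <stdio.h>
#include <string.h>

struct cpu_data { const char *name; };

static const struct cpu_data r8a777x = { "r8a777x" };
static const struct cpu_data r8a779x = { "r8a779x" };

struct of_match { const char *compatible; const struct cpu_data *data; };

static const struct of_match match_table[] = {
	{ "renesas,ether-r8a7778", &r8a777x },
	{ "renesas,ether-r8a7779", &r8a777x },
	{ "renesas,ether-r8a7790", &r8a779x },
	{ NULL, NULL }
};

static const struct cpu_data *match_device(const char *compatible)
{
	const struct of_match *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct cpu_data *cd = match_device("renesas,ether-r8a7790");

	printf("cpu data: %s\n", cd ? cd->name : "none");
	return 0;
}
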
@@ -2723,15 +2765,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(res == NULL)) {
                dev_err(&pdev->dev, "invalid resource\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        ndev = alloc_etherdev(sizeof(struct sh_eth_private));
-       if (!ndev) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (!ndev)
+               return -ENOMEM;
+
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
 
        /* The sh Ether-specific entries in the device structure. */
        ndev->base_addr = res->start;
@@ -2760,9 +2802,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
        spin_lock_init(&mdp->lock);
        mdp->pdev = pdev;
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_resume(&pdev->dev);
 
+       if (pdev->dev.of_node)
+               pd = sh_eth_parse_dt(&pdev->dev);
        if (!pd) {
                dev_err(&pdev->dev, "no platform data\n");
                ret = -EINVAL;
@@ -2778,8 +2820,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        mdp->ether_link_active_low = pd->ether_link_active_low;
 
        /* set cpu data */
-       mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+       if (id) {
+               mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+       } else {
+               const struct of_device_id *match;
+
+               match = of_match_device(of_match_ptr(sh_eth_match_table),
+                                       &pdev->dev);
+               mdp->cd = (struct sh_eth_cpu_data *)match->data;
+       }
        mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
+       if (!mdp->reg_offset) {
+               dev_err(&pdev->dev, "Unknown register type (%d)\n",
+                       mdp->cd->register_type);
+               ret = -EINVAL;
+               goto out_release;
+       }
        sh_eth_set_default_cpu_data(mdp->cd);
 
        /* set function */
@@ -2825,6 +2881,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                }
        }
 
+       /* MDIO bus init */
+       ret = sh_mdio_init(mdp, pd);
+       if (ret) {
+               dev_err(&ndev->dev, "failed to initialise MDIO\n");
+               goto out_release;
+       }
+
        netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
 
        /* network device register */
@@ -2832,31 +2895,26 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        if (ret)
                goto out_napi_del;
 
-       /* mdio bus init */
-       ret = sh_mdio_init(ndev, pdev->id, pd);
-       if (ret)
-               goto out_unregister;
-
        /* print device information */
-       pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
-               (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
+       netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
+                   (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
 
+       pm_runtime_put(&pdev->dev);
        platform_set_drvdata(pdev, ndev);
 
        return ret;
 
-out_unregister:
-       unregister_netdev(ndev);
-
 out_napi_del:
        netif_napi_del(&mdp->napi);
+       sh_mdio_release(mdp);
 
 out_release:
        /* net_dev free */
        if (ndev)
                free_netdev(ndev);
 
-out:
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
        return ret;
 }
 
@@ -2865,9 +2923,9 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
-       sh_mdio_release(ndev);
        unregister_netdev(ndev);
        netif_napi_del(&mdp->napi);
+       sh_mdio_release(mdp);
        pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
 
@@ -2920,6 +2978,7 @@ static struct platform_driver sh_eth_driver = {
        .driver = {
                   .name = CARDNAME,
                   .pm = SH_ETH_PM_OPS,
+                  .of_match_table = of_match_ptr(sh_eth_match_table),
        },
 };
 
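Note on the reworked sh_eth probe above: the legacy board-file path is kept alongside the new DT path. A minimal sketch only of what a board file might still pass as platform data, restricted to the sh_eth_plat_data fields referenced in this hunk (phy, phy_irq, phy_interface, no_ether_link, ether_link_active_low); the values and the <linux/sh_eth.h> location are assumptions for illustration, not taken from this patch.

#include <linux/phy.h>
#include <linux/sh_eth.h>

/* Hypothetical board wiring: PHY at address 0, no dedicated PHY IRQ,
 * so the MDIO setup above falls back to PHY_POLL.
 */
static struct sh_eth_plat_data board_sh_eth_pdata = {
	.phy			= 0,
	.phy_irq		= 0,
	.phy_interface		= PHY_INTERFACE_MODE_MII,
	.no_ether_link		= 0,
	.ether_link_active_low	= 0,
};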
index 6075915b88ece2f42e59a70ab160cd5bd7f6a423..d55e37cd5fec04abcd6bc4e52827351e74d5fbdb 100644 (file)
@@ -27,8 +27,7 @@
 #define RX_RING_MIN    64
 #define TX_RING_MAX    1024
 #define RX_RING_MAX    1024
-#define ETHERSMALL             60
-#define PKT_BUF_SZ             1538
+#define PKT_BUF_SZ     1538
 #define SH_ETH_TSU_TIMEOUT_MS  500
 #define SH_ETH_TSU_CAM_ENTRIES 32
 
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
new file mode 100644 (file)
index 0000000..7902341
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# Samsung Ethernet device configuration
+#
+
+config NET_VENDOR_SAMSUNG
+       bool "Samsung Ethernet device"
+       default y
+       ---help---
+         Select this option to see and configure the Ethernet drivers for
+         Samsung devices, such as the SXGBE 10G Ethernet IP block.
+
+if NET_VENDOR_SAMSUNG
+
+source "drivers/net/ethernet/samsung/sxgbe/Kconfig"
+
+endif # NET_VENDOR_SAMSUNG
diff --git a/drivers/net/ethernet/samsung/Makefile b/drivers/net/ethernet/samsung/Makefile
new file mode 100644 (file)
index 0000000..1773c29
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Samsung Ethernet device drivers.
+#
+
+obj-$(CONFIG_SXGBE_ETH) += sxgbe/
diff --git a/drivers/net/ethernet/samsung/sxgbe/Kconfig b/drivers/net/ethernet/samsung/sxgbe/Kconfig
new file mode 100644 (file)
index 0000000..d79288c
--- /dev/null
@@ -0,0 +1,9 @@
+config SXGBE_ETH
+       tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver"
+       depends on HAS_IOMEM && HAS_DMA
+       select PHYLIB
+       select CRC32
+       select PTP_1588_CLOCK
+       ---help---
+         This is the driver for the SXGBE 10G Ethernet IP block found on Samsung
+         platforms.
diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile
new file mode 100644 (file)
index 0000000..dcc80b9
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
+samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
+               sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o  sxgbe_mdio.o \
+               sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
new file mode 100644 (file)
index 0000000..6203c7d
--- /dev/null
@@ -0,0 +1,535 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SXGBE_COMMON_H__
+#define __SXGBE_COMMON_H__
+
+/* forward references */
+struct sxgbe_desc_ops;
+struct sxgbe_dma_ops;
+struct sxgbe_mtl_ops;
+
+#define SXGBE_RESOURCE_NAME    "sam_sxgbeeth"
+#define DRV_MODULE_VERSION     "November_2013"
+
+/* MAX HW feature words */
+#define SXGBE_HW_WORDS 3
+
+#define SXGBE_RX_COE_NONE      0
+
+/* CSR Frequency Access Defines*/
+#define SXGBE_CSR_F_150M       150000000
+#define SXGBE_CSR_F_250M       250000000
+#define SXGBE_CSR_F_300M       300000000
+#define SXGBE_CSR_F_350M       350000000
+#define SXGBE_CSR_F_400M       400000000
+#define SXGBE_CSR_F_500M       500000000
+
+/* pause time */
+#define SXGBE_PAUSE_TIME 0x200
+
+/* tx queues */
+#define SXGBE_TX_QUEUES   8
+#define SXGBE_RX_QUEUES   16
+
+/* Calculated from how long it takes to fill 256KB of Rx memory
+ * at 10Gb speed with a 156MHz clock, then reduced slightly below
+ * the actual value.
+ */
+#define SXGBE_MAX_DMA_RIWT     0x70
+#define SXGBE_MIN_DMA_RIWT     0x01
+
+/* Tx coalesce parameters */
+#define SXGBE_COAL_TX_TIMER    40000
+#define SXGBE_MAX_COAL_TX_TICK 100000
+#define SXGBE_TX_MAX_FRAMES    512
+#define SXGBE_TX_FRAMES        128
+
+/* SXGBE TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+#define SXGBE_DEFAULT_LIT_LS   0x3E8
+#define SXGBE_DEFAULT_TWT_LS   0x0
+
+/* Flow Control defines */
+#define SXGBE_FLOW_OFF         0
+#define SXGBE_FLOW_RX          1
+#define SXGBE_FLOW_TX          2
+#define SXGBE_FLOW_AUTO                (SXGBE_FLOW_TX | SXGBE_FLOW_RX)
+
+#define SF_DMA_MODE 1          /* DMA STORE-AND-FORWARD Operation Mode */
+
+/* errors */
+#define RX_GMII_ERR            0x01
+#define RX_WATCHDOG_ERR                0x02
+#define RX_CRC_ERR             0x03
+#define RX_GAINT_ERR           0x04
+#define RX_IP_HDR_ERR          0x05
+#define RX_PAYLOAD_ERR         0x06
+#define RX_OVERFLOW_ERR                0x07
+
+/* pkt type */
+#define RX_LEN_PKT             0x00
+#define RX_MACCTL_PKT          0x01
+#define RX_DCBCTL_PKT          0x02
+#define RX_ARP_PKT             0x03
+#define RX_OAM_PKT             0x04
+#define RX_UNTAG_PKT           0x05
+#define RX_OTHER_PKT           0x07
+#define RX_SVLAN_PKT           0x08
+#define RX_CVLAN_PKT           0x09
+#define RX_DVLAN_OCVLAN_ICVLAN_PKT             0x0A
+#define RX_DVLAN_OSVLAN_ISVLAN_PKT             0x0B
+#define RX_DVLAN_OSVLAN_ICVLAN_PKT             0x0C
+#define RX_DVLAN_OCVLAN_ISVLAN_PKT             0x0D
+
+#define RX_NOT_IP_PKT          0x00
+#define RX_IPV4_TCP_PKT                0x01
+#define RX_IPV4_UDP_PKT                0x02
+#define RX_IPV4_ICMP_PKT       0x03
+#define RX_IPV4_UNKNOWN_PKT    0x07
+#define RX_IPV6_TCP_PKT                0x09
+#define RX_IPV6_UDP_PKT                0x0A
+#define RX_IPV6_ICMP_PKT       0x0B
+#define RX_IPV6_UNKNOWN_PKT    0x0F
+
+#define RX_NO_PTP              0x00
+#define RX_PTP_SYNC            0x01
+#define RX_PTP_FOLLOW_UP       0x02
+#define RX_PTP_DELAY_REQ       0x03
+#define RX_PTP_DELAY_RESP      0x04
+#define RX_PTP_PDELAY_REQ      0x05
+#define RX_PTP_PDELAY_RESP     0x06
+#define RX_PTP_PDELAY_FOLLOW_UP        0x07
+#define RX_PTP_ANNOUNCE                0x08
+#define RX_PTP_MGMT            0x09
+#define RX_PTP_SIGNAL          0x0A
+#define RX_PTP_RESV_MSG                0x0F
+
+/* EEE-LPI mode  flags*/
+#define TX_ENTRY_LPI_MODE      0x10
+#define TX_EXIT_LPI_MODE       0x20
+#define RX_ENTRY_LPI_MODE      0x40
+#define RX_EXIT_LPI_MODE       0x80
+
+/* EEE-LPI Interrupt status flag */
+#define LPI_INT_STATUS         BIT(5)
+
+/* EEE-LPI Default timer values */
+#define LPI_LINK_STATUS_TIMER  0x3E8
+#define LPI_MAC_WAIT_TIMER     0x00
+
+/* EEE-LPI Control and status definitions */
+#define LPI_CTRL_STATUS_TXA    BIT(19)
+#define LPI_CTRL_STATUS_PLSDIS BIT(18)
+#define LPI_CTRL_STATUS_PLS    BIT(17)
+#define LPI_CTRL_STATUS_LPIEN  BIT(16)
+#define LPI_CTRL_STATUS_TXRSTP BIT(11)
+#define LPI_CTRL_STATUS_RXRSTP BIT(10)
+#define LPI_CTRL_STATUS_RLPIST BIT(9)
+#define LPI_CTRL_STATUS_TLPIST BIT(8)
+#define LPI_CTRL_STATUS_RLPIEX BIT(3)
+#define LPI_CTRL_STATUS_RLPIEN BIT(2)
+#define LPI_CTRL_STATUS_TLPIEX BIT(1)
+#define LPI_CTRL_STATUS_TLPIEN BIT(0)
+
+enum dma_irq_status {
+       tx_hard_error   = BIT(0),
+       tx_bump_tc      = BIT(1),
+       handle_tx       = BIT(2),
+       rx_hard_error   = BIT(3),
+       rx_bump_tc      = BIT(4),
+       handle_rx       = BIT(5),
+};
+
+#define NETIF_F_HW_VLAN_ALL     (NETIF_F_HW_VLAN_CTAG_RX |     \
+                                NETIF_F_HW_VLAN_STAG_RX |      \
+                                NETIF_F_HW_VLAN_CTAG_TX |      \
+                                NETIF_F_HW_VLAN_STAG_TX |      \
+                                NETIF_F_HW_VLAN_CTAG_FILTER |  \
+                                NETIF_F_HW_VLAN_STAG_FILTER)
+
+/* MMC control defines */
+#define SXGBE_MMC_CTRL_CNT_FRZ  0x00000008
+
+/* SXGBE HW ADDR regs */
+#define SXGBE_ADDR_HIGH(reg)    (((reg > 15) ? 0x00000800 : 0x00000040) + \
+                                (reg * 8))
+#define SXGBE_ADDR_LOW(reg)     (((reg > 15) ? 0x00000804 : 0x00000044) + \
+                                (reg * 8))
+#define SXGBE_MAX_PERFECT_ADDRESSES 32 /* Maximum unicast perfect filtering */
+#define SXGBE_FRAME_FILTER       0x00000004      /* Frame Filter */
+
+/* SXGBE Frame Filter defines */
+#define SXGBE_FRAME_FILTER_PR    0x00000001      /* Promiscuous Mode */
+#define SXGBE_FRAME_FILTER_HUC   0x00000002      /* Hash Unicast */
+#define SXGBE_FRAME_FILTER_HMC   0x00000004      /* Hash Multicast */
+#define SXGBE_FRAME_FILTER_DAIF  0x00000008      /* DA Inverse Filtering */
+#define SXGBE_FRAME_FILTER_PM    0x00000010      /* Pass all multicast */
+#define SXGBE_FRAME_FILTER_DBF   0x00000020      /* Disable Broadcast frames */
+#define SXGBE_FRAME_FILTER_SAIF  0x00000100      /* Inverse Filtering */
+#define SXGBE_FRAME_FILTER_SAF   0x00000200      /* Source Address Filter */
+#define SXGBE_FRAME_FILTER_HPF   0x00000400      /* Hash or perfect Filter */
+#define SXGBE_FRAME_FILTER_RA    0x80000000      /* Receive all mode */
+
+#define SXGBE_HASH_TABLE_SIZE    64
+#define SXGBE_HASH_HIGH          0x00000008      /* Multicast Hash Table High */
+#define SXGBE_HASH_LOW           0x0000000c      /* Multicast Hash Table Low */
+
+#define SXGBE_HI_REG_AE          0x80000000
+
+/* Minimum and maximum MTU */
+#define MIN_MTU         68
+#define MAX_MTU         9000
+
+#define SXGBE_FOR_EACH_QUEUE(max_queues, queue_num)                    \
+       for (queue_num = 0; queue_num < max_queues; queue_num++)
+
+#define DRV_VERSION "1.0.0"
+
+#define SXGBE_MAX_RX_CHANNELS  16
+#define SXGBE_MAX_TX_CHANNELS  16
+
+#define START_MAC_REG_OFFSET   0x0000
+#define MAX_MAC_REG_OFFSET     0x0DFC
+#define START_MTL_REG_OFFSET   0x1000
+#define MAX_MTL_REG_OFFSET     0x18FC
+#define START_DMA_REG_OFFSET   0x3000
+#define MAX_DMA_REG_OFFSET     0x38FC
+
+#define REG_SPACE_SIZE         0x2000
+
+/* sxgbe statistics counters */
+struct sxgbe_extra_stats {
+       /* TX/RX IRQ events */
+       unsigned long tx_underflow_irq;
+       unsigned long tx_process_stopped_irq;
+       unsigned long tx_ctxt_desc_err;
+       unsigned long tx_threshold;
+       unsigned long rx_threshold;
+       unsigned long tx_pkt_n;
+       unsigned long rx_pkt_n;
+       unsigned long normal_irq_n;
+       unsigned long tx_normal_irq_n;
+       unsigned long rx_normal_irq_n;
+       unsigned long napi_poll;
+       unsigned long tx_clean;
+       unsigned long tx_reset_ic_bit;
+       unsigned long rx_process_stopped_irq;
+       unsigned long rx_underflow_irq;
+
+       /* Bus access errors */
+       unsigned long fatal_bus_error_irq;
+       unsigned long tx_read_transfer_err;
+       unsigned long tx_write_transfer_err;
+       unsigned long tx_desc_access_err;
+       unsigned long tx_buffer_access_err;
+       unsigned long tx_data_transfer_err;
+       unsigned long rx_read_transfer_err;
+       unsigned long rx_write_transfer_err;
+       unsigned long rx_desc_access_err;
+       unsigned long rx_buffer_access_err;
+       unsigned long rx_data_transfer_err;
+
+       /* EEE-LPI stats */
+       unsigned long tx_lpi_entry_n;
+       unsigned long tx_lpi_exit_n;
+       unsigned long rx_lpi_entry_n;
+       unsigned long rx_lpi_exit_n;
+       unsigned long eee_wakeup_error_n;
+
+       /* RX specific */
+       /* L2 error */
+       unsigned long rx_code_gmii_err;
+       unsigned long rx_watchdog_err;
+       unsigned long rx_crc_err;
+       unsigned long rx_gaint_pkt_err;
+       unsigned long ip_hdr_err;
+       unsigned long ip_payload_err;
+       unsigned long overflow_error;
+
+       /* L2 Pkt type */
+       unsigned long len_pkt;
+       unsigned long mac_ctl_pkt;
+       unsigned long dcb_ctl_pkt;
+       unsigned long arp_pkt;
+       unsigned long oam_pkt;
+       unsigned long untag_okt;
+       unsigned long other_pkt;
+       unsigned long svlan_tag_pkt;
+       unsigned long cvlan_tag_pkt;
+       unsigned long dvlan_ocvlan_icvlan_pkt;
+       unsigned long dvlan_osvlan_isvlan_pkt;
+       unsigned long dvlan_osvlan_icvlan_pkt;
+       unsigned long dvan_ocvlan_icvlan_pkt;
+
+       /* L3/L4 Pkt type */
+       unsigned long not_ip_pkt;
+       unsigned long ip4_tcp_pkt;
+       unsigned long ip4_udp_pkt;
+       unsigned long ip4_icmp_pkt;
+       unsigned long ip4_unknown_pkt;
+       unsigned long ip6_tcp_pkt;
+       unsigned long ip6_udp_pkt;
+       unsigned long ip6_icmp_pkt;
+       unsigned long ip6_unknown_pkt;
+
+       /* Filter specific */
+       unsigned long vlan_filter_match;
+       unsigned long sa_filter_fail;
+       unsigned long da_filter_fail;
+       unsigned long hash_filter_pass;
+       unsigned long l3_filter_match;
+       unsigned long l4_filter_match;
+
+       /* RX context specific */
+       unsigned long timestamp_dropped;
+       unsigned long rx_msg_type_no_ptp;
+       unsigned long rx_ptp_type_sync;
+       unsigned long rx_ptp_type_follow_up;
+       unsigned long rx_ptp_type_delay_req;
+       unsigned long rx_ptp_type_delay_resp;
+       unsigned long rx_ptp_type_pdelay_req;
+       unsigned long rx_ptp_type_pdelay_resp;
+       unsigned long rx_ptp_type_pdelay_follow_up;
+       unsigned long rx_ptp_announce;
+       unsigned long rx_ptp_mgmt;
+       unsigned long rx_ptp_signal;
+       unsigned long rx_ptp_resv_msg_type;
+};
+
+struct mac_link {
+       int port;
+       int duplex;
+       int speed;
+};
+
+struct mii_regs {
+       unsigned int addr;      /* MII Address */
+       unsigned int data;      /* MII Data */
+};
+
+struct sxgbe_core_ops {
+       /* MAC core initialization */
+       void (*core_init)(void __iomem *ioaddr);
+       /* Dump MAC registers */
+       void (*dump_regs)(void __iomem *ioaddr);
+       /* Handle extra events on specific interrupts hw dependent */
+       int (*host_irq_status)(void __iomem *ioaddr,
+                              struct sxgbe_extra_stats *x);
+       /* Set power management mode (e.g. magic frame) */
+       void (*pmt)(void __iomem *ioaddr, unsigned long mode);
+       /* Set/Get Unicast MAC addresses */
+       void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+                             unsigned int reg_n);
+       void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+                             unsigned int reg_n);
+       void (*enable_rx)(void __iomem *ioaddr, bool enable);
+       void (*enable_tx)(void __iomem *ioaddr, bool enable);
+
+       /* controller version specific operations */
+       int (*get_controller_version)(void __iomem *ioaddr);
+
+       /* If supported then get the optional core features */
+       unsigned int (*get_hw_feature)(void __iomem *ioaddr,
+                                      unsigned char feature_index);
+       /* adjust SXGBE speed */
+       void (*set_speed)(void __iomem *ioaddr, unsigned char speed);
+
+       /* EEE-LPI specific operations */
+       void (*set_eee_mode)(void __iomem *ioaddr);
+       void (*reset_eee_mode)(void __iomem *ioaddr);
+       void (*set_eee_timer)(void __iomem *ioaddr, const int ls,
+                             const int tw);
+       void (*set_eee_pls)(void __iomem *ioaddr, const int link);
+
+       /* Enable disable checksum offload operations */
+       void (*enable_rx_csum)(void __iomem *ioaddr);
+       void (*disable_rx_csum)(void __iomem *ioaddr);
+};
+
+const struct sxgbe_core_ops *sxgbe_get_core_ops(void);
+
+struct sxgbe_ops {
+       const struct sxgbe_core_ops *mac;
+       const struct sxgbe_desc_ops *desc;
+       const struct sxgbe_dma_ops *dma;
+       const struct sxgbe_mtl_ops *mtl;
+       struct mii_regs mii;    /* MII register Addresses */
+       struct mac_link link;
+       unsigned int ctrl_uid;
+       unsigned int ctrl_id;
+};
+
+/* SXGBE private data structures */
+struct sxgbe_tx_queue {
+       unsigned int irq_no;
+       struct sxgbe_priv_data *priv_ptr;
+       struct sxgbe_tx_norm_desc *dma_tx;
+       dma_addr_t dma_tx_phy;
+       dma_addr_t *tx_skbuff_dma;
+       struct sk_buff **tx_skbuff;
+       struct timer_list txtimer;
+       spinlock_t tx_lock;     /* lock for tx queues */
+       unsigned int cur_tx;
+       unsigned int dirty_tx;
+       u32 tx_count_frames;
+       u32 tx_coal_frames;
+       u32 tx_coal_timer;
+       int hwts_tx_en;
+       u16 prev_mss;
+       u8 queue_no;
+};
+
+struct sxgbe_rx_queue {
+       struct sxgbe_priv_data *priv_ptr;
+       struct sxgbe_rx_norm_desc *dma_rx;
+       struct sk_buff **rx_skbuff;
+       unsigned int cur_rx;
+       unsigned int dirty_rx;
+       unsigned int irq_no;
+       u32 rx_riwt;
+       dma_addr_t *rx_skbuff_dma;
+       dma_addr_t dma_rx_phy;
+       u8 queue_no;
+};
+
+/* SXGBE HW capabilities */
+struct sxgbe_hw_features {
+       /****** CAP [0] *******/
+       unsigned int pmt_remote_wake_up;
+       unsigned int pmt_magic_frame;
+       /* IEEE 1588-2008 */
+       unsigned int atime_stamp;
+
+       unsigned int eee;
+
+       unsigned int tx_csum_offload;
+       unsigned int rx_csum_offload;
+       unsigned int multi_macaddr;
+       unsigned int tstamp_srcselect;
+       unsigned int sa_vlan_insert;
+
+       /****** CAP [1] *******/
+       unsigned int rxfifo_size;
+       unsigned int txfifo_size;
+       unsigned int atstmap_hword;
+       unsigned int dcb_enable;
+       unsigned int splithead_enable;
+       unsigned int tcpseg_offload;
+       unsigned int debug_mem;
+       unsigned int rss_enable;
+       unsigned int hash_tsize;
+       unsigned int l3l4_filer_size;
+
+       /* These values are in bytes,
+        * as listed in the HW features
+        * section of the SXGBE databook
+        */
+       unsigned int rx_mtl_qsize;
+       unsigned int tx_mtl_qsize;
+
+       /****** CAP [2] *******/
+       /* TX and RX number of channels */
+       unsigned int rx_mtl_queues;
+       unsigned int tx_mtl_queues;
+       unsigned int rx_dma_channels;
+       unsigned int tx_dma_channels;
+       unsigned int pps_output_count;
+       unsigned int aux_input_count;
+};
+
+struct sxgbe_priv_data {
+       /* DMA descriptors */
+       struct sxgbe_tx_queue *txq[SXGBE_TX_QUEUES];
+       struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES];
+       u8 cur_rx_qnum;
+
+       unsigned int dma_tx_size;
+       unsigned int dma_rx_size;
+       unsigned int dma_buf_sz;
+       u32 rx_riwt;
+
+       struct napi_struct napi;
+
+       void __iomem *ioaddr;
+       struct net_device *dev;
+       struct device *device;
+       struct sxgbe_ops *hw;   /* sxgbe specific ops */
+       int no_csum_insertion;
+       int irq;
+       int rxcsum_insertion;
+       spinlock_t stats_lock;  /* lock for tx/rx statistics */
+
+       struct phy_device *phydev;
+       int oldlink;
+       int speed;
+       int oldduplex;
+       struct mii_bus *mii;
+       int mii_irq[PHY_MAX_ADDR];
+       u8 rx_pause;
+       u8 tx_pause;
+
+       struct sxgbe_extra_stats xstats;
+       struct sxgbe_plat_data *plat;
+       struct sxgbe_hw_features hw_cap;
+
+       u32 msg_enable;
+
+       struct clk *sxgbe_clk;
+       int clk_csr;
+       unsigned int mode;
+       unsigned int default_addend;
+
+       /* advanced time stamp support */
+       u32 adv_ts;
+       int use_riwt;
+       struct ptp_clock *ptp_clock;
+
+       /* tc control */
+       int tx_tc;
+       int rx_tc;
+       /* EEE-LPI specific members */
+       struct timer_list eee_ctrl_timer;
+       bool tx_path_in_lpi_mode;
+       int lpi_irq;
+       int eee_enabled;
+       int eee_active;
+       int tx_lpi_timer;
+};
+
+/* Function prototypes */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+                                       struct sxgbe_plat_data *plat_dat,
+                                       void __iomem *addr);
+int sxgbe_drv_remove(struct net_device *ndev);
+void sxgbe_set_ethtool_ops(struct net_device *netdev);
+int sxgbe_mdio_unregister(struct net_device *ndev);
+int sxgbe_mdio_register(struct net_device *ndev);
+int sxgbe_register_platform(void);
+void sxgbe_unregister_platform(void);
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev);
+int sxgbe_resume(struct net_device *ndev);
+int sxgbe_freeze(struct net_device *ndev);
+int sxgbe_restore(struct net_device *ndev);
+#endif /* CONFIG_PM */
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv);
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv);
+#endif /* __SXGBE_COMMON_H__ */
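A usage sketch for the SXGBE_FOR_EACH_QUEUE helper defined above; the per-queue assignment is illustrative only and assumes the txq array has already been allocated by the driver.

#include "sxgbe_common.h"

static void sxgbe_number_tx_queues(struct sxgbe_priv_data *priv)
{
	int qnum;

	/* Walk queue indices 0 .. SXGBE_TX_QUEUES - 1 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, qnum)
		priv->txq[qnum]->queue_no = qnum;
}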
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
new file mode 100644 (file)
index 0000000..66d4a74
--- /dev/null
@@ -0,0 +1,262 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+/* MAC core initialization */
+static void sxgbe_core_init(void __iomem *ioaddr)
+{
+       u32 regval;
+
+       /* TX configuration */
+       regval = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+       /* Other configurable parameters IFP, IPG, ISR and ISM
+        * need to be set here if required
+        */
+       regval |= SXGBE_TX_JABBER_DISABLE;
+       writel(regval, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+
+       /* RX configuration */
+       regval = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+       /* Other configurable parameters CST, SPEN, USP, GPSLCE,
+        * WD, LM, S2KP, HDSMS, GPSL, ELEN and ARPEN need to be
+        * set here if required
+        */
+       regval |= SXGBE_RX_JUMBPKT_ENABLE | SXGBE_RX_ACS_ENABLE;
+       writel(regval, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+/* Dump MAC registers */
+static void sxgbe_core_dump_regs(void __iomem *ioaddr)
+{
+}
+
+static int sxgbe_get_lpi_status(void __iomem *ioaddr, const u32 irq_status)
+{
+       int status = 0;
+       int lpi_status;
+
+       /* Reading this register shall clear all the LPI status bits */
+       lpi_status = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+
+       if (lpi_status & LPI_CTRL_STATUS_TLPIEN)
+               status |= TX_ENTRY_LPI_MODE;
+       if (lpi_status & LPI_CTRL_STATUS_TLPIEX)
+               status |= TX_EXIT_LPI_MODE;
+       if (lpi_status & LPI_CTRL_STATUS_RLPIEN)
+               status |= RX_ENTRY_LPI_MODE;
+       if (lpi_status & LPI_CTRL_STATUS_RLPIEX)
+               status |= RX_EXIT_LPI_MODE;
+
+       return status;
+}
+
+/* Handle extra events on specific interrupts hw dependent */
+static int sxgbe_core_host_irq_status(void __iomem *ioaddr,
+                                     struct sxgbe_extra_stats *x)
+{
+       int irq_status, status = 0;
+
+       irq_status = readl(ioaddr + SXGBE_CORE_INT_STATUS_REG);
+
+       if (unlikely(irq_status & LPI_INT_STATUS))
+               status |= sxgbe_get_lpi_status(ioaddr, irq_status);
+
+       return status;
+}
+
+/* Set power management mode (e.g. magic frame) */
+static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+}
+
+/* Set/Get Unicast MAC addresses */
+static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+                                    unsigned int reg_n)
+{
+       u32 high_word, low_word;
+
+       high_word = (addr[5] << 8) | (addr[4]);
+       low_word = ((addr[3] << 24) | (addr[2] << 16) |
+                   (addr[1] << 8) | (addr[0]));
+       writel(high_word, ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
+       writel(low_word, ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
+}
+
+static void sxgbe_core_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+                                    unsigned int reg_n)
+{
+       u32 high_word, low_word;
+
+       high_word = readl(ioaddr + SXGBE_CORE_ADD_HIGHOFFSET(reg_n));
+       low_word = readl(ioaddr + SXGBE_CORE_ADD_LOWOFFSET(reg_n));
+
+       /* extract and assign address */
+       addr[5] = (high_word & 0x0000FF00) >> 8;
+       addr[4] = (high_word & 0x000000FF);
+       addr[3] = (low_word & 0xFF000000) >> 24;
+       addr[2] = (low_word & 0x00FF0000) >> 16;
+       addr[1] = (low_word & 0x0000FF00) >> 8;
+       addr[0] = (low_word & 0x000000FF);
+}
+
+static void sxgbe_enable_tx(void __iomem *ioaddr, bool enable)
+{
+       u32 tx_config;
+
+       tx_config = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+       tx_config &= ~SXGBE_TX_ENABLE;
+
+       if (enable)
+               tx_config |= SXGBE_TX_ENABLE;
+       writel(tx_config, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+}
+
+static void sxgbe_enable_rx(void __iomem *ioaddr, bool enable)
+{
+       u32 rx_config;
+
+       rx_config = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+       rx_config &= ~SXGBE_RX_ENABLE;
+
+       if (enable)
+               rx_config |= SXGBE_RX_ENABLE;
+       writel(rx_config, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static int sxgbe_get_controller_version(void __iomem *ioaddr)
+{
+       return readl(ioaddr + SXGBE_CORE_VERSION_REG);
+}
+
+/* If supported then get the optional core features */
+static unsigned int sxgbe_get_hw_feature(void __iomem *ioaddr,
+                                        unsigned char feature_index)
+{
+       return readl(ioaddr + (SXGBE_CORE_HW_FEA_REG(feature_index)));
+}
+
+static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
+{
+       u32 tx_cfg = readl(ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+
+       /* clear the speed bits */
+       tx_cfg &= ~0x60000000;
+       tx_cfg |= (speed << SXGBE_SPEED_LSHIFT);
+
+       /* set the speed */
+       writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
+}
+
+static void  sxgbe_set_eee_mode(void __iomem *ioaddr)
+{
+       u32 ctrl;
+
+       /* Enable the LPI mode for transmit path with Tx automate bit set.
+        * When Tx Automate bit is set, MAC internally handles the entry
+        * to LPI mode after all outstanding and pending packets are
+        * transmitted.
+        */
+       ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+       ctrl |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA;
+       writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void  sxgbe_reset_eee_mode(void __iomem *ioaddr)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+       ctrl &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_TXA);
+       writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void  sxgbe_set_eee_pls(void __iomem *ioaddr, const int link)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+
+       /* If the PHY link status is UP then set PLS */
+       if (link)
+               ctrl |= LPI_CTRL_STATUS_PLS;
+       else
+               ctrl &= ~LPI_CTRL_STATUS_PLS;
+
+       writel(ctrl, ioaddr + SXGBE_CORE_LPI_CTRL_STATUS);
+}
+
+static void  sxgbe_set_eee_timer(void __iomem *ioaddr,
+                                const int ls, const int tw)
+{
+       int value = (tw & 0xffff) | ((ls & 0x7ff) << 16);
+
+       /* Program the timers in the LPI timer control register:
+        * LS: minimum time (ms) for which the link
+        *  status from PHY should be ok before transmitting
+        *  the LPI pattern.
+        * TW: minimum time (us) for which the core waits
+        *  after it has stopped transmitting the LPI pattern.
+        */
+       writel(value, ioaddr + SXGBE_CORE_LPI_TIMER_CTRL);
+}
+
+static void sxgbe_enable_rx_csum(void __iomem *ioaddr)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+       ctrl |= SXGBE_RX_CSUMOFFLOAD_ENABLE;
+       writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+static void sxgbe_disable_rx_csum(void __iomem *ioaddr)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+       ctrl &= ~SXGBE_RX_CSUMOFFLOAD_ENABLE;
+       writel(ctrl, ioaddr + SXGBE_CORE_RX_CONFIG_REG);
+}
+
+const struct sxgbe_core_ops core_ops = {
+       .core_init              = sxgbe_core_init,
+       .dump_regs              = sxgbe_core_dump_regs,
+       .host_irq_status        = sxgbe_core_host_irq_status,
+       .pmt                    = sxgbe_core_pmt,
+       .set_umac_addr          = sxgbe_core_set_umac_addr,
+       .get_umac_addr          = sxgbe_core_get_umac_addr,
+       .enable_rx              = sxgbe_enable_rx,
+       .enable_tx              = sxgbe_enable_tx,
+       .get_controller_version = sxgbe_get_controller_version,
+       .get_hw_feature         = sxgbe_get_hw_feature,
+       .set_speed              = sxgbe_core_set_speed,
+       .set_eee_mode           = sxgbe_set_eee_mode,
+       .reset_eee_mode         = sxgbe_reset_eee_mode,
+       .set_eee_timer          = sxgbe_set_eee_timer,
+       .set_eee_pls            = sxgbe_set_eee_pls,
+       .enable_rx_csum         = sxgbe_enable_rx_csum,
+       .disable_rx_csum        = sxgbe_disable_rx_csum,
+};
+
+const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
+{
+       return &core_ops;
+}
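The MAC operations above are reached through the ops tables embedded in struct sxgbe_priv_data. The following is only a sketch of a caller, assuming priv->hw->mac points at the core_ops table returned by sxgbe_get_core_ops(); the start-up sequence shown is illustrative, not the driver's actual open path.

#include "sxgbe_common.h"

static void sxgbe_example_start(struct sxgbe_priv_data *priv)
{
	/* Program unicast filter slot 0 with the interface MAC address */
	priv->hw->mac->set_umac_addr(priv->ioaddr, priv->dev->dev_addr, 0);

	/* Enable the MAC transmit and receive paths */
	priv->hw->mac->enable_tx(priv->ioaddr, true);
	priv->hw->mac->enable_rx(priv->ioaddr, true);
}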
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c
new file mode 100644 (file)
index 0000000..e896dbb
--- /dev/null
@@ -0,0 +1,515 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_desc.h"
+
+/* DMA TX descriptor ring initialization */
+static void sxgbe_init_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+       p->tdes23.tx_rd_des23.own_bit = 0;
+}
+
+static void sxgbe_tx_desc_enable_tse(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+                                    u32 total_hdr_len, u32 tcp_hdr_len,
+                                    u32 tcp_payload_len)
+{
+       p->tdes23.tx_rd_des23.tse_bit = is_tse;
+       p->tdes23.tx_rd_des23.buf1_size = total_hdr_len;
+       p->tdes23.tx_rd_des23.tcp_hdr_len = tcp_hdr_len / 4;
+       p->tdes23.tx_rd_des23.tx_pkt_len.tcp_payload_len  = tcp_payload_len;
+}
+
+/* Assign buffer lengths for descriptor */
+static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+                                 int buf1_len, int pkt_len, int cksum)
+{
+       p->tdes23.tx_rd_des23.first_desc = is_fd;
+       p->tdes23.tx_rd_des23.buf1_size = buf1_len;
+
+       p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+
+       if (cksum)
+               p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+}
+
+/* Set VLAN control information */
+static void sxgbe_tx_vlanctl_desc(struct sxgbe_tx_norm_desc *p, int vlan_ctl)
+{
+       p->tdes23.tx_rd_des23.vlan_tag_ctl = vlan_ctl;
+}
+
+/* Set the owner of Normal descriptor */
+static void sxgbe_set_tx_owner(struct sxgbe_tx_norm_desc *p)
+{
+       p->tdes23.tx_rd_des23.own_bit = 1;
+}
+
+/* Get the owner of Normal descriptor */
+static int sxgbe_get_tx_owner(struct sxgbe_tx_norm_desc *p)
+{
+       return p->tdes23.tx_rd_des23.own_bit;
+}
+
+/* Invoked by the xmit function to close the tx descriptor */
+static void sxgbe_close_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+       p->tdes23.tx_rd_des23.last_desc = 1;
+       p->tdes23.tx_rd_des23.int_on_com = 1;
+}
+
+/* Clean the tx descriptor as soon as the tx irq is received */
+static void sxgbe_release_tx_desc(struct sxgbe_tx_norm_desc *p)
+{
+       memset(p, 0, sizeof(*p));
+}
+
+/* Clear the interrupt-on-completion bit. When this bit is
+ * set, an interrupt is raised as soon as the frame is transmitted
+ */
+static void sxgbe_clear_tx_ic(struct sxgbe_tx_norm_desc *p)
+{
+       p->tdes23.tx_rd_des23.int_on_com = 0;
+}
+
+/* Last tx segment reports the transmit status */
+static int sxgbe_get_tx_ls(struct sxgbe_tx_norm_desc *p)
+{
+       return p->tdes23.tx_rd_des23.last_desc;
+}
+
+/* Get the buffer size from the descriptor */
+static int sxgbe_get_tx_len(struct sxgbe_tx_norm_desc *p)
+{
+       return p->tdes23.tx_rd_des23.buf1_size;
+}
+
+/* Set tx timestamp enable bit */
+static void sxgbe_tx_enable_tstamp(struct sxgbe_tx_norm_desc *p)
+{
+       p->tdes23.tx_rd_des23.timestmp_enable = 1;
+}
+
+/* get tx timestamp status */
+static int sxgbe_get_tx_timestamp_status(struct sxgbe_tx_norm_desc *p)
+{
+       return p->tdes23.tx_rd_des23.timestmp_enable;
+}
+
+/* TX Context Descriptor Specific */
+static void sxgbe_tx_ctxt_desc_set_ctxt(struct sxgbe_tx_ctxt_desc *p)
+{
+       p->ctxt_bit = 1;
+}
+
+/* Set the owner of TX context descriptor */
+static void sxgbe_tx_ctxt_desc_set_owner(struct sxgbe_tx_ctxt_desc *p)
+{
+       p->own_bit = 1;
+}
+
+/* Get the owner of TX context descriptor */
+static int sxgbe_tx_ctxt_desc_get_owner(struct sxgbe_tx_ctxt_desc *p)
+{
+       return p->own_bit;
+}
+
+/* Set TX mss in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_set_mss(struct sxgbe_tx_ctxt_desc *p, u16 mss)
+{
+       p->maxseg_size = mss;
+}
+
+/* Get TX mss from TX context Descriptor */
+static int sxgbe_tx_ctxt_desc_get_mss(struct sxgbe_tx_ctxt_desc *p)
+{
+       return p->maxseg_size;
+}
+
+/* Set TX tcmssv in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_set_tcmssv(struct sxgbe_tx_ctxt_desc *p)
+{
+       p->tcmssv = 1;
+}
+
+/* Reset TX ostc in TX context Descriptor */
+static void sxgbe_tx_ctxt_desc_reset_ostc(struct sxgbe_tx_ctxt_desc *p)
+{
+       p->ostc = 0;
+}
+
+/* Set IVLAN information */
+static void sxgbe_tx_ctxt_desc_set_ivlantag(struct sxgbe_tx_ctxt_desc *p,
+                                           int is_ivlanvalid, int ivlan_tag,
+                                           int ivlan_ctl)
+{
+       if (is_ivlanvalid) {
+               p->ivlan_tag_valid = is_ivlanvalid;
+               p->ivlan_tag = ivlan_tag;
+               p->ivlan_tag_ctl = ivlan_ctl;
+       }
+}
+
+/* Return IVLAN Tag */
+static int sxgbe_tx_ctxt_desc_get_ivlantag(struct sxgbe_tx_ctxt_desc *p)
+{
+       return p->ivlan_tag;
+}
+
+/* Set VLAN Tag */
+static void sxgbe_tx_ctxt_desc_set_vlantag(struct sxgbe_tx_ctxt_desc *p,
+                                          int is_vlanvalid, int vlan_tag)
+{
+       if (is_vlanvalid) {
+               p->vltag_valid = is_vlanvalid;
+               p->vlan_tag = vlan_tag;
+       }
+}
+
+/* Return VLAN Tag */
+static int sxgbe_tx_ctxt_desc_get_vlantag(struct sxgbe_tx_ctxt_desc *p)
+{
+       return p->vlan_tag;
+}
+
+/* Set Time stamp */
+static void sxgbe_tx_ctxt_desc_set_tstamp(struct sxgbe_tx_ctxt_desc *p,
+                                         u8 ostc_enable, u64 tstamp)
+{
+       if (ostc_enable) {
+               p->ostc = ostc_enable;
+               p->tstamp_lo = (u32) tstamp;
+               p->tstamp_hi = (u32) (tstamp>>32);
+       }
+}
+/* Close TX context descriptor */
+static void sxgbe_tx_ctxt_desc_close(struct sxgbe_tx_ctxt_desc *p)
+{
+       p->own_bit = 1;
+}
+
+/* WB status of context descriptor */
+static int sxgbe_tx_ctxt_desc_get_cde(struct sxgbe_tx_ctxt_desc *p)
+{
+       return p->ctxt_desc_err;
+}
+
+/* DMA RX descriptor ring initialization */
+static void sxgbe_init_rx_desc(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+                              int mode, int end)
+{
+       p->rdes23.rx_rd_des23.own_bit = 1;
+       if (disable_rx_ic)
+               p->rdes23.rx_rd_des23.int_on_com = disable_rx_ic;
+}
+
+/* Get RX own bit */
+static int sxgbe_get_rx_owner(struct sxgbe_rx_norm_desc *p)
+{
+       return p->rdes23.rx_rd_des23.own_bit;
+}
+
+/* Set RX own bit */
+static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
+{
+       p->rdes23.rx_rd_des23.own_bit = 1;
+}
+
+/* Get the receive frame size */
+static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
+{
+       return p->rdes23.rx_wb_des23.pkt_len;
+}
+
+/* Return first Descriptor status */
+static int sxgbe_get_rx_fd_status(struct sxgbe_rx_norm_desc *p)
+{
+       return p->rdes23.rx_wb_des23.first_desc;
+}
+
+/* Return Last Descriptor status */
+static int sxgbe_get_rx_ld_status(struct sxgbe_rx_norm_desc *p)
+{
+       return p->rdes23.rx_wb_des23.last_desc;
+}
+
+
+/* Return the RX status looking at the WB fields */
+static int sxgbe_rx_wbstatus(struct sxgbe_rx_norm_desc *p,
+                            struct sxgbe_extra_stats *x, int *checksum)
+{
+       int status = 0;
+
+       *checksum = CHECKSUM_UNNECESSARY;
+       if (p->rdes23.rx_wb_des23.err_summary) {
+               switch (p->rdes23.rx_wb_des23.err_l2_type) {
+               case RX_GMII_ERR:
+                       status = -EINVAL;
+                       x->rx_code_gmii_err++;
+                       break;
+               case RX_WATCHDOG_ERR:
+                       status = -EINVAL;
+                       x->rx_watchdog_err++;
+                       break;
+               case RX_CRC_ERR:
+                       status = -EINVAL;
+                       x->rx_crc_err++;
+                       break;
+               case RX_GAINT_ERR:
+                       status = -EINVAL;
+                       x->rx_gaint_pkt_err++;
+                       break;
+               case RX_IP_HDR_ERR:
+                       *checksum = CHECKSUM_NONE;
+                       x->ip_hdr_err++;
+                       break;
+               case RX_PAYLOAD_ERR:
+                       *checksum = CHECKSUM_NONE;
+                       x->ip_payload_err++;
+                       break;
+               case RX_OVERFLOW_ERR:
+                       status = -EINVAL;
+                       x->overflow_error++;
+                       break;
+               default:
+                       pr_err("Invalid Error type\n");
+                       break;
+               }
+       } else {
+               switch (p->rdes23.rx_wb_des23.err_l2_type) {
+               case RX_LEN_PKT:
+                       x->len_pkt++;
+                       break;
+               case RX_MACCTL_PKT:
+                       x->mac_ctl_pkt++;
+                       break;
+               case RX_DCBCTL_PKT:
+                       x->dcb_ctl_pkt++;
+                       break;
+               case RX_ARP_PKT:
+                       x->arp_pkt++;
+                       break;
+               case RX_OAM_PKT:
+                       x->oam_pkt++;
+                       break;
+               case RX_UNTAG_PKT:
+                       x->untag_okt++;
+                       break;
+               case RX_OTHER_PKT:
+                       x->other_pkt++;
+                       break;
+               case RX_SVLAN_PKT:
+                       x->svlan_tag_pkt++;
+                       break;
+               case RX_CVLAN_PKT:
+                       x->cvlan_tag_pkt++;
+                       break;
+               case RX_DVLAN_OCVLAN_ICVLAN_PKT:
+                       x->dvlan_ocvlan_icvlan_pkt++;
+                       break;
+               case RX_DVLAN_OSVLAN_ISVLAN_PKT:
+                       x->dvlan_osvlan_isvlan_pkt++;
+                       break;
+               case RX_DVLAN_OSVLAN_ICVLAN_PKT:
+                       x->dvlan_osvlan_icvlan_pkt++;
+                       break;
+               case RX_DVLAN_OCVLAN_ISVLAN_PKT:
+                       x->dvlan_ocvlan_icvlan_pkt++;
+                       break;
+               default:
+                       pr_err("Invalid L2 Packet type\n");
+                       break;
+               }
+       }
+
+       /* L3/L4 Pkt type */
+       switch (p->rdes23.rx_wb_des23.layer34_pkt_type) {
+       case RX_NOT_IP_PKT:
+               x->not_ip_pkt++;
+               break;
+       case RX_IPV4_TCP_PKT:
+               x->ip4_tcp_pkt++;
+               break;
+       case RX_IPV4_UDP_PKT:
+               x->ip4_udp_pkt++;
+               break;
+       case RX_IPV4_ICMP_PKT:
+               x->ip4_icmp_pkt++;
+               break;
+       case RX_IPV4_UNKNOWN_PKT:
+               x->ip4_unknown_pkt++;
+               break;
+       case RX_IPV6_TCP_PKT:
+               x->ip6_tcp_pkt++;
+               break;
+       case RX_IPV6_UDP_PKT:
+               x->ip6_udp_pkt++;
+               break;
+       case RX_IPV6_ICMP_PKT:
+               x->ip6_icmp_pkt++;
+               break;
+       case RX_IPV6_UNKNOWN_PKT:
+               x->ip6_unknown_pkt++;
+               break;
+       default:
+               pr_err("Invalid L3/L4 Packet type\n");
+               break;
+       }
+
+       /* Filter */
+       if (p->rdes23.rx_wb_des23.vlan_filter_match)
+               x->vlan_filter_match++;
+
+       if (p->rdes23.rx_wb_des23.sa_filter_fail) {
+               status = -EINVAL;
+               x->sa_filter_fail++;
+       }
+       if (p->rdes23.rx_wb_des23.da_filter_fail) {
+               status = -EINVAL;
+               x->da_filter_fail++;
+       }
+       if (p->rdes23.rx_wb_des23.hash_filter_pass)
+               x->hash_filter_pass++;
+
+       if (p->rdes23.rx_wb_des23.l3_filter_match)
+               x->l3_filter_match++;
+
+       if (p->rdes23.rx_wb_des23.l4_filter_match)
+               x->l4_filter_match++;
+
+       return status;
+}
+
+/* Get own bit of context descriptor */
+static int sxgbe_get_rx_ctxt_owner(struct sxgbe_rx_ctxt_desc *p)
+{
+       return p->own_bit;
+}
+
+/* Set own bit for context descriptor */
+static void sxgbe_set_ctxt_rx_owner(struct sxgbe_rx_ctxt_desc *p)
+{
+       p->own_bit = 1;
+}
+
+
+/* Return the reception status looking at Context control information */
+static void sxgbe_rx_ctxt_wbstatus(struct sxgbe_rx_ctxt_desc *p,
+                                  struct sxgbe_extra_stats *x)
+{
+       if (p->tstamp_dropped)
+               x->timestamp_dropped++;
+
+       /* ptp */
+       if (p->ptp_msgtype == RX_NO_PTP)
+               x->rx_msg_type_no_ptp++;
+       else if (p->ptp_msgtype == RX_PTP_SYNC)
+               x->rx_ptp_type_sync++;
+       else if (p->ptp_msgtype == RX_PTP_FOLLOW_UP)
+               x->rx_ptp_type_follow_up++;
+       else if (p->ptp_msgtype == RX_PTP_DELAY_REQ)
+               x->rx_ptp_type_delay_req++;
+       else if (p->ptp_msgtype == RX_PTP_DELAY_RESP)
+               x->rx_ptp_type_delay_resp++;
+       else if (p->ptp_msgtype == RX_PTP_PDELAY_REQ)
+               x->rx_ptp_type_pdelay_req++;
+       else if (p->ptp_msgtype == RX_PTP_PDELAY_RESP)
+               x->rx_ptp_type_pdelay_resp++;
+       else if (p->ptp_msgtype == RX_PTP_PDELAY_FOLLOW_UP)
+               x->rx_ptp_type_pdelay_follow_up++;
+       else if (p->ptp_msgtype == RX_PTP_ANNOUNCE)
+               x->rx_ptp_announce++;
+       else if (p->ptp_msgtype == RX_PTP_MGMT)
+               x->rx_ptp_mgmt++;
+       else if (p->ptp_msgtype == RX_PTP_SIGNAL)
+               x->rx_ptp_signal++;
+       else if (p->ptp_msgtype == RX_PTP_RESV_MSG)
+               x->rx_ptp_resv_msg_type++;
+}
+
+/* Get rx timestamp status */
+static int sxgbe_get_rx_ctxt_tstamp_status(struct sxgbe_rx_ctxt_desc *p)
+{
+       if ((p->tstamp_hi == 0xffffffff) && (p->tstamp_lo == 0xffffffff)) {
+               pr_err("Time stamp corrupted\n");
+               return 0;
+       }
+
+       return p->tstamp_available;
+}
+
+
+static u64 sxgbe_get_rx_timestamp(struct sxgbe_rx_ctxt_desc *p)
+{
+       u64 ns;
+
+       ns = p->tstamp_lo;
+       ns |= ((u64)p->tstamp_hi) << 32;
+
+       return ns;
+}
+
+static const struct sxgbe_desc_ops desc_ops = {
+       .init_tx_desc                   = sxgbe_init_tx_desc,
+       .tx_desc_enable_tse             = sxgbe_tx_desc_enable_tse,
+       .prepare_tx_desc                = sxgbe_prepare_tx_desc,
+       .tx_vlanctl_desc                = sxgbe_tx_vlanctl_desc,
+       .set_tx_owner                   = sxgbe_set_tx_owner,
+       .get_tx_owner                   = sxgbe_get_tx_owner,
+       .close_tx_desc                  = sxgbe_close_tx_desc,
+       .release_tx_desc                = sxgbe_release_tx_desc,
+       .clear_tx_ic                    = sxgbe_clear_tx_ic,
+       .get_tx_ls                      = sxgbe_get_tx_ls,
+       .get_tx_len                     = sxgbe_get_tx_len,
+       .tx_enable_tstamp               = sxgbe_tx_enable_tstamp,
+       .get_tx_timestamp_status        = sxgbe_get_tx_timestamp_status,
+       .tx_ctxt_desc_set_ctxt          = sxgbe_tx_ctxt_desc_set_ctxt,
+       .tx_ctxt_desc_set_owner         = sxgbe_tx_ctxt_desc_set_owner,
+       .get_tx_ctxt_owner              = sxgbe_tx_ctxt_desc_get_owner,
+       .tx_ctxt_desc_set_mss           = sxgbe_tx_ctxt_desc_set_mss,
+       .tx_ctxt_desc_get_mss           = sxgbe_tx_ctxt_desc_get_mss,
+       .tx_ctxt_desc_set_tcmssv        = sxgbe_tx_ctxt_desc_set_tcmssv,
+       .tx_ctxt_desc_reset_ostc        = sxgbe_tx_ctxt_desc_reset_ostc,
+       .tx_ctxt_desc_set_ivlantag      = sxgbe_tx_ctxt_desc_set_ivlantag,
+       .tx_ctxt_desc_get_ivlantag      = sxgbe_tx_ctxt_desc_get_ivlantag,
+       .tx_ctxt_desc_set_vlantag       = sxgbe_tx_ctxt_desc_set_vlantag,
+       .tx_ctxt_desc_get_vlantag       = sxgbe_tx_ctxt_desc_get_vlantag,
+       .tx_ctxt_set_tstamp             = sxgbe_tx_ctxt_desc_set_tstamp,
+       .close_tx_ctxt_desc             = sxgbe_tx_ctxt_desc_close,
+       .get_tx_ctxt_cde                = sxgbe_tx_ctxt_desc_get_cde,
+       .init_rx_desc                   = sxgbe_init_rx_desc,
+       .get_rx_owner                   = sxgbe_get_rx_owner,
+       .set_rx_owner                   = sxgbe_set_rx_owner,
+       .get_rx_frame_len               = sxgbe_get_rx_frame_len,
+       .get_rx_fd_status               = sxgbe_get_rx_fd_status,
+       .get_rx_ld_status               = sxgbe_get_rx_ld_status,
+       .rx_wbstatus                    = sxgbe_rx_wbstatus,
+       .get_rx_ctxt_owner              = sxgbe_get_rx_ctxt_owner,
+       .set_rx_ctxt_owner              = sxgbe_set_ctxt_rx_owner,
+       .rx_ctxt_wbstatus               = sxgbe_rx_ctxt_wbstatus,
+       .get_rx_ctxt_tstamp_status      = sxgbe_get_rx_ctxt_tstamp_status,
+       .get_timestamp                  = sxgbe_get_rx_timestamp,
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void)
+{
+       return &desc_ops;
+}
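The TX descriptor helpers above follow a prepare/close/hand-off pattern. A minimal sketch of filling a single-buffer frame through the ops table, assuming sxgbe_get_desc_ops() is declared in sxgbe_desc.h; the length and checksum flag are purely illustrative.

#include "sxgbe_common.h"
#include "sxgbe_desc.h"

static void sxgbe_example_fill_tx(struct sxgbe_tx_norm_desc *p, int len)
{
	const struct sxgbe_desc_ops *dops = sxgbe_get_desc_ops();

	/* Single buffer carrying the whole packet, checksum insertion on */
	dops->prepare_tx_desc(p, 1, len, len, 1);
	/* Mark it as the last segment and request a completion interrupt */
	dops->close_tx_desc(p);
	/* Transfer ownership to the DMA engine last */
	dops->set_tx_owner(p);
}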
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h
new file mode 100644 (file)
index 0000000..838cb9f
--- /dev/null
@@ -0,0 +1,298 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_DESC_H__
+#define __SXGBE_DESC_H__
+
+#define SXGBE_DESC_SIZE_BYTES  16
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+       cic_disabled            = 0,    /* Checksum Insertion Control */
+       cic_only_ip             = 1,    /* Only IP header */
+       /* IP header but pseudoheader is not calculated */
+       cic_no_pseudoheader     = 2,
+       cic_full                = 3,    /* IP header and pseudoheader */
+};
+
+struct sxgbe_tx_norm_desc {
+       u64 tdes01; /* buf1 address */
+       union {
+               /* TX Read-Format Desc 2,3 */
+               struct {
+                       /* TDES2 */
+                       u32 buf1_size:14;
+                       u32 vlan_tag_ctl:2;
+                       u32 buf2_size:14;
+                       u32 timestmp_enable:1;
+                       u32 int_on_com:1;
+                       /* TDES3 */
+                       union {
+                               u32 tcp_payload_len:18;
+                               struct {
+                                       u32 total_pkt_len:15;
+                                       u32 reserved1:1;
+                                       u32 cksum_ctl:2;
+                               } cksum_pktlen;
+                       } tx_pkt_len;
+
+                       u32 tse_bit:1;
+                       u32 tcp_hdr_len:4;
+                       u32 sa_insert_ctl:3;
+                       u32 crc_pad_ctl:2;
+                       u32 last_desc:1;
+                       u32 first_desc:1;
+                       u32 ctxt_bit:1;
+                       u32 own_bit:1;
+               } tx_rd_des23;
+
+               /* tx write back Desc 2,3 */
+               struct {
+                       /* WB TES2 */
+                       u32 reserved1;
+                       /* WB TES3 */
+                       u32 reserved2:31;
+                       u32 own_bit:1;
+               } tx_wb_des23;
+       } tdes23;
+};
+
+struct sxgbe_rx_norm_desc {
+       union {
+               u32 rdes0; /* buf1 address */
+               struct {
+                       u32 out_vlan_tag:16;
+                       u32 in_vlan_tag:16;
+               } wb_rx_des0;
+       } rd_wb_des0;
+
+       union {
+               u32 rdes1;      /* buf2 address or buf1[63:32] */
+               u32 rss_hash;   /* Write-back RX */
+       } rd_wb_des1;
+
+       union {
+               /* RX Read format Desc 2,3 */
+               struct{
+                       /* RDES2 */
+                       u32 buf2_addr;
+                       /* RDES3 */
+                       u32 buf2_hi_addr:30;
+                       u32 int_on_com:1;
+                       u32 own_bit:1;
+               } rx_rd_des23;
+
+               /* RX write back */
+               struct{
+                       /* WB RDES2 */
+                       u32 hdr_len:10;
+                       u32 rdes2_reserved:2;
+                       u32 elrd_val:1;
+                       u32 iovt_sel:1;
+                       u32 res_pkt:1;
+                       u32 vlan_filter_match:1;
+                       u32 sa_filter_fail:1;
+                       u32 da_filter_fail:1;
+                       u32 hash_filter_pass:1;
+                       u32 macaddr_filter_match:8;
+                       u32 l3_filter_match:1;
+                       u32 l4_filter_match:1;
+                       u32 l34_filter_num:3;
+
+                       /* WB RDES3 */
+                       u32 pkt_len:14;
+                       u32 rdes3_reserved:1;
+                       u32 err_summary:1;
+                       u32 err_l2_type:4;
+                       u32 layer34_pkt_type:4;
+                       u32 no_coagulation_pkt:1;
+                       u32 in_seq_pkt:1;
+                       u32 rss_valid:1;
+                       u32 context_des_avail:1;
+                       u32 last_desc:1;
+                       u32 first_desc:1;
+                       u32 recv_context_desc:1;
+                       u32 own_bit:1;
+               } rx_wb_des23;
+       } rdes23;
+};
+
+/* Context descriptor structure */
+struct sxgbe_tx_ctxt_desc {
+       u32 tstamp_lo;
+       u32 tstamp_hi;
+       u32 maxseg_size:15;
+       u32 reserved1:1;
+       u32 ivlan_tag:16;
+       u32 vlan_tag:16;
+       u32 vltag_valid:1;
+       u32 ivlan_tag_valid:1;
+       u32 ivlan_tag_ctl:2;
+       u32 reserved2:3;
+       u32 ctxt_desc_err:1;
+       u32 reserved3:2;
+       u32 ostc:1;
+       u32 tcmssv:1;
+       u32 reserved4:2;
+       u32 ctxt_bit:1;
+       u32 own_bit:1;
+};
+
+struct sxgbe_rx_ctxt_desc {
+       u32 tstamp_lo;
+       u32 tstamp_hi;
+       u32 reserved1;
+       u32 ptp_msgtype:4;
+       u32 tstamp_available:1;
+       u32 ptp_rsp_err:1;
+       u32 tstamp_dropped:1;
+       u32 reserved2:23;
+       u32 rx_ctxt_desc:1;
+       u32 own_bit:1;
+};
+
+struct sxgbe_desc_ops {
+       /* DMA TX descriptor ring initialization */
+       void (*init_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+       /* Invoked by the xmit function to prepare the tx descriptor */
+       void (*tx_desc_enable_tse)(struct sxgbe_tx_norm_desc *p, u8 is_tse,
+                                  u32 total_hdr_len, u32 tcp_hdr_len,
+                                  u32 tcp_payload_len);
+
+       /* Assign buffer lengths for descriptor */
+       void (*prepare_tx_desc)(struct sxgbe_tx_norm_desc *p, u8 is_fd,
+                               int buf1_len, int pkt_len, int cksum);
+
+       /* Set VLAN control information */
+       void (*tx_vlanctl_desc)(struct sxgbe_tx_norm_desc *p, int vlan_ctl);
+
+       /* Set the owner of the descriptor */
+       void (*set_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+       /* Get the owner of the descriptor */
+       int (*get_tx_owner)(struct sxgbe_tx_norm_desc *p);
+
+       /* Invoked by the xmit function to close the tx descriptor */
+       void (*close_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+       /* Clean the tx descriptor as soon as the tx irq is received */
+       void (*release_tx_desc)(struct sxgbe_tx_norm_desc *p);
+
+       /* Clear interrupt on tx frame completion. When this bit is
+        * set an interrupt happens as soon as the frame is transmitted
+        */
+       void (*clear_tx_ic)(struct sxgbe_tx_norm_desc *p);
+
+       /* Last tx segment reports the transmit status */
+       int (*get_tx_ls)(struct sxgbe_tx_norm_desc *p);
+
+       /* Get the buffer size from the descriptor */
+       int (*get_tx_len)(struct sxgbe_tx_norm_desc *p);
+
+       /* Set tx timestamp enable bit */
+       void (*tx_enable_tstamp)(struct sxgbe_tx_norm_desc *p);
+
+       /* get tx timestamp status */
+       int (*get_tx_timestamp_status)(struct sxgbe_tx_norm_desc *p);
+
+       /* TX Context Descriptor Specific */
+       void (*tx_ctxt_desc_set_ctxt)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Set the owner of the TX context descriptor */
+       void (*tx_ctxt_desc_set_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Get the owner of the TX context descriptor */
+       int (*get_tx_ctxt_owner)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Set TX mss */
+       void (*tx_ctxt_desc_set_mss)(struct sxgbe_tx_ctxt_desc *p, u16 mss);
+
+       /* Get TX mss */
+       int (*tx_ctxt_desc_get_mss)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Set TX tcmssv */
+       void (*tx_ctxt_desc_set_tcmssv)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Reset TX ostc */
+       void (*tx_ctxt_desc_reset_ostc)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Set IVLAN information */
+       void (*tx_ctxt_desc_set_ivlantag)(struct sxgbe_tx_ctxt_desc *p,
+                                         int is_ivlanvalid, int ivlan_tag,
+                                         int ivlan_ctl);
+
+       /* Return IVLAN Tag */
+       int (*tx_ctxt_desc_get_ivlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Set VLAN Tag */
+       void (*tx_ctxt_desc_set_vlantag)(struct sxgbe_tx_ctxt_desc *p,
+                                        int is_vlanvalid, int vlan_tag);
+
+       /* Return VLAN Tag */
+       int (*tx_ctxt_desc_get_vlantag)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* Set Time stamp */
+       void (*tx_ctxt_set_tstamp)(struct sxgbe_tx_ctxt_desc *p,
+                                  u8 ostc_enable, u64 tstamp);
+
+       /* Close TX context descriptor */
+       void (*close_tx_ctxt_desc)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* WB status of context descriptor */
+       int (*get_tx_ctxt_cde)(struct sxgbe_tx_ctxt_desc *p);
+
+       /* DMA RX descriptor ring initialization */
+       void (*init_rx_desc)(struct sxgbe_rx_norm_desc *p, int disable_rx_ic,
+                            int mode, int end);
+
+       /* Get own bit */
+       int (*get_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+       /* Set own bit */
+       void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);
+
+       /* Get the receive frame size */
+       int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
+
+       /* Return first Descriptor status */
+       int (*get_rx_fd_status)(struct sxgbe_rx_norm_desc *p);
+
+       /* Return last Descriptor status */
+       int (*get_rx_ld_status)(struct sxgbe_rx_norm_desc *p);
+
+       /* Return the reception status looking at the RDES1 */
+       int (*rx_wbstatus)(struct sxgbe_rx_norm_desc *p,
+                          struct sxgbe_extra_stats *x, int *checksum);
+
+       /* Get own bit */
+       int (*get_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+       /* Set own bit */
+       void (*set_rx_ctxt_owner)(struct sxgbe_rx_ctxt_desc *p);
+
+       /* Return the reception status looking at Context control information */
+       void (*rx_ctxt_wbstatus)(struct sxgbe_rx_ctxt_desc *p,
+                                struct sxgbe_extra_stats *x);
+
+       /* Get rx timestamp status */
+       int (*get_rx_ctxt_tstamp_status)(struct sxgbe_rx_ctxt_desc *p);
+
+       /* Get timestamp value for rx, need to check this */
+       u64 (*get_timestamp)(struct sxgbe_rx_ctxt_desc *p);
+};
+
+const struct sxgbe_desc_ops *sxgbe_get_desc_ops(void);
+
+#endif /* __SXGBE_DESC_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c
new file mode 100644 (file)
index 0000000..28f89c4
--- /dev/null
@@ -0,0 +1,382 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_desc.h"
+
+/* DMA core initialization */
+static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
+{
+       int retry_count = 10;
+       u32 reg_val;
+
+       /* reset the DMA */
+       writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
+       while (retry_count--) {
+               if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
+                     SXGBE_DMA_SOFT_RESET))
+                       break;
+               mdelay(10);
+       }
+
+       if (retry_count < 0)
+               return -EBUSY;
+
+       reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+       /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
+        * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
+        * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
+        * Set burst_map irrespective of fix_burst value.
+        */
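+       /* Example (assuming bit 0 of burst_map selects BLEN4, in the order
+        * listed above): burst_map = 0x7 enables BLEN4, BLEN8 and BLEN16.
+        */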
+       if (!fix_burst)
+               reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
+
+       /* write burst len map */
+       reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);
+
+       writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
+
+       return 0;
+}
+
+static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
+                                  int fix_burst, int pbl, dma_addr_t dma_tx,
+                                  dma_addr_t dma_rx, int t_rsize, int r_rsize)
+{
+       u32 reg_val;
+       dma_addr_t dma_addr;
+
+       reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+       /* set the pbl */
+       if (fix_burst) {
+               reg_val |= SXGBE_DMA_PBL_X8MODE;
+               writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
+               /* program the TX pbl */
+               reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+               reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
+               writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+               /* program the RX pbl */
+               reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+               reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
+               writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
+       }
+
+       /* program desc registers */
+       writel(upper_32_bits(dma_tx),
+              ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
+       writel(lower_32_bits(dma_tx),
+              ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));
+
+       writel(upper_32_bits(dma_rx),
+              ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
+       writel(lower_32_bits(dma_rx),
+              ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+
+       /* program tail pointers */
+       /* assumption: upper 32 bits are constant and
+        * same as TX/RX desc list
+        */
+       dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+       writel(lower_32_bits(dma_addr),
+              ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));
+
+       dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
+       writel(lower_32_bits(dma_addr),
+              ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));
+       /* program the ring sizes */
+       writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
+       writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));
+
+       /* Enable TX/RX interrupts */
+       writel(SXGBE_DMA_ENA_INT,
+              ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
+{
+       u32 tx_config;
+
+       tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+       tx_config |= SXGBE_TX_START_DMA;
+       writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
+}
+
+static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+       /* Enable TX/RX interrupts */
+       writel(SXGBE_DMA_ENA_INT,
+              ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
+{
+       /* Disable TX/RX interrupts */
+       writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
+}
+
+static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
+{
+       int cnum;
+       u32 tx_ctl_reg;
+
+       for (cnum = 0; cnum < tchannels; cnum++) {
+               tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+               tx_ctl_reg |= SXGBE_TX_ENABLE;
+               writel(tx_ctl_reg,
+                      ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+       }
+}
+
+static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+       u32 tx_ctl_reg;
+
+       tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+       tx_ctl_reg |= SXGBE_TX_ENABLE;
+       writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
+{
+       u32 tx_ctl_reg;
+
+       tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+       tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+       writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
+}
+
+static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
+{
+       int cnum;
+       u32 tx_ctl_reg;
+
+       for (cnum = 0; cnum < tchannels; cnum++) {
+               tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+               tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
+               writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
+       }
+}
+
+static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
+{
+       int cnum;
+       u32 rx_ctl_reg;
+
+       for (cnum = 0; cnum < rchannels; cnum++) {
+               rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+               rx_ctl_reg |= SXGBE_RX_ENABLE;
+               writel(rx_ctl_reg,
+                      ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+       }
+}
+
+static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
+{
+       int cnum;
+       u32 rx_ctl_reg;
+
+       for (cnum = 0; cnum < rchannels; cnum++) {
+               rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+               rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
+               writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
+       }
+}
+
+static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
+                                  struct sxgbe_extra_stats *x)
+{
+       u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+       u32 clear_val = 0;
+       u32 ret_val = 0;
+
+       /* TX Normal Interrupt Summary */
+       if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
+               x->normal_irq_n++;
+               if (int_status & SXGBE_DMA_INT_STATUS_TI) {
+                       ret_val |= handle_tx;
+                       x->tx_normal_irq_n++;
+                       clear_val |= SXGBE_DMA_INT_STATUS_TI;
+               }
+
+               if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
+                       x->tx_underflow_irq++;
+                       ret_val |= tx_bump_tc;
+                       clear_val |= SXGBE_DMA_INT_STATUS_TBU;
+               }
+       } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
+               /* TX Abnormal Interrupt Summary */
+               if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
+                       ret_val |= tx_hard_error;
+                       clear_val |= SXGBE_DMA_INT_STATUS_TPS;
+                       x->tx_process_stopped_irq++;
+               }
+
+               if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
+                       ret_val |= tx_hard_error;
+                       x->fatal_bus_error_irq++;
+
+                       /* Assumption: FBE bit is the combination of
+                        * all the bus access errors and is cleared when
+                        * the respective error bits are cleared
+                        */
+
+                       /* check for actual cause */
+                       if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
+                               x->tx_read_transfer_err++;
+                               clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
+                       } else {
+                               x->tx_write_transfer_err++;
+                       }
+
+                       if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
+                               x->tx_desc_access_err++;
+                               clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
+                       } else {
+                               x->tx_buffer_access_err++;
+                       }
+
+                       if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
+                               x->tx_data_transfer_err++;
+                               clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
+                       }
+               }
+
+               /* context descriptor error */
+               if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
+                       x->tx_ctxt_desc_err++;
+                       clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
+               }
+       }
+
+       /* clear the served bits */
+       writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+
+       return ret_val;
+}
+
+static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
+                                  struct sxgbe_extra_stats *x)
+{
+       u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+       u32 clear_val = 0;
+       u32 ret_val = 0;
+
+       /* RX Normal Interrupt Summary */
+       if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
+               x->normal_irq_n++;
+               if (int_status & SXGBE_DMA_INT_STATUS_RI) {
+                       ret_val |= handle_rx;
+                       x->rx_normal_irq_n++;
+                       clear_val |= SXGBE_DMA_INT_STATUS_RI;
+               }
+       } else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
+               /* RX Abnormal Interrupt Summary */
+               if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
+                       ret_val |= rx_bump_tc;
+                       clear_val |= SXGBE_DMA_INT_STATUS_RBU;
+                       x->rx_underflow_irq++;
+               }
+
+               if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
+                       ret_val |= rx_hard_error;
+                       clear_val |= SXGBE_DMA_INT_STATUS_RPS;
+                       x->rx_process_stopped_irq++;
+               }
+
+               if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
+                       ret_val |= rx_hard_error;
+                       x->fatal_bus_error_irq++;
+
+                       /* Assumption: FBE bit is the combination of
+                        * all the bus access errors and is cleared when
+                        * the respective error bits are cleared
+                        */
+
+                       /* check for actual cause */
+                       if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
+                               x->rx_read_transfer_err++;
+                               clear_val |= SXGBE_DMA_INT_STATUS_REB0;
+                       } else {
+                               x->rx_write_transfer_err++;
+                       }
+
+                       if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
+                               x->rx_desc_access_err++;
+                               clear_val |= SXGBE_DMA_INT_STATUS_REB1;
+                       } else {
+                               x->rx_buffer_access_err++;
+                       }
+
+                       if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
+                               x->rx_data_transfer_err++;
+                               clear_val |= SXGBE_DMA_INT_STATUS_REB2;
+                       }
+               }
+       }
+
+       /* clear the served bits */
+       writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
+
+       return ret_val;
+}
+
+/* Program the HW RX Watchdog */
+static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+       u32 que_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
+               writel(riwt,
+                      ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
+       }
+}
+
+static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
+{
+       u32 ctrl;
+
+       ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
+       ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
+       writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
+}
+
+static const struct sxgbe_dma_ops sxgbe_dma_ops = {
+       .init                           = sxgbe_dma_init,
+       .cha_init                       = sxgbe_dma_channel_init,
+       .enable_dma_transmission        = sxgbe_enable_dma_transmission,
+       .enable_dma_irq                 = sxgbe_enable_dma_irq,
+       .disable_dma_irq                = sxgbe_disable_dma_irq,
+       .start_tx                       = sxgbe_dma_start_tx,
+       .start_tx_queue                 = sxgbe_dma_start_tx_queue,
+       .stop_tx                        = sxgbe_dma_stop_tx,
+       .stop_tx_queue                  = sxgbe_dma_stop_tx_queue,
+       .start_rx                       = sxgbe_dma_start_rx,
+       .stop_rx                        = sxgbe_dma_stop_rx,
+       .tx_dma_int_status              = sxgbe_tx_dma_int_status,
+       .rx_dma_int_status              = sxgbe_rx_dma_int_status,
+       .rx_watchdog                    = sxgbe_dma_rx_watchdog,
+       .enable_tso                     = sxgbe_enable_tso,
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
+{
+       return &sxgbe_dma_ops;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.h
new file mode 100644 (file)
index 0000000..1607b54
--- /dev/null
@@ -0,0 +1,50 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_DMA_H__
+#define __SXGBE_DMA_H__
+
+/* forward declaration */
+struct sxgbe_extra_stats;
+
+#define SXGBE_DMA_BLENMAP_LSHIFT       1
+#define SXGBE_DMA_TXPBL_LSHIFT         16
+#define SXGBE_DMA_RXPBL_LSHIFT         16
+#define DEFAULT_DMA_PBL                        8
+
+struct sxgbe_dma_ops {
+       /* DMA core initialization */
+       int (*init)(void __iomem *ioaddr, int fix_burst, int burst_map);
+       void (*cha_init)(void __iomem *ioaddr, int cha_num, int fix_burst,
+                        int pbl, dma_addr_t dma_tx, dma_addr_t dma_rx,
+                        int t_rsize, int r_rsize);
+       void (*enable_dma_transmission)(void __iomem *ioaddr, int dma_cnum);
+       void (*enable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+       void (*disable_dma_irq)(void __iomem *ioaddr, int dma_cnum);
+       void (*start_tx)(void __iomem *ioaddr, int tchannels);
+       void (*start_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+       void (*stop_tx)(void __iomem *ioaddr, int tchannels);
+       void (*stop_tx_queue)(void __iomem *ioaddr, int dma_cnum);
+       void (*start_rx)(void __iomem *ioaddr, int rchannels);
+       void (*stop_rx)(void __iomem *ioaddr, int rchannels);
+       int (*tx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+                                struct sxgbe_extra_stats *x);
+       int (*rx_dma_int_status)(void __iomem *ioaddr, int channel_no,
+                                struct sxgbe_extra_stats *x);
+       /* Program the HW RX Watchdog */
+       void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt);
+       /* Enable TSO for each DMA channel */
+       void (*enable_tso)(void __iomem *ioaddr, u8 chan_num);
+};
+
+const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void);
+
+#endif /* __SXGBE_DMA_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
new file mode 100644 (file)
index 0000000..0415fa5
--- /dev/null
@@ -0,0 +1,524 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+#include "sxgbe_dma.h"
+
+struct sxgbe_stats {
+       char stat_string[ETH_GSTRING_LEN];
+       int sizeof_stat;
+       int stat_offset;
+};
+
+#define SXGBE_STAT(m)                                          \
+{                                                              \
+       #m,                                                     \
+       FIELD_SIZEOF(struct sxgbe_extra_stats, m),              \
+       offsetof(struct sxgbe_priv_data, xstats.m)              \
+}
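+/* e.g. SXGBE_STAT(tx_pkt_n) expands to the name string "tx_pkt_n", the size
+ * of that field and its offset within struct sxgbe_priv_data.
+ */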
+
+static const struct sxgbe_stats sxgbe_gstrings_stats[] = {
+       /* TX/RX IRQ events */
+       SXGBE_STAT(tx_process_stopped_irq),
+       SXGBE_STAT(tx_ctxt_desc_err),
+       SXGBE_STAT(tx_threshold),
+       SXGBE_STAT(rx_threshold),
+       SXGBE_STAT(tx_pkt_n),
+       SXGBE_STAT(rx_pkt_n),
+       SXGBE_STAT(normal_irq_n),
+       SXGBE_STAT(tx_normal_irq_n),
+       SXGBE_STAT(rx_normal_irq_n),
+       SXGBE_STAT(napi_poll),
+       SXGBE_STAT(tx_clean),
+       SXGBE_STAT(tx_reset_ic_bit),
+       SXGBE_STAT(rx_process_stopped_irq),
+       SXGBE_STAT(rx_underflow_irq),
+
+       /* Bus access errors */
+       SXGBE_STAT(fatal_bus_error_irq),
+       SXGBE_STAT(tx_read_transfer_err),
+       SXGBE_STAT(tx_write_transfer_err),
+       SXGBE_STAT(tx_desc_access_err),
+       SXGBE_STAT(tx_buffer_access_err),
+       SXGBE_STAT(tx_data_transfer_err),
+       SXGBE_STAT(rx_read_transfer_err),
+       SXGBE_STAT(rx_write_transfer_err),
+       SXGBE_STAT(rx_desc_access_err),
+       SXGBE_STAT(rx_buffer_access_err),
+       SXGBE_STAT(rx_data_transfer_err),
+
+       /* EEE-LPI stats */
+       SXGBE_STAT(tx_lpi_entry_n),
+       SXGBE_STAT(tx_lpi_exit_n),
+       SXGBE_STAT(rx_lpi_entry_n),
+       SXGBE_STAT(rx_lpi_exit_n),
+       SXGBE_STAT(eee_wakeup_error_n),
+
+       /* RX specific */
+       /* L2 error */
+       SXGBE_STAT(rx_code_gmii_err),
+       SXGBE_STAT(rx_watchdog_err),
+       SXGBE_STAT(rx_crc_err),
+       SXGBE_STAT(rx_gaint_pkt_err),
+       SXGBE_STAT(ip_hdr_err),
+       SXGBE_STAT(ip_payload_err),
+       SXGBE_STAT(overflow_error),
+
+       /* L2 Pkt type */
+       SXGBE_STAT(len_pkt),
+       SXGBE_STAT(mac_ctl_pkt),
+       SXGBE_STAT(dcb_ctl_pkt),
+       SXGBE_STAT(arp_pkt),
+       SXGBE_STAT(oam_pkt),
+       SXGBE_STAT(untag_okt),
+       SXGBE_STAT(other_pkt),
+       SXGBE_STAT(svlan_tag_pkt),
+       SXGBE_STAT(cvlan_tag_pkt),
+       SXGBE_STAT(dvlan_ocvlan_icvlan_pkt),
+       SXGBE_STAT(dvlan_osvlan_isvlan_pkt),
+       SXGBE_STAT(dvlan_osvlan_icvlan_pkt),
+       SXGBE_STAT(dvan_ocvlan_icvlan_pkt),
+
+       /* L3/L4 Pkt type */
+       SXGBE_STAT(not_ip_pkt),
+       SXGBE_STAT(ip4_tcp_pkt),
+       SXGBE_STAT(ip4_udp_pkt),
+       SXGBE_STAT(ip4_icmp_pkt),
+       SXGBE_STAT(ip4_unknown_pkt),
+       SXGBE_STAT(ip6_tcp_pkt),
+       SXGBE_STAT(ip6_udp_pkt),
+       SXGBE_STAT(ip6_icmp_pkt),
+       SXGBE_STAT(ip6_unknown_pkt),
+
+       /* Filter specific */
+       SXGBE_STAT(vlan_filter_match),
+       SXGBE_STAT(sa_filter_fail),
+       SXGBE_STAT(da_filter_fail),
+       SXGBE_STAT(hash_filter_pass),
+       SXGBE_STAT(l3_filter_match),
+       SXGBE_STAT(l4_filter_match),
+
+       /* RX context specific */
+       SXGBE_STAT(timestamp_dropped),
+       SXGBE_STAT(rx_msg_type_no_ptp),
+       SXGBE_STAT(rx_ptp_type_sync),
+       SXGBE_STAT(rx_ptp_type_follow_up),
+       SXGBE_STAT(rx_ptp_type_delay_req),
+       SXGBE_STAT(rx_ptp_type_delay_resp),
+       SXGBE_STAT(rx_ptp_type_pdelay_req),
+       SXGBE_STAT(rx_ptp_type_pdelay_resp),
+       SXGBE_STAT(rx_ptp_type_pdelay_follow_up),
+       SXGBE_STAT(rx_ptp_announce),
+       SXGBE_STAT(rx_ptp_mgmt),
+       SXGBE_STAT(rx_ptp_signal),
+       SXGBE_STAT(rx_ptp_resv_msg_type),
+};
+#define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats)
+
+static int sxgbe_get_eee(struct net_device *dev,
+                        struct ethtool_eee *edata)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       if (!priv->hw_cap.eee)
+               return -EOPNOTSUPP;
+
+       edata->eee_enabled = priv->eee_enabled;
+       edata->eee_active = priv->eee_active;
+       edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+       return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int sxgbe_set_eee(struct net_device *dev,
+                        struct ethtool_eee *edata)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       priv->eee_enabled = edata->eee_enabled;
+
+       if (!priv->eee_enabled) {
+               sxgbe_disable_eee_mode(priv);
+       } else {
+               /* We are asking to enable EEE, but it is safe to verify
+                * this by invoking the eee_init function, which returns
+                * false on failure.
+                */
+               priv->eee_enabled = sxgbe_eee_init(priv);
+               if (!priv->eee_enabled)
+                       return -EOPNOTSUPP;
+
+               /* Do not change tx_lpi_timer in case of failure */
+               priv->tx_lpi_timer = edata->tx_lpi_timer;
+       }
+
+       return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
+static void sxgbe_getdrvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static int sxgbe_getsettings(struct net_device *dev,
+                            struct ethtool_cmd *cmd)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       if (priv->phydev)
+               return phy_ethtool_gset(priv->phydev, cmd);
+
+       return -EOPNOTSUPP;
+}
+
+static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       if (priv->phydev)
+               return phy_ethtool_sset(priv->phydev, cmd);
+
+       return -EOPNOTSUPP;
+}
+
+static u32 sxgbe_getmsglevel(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       return priv->msg_enable;
+}
+
+static void sxgbe_setmsglevel(struct net_device *dev, u32 level)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       priv->msg_enable = level;
+}
+
+static void sxgbe_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       int i;
+       u8 *p = data;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < SXGBE_STATS_LEN; i++) {
+                       memcpy(p, sxgbe_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+
+static int sxgbe_get_sset_count(struct net_device *netdev, int sset)
+{
+       int len;
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               len = SXGBE_STATS_LEN;
+               return len;
+       default:
+               return -EINVAL;
+       }
+}
+
+static void sxgbe_get_ethtool_stats(struct net_device *dev,
+                                   struct ethtool_stats *dummy, u64 *data)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       int i;
+       char *p;
+
+       if (priv->eee_enabled) {
+               int val = phy_get_eee_err(priv->phydev);
+
+               if (val)
+                       priv->xstats.eee_wakeup_error_n = val;
+       }
+
+       for (i = 0; i < SXGBE_STATS_LEN; i++) {
+               p = (char *)priv + sxgbe_gstrings_stats[i].stat_offset;
+               data[i] = (sxgbe_gstrings_stats[i].sizeof_stat == sizeof(u64))
+                       ? (*(u64 *)p) : (*(u32 *)p);
+       }
+}
+
+static void sxgbe_get_channels(struct net_device *dev,
+                              struct ethtool_channels *channel)
+{
+       channel->max_rx = SXGBE_MAX_RX_CHANNELS;
+       channel->max_tx = SXGBE_MAX_TX_CHANNELS;
+       channel->rx_count = SXGBE_RX_QUEUES;
+       channel->tx_count = SXGBE_TX_QUEUES;
+}
+
+static u32 sxgbe_riwt2usec(u32 riwt, struct sxgbe_priv_data *priv)
+{
+       unsigned long clk = clk_get_rate(priv->sxgbe_clk);
+
+       if (!clk)
+               return 0;
+
+       return (riwt * 256) / (clk / 1000000);
+}
+
+static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv)
+{
+       unsigned long clk = clk_get_rate(priv->sxgbe_clk);
+
+       if (!clk)
+               return 0;
+
+       return (usec * (clk / 1000000)) / 256;
+}
+
+static int sxgbe_get_coalesce(struct net_device *dev,
+                             struct ethtool_coalesce *ec)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       if (priv->use_riwt)
+               ec->rx_coalesce_usecs = sxgbe_riwt2usec(priv->rx_riwt, priv);
+
+       return 0;
+}
+
+static int sxgbe_set_coalesce(struct net_device *dev,
+                             struct ethtool_coalesce *ec)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       unsigned int rx_riwt;
+
+       if (!ec->rx_coalesce_usecs)
+               return -EINVAL;
+
+       rx_riwt = sxgbe_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+       if ((rx_riwt > SXGBE_MAX_DMA_RIWT) || (rx_riwt < SXGBE_MIN_DMA_RIWT))
+               return -EINVAL;
+       else if (!priv->use_riwt)
+               return -EOPNOTSUPP;
+
+       priv->rx_riwt = rx_riwt;
+       priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+       return 0;
+}
+
+static int sxgbe_get_rss_hash_opts(struct sxgbe_priv_data *priv,
+                                  struct ethtool_rxnfc *cmd)
+{
+       cmd->data = 0;
+
+       /* Report default options for RSS on sxgbe */
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
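+               /* fall through - L4 flows also hash on the IP addresses */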
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case IPV4_FLOW:
+               cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+               break;
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
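+               /* fall through - L4 flows also hash on the IP addresses */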
+       case SCTP_V6_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case IPV6_FLOW:
+               cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int sxgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                          u32 *rule_locs)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXFH:
+               ret = sxgbe_get_rss_hash_opts(priv, cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static int sxgbe_set_rss_hash_opt(struct sxgbe_priv_data *priv,
+                                 struct ethtool_rxnfc *cmd)
+{
+       u32 reg_val = 0;
+
+       /* RSS does not support anything other than hashing
+        * to queues on src and dst IPs and ports
+        */
+       if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST |
+                         RXH_L4_B_0_1 | RXH_L4_B_2_3))
+               return -EINVAL;
+
+       switch (cmd->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               if (!(cmd->data & RXH_IP_SRC) ||
+                   !(cmd->data & RXH_IP_DST) ||
+                   !(cmd->data & RXH_L4_B_0_1) ||
+                   !(cmd->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               reg_val = SXGBE_CORE_RSS_CTL_TCP4TE;
+               break;
+       case UDP_V4_FLOW:
+       case UDP_V6_FLOW:
+               if (!(cmd->data & RXH_IP_SRC) ||
+                   !(cmd->data & RXH_IP_DST) ||
+                   !(cmd->data & RXH_L4_B_0_1) ||
+                   !(cmd->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               reg_val = SXGBE_CORE_RSS_CTL_UDP4TE;
+               break;
+       case SCTP_V4_FLOW:
+       case AH_ESP_V4_FLOW:
+       case AH_V4_FLOW:
+       case ESP_V4_FLOW:
+       case AH_ESP_V6_FLOW:
+       case AH_V6_FLOW:
+       case ESP_V6_FLOW:
+       case SCTP_V6_FLOW:
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               if (!(cmd->data & RXH_IP_SRC) ||
+                   !(cmd->data & RXH_IP_DST) ||
+                   (cmd->data & RXH_L4_B_0_1) ||
+                   (cmd->data & RXH_L4_B_2_3))
+                       return -EINVAL;
+               reg_val = SXGBE_CORE_RSS_CTL_IP2TE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Read SXGBE RSS control register and update */
+       reg_val |= readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+       writel(reg_val, priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
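+       /* read back, presumably to flush the posted write */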
+       readl(priv->ioaddr + SXGBE_CORE_RSS_CTL_REG);
+
+       return 0;
+}
+
+static int sxgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_SRXFH:
+               ret = sxgbe_set_rss_hash_opt(priv, cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static void sxgbe_get_regs(struct net_device *dev,
+                          struct ethtool_regs *regs, void *space)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       u32 *reg_space = (u32 *)space;
+       int reg_offset;
+       int reg_ix = 0;
+       void __iomem *ioaddr = priv->ioaddr;
+
+       memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+       /* MAC registers */
+       for (reg_offset = START_MAC_REG_OFFSET;
+            reg_offset <= MAX_MAC_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = readl(ioaddr + reg_offset);
+               reg_ix++;
+       }
+
+       /* MTL registers */
+       for (reg_offset = START_MTL_REG_OFFSET;
+            reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = readl(ioaddr + reg_offset);
+               reg_ix++;
+       }
+
+       /* DMA registers */
+       for (reg_offset = START_DMA_REG_OFFSET;
+            reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = readl(ioaddr + reg_offset);
+               reg_ix++;
+       }
+
+       BUG_ON(reg_ix * 4 > REG_SPACE_SIZE);
+}
+
+static int sxgbe_get_regs_len(struct net_device *dev)
+{
+       return REG_SPACE_SIZE;
+}
+
+static const struct ethtool_ops sxgbe_ethtool_ops = {
+       .get_drvinfo = sxgbe_getdrvinfo,
+       .get_settings = sxgbe_getsettings,
+       .set_settings = sxgbe_setsettings,
+       .get_msglevel = sxgbe_getmsglevel,
+       .set_msglevel = sxgbe_setmsglevel,
+       .get_link = ethtool_op_get_link,
+       .get_strings = sxgbe_get_strings,
+       .get_ethtool_stats = sxgbe_get_ethtool_stats,
+       .get_sset_count = sxgbe_get_sset_count,
+       .get_channels = sxgbe_get_channels,
+       .get_coalesce = sxgbe_get_coalesce,
+       .set_coalesce = sxgbe_set_coalesce,
+       .get_rxnfc = sxgbe_get_rxnfc,
+       .set_rxnfc = sxgbe_set_rxnfc,
+       .get_regs = sxgbe_get_regs,
+       .get_regs_len = sxgbe_get_regs_len,
+       .get_eee = sxgbe_get_eee,
+       .set_eee = sxgbe_set_eee,
+};
+
+void sxgbe_set_ethtool_ops(struct net_device *netdev)
+{
+       SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
new file mode 100644 (file)
index 0000000..a72688e
--- /dev/null
@@ -0,0 +1,2317 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_desc.h"
+#include "sxgbe_dma.h"
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN      9000
+
+/* Module parameters */
+#define TX_TIMEO       5000
+#define DMA_TX_SIZE    512
+#define DMA_RX_SIZE    1024
+#define TC_DEFAULT     64
+#define DMA_BUFFER_SIZE        BUF_SIZE_2KiB
+/* The default timer value is 1 sec (1000 ms), as per the sxgbe specification */
+#define SXGBE_DEFAULT_LPI_TIMER        1000
+
+static int debug = -1;
+static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+                                     NETIF_MSG_LINK | NETIF_MSG_IFUP |
+                                     NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);
+
+#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
+/**
+ * sxgbe_verify_args - verify the driver parameters.
+ * Description: it verifies if some wrong parameter is passed to the driver.
+ * Note that wrong parameters are replaced with the default values.
+ */
+static void sxgbe_verify_args(void)
+{
+       if (unlikely(eee_timer < 0))
+               eee_timer = SXGBE_DEFAULT_LPI_TIMER;
+}
+
+static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
+{
+       /* Check and enter in LPI mode */
+       if (!priv->tx_path_in_lpi_mode)
+               priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
+{
+       /* Exit and disable EEE in case we are in LPI state. */
+       priv->hw->mac->reset_eee_mode(priv->ioaddr);
+       del_timer_sync(&priv->eee_ctrl_timer);
+       priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * sxgbe_eee_ctrl_timer
+ * @arg : data hook
+ * Description:
+ *  If there is no data transfer and if we are not in LPI state,
+ *  then the MAC transmitter can be moved to the LPI state.
+ */
+static void sxgbe_eee_ctrl_timer(unsigned long arg)
+{
+       struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;
+
+       sxgbe_enable_eee_mode(priv);
+       mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+}
+
+/**
+ * sxgbe_eee_init
+ * @priv: private device pointer
+ * Description:
+ *  If EEE support has been enabled while configuring the driver, and the
+ *  GMAC actually supports EEE (from the HW cap reg) and the PHY can also
+ *  manage EEE, then enable the LPI state and start the timer to verify
+ *  whether the TX path can enter the LPI state.
+ */
+bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
+{
+       bool ret = false;
+
+       /* MAC core supports the EEE feature. */
+       if (priv->hw_cap.eee) {
+               /* Check if the PHY supports EEE */
+               if (phy_init_eee(priv->phydev, 1))
+                       return false;
+
+               priv->eee_active = 1;
+               init_timer(&priv->eee_ctrl_timer);
+               priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
+               priv->eee_ctrl_timer.data = (unsigned long)priv;
+               priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
+               add_timer(&priv->eee_ctrl_timer);
+
+               priv->hw->mac->set_eee_timer(priv->ioaddr,
+                                            SXGBE_DEFAULT_LPI_TIMER,
+                                            priv->tx_lpi_timer);
+
+               pr_info("Energy-Efficient Ethernet initialized\n");
+
+               ret = true;
+       }
+
+       return ret;
+}
+
+static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
+{
+       /* When EEE has already been initialised, we have to modify the
+        * PLS bit in the LPI ctrl & status reg according to the PHY
+        * link status.
+        */
+       if (priv->eee_enabled)
+               priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
+/**
+ * sxgbe_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ */
+static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
+{
+       u32 clk_rate = clk_get_rate(priv->sxgbe_clk);
+
+       /* assign the proper divider; this will be used during
+        * MDIO communication
+        */
+       if (clk_rate < SXGBE_CSR_F_150M)
+               priv->clk_csr = SXGBE_CSR_100_150M;
+       else if (clk_rate <= SXGBE_CSR_F_250M)
+               priv->clk_csr = SXGBE_CSR_150_250M;
+       else if (clk_rate <= SXGBE_CSR_F_300M)
+               priv->clk_csr = SXGBE_CSR_250_300M;
+       else if (clk_rate <= SXGBE_CSR_F_350M)
+               priv->clk_csr = SXGBE_CSR_300_350M;
+       else if (clk_rate <= SXGBE_CSR_F_400M)
+               priv->clk_csr = SXGBE_CSR_350_400M;
+       else if (clk_rate <= SXGBE_CSR_F_500M)
+               priv->clk_csr = SXGBE_CSR_400_500M;
+}
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define SXGBE_TX_THRESH(x)     (x->dma_tx_size/4)
+
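+/* Number of free TX descriptors; one slot is kept unused (hence the "- 1")
+ * so that a completely full ring can be told apart from an empty one.
+ */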
+static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
+{
+       return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
+}
+
+/**
+ * sxgbe_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ */
+static void sxgbe_adjust_link(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       struct phy_device *phydev = priv->phydev;
+       u8 new_state = 0;
+       u8 speed = 0xff;
+
+       if (!phydev)
+               return;
+
+       /* SXGBE does not support auto-negotiation or half-duplex mode,
+        * so duplex changes are not handled here; only speed and link
+        * status are.
+        */
+       if (phydev->link) {
+               if (phydev->speed != priv->speed) {
+                       new_state = 1;
+                       switch (phydev->speed) {
+                       case SPEED_10000:
+                               speed = SXGBE_SPEED_10G;
+                               break;
+                       case SPEED_2500:
+                               speed = SXGBE_SPEED_2_5G;
+                               break;
+                       case SPEED_1000:
+                               speed = SXGBE_SPEED_1G;
+                               break;
+                       default:
+                               netif_err(priv, link, dev,
+                                         "Speed (%d) not supported\n",
+                                         phydev->speed);
+                       }
+
+                       priv->speed = phydev->speed;
+                       priv->hw->mac->set_speed(priv->ioaddr, speed);
+               }
+
+               if (!priv->oldlink) {
+                       new_state = 1;
+                       priv->oldlink = 1;
+               }
+       } else if (priv->oldlink) {
+               new_state = 1;
+               priv->oldlink = 0;
+               priv->speed = SPEED_UNKNOWN;
+       }
+
+       if (new_state && netif_msg_link(priv))
+               phy_print_status(phydev);
+
+       /* Alter the MAC settings for EEE */
+       sxgbe_eee_adjust(priv);
+}
+
+/**
+ * sxgbe_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ *  Return value:
+ *  0 on success
+ */
+static int sxgbe_init_phy(struct net_device *ndev)
+{
+       char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+       char bus_id[MII_BUS_ID_SIZE];
+       struct phy_device *phydev;
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+       int phy_iface = priv->plat->interface;
+
+       /* assign default link status */
+       priv->oldlink = 0;
+       priv->speed = SPEED_UNKNOWN;
+       priv->oldduplex = DUPLEX_UNKNOWN;
+
+       if (priv->plat->phy_bus_name)
+               snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+                        priv->plat->phy_bus_name, priv->plat->bus_id);
+       else
+               snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
+                        priv->plat->bus_id);
+
+       snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+                priv->plat->phy_addr);
+       netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);
+
+       phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);
+
+       if (IS_ERR(phydev)) {
+               netdev_err(ndev, "Could not attach to PHY\n");
+               return PTR_ERR(phydev);
+       }
+
+       /* Stop Advertising 1000BASE Capability if interface is not GMII */
+       if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
+           (phy_iface == PHY_INTERFACE_MODE_RMII))
+               phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+                                        SUPPORTED_1000baseT_Full);
+       if (phydev->phy_id == 0) {
+               phy_disconnect(phydev);
+               return -ENODEV;
+       }
+
+       netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
+                  __func__, phydev->phy_id, phydev->link);
+
+       /* save phy device in private structure */
+       priv->phydev = phydev;
+
+       return 0;
+}
+
+/**
+ * sxgbe_clear_descriptors: clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the tx and rx descriptors
+ * in case of both basic and extended descriptors are used.
+ */
+static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
+{
+       int i, j;
+       unsigned int txsize = priv->dma_tx_size;
+       unsigned int rxsize = priv->dma_rx_size;
+
+       /* Clear the Rx/Tx descriptors */
+       for (j = 0; j < SXGBE_RX_QUEUES; j++) {
+               for (i = 0; i < rxsize; i++)
+                       priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
+                                                    priv->use_riwt, priv->mode,
+                                                    (i == rxsize - 1));
+       }
+
+       for (j = 0; j < SXGBE_TX_QUEUES; j++) {
+               for (i = 0; i < txsize; i++)
+                       priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
+       }
+}
+
+static int sxgbe_init_rx_buffers(struct net_device *dev,
+                                struct sxgbe_rx_norm_desc *p, int i,
+                                unsigned int dma_buf_sz,
+                                struct sxgbe_rx_queue *rx_ring)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       struct sk_buff *skb;
+
+       skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       rx_ring->rx_skbuff[i] = skb;
+       rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+                                                  dma_buf_sz, DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
+               netdev_err(dev, "%s: DMA mapping error\n", __func__);
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+
+       p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];
+
+       return 0;
+}
+/**
+ * init_tx_ring - init the TX descriptor ring
+ * @dev: device structure
+ * @queue_no: queue number
+ * @tx_ring: ring to be initialised
+ * @tx_rsize: ring size
+ * Description: this function initializes the DMA TX descriptor ring
+ */
+static int init_tx_ring(struct device *dev, u8 queue_no,
+                       struct sxgbe_tx_queue *tx_ring, int tx_rsize)
+{
+       /* TX ring is not allocated */
+       if (!tx_ring) {
+               dev_err(dev, "No memory for TX queue of SXGBE\n");
+               return -ENOMEM;
+       }
+
+       /* allocate memory for TX descriptors */
+       tx_ring->dma_tx = dma_zalloc_coherent(dev,
+                                             tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+                                             &tx_ring->dma_tx_phy, GFP_KERNEL);
+       if (!tx_ring->dma_tx)
+               return -ENOMEM;
+
+       /* allocate memory for TX skbuff array */
+       tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
+                                             sizeof(dma_addr_t), GFP_KERNEL);
+       if (!tx_ring->tx_skbuff_dma)
+               goto dmamem_err;
+
+       tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
+                                         sizeof(struct sk_buff *), GFP_KERNEL);
+
+       if (!tx_ring->tx_skbuff)
+               goto dmamem_err;
+
+       /* assign queue number */
+       tx_ring->queue_no = queue_no;
+
+       /* initialise counters */
+       tx_ring->dirty_tx = 0;
+       tx_ring->cur_tx = 0;
+
+       /* initialise TX queue lock */
+       spin_lock_init(&tx_ring->tx_lock);
+
+       return 0;
+
+dmamem_err:
+       dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+                         tx_ring->dma_tx, tx_ring->dma_tx_phy);
+       return -ENOMEM;
+}
+
+/**
+ * free_rx_ring - free the RX descriptor ring
+ * @dev: device structure
+ * @rx_ring: ring to be freed
+ * @rx_rsize: ring size
+ * Description: this function frees the DMA RX descriptor ring
+ */
+void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+                 int rx_rsize)
+{
+       dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+                         rx_ring->dma_rx, rx_ring->dma_rx_phy);
+       kfree(rx_ring->rx_skbuff_dma);
+       kfree(rx_ring->rx_skbuff);
+}
+
+/**
+ * init_rx_ring - init the RX descriptor ring
+ * @dev: net device structure
+ * @queue_no: queue number
+ * @rx_ring: ring to be initialised
+ * @rx_rsize: ring size
+ * Description: this function initializes the DMA RX descriptor ring
+ */
+static int init_rx_ring(struct net_device *dev, u8 queue_no,
+                       struct sxgbe_rx_queue *rx_ring, int rx_rsize)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       int desc_index;
+       unsigned int bfsize = 0;
+       unsigned int ret = 0;
+
+       /* Set the max buffer size according to the MTU. */
+       bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
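+       /* e.g. with the default 1500-byte MTU this is 1500 + 14 (ETH_HLEN) +
+        * 4 (ETH_FCS_LEN) + 2 (NET_IP_ALIGN, typically) = 1520, which is
+        * already a multiple of 8.
+        */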
+
+       netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);
+
+       /* RX ring is not allocated */
+       if (rx_ring == NULL) {
+               netdev_err(dev, "No memory for RX queue\n");
+               goto error;
+       }
+
+       /* assign queue number */
+       rx_ring->queue_no = queue_no;
+
+       /* allocate memory for RX descriptors */
+       rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
+                                             rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+                                             &rx_ring->dma_rx_phy, GFP_KERNEL);
+
+       if (rx_ring->dma_rx == NULL)
+               goto error;
+
+       /* allocate memory for RX skbuff array */
+       rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
+                                              sizeof(dma_addr_t), GFP_KERNEL);
+       if (rx_ring->rx_skbuff_dma == NULL)
+               goto dmamem_err;
+
+       rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
+                                          sizeof(struct sk_buff *), GFP_KERNEL);
+       if (rx_ring->rx_skbuff == NULL)
+               goto rxbuff_err;
+
+       /* initialise the buffers */
+       for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
+               struct sxgbe_rx_norm_desc *p;
+               p = rx_ring->dma_rx + desc_index;
+               ret = sxgbe_init_rx_buffers(dev, p, desc_index,
+                                           bfsize, rx_ring);
+               if (ret)
+                       goto err_init_rx_buffers;
+       }
+
+       /* initialise counters */
+       rx_ring->cur_rx = 0;
+       rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
+       priv->dma_buf_sz = bfsize;
+
+       return 0;
+
+err_init_rx_buffers:
+       while (--desc_index >= 0)
+               free_rx_ring(priv->device, rx_ring, desc_index);
+       kfree(rx_ring->rx_skbuff);
+rxbuff_err:
+       kfree(rx_ring->rx_skbuff_dma);
+dmamem_err:
+       dma_free_coherent(priv->device,
+                         rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+                         rx_ring->dma_rx, rx_ring->dma_rx_phy);
+error:
+       return -ENOMEM;
+}
+/**
+ * free_tx_ring - free the TX descriptor ring
+ * @dev: device structure
+ * @tx_ring: ring to be freed
+ * @tx_rsize: ring size
+ * Description: this function frees the DMA TX descriptor ring
+ */
+void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+                 int tx_rsize)
+{
+       dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+                         tx_ring->dma_tx, tx_ring->dma_tx_phy);
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @netd: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *netd)
+{
+       int queue_num, ret;
+       struct sxgbe_priv_data *priv = netdev_priv(netd);
+       int tx_rsize = priv->dma_tx_size;
+       int rx_rsize = priv->dma_rx_size;
+
+       /* Allocate memory for queue structures and TX descs */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               ret = init_tx_ring(priv->device, queue_num,
+                                  priv->txq[queue_num], tx_rsize);
+               if (ret) {
+                       dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
+                       goto txalloc_err;
+               }
+
+               /* save private pointer in each ring; this
+                * pointer is needed when cleaning the TX queue
+                */
+               priv->txq[queue_num]->priv_ptr = priv;
+       }
+
+       /* Allocate memory for queue structures and RX descs */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               ret = init_rx_ring(netd, queue_num,
+                                  priv->rxq[queue_num], rx_rsize);
+               if (ret) {
+                       netdev_err(netd, "RX DMA ring allocation failed!!\n");
+                       goto rxalloc_err;
+               }
+
+               /* save private pointer in each ring; this
+                * pointer is needed when servicing the RX queue
+                */
+               priv->rxq[queue_num]->priv_ptr = priv;
+       }
+
+       sxgbe_clear_descriptors(priv);
+
+       return 0;
+
+rxalloc_err:
+       while (queue_num--)
+               free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
+
+       /* also unwind the TX rings that were set up before the RX failure */
+       queue_num = SXGBE_TX_QUEUES;
+
+txalloc_err:
+       while (queue_num--)
+               free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
+       return ret;
+}
+
+static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
+{
+       int dma_desc;
+       struct sxgbe_priv_data *priv = txqueue->priv_ptr;
+       int tx_rsize = priv->dma_tx_size;
+
+       for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
+               struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;
+
+               if (txqueue->tx_skbuff_dma[dma_desc])
+                       dma_unmap_single(priv->device,
+                                        txqueue->tx_skbuff_dma[dma_desc],
+                                        priv->hw->desc->get_tx_len(tdesc),
+                                        DMA_TO_DEVICE);
+
+               dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
+               txqueue->tx_skbuff[dma_desc] = NULL;
+               txqueue->tx_skbuff_dma[dma_desc] = 0;
+       }
+}
+
+
+static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
+               tx_free_ring_skbufs(tqueue);
+       }
+}
+
+static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+       int tx_rsize = priv->dma_tx_size;
+       int rx_rsize = priv->dma_rx_size;
+
+       /* Release the DMA TX buffers */
+       dma_free_tx_skbufs(priv);
+
+       /* Release the TX ring memory also */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
+       }
+
+       /* Release the RX ring memory also */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
+       }
+}
+
+static int txring_mem_alloc(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               priv->txq[queue_num] = devm_kmalloc(priv->device,
+                                                   sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
+               if (!priv->txq[queue_num])
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               priv->rxq[queue_num] = devm_kmalloc(priv->device,
+                                                   sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
+               if (!priv->rxq[queue_num])
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
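+
+/* Note: the TX/RX queue structures above are allocated with devm_kmalloc(),
+ * so their lifetime is tied to priv->device and they are released
+ * automatically when the device is unbound; no explicit kfree() is needed
+ * in the teardown paths.
+ */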
+
+/**
+ *  sxgbe_mtl_operation_mode - HW MTL operation mode
+ *  @priv: driver private structure
+ *  Description: it sets the MTL operation mode: tx/rx MTL thresholds
+ *  or Store-And-Forward capability.
+ */
+static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       /* TX/RX threshold control */
+       if (likely(priv->plat->force_sf_dma_mode)) {
+               /* set TC mode for TX QUEUES */
+               SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
+                       priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
+                                                      SXGBE_MTL_SFMODE);
+               priv->tx_tc = SXGBE_MTL_SFMODE;
+
+               /* set TC mode for RX QUEUES */
+               SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
+                       priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
+                                                      SXGBE_MTL_SFMODE);
+               priv->rx_tc = SXGBE_MTL_SFMODE;
+       } else if (unlikely(priv->plat->force_thresh_dma_mode)) {
+               /* set TC mode for TX QUEUES */
+               SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
+                       priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
+                                                      priv->tx_tc);
+               /* set TC mode for RX QUEUES */
+               SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
+                       priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
+                                                      priv->rx_tc);
+       } else {
+               pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
+       }
+}
+
+/**
+ * sxgbe_tx_queue_clean:
+ * @tqueue: TX queue being cleaned
+ * Description: it reclaims resources after transmission completes.
+ */
+static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
+{
+       struct sxgbe_priv_data *priv = tqueue->priv_ptr;
+       unsigned int tx_rsize = priv->dma_tx_size;
+       struct netdev_queue *dev_txq;
+       u8 queue_no = tqueue->queue_no;
+
+       dev_txq = netdev_get_tx_queue(priv->dev, queue_no);
+
+       spin_lock(&tqueue->tx_lock);
+
+       priv->xstats.tx_clean++;
+       while (tqueue->dirty_tx != tqueue->cur_tx) {
+               unsigned int entry = tqueue->dirty_tx % tx_rsize;
+               struct sk_buff *skb = tqueue->tx_skbuff[entry];
+               struct sxgbe_tx_norm_desc *p;
+
+               p = tqueue->dma_tx + entry;
+
+               /* Check if the descriptor is owned by the DMA. */
+               if (priv->hw->desc->get_tx_owner(p))
+                       break;
+
+               if (netif_msg_tx_done(priv))
+                       pr_debug("%s: curr %d, dirty %d\n",
+                                __func__, tqueue->cur_tx, tqueue->dirty_tx);
+
+               if (likely(tqueue->tx_skbuff_dma[entry])) {
+                       dma_unmap_single(priv->device,
+                                        tqueue->tx_skbuff_dma[entry],
+                                        priv->hw->desc->get_tx_len(p),
+                                        DMA_TO_DEVICE);
+                       tqueue->tx_skbuff_dma[entry] = 0;
+               }
+
+               if (likely(skb)) {
+                       dev_kfree_skb(skb);
+                       tqueue->tx_skbuff[entry] = NULL;
+               }
+
+               priv->hw->desc->release_tx_desc(p);
+
+               tqueue->dirty_tx++;
+       }
+
+       /* wake up queue */
+       if (unlikely(netif_tx_queue_stopped(dev_txq) &&
+                    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
+               netif_tx_lock(priv->dev);
+               if (netif_tx_queue_stopped(dev_txq) &&
+                   sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
+                       if (netif_msg_tx_done(priv))
+                               pr_debug("%s: restart transmit\n", __func__);
+                       netif_tx_wake_queue(dev_txq);
+               }
+               netif_tx_unlock(priv->dev);
+       }
+
+       spin_unlock(&tqueue->tx_lock);
+}
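+
+/* Illustrative sketch only (not part of the original driver): cur_tx and
+ * dirty_tx above are free-running counters that are reduced modulo the
+ * ring size when used as indices, so the number of free descriptors can
+ * be derived as below. The driver's sxgbe_tx_avail() helper (defined
+ * elsewhere) is assumed to compute something along these lines.
+ */
+static inline unsigned int example_tx_ring_avail(unsigned int cur_tx,
+                                                 unsigned int dirty_tx,
+                                                 unsigned int ring_size)
+{
+       /* descriptors currently in use by pending transmissions */
+       unsigned int used = cur_tx - dirty_tx;
+
+       /* keep one slot unused so a full ring can be told apart from empty */
+       return ring_size - used - 1;
+}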
+
+/**
+ * sxgbe_tx_all_clean:
+ * @priv: driver private structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
+{
+       u8 queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
+
+               sxgbe_tx_queue_clean(tqueue);
+       }
+
+       if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+               sxgbe_enable_eee_mode(priv);
+               mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
+       }
+}
+
+/**
+ * sxgbe_restart_tx_queue: irq tx error management function
+ * @priv: driver private structure
+ * @queue_num: queue number to restart
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
+{
+       struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
+       struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
+                                                          queue_num);
+
+       /* stop the queue */
+       netif_tx_stop_queue(dev_txq);
+
+       /* stop the tx dma */
+       priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);
+
+       /* free the skbuffs of the ring */
+       tx_free_ring_skbufs(tx_ring);
+
+       /* initialise counters */
+       tx_ring->cur_tx = 0;
+       tx_ring->dirty_tx = 0;
+
+       /* start the tx dma */
+       priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);
+
+       priv->dev->stats.tx_errors++;
+
+       /* wakeup the queue */
+       netif_tx_wake_queue(dev_txq);
+}
+
+/**
+ * sxgbe_reset_all_tx_queues: irq tx error management function
+ * @priv: driver private structure
+ * Description: it cleans all the descriptors and
+ * restarts the transmission on all queues in case of errors.
+ */
+static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       /* On TX timeout of net device, resetting of all queues
+        * may not be proper way, revisit this later if needed
+        */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+               sxgbe_restart_tx_queue(priv, queue_num);
+}
+
+/**
+ * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ *  the hardware capability registers indicate which optional
+ *  features/functions are present in the controller, so the driver
+ *  only enables what the hardware actually supports.
+ */
+static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
+{
+       int rval = 0;
+       struct sxgbe_hw_features *features = &priv->hw_cap;
+
+       /* Read First Capability Register CAP[0] */
+       rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
+       if (rval) {
+               features->pmt_remote_wake_up =
+                       SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
+               features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
+               features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
+               features->tx_csum_offload =
+                       SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
+               features->rx_csum_offload =
+                       SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
+               features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
+               features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
+               features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
+               features->eee = SXGBE_HW_FEAT_EEE(rval);
+       }
+
+       /* Read First Capability Register CAP[1] */
+       rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
+       if (rval) {
+               features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
+               features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
+               features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
+               features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
+               features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
+               features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
+               features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
+               features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
+               features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
+               features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
+       }
+
+       /* Read First Capability Register CAP[2] */
+       rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
+       if (rval) {
+               features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
+               features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
+               features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
+               features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
+               features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
+               features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
+       }
+
+       return rval;
+}
+
+/**
+ * sxgbe_check_ether_addr: check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * Verify whether the MAC address is valid; if it is not, read it from
+ * the hardware, and if that is also invalid, generate a random one.
+ */
+static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
+{
+       if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+               priv->hw->mac->get_umac_addr((void __iomem *)
+                                            priv->ioaddr,
+                                            priv->dev->dev_addr, 0);
+               if (!is_valid_ether_addr(priv->dev->dev_addr))
+                       eth_hw_addr_random(priv->dev);
+       }
+       dev_info(priv->device, "device MAC address %pM\n",
+                priv->dev->dev_addr);
+}
+
+/**
+ * sxgbe_init_dma_engine: DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It initializes the DMA by invoking the specific SXGBE callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, sensible defaults are used instead.
+ */
+static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
+{
+       int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
+       int queue_num;
+
+       if (priv->plat->dma_cfg) {
+               pbl = priv->plat->dma_cfg->pbl;
+               fixed_burst = priv->plat->dma_cfg->fixed_burst;
+               burst_map = priv->plat->dma_cfg->burst_map;
+       }
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+               priv->hw->dma->cha_init(priv->ioaddr, queue_num,
+                                       fixed_burst, pbl,
+                                       (priv->txq[queue_num])->dma_tx_phy,
+                                       (priv->rxq[queue_num])->dma_rx_phy,
+                                       priv->dma_tx_size, priv->dma_rx_size);
+
+       return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
+}
+
+/**
+ * sxgbe_init_mtl_engine: MTL init.
+ * @priv: driver private structure
+ * Description:
+ * It initializes the MTL by invoking the specific SXGBE callback.
+ */
+static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
+                                                 priv->hw_cap.tx_mtl_qsize);
+               priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
+       }
+}
+
+/**
+ * sxgbe_disable_mtl_engine: MTL disable.
+ * @priv: driver private structure
+ * Description:
+ * It disables the MTL queues by invoking the specific SXGBE callback.
+ */
+static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
+{
+       int queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
+               priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
+}
+
+
+/**
+ * sxgbe_tx_timer: mitigation sw timer for tx.
+ * @data: data pointer
+ * Description:
+ * This is the timer handler that directly invokes sxgbe_tx_queue_clean.
+ */
+static void sxgbe_tx_timer(unsigned long data)
+{
+       struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
+       sxgbe_tx_queue_clean(p);
+}
+
+/**
+ * sxgbe_tx_init_coalesce: init tx mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the transmit coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
+{
+       u8 queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               struct sxgbe_tx_queue *p = priv->txq[queue_num];
+               p->tx_coal_frames =  SXGBE_TX_FRAMES;
+               p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
+               init_timer(&p->txtimer);
+               p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
+               p->txtimer.data = (unsigned long)p;
+               p->txtimer.function = sxgbe_tx_timer;
+               add_timer(&p->txtimer);
+       }
+}
+
+static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
+{
+       u8 queue_num;
+
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               struct sxgbe_tx_queue *p = priv->txq[queue_num];
+               del_timer_sync(&p->txtimer);
+       }
+}
+
+/**
+ *  sxgbe_open - open entry point of the driver
+ *  @dev : pointer to the device structure.
+ *  Description:
+ *  This function is the open entry point of the driver.
+ *  Return value:
+ *  0 on success and a negative errno value on failure.
+ */
+static int sxgbe_open(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       int ret, queue_num;
+
+       clk_prepare_enable(priv->sxgbe_clk);
+
+       sxgbe_check_ether_addr(priv);
+
+       /* Init the phy */
+       ret = sxgbe_init_phy(dev);
+       if (ret) {
+               netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
+                          __func__, ret);
+               goto phy_error;
+       }
+
+       /* Create and initialize the TX/RX descriptors chains. */
+       priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
+       priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
+       priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
+       priv->tx_tc = TC_DEFAULT;
+       priv->rx_tc = TC_DEFAULT;
+       init_dma_desc_rings(dev);
+
+       /* DMA initialization and SW reset */
+       ret = sxgbe_init_dma_engine(priv);
+       if (ret < 0) {
+               netdev_err(dev, "%s: DMA initialization failed\n", __func__);
+               goto init_error;
+       }
+
+       /*  MTL initialization */
+       sxgbe_init_mtl_engine(priv);
+
+       /* Copy the MAC addr into the HW  */
+       priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+       /* Initialize the MAC Core */
+       priv->hw->mac->core_init(priv->ioaddr);
+
+       /* Request the IRQ lines */
+       ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
+                              IRQF_SHARED, dev->name, dev);
+       if (unlikely(ret < 0)) {
+               netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+                          __func__, priv->irq, ret);
+               goto init_error;
+       }
+
+       /* If the LPI irq is different from the mac irq
+        * register a dedicated handler
+        */
+       if (priv->lpi_irq != dev->irq) {
+               ret = devm_request_irq(priv->device, priv->lpi_irq,
+                                      sxgbe_common_interrupt,
+                                      IRQF_SHARED, dev->name, dev);
+               if (unlikely(ret < 0)) {
+                       netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+                                  __func__, priv->lpi_irq, ret);
+                       goto init_error;
+               }
+       }
+
+       /* Request TX DMA irq lines */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+               ret = devm_request_irq(priv->device,
+                                      (priv->txq[queue_num])->irq_no,
+                                      sxgbe_tx_interrupt, 0,
+                                      dev->name, priv->txq[queue_num]);
+               if (unlikely(ret < 0)) {
+                       netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
+                                  __func__, (priv->txq[queue_num])->irq_no, ret);
+                       goto init_error;
+               }
+       }
+
+       /* Request RX DMA irq lines */
+       SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+               ret = devm_request_irq(priv->device,
+                                      (priv->rxq[queue_num])->irq_no,
+                                      sxgbe_rx_interrupt, 0,
+                                      dev->name, priv->rxq[queue_num]);
+               if (unlikely(ret < 0)) {
+                       netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
+                                  __func__, (priv->rxq[queue_num])->irq_no, ret);
+                       goto init_error;
+               }
+       }
+
+       /* Enable the MAC Rx/Tx */
+       priv->hw->mac->enable_tx(priv->ioaddr, true);
+       priv->hw->mac->enable_rx(priv->ioaddr, true);
+
+       /* Set the HW DMA mode and the COE */
+       sxgbe_mtl_operation_mode(priv);
+
+       /* Extra statistics */
+       memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));
+
+       priv->xstats.tx_threshold = priv->tx_tc;
+       priv->xstats.rx_threshold = priv->rx_tc;
+
+       /* Start the ball rolling... */
+       netdev_dbg(dev, "DMA RX/TX processes started...\n");
+       priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+       priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+
+       if (priv->phydev)
+               phy_start(priv->phydev);
+
+       /* initialise TX coalesce parameters */
+       sxgbe_tx_init_coalesce(priv);
+
+       if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+               priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
+               priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
+       }
+
+       priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
+       priv->eee_enabled = sxgbe_eee_init(priv);
+
+       napi_enable(&priv->napi);
+       netif_start_queue(dev);
+
+       return 0;
+
+init_error:
+       free_dma_desc_resources(priv);
+       if (priv->phydev)
+               phy_disconnect(priv->phydev);
+phy_error:
+       clk_disable_unprepare(priv->sxgbe_clk);
+
+       return ret;
+}
+
+/**
+ *  sxgbe_release - close entry point of the driver
+ *  @dev : device pointer.
+ *  Description:
+ *  This is the stop entry point of the driver.
+ */
+static int sxgbe_release(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       if (priv->eee_enabled)
+               del_timer_sync(&priv->eee_ctrl_timer);
+
+       /* Stop and disconnect the PHY */
+       if (priv->phydev) {
+               phy_stop(priv->phydev);
+               phy_disconnect(priv->phydev);
+               priv->phydev = NULL;
+       }
+
+       netif_tx_stop_all_queues(dev);
+
+       napi_disable(&priv->napi);
+
+       /* delete TX timers */
+       sxgbe_tx_del_timer(priv);
+
+       /* Stop TX/RX DMA and clear the descriptors */
+       priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+       priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+
+       /* disable MTL queue */
+       sxgbe_disable_mtl_engine(priv);
+
+       /* Release and free the Rx/Tx resources */
+       free_dma_desc_resources(priv);
+
+       /* Disable the MAC Rx/Tx */
+       priv->hw->mac->enable_tx(priv->ioaddr, false);
+       priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+       clk_disable_unprepare(priv->sxgbe_clk);
+
+       return 0;
+}
+
+/* Prepare first Tx descriptor for doing TSO operation */
+void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+                      struct sxgbe_tx_norm_desc *first_desc,
+                      struct sk_buff *skb)
+{
+       unsigned int total_hdr_len, tcp_hdr_len;
+
+       /* Write first Tx descriptor with appropriate value */
+       tcp_hdr_len = tcp_hdrlen(skb);
+       total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;
+
+       first_desc->tdes01 = dma_map_single(priv->device, skb->data,
+                                           total_hdr_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(priv->device, first_desc->tdes01))
+               pr_err("%s: TX dma mapping failed!!\n", __func__);
+
+       first_desc->tdes23.tx_rd_des23.first_desc = 1;
+       priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
+                                          tcp_hdr_len,
+                                          skb->len - total_hdr_len);
+}
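+
+/* Worked example (illustrative): for a TSO frame carrying TCP/IPv4 with no
+ * IP or TCP options, skb_transport_offset() is 14 (Ethernet) + 20 (IPv4)
+ * = 34 bytes and tcp_hdrlen() is 20 bytes, so total_hdr_len above is 54.
+ * Only those 54 header bytes are mapped by the first descriptor; the
+ * remaining payload (in the skb fragments) is mapped by the descriptors
+ * set up in sxgbe_xmit().
+ */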
+
+/**
+ *  sxgbe_xmit: Tx entry point of the driver
+ *  @skb : the socket buffer
+ *  @dev : device pointer
+ *  Description : this is the tx entry point of the driver.
+ *  It programs the chain or the ring and supports oversized frames
+ *  and the SG feature.
+ */
+static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       unsigned int entry, frag_num;
+       int cksum_flag = 0;
+       struct netdev_queue *dev_txq;
+       unsigned txq_index = skb_get_queue_mapping(skb);
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       unsigned int tx_rsize = priv->dma_tx_size;
+       struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
+       struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
+       struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       int no_pagedlen = skb_headlen(skb);
+       int is_jumbo = 0;
+       u16 cur_mss = skb_shinfo(skb)->gso_size;
+       u32 ctxt_desc_req = 0;
+
+       /* get the TX queue handle */
+       dev_txq = netdev_get_tx_queue(dev, txq_index);
+
+       if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
+               ctxt_desc_req = 1;
+
+       if (unlikely(vlan_tx_tag_present(skb) ||
+                    ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                     tqueue->hwts_tx_en)))
+               ctxt_desc_req = 1;
+
+       /* get the spinlock */
+       spin_lock(&tqueue->tx_lock);
+
+       if (priv->tx_path_in_lpi_mode)
+               sxgbe_disable_eee_mode(priv);
+
+       if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
+               if (!netif_tx_queue_stopped(dev_txq)) {
+                       netif_tx_stop_queue(dev_txq);
+                       netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
+                                  __func__, txq_index);
+               }
+               /* release the spin lock in case of BUSY */
+               spin_unlock(&tqueue->tx_lock);
+               return NETDEV_TX_BUSY;
+       }
+
+       entry = tqueue->cur_tx % tx_rsize;
+       tx_desc = tqueue->dma_tx + entry;
+
+       first_desc = tx_desc;
+       if (ctxt_desc_req)
+               ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;
+
+       /* save the skb address */
+       tqueue->tx_skbuff[entry] = skb;
+
+       if (!is_jumbo) {
+               if (likely(skb_is_gso(skb))) {
+                       /* TSO support */
+                       if (unlikely(tqueue->prev_mss != cur_mss)) {
+                               priv->hw->desc->tx_ctxt_desc_set_mss(
+                                               ctxt_desc, cur_mss);
+                               priv->hw->desc->tx_ctxt_desc_set_tcmssv(
+                                               ctxt_desc);
+                               priv->hw->desc->tx_ctxt_desc_reset_ostc(
+                                               ctxt_desc);
+                               priv->hw->desc->tx_ctxt_desc_set_ctxt(
+                                               ctxt_desc);
+                               priv->hw->desc->tx_ctxt_desc_set_owner(
+                                               ctxt_desc);
+
+                               entry = (++tqueue->cur_tx) % tx_rsize;
+                               first_desc = tqueue->dma_tx + entry;
+
+                               tqueue->prev_mss = cur_mss;
+                       }
+                       sxgbe_tso_prepare(priv, first_desc, skb);
+               } else {
+                       tx_desc->tdes01 = dma_map_single(priv->device,
+                                                        skb->data, no_pagedlen, DMA_TO_DEVICE);
+                       if (dma_mapping_error(priv->device, tx_desc->tdes01))
+                               netdev_err(dev, "%s: TX dma mapping failed!!\n",
+                                          __func__);
+
+                       priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
+                                                       no_pagedlen, cksum_flag);
+               }
+       }
+
+       for (frag_num = 0; frag_num < nr_frags; frag_num++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
+               int len = skb_frag_size(frag);
+
+               entry = (++tqueue->cur_tx) % tx_rsize;
+               tx_desc = tqueue->dma_tx + entry;
+               tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
+                                                  DMA_TO_DEVICE);
+
+               tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
+               tqueue->tx_skbuff[entry] = NULL;
+
+               /* prepare the descriptor */
+               priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
+                                               len, cksum_flag);
+               /* memory barrier to flush descriptor */
+               wmb();
+
+               /* set the owner */
+               priv->hw->desc->set_tx_owner(tx_desc);
+       }
+
+       /* close the descriptors */
+       priv->hw->desc->close_tx_desc(tx_desc);
+
+       /* memory barrier to flush descriptor */
+       wmb();
+
+       tqueue->tx_count_frames += nr_frags + 1;
+       if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
+               priv->hw->desc->clear_tx_ic(tx_desc);
+               priv->xstats.tx_reset_ic_bit++;
+               mod_timer(&tqueue->txtimer,
+                         SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
+       } else {
+               tqueue->tx_count_frames = 0;
+       }
+
+       /* set owner for first desc */
+       priv->hw->desc->set_tx_owner(first_desc);
+
+       /* memory barrier to flush descriptor */
+       wmb();
+
+       tqueue->cur_tx++;
+
+       /* display current ring */
+       netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
+                 __func__, tqueue->cur_tx % tx_rsize,
+                 tqueue->dirty_tx % tx_rsize, entry,
+                 first_desc, nr_frags);
+
+       if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
+               netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
+                         __func__);
+               netif_tx_stop_queue(dev_txq);
+       }
+
+       dev->stats.tx_bytes += skb->len;
+
+       if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+                    tqueue->hwts_tx_en)) {
+               /* declare that device is doing timestamping */
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               priv->hw->desc->tx_enable_tstamp(first_desc);
+       }
+
+       if (!tqueue->hwts_tx_en)
+               skb_tx_timestamp(skb);
+
+       priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);
+
+       spin_unlock(&tqueue->tx_lock);
+
+       return NETDEV_TX_OK;
+}
+
+/**
+ * sxgbe_rx_refill: refill used skb preallocated buffers
+ * @priv: driver private structure
+ * Description: this reallocates the skb buffers used by the reception
+ * process, which is based on zero-copy.
+ */
+static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
+{
+       unsigned int rxsize = priv->dma_rx_size;
+       int bfsize = priv->dma_buf_sz;
+       u8 qnum = priv->cur_rx_qnum;
+
+       for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
+            priv->rxq[qnum]->dirty_rx++) {
+               unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
+               struct sxgbe_rx_norm_desc *p;
+
+               p = priv->rxq[qnum]->dma_rx + entry;
+
+               if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
+                       struct sk_buff *skb;
+
+                       skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+
+                       if (unlikely(skb == NULL))
+                               break;
+
+                       priv->rxq[qnum]->rx_skbuff[entry] = skb;
+                       priv->rxq[qnum]->rx_skbuff_dma[entry] =
+                               dma_map_single(priv->device, skb->data, bfsize,
+                                              DMA_FROM_DEVICE);
+
+                       p->rdes23.rx_rd_des23.buf2_addr =
+                               priv->rxq[qnum]->rx_skbuff_dma[entry];
+               }
+
+               /* Added memory barrier for RX descriptor modification */
+               wmb();
+               priv->hw->desc->set_rx_owner(p);
+               /* Added memory barrier for RX descriptor modification */
+               wmb();
+       }
+}
+
+/**
+ * sxgbe_rx: receive the frames from the remote host
+ * @priv: driver private structure
+ * @limit: NAPI budget.
+ * Description: this is the function called by the NAPI poll method.
+ * It gets all the frames inside the ring.
+ */
+static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
+{
+       u8 qnum = priv->cur_rx_qnum;
+       unsigned int rxsize = priv->dma_rx_size;
+       unsigned int entry = priv->rxq[qnum]->cur_rx;
+       unsigned int next_entry = 0;
+       unsigned int count = 0;
+       int checksum;
+       int status;
+
+       while (count < limit) {
+               struct sxgbe_rx_norm_desc *p;
+               struct sk_buff *skb;
+               int frame_len;
+
+               p = priv->rxq[qnum]->dma_rx + entry;
+
+               if (priv->hw->desc->get_rx_owner(p))
+                       break;
+
+               count++;
+
+               next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
+               prefetch(priv->rxq[qnum]->dma_rx + next_entry);
+
+               /* Read the status of the incoming frame and also get checksum
+                * value based on whether it is enabled in SXGBE hardware or
+                * not.
+                */
+               status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
+                                                    &checksum);
+               if (unlikely(status < 0)) {
+                       entry = next_entry;
+                       continue;
+               }
+               if (unlikely(!priv->rxcsum_insertion))
+                       checksum = CHECKSUM_NONE;
+
+               skb = priv->rxq[qnum]->rx_skbuff[entry];
+
+               if (unlikely(!skb)) {
+                       netdev_err(priv->dev, "rx descriptor is not consistent\n");
+                       break;
+               }
+
+               prefetch(skb->data - NET_IP_ALIGN);
+               priv->rxq[qnum]->rx_skbuff[entry] = NULL;
+
+               frame_len = priv->hw->desc->get_rx_frame_len(p);
+
+               skb_put(skb, frame_len);
+
+               skb->ip_summed = checksum;
+               if (checksum == CHECKSUM_NONE)
+                       netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&priv->napi, skb);
+
+               entry = next_entry;
+       }
+
+       sxgbe_rx_refill(priv);
+
+       return count;
+}
+
+/**
+ *  sxgbe_poll - sxgbe poll method (NAPI)
+ *  @napi : pointer to the napi structure.
+ *  @budget : maximum number of packets that the current CPU can receive from
+ *           all interfaces.
+ *  Description :
+ *  To look at the incoming frames and clear the tx resources.
+ */
+static int sxgbe_poll(struct napi_struct *napi, int budget)
+{
+       struct sxgbe_priv_data *priv = container_of(napi,
+                                                   struct sxgbe_priv_data, napi);
+       int work_done = 0;
+       u8 qnum = priv->cur_rx_qnum;
+
+       priv->xstats.napi_poll++;
+       /* first, clean the tx queues */
+       sxgbe_tx_all_clean(priv);
+
+       work_done = sxgbe_rx(priv, budget);
+       if (work_done < budget) {
+               napi_complete(napi);
+               priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
+       }
+
+       return work_done;
+}
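+
+/* Note on the NAPI flow: the RX DMA interrupt handler (sxgbe_rx_interrupt(),
+ * further below) disables the per-queue DMA interrupt and schedules this
+ * poll; when sxgbe_poll() processes fewer packets than the budget it calls
+ * napi_complete() and re-enables that interrupt, so the interrupt stays
+ * masked only while there is work pending.
+ */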
+
+/**
+ *  sxgbe_tx_timeout
+ *  @dev : Pointer to net device structure
+ *  Description: this function is called when a packet transmission fails to
+ *   complete within a reasonable time. The driver will mark the error in the
+ *   netdev structure and arrange for the device to be reset to a sane state
+ *   in order to transmit a new packet.
+ */
+static void sxgbe_tx_timeout(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       sxgbe_reset_all_tx_queues(priv);
+}
+
+/**
+ *  sxgbe_common_interrupt - main ISR
+ *  @irq: interrupt number.
+ *  @dev_id: to pass the net device pointer.
+ *  Description: this is the main driver interrupt service routine.
+ *  It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
+ *  interrupts.
+ */
+static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
+{
+       struct net_device *netdev = (struct net_device *)dev_id;
+       struct sxgbe_priv_data *priv = netdev_priv(netdev);
+       int status;
+
+       status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
+       /* For LPI we need to save the tx status */
+       if (status & TX_ENTRY_LPI_MODE) {
+               priv->xstats.tx_lpi_entry_n++;
+               priv->tx_path_in_lpi_mode = true;
+       }
+       if (status & TX_EXIT_LPI_MODE) {
+               priv->xstats.tx_lpi_exit_n++;
+               priv->tx_path_in_lpi_mode = false;
+       }
+       if (status & RX_ENTRY_LPI_MODE)
+               priv->xstats.rx_lpi_entry_n++;
+       if (status & RX_EXIT_LPI_MODE)
+               priv->xstats.rx_lpi_exit_n++;
+
+       return IRQ_HANDLED;
+}
+
+/**
+ *  sxgbe_tx_interrupt - TX DMA ISR
+ *  @irq: interrupt number.
+ *  @dev_id: to pass the net device pointer.
+ *  Description: this is the tx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
+{
+       int status;
+       struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
+       struct sxgbe_priv_data *priv = txq->priv_ptr;
+
+       /* get the channel status */
+       status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
+                                                 &priv->xstats);
+       /* check for normal path */
+       if (likely((status & handle_tx)))
+               napi_schedule(&priv->napi);
+
+       /* check for unrecoverable error */
+       if (unlikely((status & tx_hard_error)))
+               sxgbe_restart_tx_queue(priv, txq->queue_no);
+
+       /* check for TC configuration change */
+       if (unlikely((status & tx_bump_tc) &&
+                    (priv->tx_tc != SXGBE_MTL_SFMODE) &&
+                    (priv->tx_tc < 512))) {
+               /* step of TX TC is 32 till 128, otherwise 64 */
+               priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
+               priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
+                                              txq->queue_no, priv->tx_tc);
+               priv->xstats.tx_threshold = priv->tx_tc;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/**
+ *  sxgbe_rx_interrupt - RX DMA ISR
+ *  @irq: interrupt number.
+ *  @dev_id: to pass the net device pointer.
+ *  Description: this is the rx dma interrupt service routine.
+ */
+static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
+{
+       int status;
+       struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
+       struct sxgbe_priv_data *priv = rxq->priv_ptr;
+
+       /* get the channel status */
+       status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
+                                                 &priv->xstats);
+
+       if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
+               priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
+               __napi_schedule(&priv->napi);
+       }
+
+       /* check for TC configuration change */
+       if (unlikely((status & rx_bump_tc) &&
+                    (priv->rx_tc != SXGBE_MTL_SFMODE) &&
+                    (priv->rx_tc < 128))) {
+               /* step of TC is 32 */
+               priv->rx_tc += 32;
+               priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
+                                              rxq->queue_no, priv->rx_tc);
+               priv->xstats.rx_threshold = priv->rx_tc;
+       }
+
+       return IRQ_HANDLED;
+}
+
+static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
+{
+       u64 val = readl(ioaddr + reg_lo);
+
+       val |= ((u64)readl(ioaddr + reg_hi)) << 32;
+
+       return val;
+}
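+
+/* Worked example (illustrative): if the LO register reads 0xffffffff and
+ * the HI register reads 0x1, the combined counter is
+ * 0x00000001ffffffff = 8589934591. The counters are frozen around the
+ * two reads (see sxgbe_get_stats64() below) so the LO/HI pair stays
+ * consistent.
+ */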
+
+
+/*  sxgbe_get_stats64 - entry point to see statistical information of device
+ *  @dev : device pointer.
+ *  @stats : pointer to hold all the statistical information of device.
+ *  Description:
+ *  This function is a driver entry point called whenever device
+ *  statistics are queried (e.g. via ifconfig). Statistics are the number
+ *  of bytes sent or received, errors occurred, etc.
+ *  Return value:
+ *  This function returns the filled-in statistics structure.
+ */
+static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
+                                                  struct rtnl_link_stats64 *stats)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       void __iomem *ioaddr = priv->ioaddr;
+       u64 count;
+
+       spin_lock(&priv->stats_lock);
+       /* Freeze the counter registers before reading value otherwise it may
+        * get updated by hardware while we are reading them
+        */
+       writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);
+
+       stats->rx_bytes = sxgbe_get_stat64(ioaddr,
+                                          SXGBE_MMC_RXOCTETLO_GCNT_REG,
+                                          SXGBE_MMC_RXOCTETHI_GCNT_REG);
+
+       stats->rx_packets = sxgbe_get_stat64(ioaddr,
+                                            SXGBE_MMC_RXFRAMELO_GBCNT_REG,
+                                            SXGBE_MMC_RXFRAMEHI_GBCNT_REG);
+
+       stats->multicast = sxgbe_get_stat64(ioaddr,
+                                           SXGBE_MMC_RXMULTILO_GCNT_REG,
+                                           SXGBE_MMC_RXMULTIHI_GCNT_REG);
+
+       stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
+                                               SXGBE_MMC_RXCRCERRLO_REG,
+                                               SXGBE_MMC_RXCRCERRHI_REG);
+
+       stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
+                                                 SXGBE_MMC_RXLENERRLO_REG,
+                                                 SXGBE_MMC_RXLENERRHI_REG);
+
+       stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
+                                                  SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
+                                                  SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);
+
+       stats->tx_bytes = sxgbe_get_stat64(ioaddr,
+                                          SXGBE_MMC_TXOCTETLO_GCNT_REG,
+                                          SXGBE_MMC_TXOCTETHI_GCNT_REG);
+
+       count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
+                                SXGBE_MMC_TXFRAMEHI_GBCNT_REG);
+
+       stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
+                                           SXGBE_MMC_TXFRAMEHI_GCNT_REG);
+       stats->tx_errors = count - stats->tx_errors;
+       stats->tx_packets = count;
+       stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
+                                                SXGBE_MMC_TXUFLWHI_GBCNT_REG);
+       writel(0, ioaddr + SXGBE_MMC_CTL_REG);
+       spin_unlock(&priv->stats_lock);
+
+       return stats;
+}
+
+/*  sxgbe_set_features - entry point to set offload features of the device.
+ *  @dev : device pointer.
+ *  @features : features which are required to be set.
+ *  Description:
+ *  This function is a driver entry point and is called by the Linux kernel
+ *  whenever any device features are set or reset by the user.
+ *  Return value:
+ *  This function returns 0 after setting or resetting device features.
+ */
+static int sxgbe_set_features(struct net_device *dev,
+                             netdev_features_t features)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       netdev_features_t changed = dev->features ^ features;
+
+       if (changed & NETIF_F_RXCSUM) {
+               if (features & NETIF_F_RXCSUM) {
+                       priv->hw->mac->enable_rx_csum(priv->ioaddr);
+                       priv->rxcsum_insertion = true;
+               } else {
+                       priv->hw->mac->disable_rx_csum(priv->ioaddr);
+                       priv->rxcsum_insertion = false;
+               }
+       }
+
+       return 0;
+}
+
+/*  sxgbe_change_mtu - entry point to change MTU size for the device.
+ *  @dev : device pointer.
+ *  @new_mtu : the new MTU size for the device.
+ *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ *  to drive packet transmission. Ethernet has an MTU of 1500 octets
+ *  (ETH_DATA_LEN). This value can be changed with ifconfig.
+ *  Return value:
+ *  0 on success and a negative errno value on failure.
+ */
+static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
+{
+       /* RFC 791, page 25, "Every internet module must be able to forward
+        * a datagram of 68 octets without further fragmentation."
+        */
+       if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
+               netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
+                          MIN_MTU, MAX_MTU);
+               return -EINVAL;
+       }
+
+       /* Return if the buffer sizes will not change */
+       if (dev->mtu == new_mtu)
+               return 0;
+
+       dev->mtu = new_mtu;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* The receive ring buffer size depends on the MTU, so changing the
+        * MTU requires reinitialising the receive ring buffers. Hence bring
+        * the interface down and then back up.
+        */
+       sxgbe_release(dev);
+       return sxgbe_open(dev);
+}
+
+static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+                               unsigned int reg_n)
+{
+       unsigned long data;
+
+       data = (addr[5] << 8) | addr[4];
+       /* For MAC Addr registers we have to set the Address Enable (AE)
+        * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+        * is RO.
+        */
+       writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
+       data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+       writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
+}
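+
+/* Worked example (illustrative): for the address 00:11:22:33:44:55 the
+ * high register gets 0x5544 (plus the AE bit) and the low register gets
+ * 0x33221100, i.e. the MAC address bytes are stored little-endian across
+ * the two registers.
+ */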
+
+/**
+ * sxgbe_set_rx_mode - entry point for setting the different receive modes
+ * of a device: unicast, multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever different receive mode like unicast, multicast and promiscuous
+ * must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void sxgbe_set_rx_mode(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
+       unsigned int value = 0;
+       u32 mc_filter[2];
+       struct netdev_hw_addr *ha;
+       int reg = 1;
+
+       netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
+                  __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+       if (dev->flags & IFF_PROMISC) {
+               value = SXGBE_FRAME_FILTER_PR;
+
+       } else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
+                  (dev->flags & IFF_ALLMULTI)) {
+               value = SXGBE_FRAME_FILTER_PM;  /* pass all multi */
+               writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
+               writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);
+
+       } else if (!netdev_mc_empty(dev)) {
+               /* Hash filter for multicast */
+               value = SXGBE_FRAME_FILTER_HMC;
+
+               memset(mc_filter, 0, sizeof(mc_filter));
+               netdev_for_each_mc_addr(ha, dev) {
+                       /* The upper 6 bits of the calculated CRC are used to
+                        * index the contents of the hash table
+                        */
+                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+
+                       /* The most significant bit determines the register to
+                        * use (H/L) while the other 5 bits determine the bit
+                        * within the register.
+                        */
+                       mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+               }
+               writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
+               writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
+       }
+
+       /* Handle multiple unicast addresses (perfect filtering) */
+       if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) {
+               /* Switch to promiscuous mode if more unicast addresses
+                * are required than the perfect filter can hold
+                */
+               value |= SXGBE_FRAME_FILTER_PR;
+       } else {
+               netdev_for_each_uc_addr(ha, dev) {
+                       sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
+                       reg++;
+               }
+       }
+#ifdef FRAME_FILTER_DEBUG
+       /* Enable Receive all mode (to debug filtering_fail errors) */
+       value |= SXGBE_FRAME_FILTER_RA;
+#endif
+       writel(value, ioaddr + SXGBE_FRAME_FILTER);
+
+       netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+                  readl(ioaddr + SXGBE_FRAME_FILTER),
+                  readl(ioaddr + SXGBE_HASH_HIGH),
+                  readl(ioaddr + SXGBE_HASH_LOW));
+}
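+
+/* Worked example (illustrative): if the CRC-derived value bit_nr in
+ * sxgbe_set_rx_mode() above is 43 (0x2b), then 43 >> 5 = 1 selects
+ * mc_filter[1] (written to SXGBE_HASH_HIGH) and 43 & 31 = 11 selects bit
+ * 11 within that register, so the 64-bit hash table is split across the
+ * HIGH/LOW register pair.
+ */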
+
+/**
+ * sxgbe_config - entry point for changing configuration mode passed on by
+ * ifconfig
+ * @dev : pointer to the device structure
+ * @map : pointer to the device mapping structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever some device configuration is changed.
+ * Return value:
+ * This function returns 0 if success and appropriate error otherwise.
+ */
+static int sxgbe_config(struct net_device *dev, struct ifmap *map)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       /* Can't act on a running interface */
+       if (dev->flags & IFF_UP)
+               return -EBUSY;
+
+       /* Don't allow changing the I/O address */
+       if (map->base_addr != (unsigned long)priv->ioaddr) {
+               netdev_warn(dev, "can't change I/O address\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* Don't allow changing the IRQ */
+       if (map->irq != priv->irq) {
+               netdev_warn(dev, "can't change IRQ number %d\n", priv->irq);
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * sxgbe_poll_controller - entry point for polling receive by device
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ * Return value:
+ * Void.
+ */
+static void sxgbe_poll_controller(struct net_device *dev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+
+       disable_irq(priv->irq);
+       sxgbe_rx_interrupt(priv->irq, dev);
+       enable_irq(priv->irq);
+}
+#endif
+
+/*  sxgbe_ioctl - Entry point for the Ioctl
+ *  @dev: Device pointer.
+ *  @rq: an IOCTL-specific structure that can contain a pointer to
+ *  a proprietary structure used to pass information to the driver.
+ *  @cmd: IOCTL command
+ *  Description:
+ *  Currently it supports the phy_mii_ioctl(...) calls.
+ */
+static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(dev);
+       int ret = -EOPNOTSUPP;
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+       switch (cmd) {
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               if (!priv->phydev)
+                       return -EINVAL;
+               ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static const struct net_device_ops sxgbe_netdev_ops = {
+       .ndo_open               = sxgbe_open,
+       .ndo_start_xmit         = sxgbe_xmit,
+       .ndo_stop               = sxgbe_release,
+       .ndo_get_stats64        = sxgbe_get_stats64,
+       .ndo_change_mtu         = sxgbe_change_mtu,
+       .ndo_set_features       = sxgbe_set_features,
+       .ndo_set_rx_mode        = sxgbe_set_rx_mode,
+       .ndo_tx_timeout         = sxgbe_tx_timeout,
+       .ndo_do_ioctl           = sxgbe_ioctl,
+       .ndo_set_config         = sxgbe_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = sxgbe_poll_controller,
+#endif
+       .ndo_set_mac_address    = eth_mac_addr,
+};
+
+/* Get the hardware ops */
+static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
+{
+       ops_ptr->mac            = sxgbe_get_core_ops();
+       ops_ptr->desc           = sxgbe_get_desc_ops();
+       ops_ptr->dma            = sxgbe_get_dma_ops();
+       ops_ptr->mtl            = sxgbe_get_mtl_ops();
+
+       /* set the MDIO communication Address/Data registers */
+       ops_ptr->mii.addr       = SXGBE_MDIO_SCMD_ADD_REG;
+       ops_ptr->mii.data       = SXGBE_MDIO_SCMD_DATA_REG;
+
+       /* Assign the default link settings;
+        * SXGBE defines no default values to be set in the registers,
+        * so port and duplex are set to 0
+        */
+       ops_ptr->link.port      = 0;
+       ops_ptr->link.duplex    = 0;
+       ops_ptr->link.speed     = SXGBE_SPEED_10G;
+}
+
+/**
+ *  sxgbe_hw_init - Init the SXGBE device
+ *  @priv: driver private structure
+ *  Description: this function checks the HW capability
+ *  (if supported) and sets the driver's features.
+ */
+static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
+{
+       u32 ctrl_ids;
+
+       priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
+       if (!priv->hw)
+               return -ENOMEM;
+
+       /* get the hardware ops */
+       sxgbe_get_ops(priv->hw);
+
+       /* get the controller id */
+       ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
+       priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
+       priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
+       pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
+               priv->hw->ctrl_uid, priv->hw->ctrl_id);
+
+       /* get the H/W features */
+       if (!sxgbe_get_hw_features(priv))
+               pr_info("Hardware features not found\n");
+
+       if (priv->hw_cap.tx_csum_offload)
+               pr_info("TX Checksum offload supported\n");
+
+       if (priv->hw_cap.rx_csum_offload)
+               pr_info("RX Checksum offload supported\n");
+
+       return 0;
+}
+
+/**
+ * sxgbe_drv_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
+ * Description: this is the main probe function, used to
+ * allocate the net device and the private structure.
+ */
+struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
+                                       struct sxgbe_plat_data *plat_dat,
+                                       void __iomem *addr)
+{
+       struct sxgbe_priv_data *priv;
+       struct net_device *ndev;
+       int ret;
+       u8 queue_num;
+
+       ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
+                                 SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
+       if (!ndev)
+               return NULL;
+
+       SET_NETDEV_DEV(ndev, device);
+
+       priv = netdev_priv(ndev);
+       priv->device = device;
+       priv->dev = ndev;
+
+       sxgbe_set_ethtool_ops(ndev);
+       priv->plat = plat_dat;
+       priv->ioaddr = addr;
+
+       /* Verify driver arguments */
+       sxgbe_verify_args();
+
+       /* Init MAC and get the capabilities */
+       ret = sxgbe_hw_init(priv);
+       if (ret)
+               goto error_free_netdev;
+
+       /* allocate memory resources for Descriptor rings */
+       ret = txring_mem_alloc(priv);
+       if (ret)
+               goto error_free_netdev;
+
+       ret = rxring_mem_alloc(priv);
+       if (ret)
+               goto error_free_netdev;
+
+       ndev->netdev_ops = &sxgbe_netdev_ops;
+
+       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+               NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+               NETIF_F_GRO;
+       ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+       ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);
+
+       /* assign filtering support */
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+
+       priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+       /* Enable TCP segmentation offload for all DMA channels */
+       if (priv->hw_cap.tcpseg_offload) {
+               SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
+                       priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
+               }
+       }
+
+       /* Enable Rx checksum offload */
+       if (priv->hw_cap.rx_csum_offload) {
+               priv->hw->mac->enable_rx_csum(priv->ioaddr);
+               priv->rxcsum_insertion = true;
+       }
+
+       /* Initialise pause frame settings */
+       priv->rx_pause = 1;
+       priv->tx_pause = 1;
+
+       /* The RX watchdog is available; enable it unless disabled by platform data */
+       if (!priv->plat->riwt_off) {
+               priv->use_riwt = 1;
+               pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
+       }
+
+       netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);
+
+       spin_lock_init(&priv->stats_lock);
+
+       priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
+       if (IS_ERR(priv->sxgbe_clk)) {
+               netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
+                           __func__);
+               goto error_clk_get;
+       }
+
+       /* If a specific clk_csr value is passed from the platform,
+        * the CSR Clock Range selection cannot be changed at run-time
+        * and is fixed. Otherwise the driver will set the MDC clock
+        * dynamically according to the actual CSR clock input.
+        */
+       if (!priv->plat->clk_csr)
+               sxgbe_clk_csr_set(priv);
+       else
+               priv->clk_csr = priv->plat->clk_csr;
+
+       /* MDIO bus Registration */
+       ret = sxgbe_mdio_register(ndev);
+       if (ret < 0) {
+               netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
+                          __func__, priv->plat->bus_id);
+               goto error_mdio_register;
+       }
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+               goto error_netdev_register;
+       }
+
+       sxgbe_check_ether_addr(priv);
+
+       return priv;
+
+error_netdev_register:
+       sxgbe_mdio_unregister(ndev);
+error_mdio_register:
+       clk_put(priv->sxgbe_clk);
+error_clk_get:
+       netif_napi_del(&priv->napi);
+error_free_netdev:
+       free_netdev(ndev);
+
+       return NULL;
+}
+
+/**
+ * sxgbe_drv_remove
+ * @ndev: net device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status and releases the DMA descriptor rings.
+ */
+int sxgbe_drv_remove(struct net_device *ndev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+       netdev_info(ndev, "%s: removing driver\n", __func__);
+
+       priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
+       priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
+
+       priv->hw->mac->enable_tx(priv->ioaddr, false);
+       priv->hw->mac->enable_rx(priv->ioaddr, false);
+
+       netif_napi_del(&priv->napi);
+
+       sxgbe_mdio_unregister(ndev);
+
+       unregister_netdev(ndev);
+
+       free_netdev(ndev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+int sxgbe_suspend(struct net_device *ndev)
+{
+       return 0;
+}
+
+int sxgbe_resume(struct net_device *ndev)
+{
+       return 0;
+}
+
+int sxgbe_freeze(struct net_device *ndev)
+{
+       return -ENOSYS;
+}
+
+int sxgbe_restore(struct net_device *ndev)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/* Driver is configured as Platform driver */
+static int __init sxgbe_init(void)
+{
+       int ret;
+
+       ret = sxgbe_register_platform();
+       if (ret)
+               goto err;
+       return 0;
+err:
+       pr_err("driver registration failed\n");
+       return ret;
+}
+
+static void __exit sxgbe_exit(void)
+{
+       sxgbe_unregister_platform();
+}
+
+module_init(sxgbe_init);
+module_exit(sxgbe_exit);
+
+#ifndef MODULE
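+/* Parse "sxgbeeth=" options from the kernel command line,
+ * e.g. sxgbeeth=eee_timer:<value>
+ */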
+static int __init sxgbe_cmdline_opt(char *str)
+{
+       char *opt;
+
+       if (!str || !*str)
+               return -EINVAL;
+       while ((opt = strsep(&str, ",")) != NULL) {
+               if (!strncmp(opt, "eee_timer:", 10)) {
+                       if (kstrtoint(opt + 10, 0, &eee_timer))
+                               goto err;
+               }
+       }
+       return 0;
+
+err:
+       pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+       return -EINVAL;
+}
+
+__setup("sxgbeeth=", sxgbe_cmdline_opt);
+#endif /* MODULE */
+
+MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");
+
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");
+
+MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
+MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
+MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
+MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
new file mode 100644 (file)
index 0000000..b0eb0a2
--- /dev/null
@@ -0,0 +1,251 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/mii.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#define SXGBE_SMA_WRITE_CMD    0x01 /* write command */
+#define SXGBE_SMA_PREAD_CMD    0x02 /* post-read increment address */
+#define SXGBE_SMA_READ_CMD     0x03 /* read command */
+#define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */
+#define SXGBE_MII_BUSY         0x00800000 /* mii busy */
+
+static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
+{
+       unsigned long fin_time = jiffies + 3 * HZ; /* 3 seconds */
+
+       while (!time_after(jiffies, fin_time)) {
+               if (!(readl(ioaddr + mii_data) & SXGBE_MII_BUSY))
+                       return 0;
+               cpu_relax();
+       }
+
+       return -EBUSY;
+}
+
+static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
+                                u16 phydata)
+{
+       u32 reg = phydata;
+
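+       /* SMA data register layout used below: data in bits [15:0],
+        * command in [17:16], CSR clock range in [21:19]; setting the
+        * BUSY bit starts the transaction.
+        */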
+       reg |= (cmd << 16) | SXGBE_SMA_SKIP_ADDRFRM |
+              ((sp->clk_csr & 0x7) << 19) | SXGBE_MII_BUSY;
+       writel(reg, sp->ioaddr + sp->hw->mii.data);
+}
+
+static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+                          int phyreg, u16 phydata)
+{
+       u32 reg;
+
+       /* set mdio address register */
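+       /* phylib's Clause 45 encoding carries the MMD device address in
+        * phyreg bits [20:16] and the register number in bits [15:0].
+        */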
+       reg = ((phyreg >> 16) & 0x1f) << 21;
+       reg |= (phyaddr << 16) | (phyreg & 0xffff);
+       writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+       sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+                          int phyreg, u16 phydata)
+{
+       u32 reg;
+
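+       /* flag this PHY address as a Clause 22 port before issuing the command */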
+       writel(1 << phyaddr, sp->ioaddr + SXGBE_MDIO_CLAUSE22_PORT_REG);
+
+       /* set mdio address register */
+       reg = (phyaddr << 16) | (phyreg & 0x1f);
+       writel(reg, sp->ioaddr + sp->hw->mii.addr);
+
+       sxgbe_mdio_ctrl_data(sp, cmd, phydata);
+}
+
+static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
+                            int phyreg, u16 phydata)
+{
+       const struct mii_regs *mii = &sp->hw->mii;
+       int rc;
+
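+       /* Wait for the SMA to go idle, program the address/command for
+        * either a Clause 45 or Clause 22 access, then wait for completion.
+        */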
+       rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+       if (rc < 0)
+               return rc;
+
+       if (phyreg & MII_ADDR_C45) {
+               sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
+       } else {
+               /* Ports 0-3 only support C22. */
+               if (phyaddr >= 4)
+                       return -ENODEV;
+
+               sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
+       }
+
+       return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+}
+
+/**
+ * sxgbe_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: register address within the PHY
+ * Description: this function is used for both C45 and C22 MDIO reads
+ */
+static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+       struct net_device *ndev = bus->priv;
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+       int rc;
+
+       rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
+       if (rc < 0)
+               return rc;
+
+       return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
+}
+
+/**
+ * sxgbe_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: register address within the PHY
+ * @phydata: data to be written into the PHY register
+ * Description: this function is used for both C45 and C22 MDIO writes
+ */
+static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+                            u16 phydata)
+{
+       struct net_device *ndev = bus->priv;
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+       return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
+                                phydata);
+}
+
+int sxgbe_mdio_register(struct net_device *ndev)
+{
+       struct mii_bus *mdio_bus;
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+       struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
+       int err, phy_addr;
+       int *irqlist;
+       bool act;
+       bool phy_found = false;
+
+       /* allocate the new mdio bus */
+       mdio_bus = mdiobus_alloc();
+       if (!mdio_bus) {
+               netdev_err(ndev, "%s: mii bus allocation failed\n", __func__);
+               return -ENOMEM;
+       }
+
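+       /* Use the IRQ table from platform data when provided, otherwise
+        * fall back to the driver's own per-PHY table.
+        */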
+       if (mdio_data->irqs)
+               irqlist = mdio_data->irqs;
+       else
+               irqlist = priv->mii_irq;
+
+       /* assign mii bus fields */
+       mdio_bus->name = "samsxgbe";
+       mdio_bus->read = &sxgbe_mdio_read;
+       mdio_bus->write = &sxgbe_mdio_write;
+       snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+                mdio_bus->name, priv->plat->bus_id);
+       mdio_bus->priv = ndev;
+       mdio_bus->phy_mask = mdio_data->phy_mask;
+       mdio_bus->parent = priv->device;
+
+       /* register with kernel subsystem */
+       err = mdiobus_register(mdio_bus);
+       if (err != 0) {
+               netdev_err(ndev, "mdiobus register failed\n");
+               goto mdiobus_err;
+       }
+
+       for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+               struct phy_device *phy = mdio_bus->phy_map[phy_addr];
+
+               if (phy) {
+                       char irq_num[4];
+                       char *irq_str;
+                       /* If an IRQ was provided to be assigned after
+                        * the bus probe, do it here.
+                        */
+                       if ((mdio_data->irqs == NULL) &&
+                           (mdio_data->probed_phy_irq > 0)) {
+                               irqlist[phy_addr] = mdio_data->probed_phy_irq;
+                               phy->irq = mdio_data->probed_phy_irq;
+                       }
+
+                       /* If we're going to bind the MAC to this PHY bus,
+                        * and no PHY number was provided to the MAC,
+                        * use the one probed here.
+                        */
+                       if (priv->plat->phy_addr == -1)
+                               priv->plat->phy_addr = phy_addr;
+
+                       act = (priv->plat->phy_addr == phy_addr);
+                       switch (phy->irq) {
+                       case PHY_POLL:
+                               irq_str = "POLL";
+                               break;
+                       case PHY_IGNORE_INTERRUPT:
+                               irq_str = "IGNORE";
+                               break;
+                       default:
+                               sprintf(irq_num, "%d", phy->irq);
+                               irq_str = irq_num;
+                               break;
+                       }
+                       netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+                                   phy->phy_id, phy_addr, irq_str,
+                                   dev_name(&phy->dev), act ? " active" : "");
+                       phy_found = true;
+               }
+       }
+
+       if (!phy_found) {
+               netdev_err(ndev, "PHY not found\n");
+               err = -ENODEV;
+               mdiobus_unregister(mdio_bus);
+               goto mdiobus_err;
+       }
+
+       priv->mii = mdio_bus;
+
+       return 0;
+
+mdiobus_err:
+       mdiobus_free(mdio_bus);
+       return err;
+}
+
+int sxgbe_mdio_unregister(struct net_device *ndev)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+       if (!priv->mii)
+               return 0;
+
+       mdiobus_unregister(priv->mii);
+       priv->mii->priv = NULL;
+       mdiobus_free(priv->mii);
+       priv->mii = NULL;
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.c
new file mode 100644 (file)
index 0000000..324681c
--- /dev/null
@@ -0,0 +1,254 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+
+#include "sxgbe_mtl.h"
+#include "sxgbe_reg.h"
+
+static void sxgbe_mtl_init(void __iomem *ioaddr, unsigned int etsalg,
+                          unsigned int raa)
+{
+       u32 reg_val;
+
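+       /* read the MTL operation mode register and clear the
+        * scheduling-algorithm field before programming it below
+        */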
+       reg_val = readl(ioaddr + SXGBE_MTL_OP_MODE_REG);
+       reg_val &= ETS_RST;
+
+       /* ETS algorithm */
+       switch (etsalg & SXGBE_MTL_OPMODE_ESTMASK) {
+       case ETS_WRR:
+               reg_val &= ETS_WRR;
+               break;
+       case ETS_WFQ:
+               reg_val |= ETS_WFQ;
+               break;
+       case ETS_DWRR:
+               reg_val |= ETS_DWRR;
+               break;
+       }
+       writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+
+       switch (raa & SXGBE_MTL_OPMODE_RAAMASK) {
+       case RAA_SP:
+               reg_val &= RAA_SP;
+               break;
+       case RAA_WSP:
+               reg_val |= RAA_WSP;
+               break;
+       }
+       writel(reg_val, ioaddr + SXGBE_MTL_OP_MODE_REG);
+}
+
+/* For dynamic DMA channel mapping of the RX queues */
+static void sxgbe_mtl_dma_dm_rxqueue(void __iomem *ioaddr)
+{
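+       /* 0x80 in each per-queue byte selects dynamic (per-packet) DMA
+        * channel mapping; presumably each MAP register covers four RX queues.
+        */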
+       writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP0_REG);
+       writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP1_REG);
+       writel(RX_QUEUE_DYNAMIC, ioaddr + SXGBE_MTL_RXQ_DMAMAP2_REG);
+}
+
+static void sxgbe_mtl_set_txfifosize(void __iomem *ioaddr, int queue_num,
+                                    int queue_fifo)
+{
+       u32 fifo_bits, reg_val;
+
+       /* 0 means 256 bytes */
+       fifo_bits = (queue_fifo / SXGBE_MTL_TX_FIFO_DIV) - 1;
+       reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+       reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+       writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_set_rxfifosize(void __iomem *ioaddr, int queue_num,
+                                    int queue_fifo)
+{
+       u32 fifo_bits, reg_val;
+
+       /* 0 means 256 bytes */
+       fifo_bits = (queue_fifo / SXGBE_MTL_RX_FIFO_DIV) - 1;
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val |= (fifo_bits << SXGBE_MTL_FIFO_LSHIFT);
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_enable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+       reg_val |= SXGBE_MTL_ENABLE_QUEUE;
+       writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_disable_txqueue(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+       reg_val &= ~SXGBE_MTL_ENABLE_QUEUE;
+       writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_active(void __iomem *ioaddr, int queue_num,
+                               int threshold)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_ACTIVE);
+       reg_val |= (threshold << RX_FC_ACTIVE);
+
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_enable(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val |= SXGBE_MTL_ENABLE_FC;
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fc_deactive(void __iomem *ioaddr, int queue_num,
+                                 int threshold)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val &= ~(SXGBE_MTL_FCMASK << RX_FC_DEACTIVE);
+       reg_val |= (threshold << RX_FC_DEACTIVE);
+
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fep_enable(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val |= SXGBE_MTL_RXQ_OP_FEP;
+
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fep_disable(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val &= ~(SXGBE_MTL_RXQ_OP_FEP);
+
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fup_enable(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val |= SXGBE_MTL_RXQ_OP_FUP;
+
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_mtl_fup_disable(void __iomem *ioaddr, int queue_num)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       reg_val &= ~(SXGBE_MTL_RXQ_OP_FUP);
+
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_set_tx_mtl_mode(void __iomem *ioaddr, int queue_num,
+                                 int tx_mode)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+       /* TX specific MTL mode settings */
+       if (tx_mode == SXGBE_MTL_SFMODE) {
+               reg_val |= SXGBE_MTL_SFMODE;
+       } else {
+               /* set the TTC values */
+               if (tx_mode <= 64)
+                       reg_val |= MTL_CONTROL_TTC_64;
+               else if (tx_mode <= 96)
+                       reg_val |= MTL_CONTROL_TTC_96;
+               else if (tx_mode <= 128)
+                       reg_val |= MTL_CONTROL_TTC_128;
+               else if (tx_mode <= 192)
+                       reg_val |= MTL_CONTROL_TTC_192;
+               else if (tx_mode <= 256)
+                       reg_val |= MTL_CONTROL_TTC_256;
+               else if (tx_mode <= 384)
+                       reg_val |= MTL_CONTROL_TTC_384;
+               else
+                       reg_val |= MTL_CONTROL_TTC_512;
+       }
+
+       /* write into TXQ operation register */
+       writel(reg_val, ioaddr + SXGBE_MTL_TXQ_OPMODE_REG(queue_num));
+}
+
+static void sxgbe_set_rx_mtl_mode(void __iomem *ioaddr, int queue_num,
+                                 int rx_mode)
+{
+       u32 reg_val;
+
+       reg_val = readl(ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+       /* RX specific MTL mode settings */
+       if (rx_mode == SXGBE_RX_MTL_SFMODE) {
+               reg_val |= SXGBE_RX_MTL_SFMODE;
+       } else {
+               if (rx_mode <= 64)
+                       reg_val |= MTL_CONTROL_RTC_64;
+               else if (rx_mode <= 96)
+                       reg_val |= MTL_CONTROL_RTC_96;
+               else if (rx_mode <= 128)
+                       reg_val |= MTL_CONTROL_RTC_128;
+       }
+
+       /* write into RXQ operation register */
+       writel(reg_val, ioaddr + SXGBE_MTL_RXQ_OPMODE_REG(queue_num));
+}
+
+static const struct sxgbe_mtl_ops mtl_ops = {
+       .mtl_set_txfifosize             = sxgbe_mtl_set_txfifosize,
+       .mtl_set_rxfifosize             = sxgbe_mtl_set_rxfifosize,
+       .mtl_enable_txqueue             = sxgbe_mtl_enable_txqueue,
+       .mtl_disable_txqueue            = sxgbe_mtl_disable_txqueue,
+       .mtl_dynamic_dma_rxqueue        = sxgbe_mtl_dma_dm_rxqueue,
+       .set_tx_mtl_mode                = sxgbe_set_tx_mtl_mode,
+       .set_rx_mtl_mode                = sxgbe_set_rx_mtl_mode,
+       .mtl_init                       = sxgbe_mtl_init,
+       .mtl_fc_active                  = sxgbe_mtl_fc_active,
+       .mtl_fc_deactive                = sxgbe_mtl_fc_deactive,
+       .mtl_fc_enable                  = sxgbe_mtl_fc_enable,
+       .mtl_fep_enable                 = sxgbe_mtl_fep_enable,
+       .mtl_fep_disable                = sxgbe_mtl_fep_disable,
+       .mtl_fup_enable                 = sxgbe_mtl_fup_enable,
+       .mtl_fup_disable                = sxgbe_mtl_fup_disable
+};
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void)
+{
+       return &mtl_ops;
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mtl.h
new file mode 100644 (file)
index 0000000..7e4810c
--- /dev/null
@@ -0,0 +1,104 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_MTL_H__
+#define __SXGBE_MTL_H__
+
+#define SXGBE_MTL_OPMODE_ESTMASK       0x3
+#define SXGBE_MTL_OPMODE_RAAMASK       0x1
+#define SXGBE_MTL_FCMASK               0x7
+#define SXGBE_MTL_TX_FIFO_DIV          256
+#define SXGBE_MTL_RX_FIFO_DIV          256
+
+#define SXGBE_MTL_RXQ_OP_FEP           BIT(4)
+#define SXGBE_MTL_RXQ_OP_FUP           BIT(3)
+#define SXGBE_MTL_ENABLE_FC            0x80
+
+#define ETS_WRR                                0xFFFFFF9F
+#define ETS_RST                                0xFFFFFF9F
+#define ETS_WFQ                                0x00000020
+#define ETS_DWRR                       0x00000040
+#define RAA_SP                         0xFFFFFFFB
+#define RAA_WSP                                0x00000004
+
+#define RX_QUEUE_DYNAMIC               0x80808080
+#define RX_FC_ACTIVE                   8
+#define RX_FC_DEACTIVE                 13
+
+enum ttc_control {
+       MTL_CONTROL_TTC_64 = 0x00000000,
+       MTL_CONTROL_TTC_96 = 0x00000020,
+       MTL_CONTROL_TTC_128 = 0x00000030,
+       MTL_CONTROL_TTC_192 = 0x00000040,
+       MTL_CONTROL_TTC_256 = 0x00000050,
+       MTL_CONTROL_TTC_384 = 0x00000060,
+       MTL_CONTROL_TTC_512 = 0x00000070,
+};
+
+enum rtc_control {
+       MTL_CONTROL_RTC_64 = 0x00000000,
+       MTL_CONTROL_RTC_96 = 0x00000002,
+       MTL_CONTROL_RTC_128 = 0x00000003,
+};
+
+enum flow_control_th {
+       MTL_FC_FULL_1K = 0x00000000,
+       MTL_FC_FULL_2K = 0x00000001,
+       MTL_FC_FULL_4K = 0x00000002,
+       MTL_FC_FULL_5K = 0x00000003,
+       MTL_FC_FULL_6K = 0x00000004,
+       MTL_FC_FULL_8K = 0x00000005,
+       MTL_FC_FULL_16K = 0x00000006,
+       MTL_FC_FULL_24K = 0x00000007,
+};
+
+struct sxgbe_mtl_ops {
+       void (*mtl_init)(void __iomem *ioaddr, unsigned int etsalg,
+                        unsigned int raa);
+
+       void (*mtl_set_txfifosize)(void __iomem *ioaddr, int queue_num,
+                                  int mtl_fifo);
+
+       void (*mtl_set_rxfifosize)(void __iomem *ioaddr, int queue_num,
+                                  int queue_fifo);
+
+       void (*mtl_enable_txqueue)(void __iomem *ioaddr, int queue_num);
+
+       void (*mtl_disable_txqueue)(void __iomem *ioaddr, int queue_num);
+
+       void (*set_tx_mtl_mode)(void __iomem *ioaddr, int queue_num,
+                               int tx_mode);
+
+       void (*set_rx_mtl_mode)(void __iomem *ioaddr, int queue_num,
+                               int rx_mode);
+
+       void (*mtl_dynamic_dma_rxqueue)(void __iomem *ioaddr);
+
+       void (*mtl_fc_active)(void __iomem *ioaddr, int queue_num,
+                             int threshold);
+
+       void (*mtl_fc_deactive)(void __iomem *ioaddr, int queue_num,
+                               int threshold);
+
+       void (*mtl_fc_enable)(void __iomem *ioaddr, int queue_num);
+
+       void (*mtl_fep_enable)(void __iomem *ioaddr, int queue_num);
+
+       void (*mtl_fep_disable)(void __iomem *ioaddr, int queue_num);
+
+       void (*mtl_fup_enable)(void __iomem *ioaddr, int queue_num);
+
+       void (*mtl_fup_disable)(void __iomem *ioaddr, int queue_num);
+};
+
+const struct sxgbe_mtl_ops *sxgbe_get_mtl_ops(void);
+
+#endif /* __SXGBE_MTL_H__ */
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
new file mode 100644 (file)
index 0000000..b147d46
--- /dev/null
@@ -0,0 +1,259 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/sxgbe_platform.h>
+
+#include "sxgbe_common.h"
+#include "sxgbe_reg.h"
+
+#ifdef CONFIG_OF
+static int sxgbe_probe_config_dt(struct platform_device *pdev,
+                                struct sxgbe_plat_data *plat,
+                                const char **mac)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sxgbe_dma_cfg *dma_cfg;
+
+       if (!np)
+               return -ENODEV;
+
+       *mac = of_get_mac_address(np);
+       plat->interface = of_get_phy_mode(np);
+
+       plat->bus_id = of_alias_get_id(np, "ethernet");
+       if (plat->bus_id < 0)
+               plat->bus_id = 0;
+
+       plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+                                          sizeof(*plat->mdio_bus_data),
+                                          GFP_KERNEL);
+       if (!plat->mdio_bus_data)
+               return -ENOMEM;
+
+       dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
+       if (!dma_cfg)
+               return -ENOMEM;
+
+       plat->dma_cfg = dma_cfg;
+       of_property_read_u32(np, "samsung,pbl", &dma_cfg->pbl);
+       if (of_property_read_u32(np, "samsung,burst-map", &dma_cfg->burst_map) == 0)
+               dma_cfg->fixed_burst = true;
+
+       return 0;
+}
+#else
+static int sxgbe_probe_config_dt(struct platform_device *pdev,
+                                struct sxgbe_plat_data *plat,
+                                const char **mac)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
+/**
+ * sxgbe_platform_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main driver probe to
+ * init the net device, register the mdio bus, etc.
+ */
+static int sxgbe_platform_probe(struct platform_device *pdev)
+{
+       int ret;
+       int i, chan;
+       struct resource *res;
+       struct device *dev = &pdev->dev;
+       void __iomem *addr;
+       struct sxgbe_priv_data *priv = NULL;
+       struct sxgbe_plat_data *plat_dat = NULL;
+       const char *mac = NULL;
+       struct device_node *node = dev->of_node;
+
+       /* Get memory resource */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               goto err_out;
+
+       addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(addr))
+               return PTR_ERR(addr);
+
+       if (pdev->dev.of_node) {
+               plat_dat = devm_kzalloc(&pdev->dev,
+                                       sizeof(struct sxgbe_plat_data),
+                                       GFP_KERNEL);
+               if (!plat_dat)
+                       return  -ENOMEM;
+
+               ret = sxgbe_probe_config_dt(pdev, plat_dat, &mac);
+               if (ret) {
+                       pr_err("%s: main dt probe failed\n", __func__);
+                       return ret;
+               }
+       }
+
+       priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
+       if (!priv) {
+               pr_err("%s: main driver probe failed\n", __func__);
+               goto err_out;
+       }
+
+       /* Get MAC address if available (DT); priv is only valid after
+        * sxgbe_drv_probe(), so copy it here.
+        */
+       if (mac)
+               ether_addr_copy(priv->dev->dev_addr, mac);
+
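+       /* Interrupt lines are expected in DT order: the common interrupt
+        * first, then one per TX queue, one per RX queue, and finally LPI.
+        */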
+       /* Get the SXGBE common INT information */
+       priv->irq  = irq_of_parse_and_map(node, 0);
+       if (priv->irq <= 0) {
+               dev_err(dev, "sxgbe common irq parsing failed\n");
+               goto err_drv_remove;
+       }
+
+       /* Get the TX/RX IRQ numbers */
+       for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
+               priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+               if (priv->txq[i]->irq_no <= 0) {
+                       dev_err(dev, "sxgbe tx irq parsing failed\n");
+                       goto err_tx_irq_unmap;
+               }
+       }
+
+       for (i = 0; i < SXGBE_RX_QUEUES; i++) {
+               priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++);
+               if (priv->rxq[i]->irq_no <= 0) {
+                       dev_err(dev, "sxgbe rx irq parsing failed\n");
+                       goto err_rx_irq_unmap;
+               }
+       }
+
+       priv->lpi_irq = irq_of_parse_and_map(node, chan);
+       if (priv->lpi_irq <= 0) {
+               dev_err(dev, "sxgbe lpi irq parsing failed\n");
+               goto err_rx_irq_unmap;
+       }
+
+       platform_set_drvdata(pdev, priv->dev);
+
+       pr_debug("platform driver registration completed\n");
+
+       return 0;
+
+err_rx_irq_unmap:
+       while (i--)
+               irq_dispose_mapping(priv->rxq[i]->irq_no);
+       i = SXGBE_TX_QUEUES;
+err_tx_irq_unmap:
+       while (i--)
+               irq_dispose_mapping(priv->txq[i]->irq_no);
+       irq_dispose_mapping(priv->irq);
+err_drv_remove:
+       sxgbe_drv_remove(priv->dev);
+err_out:
+       return -ENODEV;
+}
+
+/**
+ * sxgbe_platform_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver remove to free the net
+ * resources and releases the platform resources (e.g. memory).
+ */
+static int sxgbe_platform_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+
+       return sxgbe_drv_remove(ndev);
+}
+
+#ifdef CONFIG_PM
+static int sxgbe_platform_suspend(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return sxgbe_suspend(ndev);
+}
+
+static int sxgbe_platform_resume(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return sxgbe_resume(ndev);
+}
+
+static int sxgbe_platform_freeze(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return sxgbe_freeze(ndev);
+}
+
+static int sxgbe_platform_restore(struct device *dev)
+{
+       struct net_device *ndev = dev_get_drvdata(dev);
+
+       return sxgbe_restore(ndev);
+}
+
+static const struct dev_pm_ops sxgbe_platform_pm_ops = {
+       .suspend        = sxgbe_platform_suspend,
+       .resume         = sxgbe_platform_resume,
+       .freeze         = sxgbe_platform_freeze,
+       .thaw           = sxgbe_platform_restore,
+       .restore        = sxgbe_platform_restore,
+};
+#else
+static const struct dev_pm_ops sxgbe_platform_pm_ops;
+#endif /* CONFIG_PM */
+
+static const struct of_device_id sxgbe_dt_ids[] = {
+       { .compatible = "samsung,sxgbe-v2.0a"},
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
+
+static struct platform_driver sxgbe_platform_driver = {
+       .probe  = sxgbe_platform_probe,
+       .remove = sxgbe_platform_remove,
+       .driver = {
+               .name           = SXGBE_RESOURCE_NAME,
+               .owner          = THIS_MODULE,
+               .pm             = &sxgbe_platform_pm_ops,
+               .of_match_table = of_match_ptr(sxgbe_dt_ids),
+       },
+};
+
+int sxgbe_register_platform(void)
+{
+       int err;
+
+       err = platform_driver_register(&sxgbe_platform_driver);
+       if (err)
+               pr_err("failed to register the platform driver\n");
+
+       return err;
+}
+
+void sxgbe_unregister_platform(void)
+{
+       platform_driver_unregister(&sxgbe_platform_driver);
+}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
new file mode 100644 (file)
index 0000000..5a89acb
--- /dev/null
@@ -0,0 +1,488 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_REGMAP_H__
+#define __SXGBE_REGMAP_H__
+
+/* SXGBE MAC Registers */
+#define SXGBE_CORE_TX_CONFIG_REG       0x0000
+#define SXGBE_CORE_RX_CONFIG_REG       0x0004
+#define SXGBE_CORE_PKT_FILTER_REG      0x0008
+#define SXGBE_CORE_WATCHDOG_TIMEOUT_REG 0x000C
+#define SXGBE_CORE_HASH_TABLE_REG0     0x0010
+#define SXGBE_CORE_HASH_TABLE_REG1     0x0014
+#define SXGBE_CORE_HASH_TABLE_REG2     0x0018
+#define SXGBE_CORE_HASH_TABLE_REG3     0x001C
+#define SXGBE_CORE_HASH_TABLE_REG4     0x0020
+#define SXGBE_CORE_HASH_TABLE_REG5     0x0024
+#define SXGBE_CORE_HASH_TABLE_REG6     0x0028
+#define SXGBE_CORE_HASH_TABLE_REG7     0x002C
+
+/* EEE-LPI Registers */
+#define SXGBE_CORE_LPI_CTRL_STATUS     0x00D0
+#define SXGBE_CORE_LPI_TIMER_CTRL      0x00D4
+
+/* VLAN Specific Registers */
+#define SXGBE_CORE_VLAN_TAG_REG                0x0050
+#define SXGBE_CORE_VLAN_HASHTAB_REG    0x0058
+#define SXGBE_CORE_VLAN_INSCTL_REG     0x0060
+#define SXGBE_CORE_VLAN_INNERCTL_REG   0x0064
+#define SXGBE_CORE_RX_ETHTYPE_MATCH_REG 0x006C
+
+/* Flow Control Registers */
+#define SXGBE_CORE_TX_Q0_FLOWCTL_REG   0x0070
+#define SXGBE_CORE_TX_Q1_FLOWCTL_REG   0x0074
+#define SXGBE_CORE_TX_Q2_FLOWCTL_REG   0x0078
+#define SXGBE_CORE_TX_Q3_FLOWCTL_REG   0x007C
+#define SXGBE_CORE_TX_Q4_FLOWCTL_REG   0x0080
+#define SXGBE_CORE_TX_Q5_FLOWCTL_REG   0x0084
+#define SXGBE_CORE_TX_Q6_FLOWCTL_REG   0x0088
+#define SXGBE_CORE_TX_Q7_FLOWCTL_REG   0x008C
+#define SXGBE_CORE_RX_FLOWCTL_REG      0x0090
+#define SXGBE_CORE_RX_CTL0_REG         0x00A0
+#define SXGBE_CORE_RX_CTL1_REG         0x00A4
+#define SXGBE_CORE_RX_CTL2_REG         0x00A8
+#define SXGBE_CORE_RX_CTL3_REG         0x00AC
+
+/* Interrupt Registers */
+#define SXGBE_CORE_INT_STATUS_REG      0x00B0
+#define SXGBE_CORE_INT_ENABLE_REG      0x00B4
+#define SXGBE_CORE_RXTX_ERR_STATUS_REG 0x00B8
+#define SXGBE_CORE_PMT_CTL_STATUS_REG  0x00C0
+#define SXGBE_CORE_RWK_PKT_FILTER_REG  0x00C4
+#define SXGBE_CORE_VERSION_REG         0x0110
+#define SXGBE_CORE_DEBUG_REG           0x0114
+#define SXGBE_CORE_HW_FEA_REG(index)   (0x011C + index * 4)
+
+/* SMA(MDIO) module registers */
+#define SXGBE_MDIO_SCMD_ADD_REG                0x0200
+#define SXGBE_MDIO_SCMD_DATA_REG       0x0204
+#define SXGBE_MDIO_CCMD_WADD_REG       0x0208
+#define SXGBE_MDIO_CCMD_WDATA_REG      0x020C
+#define SXGBE_MDIO_CSCAN_PORT_REG      0x0210
+#define SXGBE_MDIO_INT_STATUS_REG      0x0214
+#define SXGBE_MDIO_INT_ENABLE_REG      0x0218
+#define SXGBE_MDIO_PORT_CONDCON_REG    0x021C
+#define SXGBE_MDIO_CLAUSE22_PORT_REG   0x0220
+
+/* port specific, addr = 0-3 */
+#define SXGBE_MDIO_DEV_BASE_REG                0x0230
+#define SXGBE_MDIO_PORT_DEV_REG(addr)                  \
+       (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x0)
+#define SXGBE_MDIO_PORT_LSTATUS_REG(addr)              \
+       (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x4)
+#define SXGBE_MDIO_PORT_ALIVE_REG(addr)                        \
+       (SXGBE_MDIO_DEV_BASE_REG + (0x10 * addr) + 0x8)
+
+#define SXGBE_CORE_GPIO_CTL_REG                0x0278
+#define SXGBE_CORE_GPIO_STATUS_REG     0x027C
+
+/* Address registers for filtering */
+#define SXGBE_CORE_ADD_BASE_REG                0x0300
+
+/* addr = 0-31 */
+#define SXGBE_CORE_ADD_HIGHOFFSET(addr)                        \
+       (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x0)
+#define SXGBE_CORE_ADD_LOWOFFSET(addr)                 \
+       (SXGBE_CORE_ADD_BASE_REG + (0x8 * addr) + 0x4)
+
+/* SXGBE MMC registers */
+#define SXGBE_MMC_CTL_REG              0x0800
+#define SXGBE_MMC_RXINT_STATUS_REG     0x0804
+#define SXGBE_MMC_TXINT_STATUS_REG     0x0808
+#define SXGBE_MMC_RXINT_ENABLE_REG     0x080C
+#define SXGBE_MMC_TXINT_ENABLE_REG     0x0810
+
+/* TX specific counters */
+#define SXGBE_MMC_TXOCTETHI_GBCNT_REG  0x0814
+#define SXGBE_MMC_TXOCTETLO_GBCNT_REG  0x0818
+#define SXGBE_MMC_TXFRAMELO_GBCNT_REG  0x081C
+#define SXGBE_MMC_TXFRAMEHI_GBCNT_REG  0x0820
+#define SXGBE_MMC_TXBROADLO_GCNT_REG   0x0824
+#define SXGBE_MMC_TXBROADHI_GCNT_REG   0x0828
+#define SXGBE_MMC_TXMULTILO_GCNT_REG   0x082C
+#define SXGBE_MMC_TXMULTIHI_GCNT_REG   0x0830
+#define SXGBE_MMC_TX64LO_GBCNT_REG     0x0834
+#define SXGBE_MMC_TX64HI_GBCNT_REG     0x0838
+#define SXGBE_MMC_TX65TO127LO_GBCNT_REG                0x083C
+#define SXGBE_MMC_TX65TO127HI_GBCNT_REG                0x0840
+#define SXGBE_MMC_TX128TO255LO_GBCNT_REG       0x0844
+#define SXGBE_MMC_TX128TO255HI_GBCNT_REG       0x0848
+#define SXGBE_MMC_TX256TO511LO_GBCNT_REG       0x084C
+#define SXGBE_MMC_TX256TO511HI_GBCNT_REG       0x0850
+#define SXGBE_MMC_TX512TO1023LO_GBCNT_REG      0x0854
+#define SXGBE_MMC_TX512TO1023HI_GBCNT_REG      0x0858
+#define SXGBE_MMC_TX1023TOMAXLO_GBCNT_REG      0x085C
+#define SXGBE_MMC_TX1023TOMAXHI_GBCNT_REG      0x0860
+#define SXGBE_MMC_TXUNICASTLO_GBCNT_REG                0x0864
+#define SXGBE_MMC_TXUNICASTHI_GBCNT_REG                0x0868
+#define SXGBE_MMC_TXMULTILO_GBCNT_REG          0x086C
+#define SXGBE_MMC_TXMULTIHI_GBCNT_REG          0x0870
+#define SXGBE_MMC_TXBROADLO_GBCNT_REG          0x0874
+#define SXGBE_MMC_TXBROADHI_GBCNT_REG          0x0878
+#define SXGBE_MMC_TXUFLWLO_GBCNT_REG           0x087C
+#define SXGBE_MMC_TXUFLWHI_GBCNT_REG           0x0880
+#define SXGBE_MMC_TXOCTETLO_GCNT_REG   0x0884
+#define SXGBE_MMC_TXOCTETHI_GCNT_REG   0x0888
+#define SXGBE_MMC_TXFRAMELO_GCNT_REG   0x088C
+#define SXGBE_MMC_TXFRAMEHI_GCNT_REG   0x0890
+#define SXGBE_MMC_TXPAUSELO_CNT_REG    0x0894
+#define SXGBE_MMC_TXPAUSEHI_CNT_REG    0x0898
+#define SXGBE_MMC_TXVLANLO_GCNT_REG    0x089C
+#define SXGBE_MMC_TXVLANHI_GCNT_REG    0x08A0
+
+/* RX specific counters */
+#define SXGBE_MMC_RXFRAMELO_GBCNT_REG  0x0900
+#define SXGBE_MMC_RXFRAMEHI_GBCNT_REG  0x0904
+#define SXGBE_MMC_RXOCTETLO_GBCNT_REG  0x0908
+#define SXGBE_MMC_RXOCTETHI_GBCNT_REG  0x090C
+#define SXGBE_MMC_RXOCTETLO_GCNT_REG   0x0910
+#define SXGBE_MMC_RXOCTETHI_GCNT_REG   0x0914
+#define SXGBE_MMC_RXBROADLO_GCNT_REG   0x0918
+#define SXGBE_MMC_RXBROADHI_GCNT_REG   0x091C
+#define SXGBE_MMC_RXMULTILO_GCNT_REG   0x0920
+#define SXGBE_MMC_RXMULTIHI_GCNT_REG   0x0924
+#define SXGBE_MMC_RXCRCERRLO_REG       0x0928
+#define SXGBE_MMC_RXCRCERRHI_REG       0x092C
+#define SXGBE_MMC_RXSHORT64BFRAME_ERR_REG      0x0930
+#define SXGBE_MMC_RXJABBERERR_REG              0x0934
+#define SXGBE_MMC_RXSHORT64BFRAME_COR_REG      0x0938
+#define SXGBE_MMC_RXOVERMAXFRAME_COR_REG       0x093C
+#define SXGBE_MMC_RX64LO_GBCNT_REG             0x0940
+#define SXGBE_MMC_RX64HI_GBCNT_REG             0x0944
+#define SXGBE_MMC_RX65TO127LO_GBCNT_REG                0x0948
+#define SXGBE_MMC_RX65TO127HI_GBCNT_REG                0x094C
+#define SXGBE_MMC_RX128TO255LO_GBCNT_REG       0x0950
+#define SXGBE_MMC_RX128TO255HI_GBCNT_REG       0x0954
+#define SXGBE_MMC_RX256TO511LO_GBCNT_REG       0x0958
+#define SXGBE_MMC_RX256TO511HI_GBCNT_REG       0x095C
+#define SXGBE_MMC_RX512TO1023LO_GBCNT_REG      0x0960
+#define SXGBE_MMC_RX512TO1023HI_GBCNT_REG      0x0964
+#define SXGBE_MMC_RX1023TOMAXLO_GBCNT_REG      0x0968
+#define SXGBE_MMC_RX1023TOMAXHI_GBCNT_REG      0x096C
+#define SXGBE_MMC_RXUNICASTLO_GCNT_REG         0x0970
+#define SXGBE_MMC_RXUNICASTHI_GCNT_REG         0x0974
+#define SXGBE_MMC_RXLENERRLO_REG               0x0978
+#define SXGBE_MMC_RXLENERRHI_REG               0x097C
+#define SXGBE_MMC_RXOUTOFRANGETYPELO_REG       0x0980
+#define SXGBE_MMC_RXOUTOFRANGETYPEHI_REG       0x0984
+#define SXGBE_MMC_RXPAUSELO_CNT_REG            0x0988
+#define SXGBE_MMC_RXPAUSEHI_CNT_REG            0x098C
+#define SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG   0x0990
+#define SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG   0x0994
+#define SXGBE_MMC_RXVLANLO_GBCNT_REG           0x0998
+#define SXGBE_MMC_RXVLANHI_GBCNT_REG           0x099C
+#define SXGBE_MMC_RXWATCHDOG_ERR_REG           0x09A0
+
+/* L3/L4 function registers */
+#define SXGBE_CORE_L34_ADDCTL_REG      0x0C00
+#define SXGBE_CORE_L34_DATA_REG                0x0C04
+
+/* ARP registers */
+#define SXGBE_CORE_ARP_ADD_REG         0x0C10
+
+/* RSS registers */
+#define SXGBE_CORE_RSS_CTL_REG         0x0C80
+#define SXGBE_CORE_RSS_ADD_REG         0x0C88
+#define SXGBE_CORE_RSS_DATA_REG                0x0C8C
+
+/* RSS control register bits */
+#define SXGBE_CORE_RSS_CTL_UDP4TE      BIT(3)
+#define SXGBE_CORE_RSS_CTL_TCP4TE      BIT(2)
+#define SXGBE_CORE_RSS_CTL_IP2TE       BIT(1)
+#define SXGBE_CORE_RSS_CTL_RSSE                BIT(0)
+
+/* IEEE 1588 registers */
+#define SXGBE_CORE_TSTAMP_CTL_REG      0x0D00
+#define SXGBE_CORE_SUBSEC_INC_REG      0x0D04
+#define SXGBE_CORE_SYSTIME_SEC_REG     0x0D0C
+#define SXGBE_CORE_SYSTIME_NSEC_REG    0x0D10
+#define SXGBE_CORE_SYSTIME_SECUP_REG   0x0D14
+#define SXGBE_CORE_TSTAMP_ADD_REG      0x0D18
+#define SXGBE_CORE_SYSTIME_HWORD_REG   0x0D1C
+#define SXGBE_CORE_TSTAMP_STATUS_REG   0x0D20
+#define SXGBE_CORE_TXTIME_STATUSNSEC_REG 0x0D30
+#define SXGBE_CORE_TXTIME_STATUSSEC_REG        0x0D34
+
+/* Auxiliary registers */
+#define SXGBE_CORE_AUX_CTL_REG                  0x0D40
+#define SXGBE_CORE_AUX_TSTAMP_NSEC_REG          0x0D48
+#define SXGBE_CORE_AUX_TSTAMP_SEC_REG           0x0D4C
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_REG        0x0D50
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_REG        0x0D54
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_NSEC_REG   0x0D58
+#define SXGBE_CORE_AUX_TSTAMP_INGCOR_SUBNSEC_REG 0x0D5C
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_NSEC_REG   0x0D60
+#define SXGBE_CORE_AUX_TSTAMP_ENGCOR_SUBNSEC_REG 0x0D64
+
+/* PPS registers */
+#define SXGBE_CORE_PPS_CTL_REG         0x0D70
+#define SXGBE_CORE_PPS_BASE                    0x0D80
+
+/* addr = 0 - 3 */
+#define SXGBE_CORE_PPS_TTIME_SEC_REG(addr)             \
+       (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x0)
+#define SXGBE_CORE_PPS_TTIME_NSEC_REG(addr)            \
+       (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x4)
+#define SXGBE_CORE_PPS_INTERVAL_REG(addr)              \
+       (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0x8)
+#define SXGBE_CORE_PPS_WIDTH_REG(addr)                 \
+       (SXGBE_CORE_PPS_BASE + (0x10 * addr) + 0xC)
+#define SXGBE_CORE_PTO_CTL_REG         0x0DC0
+#define SXGBE_CORE_SRCPORT_ITY0_REG    0x0DC4
+#define SXGBE_CORE_SRCPORT_ITY1_REG    0x0DC8
+#define SXGBE_CORE_SRCPORT_ITY2_REG    0x0DCC
+#define SXGBE_CORE_LOGMSG_LEVEL_REG    0x0DD0
+
+/* SXGBE MTL Registers */
+#define SXGBE_MTL_BASE_REG             0x1000
+#define SXGBE_MTL_OP_MODE_REG          (SXGBE_MTL_BASE_REG + 0x0000)
+#define SXGBE_MTL_DEBUG_CTL_REG                (SXGBE_MTL_BASE_REG + 0x0008)
+#define SXGBE_MTL_DEBUG_STATUS_REG     (SXGBE_MTL_BASE_REG + 0x000C)
+#define SXGBE_MTL_FIFO_DEBUGDATA_REG   (SXGBE_MTL_BASE_REG + 0x0010)
+#define SXGBE_MTL_INT_STATUS_REG       (SXGBE_MTL_BASE_REG + 0x0020)
+#define SXGBE_MTL_RXQ_DMAMAP0_REG      (SXGBE_MTL_BASE_REG + 0x0030)
+#define SXGBE_MTL_RXQ_DMAMAP1_REG      (SXGBE_MTL_BASE_REG + 0x0034)
+#define SXGBE_MTL_RXQ_DMAMAP2_REG      (SXGBE_MTL_BASE_REG + 0x0038)
+#define SXGBE_MTL_TX_PRTYMAP0_REG      (SXGBE_MTL_BASE_REG + 0x0040)
+#define SXGBE_MTL_TX_PRTYMAP1_REG      (SXGBE_MTL_BASE_REG + 0x0044)
+
+/* TC/Queue registers, qnum=0-15 */
+#define SXGBE_MTL_TC_TXBASE_REG                (SXGBE_MTL_BASE_REG + 0x0100)
+#define SXGBE_MTL_TXQ_OPMODE_REG(qnum)                         \
+       (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x00)
+#define SXGBE_MTL_SFMODE               BIT(1)
+#define SXGBE_MTL_FIFO_LSHIFT          16
+#define SXGBE_MTL_ENABLE_QUEUE         0x00000008
+#define SXGBE_MTL_TXQ_UNDERFLOW_REG(qnum)                      \
+       (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x04)
+#define SXGBE_MTL_TXQ_DEBUG_REG(qnum)                          \
+       (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x08)
+#define SXGBE_MTL_TXQ_ETSCTL_REG(qnum)                         \
+       (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x10)
+#define SXGBE_MTL_TXQ_ETSSTATUS_REG(qnum)                      \
+       (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x14)
+#define SXGBE_MTL_TXQ_QUANTWEIGHT_REG(qnum)                    \
+       (SXGBE_MTL_TC_TXBASE_REG + (qnum * 0x80) + 0x18)
+
+#define SXGBE_MTL_TC_RXBASE_REG                0x1140
+#define SXGBE_RX_MTL_SFMODE            BIT(5)
+#define SXGBE_MTL_RXQ_OPMODE_REG(qnum)                         \
+       (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x00)
+#define SXGBE_MTL_RXQ_MISPKTOVERFLOW_REG(qnum)                 \
+       (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x04)
+#define SXGBE_MTL_RXQ_DEBUG_REG(qnum)                          \
+       (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x08)
+#define SXGBE_MTL_RXQ_CTL_REG(qnum)                            \
+       (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x0C)
+#define SXGBE_MTL_RXQ_INTENABLE_REG(qnum)                      \
+       (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x30)
+#define SXGBE_MTL_RXQ_INTSTATUS_REG(qnum)                      \
+       (SXGBE_MTL_TC_RXBASE_REG + (qnum * 0x80) + 0x34)
+
+/* SXGBE DMA Registers */
+#define SXGBE_DMA_BASE_REG             0x3000
+#define SXGBE_DMA_MODE_REG             (SXGBE_DMA_BASE_REG + 0x0000)
+#define SXGBE_DMA_SOFT_RESET           BIT(0)
+#define SXGBE_DMA_SYSBUS_MODE_REG      (SXGBE_DMA_BASE_REG + 0x0004)
+#define SXGBE_DMA_AXI_UNDEF_BURST      BIT(0)
+#define SXGBE_DMA_ENHACE_ADDR_MODE     BIT(11)
+#define SXGBE_DMA_INT_STATUS_REG       (SXGBE_DMA_BASE_REG + 0x0008)
+#define SXGBE_DMA_AXI_ARCACHECTL_REG   (SXGBE_DMA_BASE_REG + 0x0010)
+#define SXGBE_DMA_AXI_AWCACHECTL_REG   (SXGBE_DMA_BASE_REG + 0x0018)
+#define SXGBE_DMA_DEBUG_STATUS0_REG    (SXGBE_DMA_BASE_REG + 0x0020)
+#define SXGBE_DMA_DEBUG_STATUS1_REG    (SXGBE_DMA_BASE_REG + 0x0024)
+#define SXGBE_DMA_DEBUG_STATUS2_REG    (SXGBE_DMA_BASE_REG + 0x0028)
+#define SXGBE_DMA_DEBUG_STATUS3_REG    (SXGBE_DMA_BASE_REG + 0x002C)
+#define SXGBE_DMA_DEBUG_STATUS4_REG    (SXGBE_DMA_BASE_REG + 0x0030)
+#define SXGBE_DMA_DEBUG_STATUS5_REG    (SXGBE_DMA_BASE_REG + 0x0034)
+
+/* Channel Registers, cha_num = 0-15 */
+#define SXGBE_DMA_CHA_BASE_REG                 \
+       (SXGBE_DMA_BASE_REG + 0x0100)
+#define SXGBE_DMA_CHA_CTL_REG(cha_num)                         \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x00)
+#define SXGBE_DMA_PBL_X8MODE                   BIT(16)
+#define SXGBE_DMA_CHA_TXCTL_TSE_ENABLE         BIT(12)
+#define SXGBE_DMA_CHA_TXCTL_REG(cha_num)                       \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x04)
+#define SXGBE_DMA_CHA_RXCTL_REG(cha_num)                       \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x08)
+#define SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num)                 \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x10)
+#define SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num)                 \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x14)
+#define SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num)                 \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x18)
+#define SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num)                 \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x1C)
+#define SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x24)
+#define SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x2C)
+#define SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x30)
+#define SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x34)
+#define SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num)                  \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x38)
+#define SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x3C)
+#define SXGBE_DMA_CHA_TXDESC_CURADDLO_REG(cha_num)             \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x44)
+#define SXGBE_DMA_CHA_RXDESC_CURADDLO_REG(cha_num)             \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x4C)
+#define SXGBE_DMA_CHA_CURTXBUF_ADDHI_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x50)
+#define SXGBE_DMA_CHA_CURTXBUF_ADDLO_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x54)
+#define SXGBE_DMA_CHA_CURRXBUF_ADDHI_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x58)
+#define SXGBE_DMA_CHA_CURRXBUF_ADDLO_REG(cha_num)              \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x5C)
+#define SXGBE_DMA_CHA_STATUS_REG(cha_num)                      \
+       (SXGBE_DMA_CHA_BASE_REG + (cha_num * 0x80) + 0x60)
+
+/* TX DMA control register specific */
+#define SXGBE_TX_START_DMA     BIT(0)
+
+/* sxgbe tx configuration register bitfields */
+#define SXGBE_SPEED_10G                0x0
+#define SXGBE_SPEED_2_5G       0x1
+#define SXGBE_SPEED_1G         0x2
+#define SXGBE_SPEED_LSHIFT     29
+
+#define SXGBE_TX_ENABLE                BIT(0)
+#define SXGBE_TX_DISDIC_ALGO   BIT(1)
+#define SXGBE_TX_JABBER_DISABLE        BIT(16)
+
+/* sxgbe rx configuration register bitfields */
+#define SXGBE_RX_ENABLE                BIT(0)
+#define SXGBE_RX_ACS_ENABLE            BIT(1)
+#define SXGBE_RX_WATCHDOG_DISABLE      BIT(7)
+#define SXGBE_RX_JUMBPKT_ENABLE                BIT(8)
+#define SXGBE_RX_CSUMOFFLOAD_ENABLE    BIT(9)
+#define SXGBE_RX_LOOPBACK_ENABLE       BIT(10)
+#define SXGBE_RX_ARPOFFLOAD_ENABLE     BIT(31)
+
+/* sxgbe vlan Tag Register bitfields */
+#define SXGBE_VLAN_SVLAN_ENABLE                BIT(18)
+#define SXGBE_VLAN_DOUBLEVLAN_ENABLE   BIT(26)
+#define SXGBE_VLAN_INNERVLAN_ENABLE    BIT(27)
+
+/* XMAC VLAN Tag Inclusion Register(0x0060) bitfields
+ * Below fields same for  Inner VLAN Tag Inclusion
+ * Register(0x0064) register
+ */
+enum vlan_tag_ctl_tx {
+       VLAN_TAG_TX_NOP,
+       VLAN_TAG_TX_DEL,
+       VLAN_TAG_TX_INSERT,
+       VLAN_TAG_TX_REPLACE
+};
+#define SXGBE_VLAN_PRTY_CTL    BIT(18)
+#define SXGBE_VLAN_CSVL_CTL    BIT(19)
+
+/* SXGBE TX Q Flow Control Register bitfields */
+#define SXGBE_TX_FLOW_CTL_FCB  BIT(0)
+#define SXGBE_TX_FLOW_CTL_TFB  BIT(1)
+
+/* SXGBE RX Q Flow Control Register bitfields */
+#define SXGBE_RX_FLOW_CTL_ENABLE       BIT(0)
+#define SXGBE_RX_UNICAST_DETECT                BIT(1)
+#define SXGBE_RX_PRTYFLOW_CTL_ENABLE   BIT(8)
+
+/* sxgbe rx Q control0 register bitfields */
+#define SXGBE_RX_Q_ENABLE      0x2
+
+/* SXGBE hardware features bitfield specific */
+/* Capability Register 0 */
+#define SXGBE_HW_FEAT_GMII(cap)                        ((cap & 0x00000002) >> 1)
+#define SXGBE_HW_FEAT_VLAN_HASH_FILTER(cap)    ((cap & 0x00000010) >> 4)
+#define SXGBE_HW_FEAT_SMA(cap)                 ((cap & 0x00000020) >> 5)
+#define SXGBE_HW_FEAT_PMT_TEMOTE_WOP(cap)      ((cap & 0x00000040) >> 6)
+#define SXGBE_HW_FEAT_PMT_MAGIC_PKT(cap)       ((cap & 0x00000080) >> 7)
+#define SXGBE_HW_FEAT_RMON(cap)                        ((cap & 0x00000100) >> 8)
+#define SXGBE_HW_FEAT_ARP_OFFLOAD(cap)         ((cap & 0x00000200) >> 9)
+#define SXGBE_HW_FEAT_IEEE1500_2008(cap)       ((cap & 0x00001000) >> 12)
+#define SXGBE_HW_FEAT_EEE(cap)                 ((cap & 0x00002000) >> 13)
+#define SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap)     ((cap & 0x00004000) >> 14)
+#define SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap)     ((cap & 0x00010000) >> 16)
+#define SXGBE_HW_FEAT_MACADDR_COUNT(cap)       ((cap & 0x007C0000) >> 18)
+#define SXGBE_HW_FEAT_TSTMAP_SRC(cap)          ((cap & 0x06000000) >> 25)
+#define SXGBE_HW_FEAT_SRCADDR_VLAN(cap)                ((cap & 0x08000000) >> 27)
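+
+/* Each macro extracts one field from the corresponding HW feature word,
+ * e.g. (a sketch, assuming the usual read path):
+ *     cap0 = readl(ioaddr + SXGBE_CORE_HW_FEA_REG(0));
+ *     tx_csum = SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap0);
+ */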
+
+/* Capability Register 1 */
+#define SXGBE_HW_FEAT_RX_FIFO_SIZE(cap)                ((cap & 0x0000001F))
+#define SXGBE_HW_FEAT_TX_FIFO_SIZE(cap)                ((cap & 0x000007C0) >> 6)
+#define SXGBE_HW_FEAT_IEEE1588_HWORD(cap)      ((cap & 0x00002000) >> 13)
+#define SXGBE_HW_FEAT_DCB(cap)                 ((cap & 0x00010000) >> 16)
+#define SXGBE_HW_FEAT_SPLIT_HDR(cap)           ((cap & 0x00020000) >> 17)
+#define SXGBE_HW_FEAT_TSO(cap)                 ((cap & 0x00040000) >> 18)
+#define SXGBE_HW_FEAT_DEBUG_MEM_IFACE(cap)     ((cap & 0x00080000) >> 19)
+#define SXGBE_HW_FEAT_RSS(cap)                 ((cap & 0x00100000) >> 20)
+#define SXGBE_HW_FEAT_HASH_TABLE_SIZE(cap)     ((cap & 0x03000000) >> 24)
+#define SXGBE_HW_FEAT_L3L4_FILTER_NUM(cap)     ((cap & 0x78000000) >> 27)
+
+/* Capability Register 2 */
+#define SXGBE_HW_FEAT_RX_MTL_QUEUES(cap)       ((cap & 0x0000000F))
+#define SXGBE_HW_FEAT_TX_MTL_QUEUES(cap)       ((cap & 0x000003C0) >> 6)
+#define SXGBE_HW_FEAT_RX_DMA_CHANNELS(cap)     ((cap & 0x0000F000) >> 12)
+#define SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap)     ((cap & 0x003C0000) >> 18)
+#define SXGBE_HW_FEAT_PPS_OUTPUTS(cap)         ((cap & 0x07000000) >> 24)
+#define SXGBE_HW_FEAT_AUX_SNAPSHOTS(cap)       ((cap & 0x70000000) >> 28)
+
+/* DMA channel interrupt enable specific */
+/* DMA Normal interrupt */
+#define SXGBE_DMA_INT_ENA_NIE  BIT(16) /* Normal Summary */
+#define SXGBE_DMA_INT_ENA_TIE  BIT(0)  /* Transmit Interrupt */
+#define SXGBE_DMA_INT_ENA_TUE  BIT(2)  /* Transmit Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RIE  BIT(6)  /* Receive Interrupt */
+
+#define SXGBE_DMA_INT_NORMAL                                   \
+       (SXGBE_DMA_INT_ENA_NIE | SXGBE_DMA_INT_ENA_RIE |        \
+        SXGBE_DMA_INT_ENA_TIE | SXGBE_DMA_INT_ENA_TUE)
+
+/* DMA Abnormal interrupt */
+#define SXGBE_DMA_INT_ENA_AIE  BIT(15) /* Abnormal Summary */
+#define SXGBE_DMA_INT_ENA_TSE  BIT(1)  /* Transmit Stopped */
+#define SXGBE_DMA_INT_ENA_RUE  BIT(7)  /* Receive Buffer Unavailable */
+#define SXGBE_DMA_INT_ENA_RSE  BIT(8)  /* Receive Stopped */
+#define SXGBE_DMA_INT_ENA_FBE  BIT(12) /* Fatal Bus Error */
+#define SXGBE_DMA_INT_ENA_CDEE BIT(13) /* Context Descriptor Error */
+
+#define SXGBE_DMA_INT_ABNORMAL                                 \
+       (SXGBE_DMA_INT_ENA_AIE | SXGBE_DMA_INT_ENA_TSE |        \
+        SXGBE_DMA_INT_ENA_RUE | SXGBE_DMA_INT_ENA_RSE |        \
+        SXGBE_DMA_INT_ENA_FBE | SXGBE_DMA_INT_ENA_CDEE)
+
+#define SXGBE_DMA_ENA_INT      (SXGBE_DMA_INT_NORMAL | SXGBE_DMA_INT_ABNORMAL)
+
+/* DMA channel interrupt status specific */
+#define        SXGBE_DMA_INT_STATUS_REB2       BIT(21)
+#define        SXGBE_DMA_INT_STATUS_REB1       BIT(20)
+#define        SXGBE_DMA_INT_STATUS_REB0       BIT(19)
+#define        SXGBE_DMA_INT_STATUS_TEB2       BIT(18)
+#define        SXGBE_DMA_INT_STATUS_TEB1       BIT(17)
+#define        SXGBE_DMA_INT_STATUS_TEB0       BIT(16)
+#define        SXGBE_DMA_INT_STATUS_NIS        BIT(15)
+#define SXGBE_DMA_INT_STATUS_AIS       BIT(14)
+#define SXGBE_DMA_INT_STATUS_CTXTERR   BIT(13)
+#define SXGBE_DMA_INT_STATUS_FBE       BIT(12)
+#define SXGBE_DMA_INT_STATUS_RPS       BIT(8)
+#define SXGBE_DMA_INT_STATUS_RBU       BIT(7)
+#define SXGBE_DMA_INT_STATUS_RI                BIT(6)
+#define SXGBE_DMA_INT_STATUS_TBU       BIT(2)
+#define SXGBE_DMA_INT_STATUS_TPS       BIT(1)
+#define SXGBE_DMA_INT_STATUS_TI                BIT(0)
+
+#endif /* __SXGBE_REGMAP_H__ */
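
As a usage sketch only, the SXGBE_HW_FEAT_* capability macros defined above would typically be applied to the raw hardware-feature register values read from the MAC. The struct and function below are illustrative assumptions, not code from this patch; only the macros themselves come from the header.

    /* Hypothetical decode of capability registers 0 and 2.  Assumes the
     * regmap header above and <linux/types.h> are included.
     */
    struct example_hw_features {
            unsigned int tx_csum_offload;
            unsigned int rx_csum_offload;
            unsigned int rx_mtl_queues;
            unsigned int tx_dma_channels;
    };

    static void example_parse_hw_features(u32 cap0, u32 cap2,
                                           struct example_hw_features *f)
    {
            f->tx_csum_offload = SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(cap0);
            f->rx_csum_offload = SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(cap0);
            f->rx_mtl_queues   = SXGBE_HW_FEAT_RX_MTL_QUEUES(cap2);
            f->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(cap2);
    }
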
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
new file mode 100644 (file)
index 0000000..51c3219
--- /dev/null
@@ -0,0 +1,91 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include "sxgbe_common.h"
+#include "sxgbe_xpcs.h"
+
+static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
+{
+       u32 value;
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+       value = readl(priv->ioaddr + XPCS_OFFSET + reg);
+
+       return value;
+}
+
+static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
+{
+       struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+       writel(data, priv->ioaddr + XPCS_OFFSET + reg);
+
+       return 0;
+}
+
+int sxgbe_xpcs_init(struct net_device *ndev)
+{
+       u32 value;
+
+       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+       /* 10G XAUI mode */
+       sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
+       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+       do {
+               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
+
+       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+       do {
+               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+       return 0;
+}
+
+int sxgbe_xpcs_init_1G(struct net_device *ndev)
+{
+       int value;
+
+       /* 10GBASE-X PCS (1G) mode */
+       sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
+       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
+       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
+
+       value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
+       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
+       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
+
+       do {
+               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
+       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
+
+       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
+       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
+
+       /* Auto Negotiation clause 37 enable */
+       value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
+       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
+
+       return 0;
+}
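
A minimal, hypothetical call site for the two init paths above; the speed check and wrapper are assumptions for illustration, not code from this patch.

    /* Illustrative only: pick the XAUI (10G) or 1G/clause-37 init path
     * based on an already-known link speed.  SPEED_10000 comes from
     * <linux/ethtool.h>, pulled in via <linux/phy.h>.
     */
    static int example_xpcs_configure(struct net_device *ndev, int speed)
    {
            if (speed == SPEED_10000)
                    return sxgbe_xpcs_init(ndev);   /* XAUI mode */

            return sxgbe_xpcs_init_1G(ndev);        /* 1G + clause 37 AN */
    }
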
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
new file mode 100644 (file)
index 0000000..6b26a50
--- /dev/null
@@ -0,0 +1,38 @@
+/* 10G controller driver for Samsung SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Byungho An <bh74.an@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_XPCS_H__
+#define __SXGBE_XPCS_H__
+
+/* XPCS Registers */
+#define XPCS_OFFSET                    0x1A060000
+#define SR_PCS_MMD_CONTROL1            0x030000
+#define SR_PCS_CONTROL2                        0x030007
+#define VR_PCS_MMD_XAUI_MODE_CONTROL   0x038004
+#define VR_PCS_MMD_DIGITAL_STATUS      0x038010
+#define SR_MII_MMD_CONTROL             0x1F0000
+#define SR_MII_MMD_AN_ADV              0x1F0004
+#define SR_MII_MMD_AN_LINK_PARTNER_BA  0x1F0005
+#define VR_MII_MMD_AN_CONTROL          0x1F8001
+#define VR_MII_MMD_AN_INT_STATUS       0x1F8002
+
+#define XPCS_QSEQ_STATE_STABLE         0x10
+#define XPCS_QSEQ_STATE_MPLLOFF                0x1c
+#define XPCS_TYPE_SEL_R                        0x00
+#define XPCS_TYPE_SEL_X                        0x01
+#define XPCS_TYPE_SEL_W                        0x02
+#define XPCS_XAUI_MODE                 0x00
+#define XPCS_RXAUI_MODE                        0x01
+
+int sxgbe_xpcs_init(struct net_device *ndev);
+int sxgbe_xpcs_init_1G(struct net_device *ndev);
+
+#endif /* __SXGBE_XPCS_H__ */
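
The register constants above appear to pack a clause-45 style MMD number in the upper bits and the register address in the low 16 bits (0x03xxxx for the PCS MMD, 0x1Fxxxx for the vendor MII MMD). That reading is an inference from the values, not something the header states; the helpers below are a sketch under that assumption.

    /* Hypothetical helpers splitting an XPCS register constant into its
     * MMD number and in-MMD address, assuming the encoding inferred above.
     */
    static inline unsigned int example_xpcs_mmd(unsigned int reg)
    {
            return reg >> 16;       /* e.g. SR_PCS_MMD_CONTROL1 -> 0x03 */
    }

    static inline unsigned int example_xpcs_addr(unsigned int reg)
    {
            return reg & 0xffff;    /* e.g. VR_PCS_MMD_XAUI_MODE_CONTROL -> 0x8004 */
    }
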
index 174a92f5fe5133814055cff36c14c0374901cbaf..651626e133f9b788dacc47234822c6f90190799d 100644 (file)
@@ -162,8 +162,8 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
        if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
                return -EIO;
 
-       memcpy(mac_address,
-              MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+       ether_addr_copy(mac_address,
+                       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
        return 0;
 }
 
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
        struct efx_ef10_nic_data *nic_data;
        int i, rc;
 
-       /* We can have one VI for each 8K region.  However we need
-        * multiple TX queues per channel.
+       /* We can have one VI for each 8K region.  However, until we
+        * use TX option descriptors we need two TX queues per channel.
         */
        efx->max_channels =
                min_t(unsigned int,
@@ -1955,6 +1955,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
        int tx_descs = 0;
        int spent = 0;
 
+       if (quota <= 0)
+               return spent;
+
        read_ptr = channel->eventq_read_ptr;
 
        for (;;) {
@@ -3145,12 +3148,10 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
                table->dev_uc_count = -1;
        } else {
                table->dev_uc_count = 1 + netdev_uc_count(net_dev);
-               memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
-                      ETH_ALEN);
+               ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
                i = 1;
                netdev_for_each_uc_addr(uc, net_dev) {
-                       memcpy(table->dev_uc_list[i].addr,
-                              uc->addr, ETH_ALEN);
+                       ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
                        i++;
                }
        }
@@ -3162,8 +3163,7 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
                eth_broadcast_addr(table->dev_mc_list[0].addr);
                i = 1;
                netdev_for_each_mc_addr(mc, net_dev) {
-                       memcpy(table->dev_mc_list[i].addr,
-                              mc->addr, ETH_ALEN);
+                       ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
                        i++;
                }
        }
index 207ac9a1e3de989d0f5e4d588c27b6035eec31ec..62a55dde61d570ff78604962808cb2eff21ea273 100644 (file)
 #define        ESF_DZ_RX_KER_BUF_ADDR_LBN 0
 #define        ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
 
-/* RX_USER_DESC */
-#define        ESF_DZ_RX_USR_RESERVED_LBN 62
-#define        ESF_DZ_RX_USR_RESERVED_WIDTH 2
-#define        ESF_DZ_RX_USR_BYTE_CNT_LBN 48
-#define        ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
-#define        ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
-#define        ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define        ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define        ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define        ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define        ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define        ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
-#define        ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
-#define        ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
-#define        ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
-#define        ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
-#define        ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
-#define        ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
-#define        ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
-#define        ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
-#define        ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
-#define        ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define        ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define        ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define        ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
-
 /* TX_CSUM_TSTAMP_DESC */
 #define        ESF_DZ_TX_DESC_IS_OPT_LBN 63
 #define        ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
 #define        ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
 #define        ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
 
-/* TX_USER_DESC */
-#define        ESF_DZ_TX_USR_TYPE_LBN 63
-#define        ESF_DZ_TX_USR_TYPE_WIDTH 1
-#define        ESF_DZ_TX_USR_CONT_LBN 62
-#define        ESF_DZ_TX_USR_CONT_WIDTH 1
-#define        ESF_DZ_TX_USR_BYTE_CNT_LBN 48
-#define        ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
-#define        ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
-#define        ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define        ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define        ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define        ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define        ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define        ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
-#define        ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
-#define        ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
-#define        ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
-#define        ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
-#define        ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
-#define        ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
-#define        ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
-#define        ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
-#define        ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
-#define        ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define        ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define        ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define        ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define        ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
 /*************************************************************************/
 
 /* TX_DESC_UPD_REG: Transmit descriptor update register.
index 83d4643470213b7f2380b2fcaadb8404f6d08dc4..52589f6a8beb8d787185a708a050b9d1e28460c0 100644 (file)
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
                        goto fail;
        }
 
-       channel->n_rx_frm_trunc = 0;
-
        return 0;
 
 fail:
@@ -1014,7 +1012,7 @@ static int efx_probe_port(struct efx_nic *efx)
                return rc;
 
        /* Initialise MAC address to permanent address */
-       memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN);
+       ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
 
        return 0;
 }
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 
                for (i = 0; i < n_channels; i++)
                        xentries[i].entry = i;
-               rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
-               if (rc > 0) {
+               rc = pci_enable_msix_range(efx->pci_dev,
+                                          xentries, 1, n_channels);
+               if (rc < 0) {
+                       /* Fall back to single channel MSI */
+                       efx->interrupt_mode = EFX_INT_MODE_MSI;
+                       netif_err(efx, drv, efx->net_dev,
+                                 "could not enable MSI-X\n");
+               } else if (rc < n_channels) {
                        netif_err(efx, drv, efx->net_dev,
                                  "WARNING: Insufficient MSI-X vectors"
                                  " available (%d < %u).\n", rc, n_channels);
                        netif_err(efx, drv, efx->net_dev,
                                  "WARNING: Performance may be reduced.\n");
-                       EFX_BUG_ON_PARANOID(rc >= n_channels);
                        n_channels = rc;
-                       rc = pci_enable_msix(efx->pci_dev, xentries,
-                                            n_channels);
                }
 
-               if (rc == 0) {
+               if (rc > 0) {
                        efx->n_channels = n_channels;
                        if (n_channels > extra_channels)
                                n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                        for (i = 0; i < efx->n_channels; i++)
                                efx_get_channel(efx, i)->irq =
                                        xentries[i].vector;
-               } else {
-                       /* Fall back to single channel MSI */
-                       efx->interrupt_mode = EFX_INT_MODE_MSI;
-                       netif_err(efx, drv, efx->net_dev,
-                                 "could not enable MSI-X\n");
                }
        }
 
@@ -2115,7 +2111,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
-       char *new_addr = addr->sa_data;
+       u8 *new_addr = addr->sa_data;
 
        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
@@ -2124,7 +2120,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
                return -EADDRNOTAVAIL;
        }
 
-       memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+       ether_addr_copy(net_dev->dev_addr, new_addr);
        efx_sriov_mac_address_changed(efx);
 
        /* Reconfigure the MAC */
@@ -3273,6 +3269,6 @@ module_exit(efx_exit_module);
 
 MODULE_AUTHOR("Solarflare Communications and "
              "Michael Brown <mbrown@fensystems.co.uk>");
-MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_DESCRIPTION("Solarflare network driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, efx_pci_table);
index dbd7b78fe01c597d088d47ee62f566aadd544a41..99032581336f8297e735bfd21a6bb83c57d9ba4e 100644 (file)
@@ -14,7 +14,7 @@
 #include "net_driver.h"
 #include "filter.h"
 
-/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
 #define EFX_MEM_BAR 2
 
 /* TX */
index 229428915aa8d01dd40968629087ece21498605d..0de8b07c24c2bf39cd62a06021a42c217a3cba4c 100644 (file)
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
  * @test_index:                Starting index of the test
  * @strings:           Ethtool strings, or %NULL
  * @data:              Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries.  Return new test
+ * index.
  */
 static int efx_fill_loopback_test(struct efx_nic *efx,
                                  struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
  * @tests:             Efx self-test results structure, or %NULL
  * @strings:           Ethtool strings, or %NULL
  * @data:              Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
  */
 static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
                                       struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_self_tests *efx_tests;
-       int already_up;
+       bool already_up;
        int rc = -ENOMEM;
 
        efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                goto fail;
 
        if (efx->state != STATE_READY) {
-               rc = -EIO;
-               goto fail1;
+               rc = -EBUSY;
+               goto out;
        }
 
        netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "failed opening device.\n");
-                       goto fail1;
+                       goto out;
                }
        }
 
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
                   rc == 0 ? "passed" : "failed",
                   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
-fail1:
-       /* Fill ethtool results structures */
+out:
        efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
        kfree(efx_tests);
 fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
        pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
 }
 
-
 static void efx_ethtool_get_wol(struct net_device *net_dev,
                                struct ethtool_wolinfo *wol)
 {
@@ -720,7 +727,7 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
 }
 
 /* MAC address mask including only I/G bit */
-static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
 
 #define IP4_ADDR_FULL_MASK     ((__force __be32)~0)
 #define PORT_FULL_MASK         ((__force __be16)~0)
@@ -780,16 +787,16 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
                rule->flow_type = ETHER_FLOW;
                if (spec.match_flags &
                    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
-                       memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+                       ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
                        if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
-                               memset(mac_mask->h_dest, ~0, ETH_ALEN);
+                               eth_broadcast_addr(mac_mask->h_dest);
                        else
-                               memcpy(mac_mask->h_dest, mac_addr_ig_mask,
-                                      ETH_ALEN);
+                               ether_addr_copy(mac_mask->h_dest,
+                                               mac_addr_ig_mask);
                }
                if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
-                       memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
-                       memset(mac_mask->h_source, ~0, ETH_ALEN);
+                       ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+                       eth_broadcast_addr(mac_mask->h_source);
                }
                if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
                        mac_entry->h_proto = spec.ether_type;
@@ -961,13 +968,13 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
                                spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
                        else
                                return -EINVAL;
-                       memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
+                       ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
                }
                if (!is_zero_ether_addr(mac_mask->h_source)) {
                        if (!is_broadcast_ether_addr(mac_mask->h_source))
                                return -EINVAL;
                        spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
-                       memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+                       ether_addr_copy(spec.rem_mac, mac_entry->h_source);
                }
                if (mac_mask->h_proto) {
                        if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
index 18d6f761f4d06b18cd060e66d8625a4aa06aa6c2..8ec20b713cc610422781facd45c3c1a03c664164 100644 (file)
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
        efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
 }
 
-
 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
        struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
                efx_schedule_channel_irq(efx_get_channel(efx, 1));
        return IRQ_HANDLED;
 }
+
 /**************************************************************************
  *
  * RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
        case 100:   link_speed = 1; break;
        default:    link_speed = 0; break;
        }
+
        /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
         * as advertised.  Disable to ensure packets are not
         * indefinitely held and TX queue can be flushed at any point
@@ -2182,7 +2183,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
        }
 
        /* Read the MAC addresses */
-       memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
+       ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
 
        netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
                  efx->phy_type, efx->mdio.prtad);
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
        .mcdi_max_ver = -1,
        .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 };
-
index f72489a105cadcf23a7e4d253c3e9a9e2a81d56f..a08761360cdf526caf33051142149c2962a17f6c 100644 (file)
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
  */
 void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
 {
-
        struct efx_tx_buffer *buffer;
        efx_qword_t *txd;
        unsigned write_ptr;
@@ -1249,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
        int tx_packets = 0;
        int spent = 0;
 
+       if (budget <= 0)
+               return spent;
+
        read_ptr = channel->eventq_read_ptr;
 
        for (;;) {
@@ -1609,7 +1611,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-
 /* Setup RSS indirection table.
  * This maps from the hash value of the packet to RXQ
  */
index 3ef298d3c47e3b32078550231fccc46094cbef68..d0ed7f71ea7e25145bb4cf12395154efc587c3a0 100644 (file)
@@ -243,7 +243,7 @@ static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
        }
        if (addr != NULL) {
                spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
-               memcpy(spec->loc_mac, addr, ETH_ALEN);
+               ether_addr_copy(spec->loc_mac, addr);
        }
        return 0;
 }
index eb59abb57e8506b4fc9605cad9fcb460be0625a3..7bd4b14bf3b32f627457eb1c48f6231922838670 100644 (file)
@@ -1187,6 +1187,9 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
        int rc;
 
        BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+       /* we need __aligned(2) for ether_addr_copy */
+       BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
+       BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
 
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
@@ -1199,11 +1202,10 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
        }
 
        if (mac_address)
-               memcpy(mac_address,
-                      port_num ?
-                      MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
-                      MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
-                      ETH_ALEN);
+               ether_addr_copy(mac_address,
+                               port_num ?
+                               MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+                               MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
        if (fw_subtype_list) {
                for (i = 0;
                     i < MCDI_VAR_ARRAY_LEN(outlen,
@@ -1532,7 +1534,7 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
        MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
        MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
                       MC_CMD_FILTER_MODE_SIMPLE);
-       memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
+       ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
 
        rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
index 91d23252f8fae96d4ff4884d60b04a0cd5486750..e5fc4e1574b53f977e8509652aeb67e7404ca757 100644 (file)
@@ -854,8 +854,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
 
        BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
 
-       memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
-              efx->net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+                       efx->net_dev->dev_addr);
 
        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
                        EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
index af2b8c59a903e7cea3011a3a2be29658542d4d4e..8a400a0595eb8c30ec051aa1e06b3f13e6478eea 100644 (file)
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
        return &rx_queue->buffer[index];
 }
 
-
 /**
  * EFX_MAX_FRAME_LEN - calculate maximum frame length
  *
index 79226b19e3c40d072bf929046bd8b0b219a9ab1a..32d969e857f7befc79bf4a6f18cb153c350b374b 100644 (file)
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
        efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
        *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
 }
-
index d7a36829649a7eea1797057e0bb0e5773bc83c0e..6b861e3de4b0d0655879e5bb5740855d6b9c251d 100644 (file)
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
  * @evt_list: List of MC receive events awaiting packets
  * @evt_free_list: List of free events
  * @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @evt_overflow: Boolean indicating that event list has overflowed
  * @rx_evts: Instantiated events (on evt_list and evt_free_list)
  * @workwq: Work queue for processing pending PTP operations
  * @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
        struct list_head evt_list;
        struct list_head evt_free_list;
        spinlock_t evt_lock;
-       bool evt_overflow;
        struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
        struct workqueue_struct *workwq;
        struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
                return -EAGAIN;
        }
 
-       /* Convert the NIC time into kernel time. No correction is required-
-        * this time is the output of a firmware process.
-        */
-       mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
-                                         ptp->timeset[last_good].minor, 0);
-
-       /* Calculate delay from actual PPS to last_time */
-       delta = ktime_to_timespec(mc_time);
-       delta.tv_nsec +=
-               last_time->ts_real.tv_nsec -
-               (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
-
-       /* It is possible that the seconds rolled over between taking
+       /* Calculate delay from last good sync (host time) to last_time.
+        * It is possible that the seconds rolled over between taking
         * the start reading and the last value written by the host.  The
         * timescales are such that a gap of more than one second is never
-        * expected.
+        * expected.  delta is *not* normalised.
         */
        start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
        last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
-       if (start_sec != last_sec) {
-               if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
-                       netif_warn(efx, hw, efx->net_dev,
-                                  "PTP bad synchronisation seconds\n");
-                       return -EAGAIN;
-               } else {
-                       delta.tv_sec = 1;
-               }
-       } else {
-               delta.tv_sec = 0;
+       if (start_sec != last_sec &&
+           ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+               netif_warn(efx, hw, efx->net_dev,
+                          "PTP bad synchronisation seconds\n");
+               return -EAGAIN;
        }
+       delta.tv_sec = (last_sec - start_sec) & 1;
+       delta.tv_nsec =
+               last_time->ts_real.tv_nsec -
+               (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+       /* Convert the NIC time at last good sync into kernel time.
+        * No correction is required - this time is the output of a
+        * firmware process.
+        */
+       mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+                                         ptp->timeset[last_good].minor, 0);
+
+       /* Calculate delay from NIC top of second to last_time */
+       delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
 
+       /* Set PPS timestamp to match NIC top of second */
        ptp->host_time_pps = *last_time;
        pps_sub_ts(&ptp->host_time_pps, delta);
 
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
                        }
                }
        }
-       /* If the event overflow flag is set and the event list is now empty
-        * clear the flag to re-enable the overflow warning message.
-        */
-       if (ptp->evt_overflow && list_empty(&ptp->evt_list))
-               ptp->evt_overflow = false;
        spin_unlock_bh(&ptp->evt_lock);
 }
 
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
                        break;
                }
        }
-       /* If the event overflow flag is set and the event list is now empty
-        * clear the flag to re-enable the overflow warning message.
-        */
-       if (ptp->evt_overflow && list_empty(&ptp->evt_list))
-               ptp->evt_overflow = false;
        spin_unlock_bh(&ptp->evt_lock);
 
        return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
        list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
                list_move(cursor, &efx->ptp_data->evt_free_list);
        }
-       ptp->evt_overflow = false;
        spin_unlock_bh(&efx->ptp_data->evt_lock);
 
        return rc;
@@ -1208,6 +1194,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
+       .n_pins         = 0,
        .pps            = 1,
        .adjfreq        = efx_phc_adjfreq,
        .adjtime        = efx_phc_adjtime,
@@ -1253,7 +1240,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
        spin_lock_init(&ptp->evt_lock);
        for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
                list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
-       ptp->evt_overflow = false;
 
        /* Get the NIC PTP attributes and set up time conversions */
        rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1366,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
        struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
        u8 *match_data_012, *match_data_345;
        unsigned int version;
+       u8 *data;
 
        match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
 
@@ -1388,7 +1375,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
                if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
                        return false;
                }
-               version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+               data = skb->data;
+               version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
                if (version != PTP_VERSION_V1) {
                        return false;
                }
@@ -1396,13 +1384,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
                /* PTP V1 uses all six bytes of the UUID to match the packet
                 * to the timestamp
                 */
-               match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
-               match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
+               match_data_012 = data + PTP_V1_UUID_OFFSET;
+               match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
        } else {
                if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
                        return false;
                }
-               version = skb->data[PTP_V2_VERSION_OFFSET];
+               data = skb->data;
+               version = data[PTP_V2_VERSION_OFFSET];
                if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
                        return false;
                }
@@ -1414,17 +1403,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
                 * enhanced mode fixes this issue and uses bytes 0-2
                 * and byte 5-7 of the UUID.
                 */
-               match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+               match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
                if (ptp->mode == MC_CMD_PTP_MODE_V2) {
-                       match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+                       match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
                } else {
-                       match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+                       match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
                        BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
                }
        }
 
        /* Does this packet require timestamping? */
-       if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+       if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
                match->state = PTP_PACKET_STATE_UNMATCHED;
 
                /* We expect the sequence number to be in the same position in
@@ -1440,8 +1429,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
                                   (match_data_345[0] << 24));
                match->words[1] = (match_data_345[1]         |
                                   (match_data_345[2] << 8)  |
-                                  (skb->data[PTP_V1_SEQUENCE_OFFSET +
-                                             PTP_V1_SEQUENCE_LENGTH - 1] <<
+                                  (data[PTP_V1_SEQUENCE_OFFSET +
+                                        PTP_V1_SEQUENCE_LENGTH - 1] <<
                                    16));
        } else {
                match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1624,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
                list_add_tail(&evt->link, &ptp->evt_list);
 
                queue_work(ptp->workwq, &ptp->work);
-       } else if (!ptp->evt_overflow) {
-               /* Log a warning message and set the event overflow flag.
-                * The message won't be logged again until the event queue
-                * becomes empty.
-                */
+       } else if (net_ratelimit()) {
+               /* Log a rate-limited warning message. */
                netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
-               ptp->evt_overflow = true;
        }
        spin_unlock_bh(&ptp->evt_lock);
 }
index 26641817a9c73ab6634057faaa93cba9f91ece0d..0fc5baef45b17377faeef424dab5217bb41a87a0 100644 (file)
@@ -50,7 +50,7 @@ struct efx_loopback_payload {
 } __packed;
 
 /* Loopback test source MAC address */
-static const unsigned char payload_source[ETH_ALEN] = {
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
        0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
 };
 
@@ -366,8 +366,8 @@ static void efx_iterate_state(struct efx_nic *efx)
        struct efx_loopback_payload *payload = &state->payload;
 
        /* Initialise the layerII header */
-       memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN);
-       memcpy(&payload->header.h_source, &payload_source, ETH_ALEN);
+       ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+       ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
        payload->header.h_proto = htons(ETH_P_IP);
 
        /* saddr set later and used as incrementing count */
index 0c38f926871ebbc99593cf71d263ae54cf83636c..9a9205e778964186d09094128a933179926f8006 100644 (file)
@@ -1095,7 +1095,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
 
        /* Fill the remaining addresses */
        list_for_each_entry(local_addr, &efx->local_addr_list, link) {
-               memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN);
+               ether_addr_copy(peer->mac_addr, local_addr->addr);
                peer->tci = 0;
                ++peer;
                ++peer_count;
@@ -1303,8 +1303,7 @@ int efx_sriov_init(struct efx_nic *efx)
                goto fail_vfs;
 
        rtnl_lock();
-       memcpy(vfdi_status->peers[0].mac_addr,
-              net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
        efx->vf_init_count = efx->vf_count;
        rtnl_unlock();
 
@@ -1452,8 +1451,8 @@ void efx_sriov_mac_address_changed(struct efx_nic *efx)
 
        if (!efx->vf_init_count)
                return;
-       memcpy(vfdi_status->peers[0].mac_addr,
-              efx->net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(vfdi_status->peers[0].mac_addr,
+                       efx->net_dev->dev_addr);
        queue_work(vfdi_workqueue, &efx->peer_work);
 }
 
@@ -1570,7 +1569,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
        vf = efx->vf + vf_i;
 
        mutex_lock(&vf->status_lock);
-       memcpy(vf->addr.mac_addr, mac, ETH_ALEN);
+       ether_addr_copy(vf->addr.mac_addr, mac);
        __efx_sriov_update_vf_addr(vf);
        mutex_unlock(&vf->status_lock);
 
@@ -1633,7 +1632,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
        vf = efx->vf + vf_i;
 
        ivi->vf = vf_i;
-       memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN);
+       ether_addr_copy(ivi->mac, vf->addr.mac_addr);
        ivi->tx_rate = 0;
        tci = ntohs(vf->addr.tci);
        ivi->vlan = tci & VLAN_VID_MASK;
index 75d11fa4eb0a75dae1bf8b9b057c962ac63c1393..fa9475300411507e447fd51e21dfbd4c5f600aa3 100644 (file)
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * Requires TX checksum offload support.
  */
 
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET    0
-#else
-#define TSOH_OFFSET    NET_IP_ALIGN
-#endif
-
 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
 
 /**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
        EFX_BUG_ON_PARANOID(buffer->flags);
        EFX_BUG_ON_PARANOID(buffer->unmap_len);
 
-       if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+       if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
                unsigned index =
                        (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
                struct efx_buffer *page_buf =
                        &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
                unsigned offset =
-                       TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+                       TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
 
                if (unlikely(!page_buf->addr) &&
                    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
        } else {
                tx_queue->tso_long_headers++;
 
-               buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+               buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
                if (unlikely(!buffer->heap_buf))
                        return NULL;
-               result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+               result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
                buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
        }
 
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 static int tso_start(struct tso_state *st, struct efx_nic *efx,
                     const struct sk_buff *skb)
 {
-       bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+       bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int header_len, in_len;
        dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
 
        st->out_len = skb->len - header_len;
 
-       if (!use_options) {
+       if (!use_opt_desc) {
                st->header_unmap_len = 0;
 
                if (likely(in_len == 0)) {
index 5eb933c97bbacf26123d1857949515e6b04adbee..7daa7d433099d0d739bf28db8e6238efa2bd4c31 100644 (file)
@@ -987,7 +987,7 @@ out_unlock:
        spin_unlock(&priv->lock);
 
 out:
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        return NETDEV_TX_OK;
 }
index ff57a46388eefed67c81b70dc67e8ddd6a698826..6072f093e6b46618c0724f6a7ed1436d7f3c50f8 100644 (file)
@@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
                skb->data, skb->len, PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
                sis_priv->tx_ring[entry].bufptr))) {
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        sis_priv->tx_skbuff[entry] = NULL;
                        net_dev->stats.tx_dropped++;
                        spin_unlock_irqrestore(&sis_priv->lock, flags);
index c50fb08c990522d04a4cb819bdfa46cd1205b7fa..66b05e62f70a8bf8b7652b296bce1ef57d5b8699 100644 (file)
@@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                dev->stats.tx_errors++;
                dev->stats.tx_dropped++;
                spin_unlock_irqrestore(&lp->lock, flags);
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index 839c0e6cca01a9ac24c7c52ed535b489a0551ea7..d1b4dca53a9d10be97f05e2e09dd08418598bf05 100644 (file)
@@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 done:  if (!THROTTLE_TX_PKTS)
                netif_wake_queue(dev);
 
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 }
 
 /*
@@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                netdev_warn(dev, "Far too big packet error.\n");
                dev->stats.tx_errors++;
                dev->stats.tx_dropped++;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index 6382b7c416f4a4e2e6a7769e2846f19c84565251..a0fc151da40db13c20681e04c07d5e923566a14b 100644 (file)
@@ -439,7 +439,8 @@ static int smsc911x_request_resources(struct platform_device *pdev)
        /* Request clock */
        pdata->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(pdata->clk))
-               netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk));
+               dev_dbg(&pdev->dev, "couldn't get clock %li\n",
+                       PTR_ERR(pdata->clk));
 
        return ret;
 }
@@ -1672,7 +1673,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
        freespace -= (skb->len + 32);
        skb_tx_timestamp(skb);
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
 
        if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
                smsc911x_tx_update_txcounters(dev);
@@ -2379,8 +2380,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        int res_size, irq_flags;
        int retval;
 
-       pr_info("Driver version %s\n", SMSC_DRV_VERSION);
-
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "smsc911x-memory");
        if (!res)
index f2d7c702c77f3853fc05c90753f1ae90773d8753..2d09c116cbc8c57b9ded844082dc2693d27a2402 100644 (file)
@@ -26,6 +26,16 @@ config STMMAC_PLATFORM
 
          If unsure, say N.
 
+config DWMAC_SOCFPGA
+       bool "SOCFPGA dwmac support"
+       depends on STMMAC_PLATFORM && MFD_SYSCON && (ARCH_SOCFPGA || COMPILE_TEST)
+       help
+         Support for the Ethernet controller on Altera SOCFPGA SoCs.
+
+         This selects the Altera SOCFPGA SoC glue layer support
+         for the stmmac device driver. This driver is used for
+         Arria V and Cyclone V FPGA SoCs.
+
 config DWMAC_SUNXI
        bool "Allwinner GMAC support"
        depends on STMMAC_PLATFORM && ARCH_SUNXI
index dcef28775dadbeca184d8aca56358c389f1220e8..18695ebef7e43cb636e60640fbd52e4835e36ede 100644 (file)
@@ -3,6 +3,7 @@ stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
 stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
 stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
 stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+stmmac-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
 stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
              chain_mode.o dwmac_lib.o dwmac1000_core.o  dwmac1000_dma.o \
              dwmac100_core.o dwmac100_dma.o enh_desc.o  norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
new file mode 100644 (file)
index 0000000..fd8a217
--- /dev/null
@@ -0,0 +1,130 @@
+/* Copyright Altera Corporation (C) 2014. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Adapted from dwmac-sti.c
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
+
+struct socfpga_dwmac {
+       int     interface;
+       u32     reg_offset;
+       u32     reg_shift;
+       struct  device *dev;
+       struct regmap *sys_mgr_base_addr;
+};
+
+static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       struct regmap *sys_mgr_base_addr;
+       u32 reg_offset, reg_shift;
+       int ret;
+
+       dwmac->interface = of_get_phy_mode(np);
+
+       sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+       if (IS_ERR(sys_mgr_base_addr)) {
+               dev_info(dev, "No sysmgr-syscon node found\n");
+               return PTR_ERR(sys_mgr_base_addr);
+       }
+
+       ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &reg_offset);
+       if (ret) {
+               dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n");
+               return -EINVAL;
+       }
+
+       ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &reg_shift);
+       if (ret) {
+               dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n");
+               return -EINVAL;
+       }
+
+       dwmac->reg_offset = reg_offset;
+       dwmac->reg_shift = reg_shift;
+       dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
+       dwmac->dev = dev;
+
+       return 0;
+}
+
+static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+{
+       struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+       int phymode = dwmac->interface;
+       u32 reg_offset = dwmac->reg_offset;
+       u32 reg_shift = dwmac->reg_shift;
+       u32 ctrl, val;
+
+       switch (phymode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+               break;
+       case PHY_INTERFACE_MODE_MII:
+       case PHY_INTERFACE_MODE_GMII:
+               val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+               break;
+       default:
+               dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
+               return -EINVAL;
+       }
+
+       regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+       ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+       ctrl |= val << reg_shift;
+
+       regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+       return 0;
+}
+
+static void *socfpga_dwmac_probe(struct platform_device *pdev)
+{
+       struct device           *dev = &pdev->dev;
+       int                     ret;
+       struct socfpga_dwmac    *dwmac;
+
+       dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+       if (!dwmac)
+               return ERR_PTR(-ENOMEM);
+
+       ret = socfpga_dwmac_parse_data(dwmac, dev);
+       if (ret) {
+               dev_err(dev, "Unable to parse OF data\n");
+               return ERR_PTR(ret);
+       }
+
+       ret = socfpga_dwmac_setup(dwmac);
+       if (ret) {
+               dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       return dwmac;
+}
+
+const struct stmmac_of_data socfpga_gmac_data = {
+       .setup = socfpga_dwmac_probe,
+};
index f9e60d7918c4ac0b800a664ecae26e0228033f53..ca01035634a76fbc88414f6550849cfa1c772403 100644 (file)
@@ -136,6 +136,9 @@ extern const struct stmmac_of_data sun7i_gmac_data;
 #ifdef CONFIG_DWMAC_STI
 extern const struct stmmac_of_data sti_gmac_data;
 #endif
+#ifdef CONFIG_DWMAC_SOCFPGA
+extern const struct stmmac_of_data socfpga_gmac_data;
+#endif
 extern struct platform_driver stmmac_pltfr_driver;
 static inline int stmmac_register_platform(void)
 {
index 8543e1cfd55edb4a60f5cb543a1e5ea9fc18a097..d940034acdd4aa465153f80ae0d5881cb648d6a8 100644 (file)
@@ -1303,7 +1303,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
                priv->hw->mode->clean_desc3(priv, p);
 
                if (likely(skb != NULL)) {
-                       dev_kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                        priv->tx_skbuff[entry] = NULL;
                }
 
index 8fb32a80f1c1999a18234a2daec7323affd3917a..46aef5108bea47c7d6e49b891937d05ac2610c85 100644 (file)
@@ -37,6 +37,9 @@ static const struct of_device_id stmmac_dt_ids[] = {
        { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
        { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
        { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
+#endif
+#ifdef CONFIG_DWMAC_SOCFPGA
+       { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
 #endif
        /* SoC specific glue layers should come before generic bindings */
        { .compatible = "st,spear600-gmac"},
index 7680581ebe12fe58a60de42b419467e3f2f065f7..b7ad3565566cc8a09b7964fcb59aca3921e8e57c 100644 (file)
@@ -164,6 +164,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
        .n_alarm = 0,
        .n_ext_ts = 0,
        .n_per_out = 0,
+       .n_pins = 0,
        .pps = 0,
        .adjfreq = stmmac_adjust_freq,
        .adjtime = stmmac_adjust_time,
index 8e2266e1f26070614559c12d5d5de50281aad813..79606f47a08e0989396203e5ba54485dba33a04f 100644 (file)
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
        struct msix_entry msi_vec[NIU_NUM_LDG];
        struct niu_parent *parent = np->parent;
        struct pci_dev *pdev = np->pdev;
-       int i, num_irqs, err;
+       int i, num_irqs;
        u8 first_ldg;
 
        first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
                    (np->port == 0 ? 3 : 1));
        BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
 
-retry:
        for (i = 0; i < num_irqs; i++) {
                msi_vec[i].vector = 0;
                msi_vec[i].entry = i;
        }
 
-       err = pci_enable_msix(pdev, msi_vec, num_irqs);
-       if (err < 0) {
+       num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
+       if (num_irqs < 0) {
                np->flags &= ~NIU_FLAGS_MSIX;
                return;
        }
-       if (err > 0) {
-               num_irqs = err;
-               goto retry;
-       }
 
        np->flags |= NIU_FLAGS_MSIX;
        for (i = 0; i < num_irqs; i++)
index c2799dc46325e48bad961c4a3138e80e28eabdb9..102a66fc54a216718fbe753f764ed62ee08cc987 100644 (file)
@@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
                }
 
                dev->stats.tx_packets++;
-               dev_kfree_skb(skb);
+               dev_consume_skb_any(skb);
        }
        gp->tx_old = entry;
 
index 7d6d8ec676c892ebeb633b7f54fcd91cca1267c1..5d5fec6c4eb04bbdacf851635394893055a27866 100644 (file)
@@ -378,7 +378,6 @@ struct cpsw_priv {
        u32                             version;
        u32                             coal_intvl;
        u32                             bus_freq_mhz;
-       struct net_device_stats         stats;
        int                             rx_packet_max;
        int                             host_port;
        struct clk                      *clk;
@@ -673,8 +672,8 @@ static void cpsw_tx_handler(void *token, int len, int status)
        if (unlikely(netif_queue_stopped(ndev)))
                netif_wake_queue(ndev);
        cpts_tx_timestamp(priv->cpts, skb);
-       priv->stats.tx_packets++;
-       priv->stats.tx_bytes += len;
+       ndev->stats.tx_packets++;
+       ndev->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
 }
 
@@ -700,10 +699,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
                cpts_rx_timestamp(priv->cpts, skb);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
-               priv->stats.rx_bytes += len;
-               priv->stats.rx_packets++;
+               ndev->stats.rx_bytes += len;
+               ndev->stats.rx_packets++;
        } else {
-               priv->stats.rx_dropped++;
+               ndev->stats.rx_dropped++;
                new_skb = skb;
        }
 
@@ -1313,7 +1312,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 
        if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
-               priv->stats.tx_dropped++;
+               ndev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
 
@@ -1337,7 +1336,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 
        return NETDEV_TX_OK;
 fail:
-       priv->stats.tx_dropped++;
+       ndev->stats.tx_dropped++;
        netif_stop_queue(ndev);
        return NETDEV_TX_BUSY;
 }
@@ -1477,7 +1476,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
 static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
        struct cpsw_priv *priv = netdev_priv(dev);
-       struct mii_ioctl_data *data = if_mii(req);
        int slave_no = cpsw_slave_index(priv);
 
        if (!netif_running(dev))
@@ -1490,14 +1488,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
        case SIOCGHWTSTAMP:
                return cpsw_hwtstamp_get(dev, req);
 #endif
-       case SIOCGMIIPHY:
-               data->phy_id = priv->slaves[slave_no].phy->addr;
-               break;
-       default:
-               return -ENOTSUPP;
        }
 
-       return 0;
+       if (!priv->slaves[slave_no].phy)
+               return -EOPNOTSUPP;
+       return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
 }
 
 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1505,7 +1500,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        struct cpsw_priv *priv = netdev_priv(ndev);
 
        cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
-       priv->stats.tx_errors++;
+       ndev->stats.tx_errors++;
        cpsw_intr_disable(priv);
        cpdma_ctlr_int_ctrl(priv->dma, false);
        cpdma_chan_stop(priv->txch);
@@ -1544,12 +1539,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
        return 0;
 }
 
-static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
-{
-       struct cpsw_priv *priv = netdev_priv(ndev);
-       return &priv->stats;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void cpsw_ndo_poll_controller(struct net_device *ndev)
 {
@@ -1642,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
-       .ndo_get_stats          = cpsw_ndo_get_stats,
        .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cpsw_ndo_poll_controller,
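
The cpsw conversion above drops the driver's private net_device_stats copy in favour of the counters embedded in struct net_device; with that, the default dev_get_stats() path already returns the right numbers and the removed .ndo_get_stats hook becomes redundant. A minimal sketch (hypothetical helper, assuming counters are only updated from the driver's own handlers):

#include <linux/netdevice.h>

/* Hedged sketch: bump the core-provided counters directly; no private
 * struct net_device_stats and no .ndo_get_stats callback are required.
 */
static void example_count_rx(struct net_device *ndev, unsigned int len)
{
	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += len;
}
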
index 8c351f100acac5335aedeba235b395bd4330a109..a3bbf59eaafdf2e31b400336129e3731e0d88364 100644 (file)
 
 #ifdef CONFIG_TI_CPTS
 
-static struct sock_filter ptp_filter[] = {
-       PTP_FILTER
-};
-
 #define cpts_read32(c, r)      __raw_readl(&c->reg->r)
 #define cpts_write32(c, v, r)  __raw_writel(v, &c->reg->r)
 
@@ -217,6 +213,7 @@ static struct ptp_clock_info cpts_info = {
        .name           = "CTPS timer",
        .max_adj        = 1000000,
        .n_ext_ts       = 0,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = cpts_ptp_adjfreq,
        .adjtime        = cpts_ptp_adjtime,
@@ -300,7 +297,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
        u64 ns = 0;
        struct cpts_event *event;
        struct list_head *this, *next;
-       unsigned int class = sk_run_filter(skb, ptp_filter);
+       unsigned int class = ptp_classify_raw(skb);
        unsigned long flags;
        u16 seqid;
        u8 mtype;
@@ -371,10 +368,6 @@ int cpts_register(struct device *dev, struct cpts *cpts,
        int err, i;
        unsigned long flags;
 
-       if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
-               pr_err("cpts: bad ptp filter\n");
-               return -EINVAL;
-       }
        cpts->info = cpts_info;
        cpts->clock = ptp_clock_register(&cpts->info, dev);
        if (IS_ERR(cpts->clock)) {
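
cpts (and ixp4xx_eth later in this series) switches from a per-driver sock_filter array run through sk_run_filter() to the shared ptp_classify_raw() helper, which carries the PTP BPF classifier internally. A minimal sketch, assuming only the event/non-event distinction matters to the caller:

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

/* Hedged sketch: classify a frame without a driver-local PTP_FILTER copy
 * or a ptp_filter_init() call.
 */
static bool example_is_ptp(struct sk_buff *skb)
{
	return ptp_classify_raw(skb) != PTP_CLASS_NONE;
}
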
index 17503da9f7a589d5f56484329179c4c68c4af429..7e1c91d41a87ff2a4e065718d4caaf84c7db8020 100644 (file)
@@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
        struct info_mpipe *info_mpipe =
                container_of(napi, struct info_mpipe, napi);
 
+       if (budget <= 0)
+               goto done;
+
        instance = info_mpipe->instance;
        while ((n = gxio_mpipe_iqueue_try_peek(
                        &info_mpipe->iqueue,
@@ -870,6 +873,7 @@ static struct ptp_clock_info ptp_mpipe_caps = {
        .name           = "mPIPE clock",
        .max_adj        = 999999999,
        .n_ext_ts       = 0,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = ptp_mpipe_adjfreq,
        .adjtime        = ptp_mpipe_adjtime,
index edb2e12a0fe214894e9a9a0445ccc7e869dbe7d6..e5a5c5d4ce0c8c8967c7963bf8bff36ee58f113f 100644 (file)
@@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 
        unsigned int work = 0;
 
+       if (budget <= 0)
+               goto done;
+
        while (priv->active) {
                int index = qup->__packet_receive_read;
                if (index == qsp->__packet_receive_queue.__packet_write)
@@ -1821,7 +1824,7 @@ busy:
 
        /* Handle completions. */
        for (i = 0; i < nolds; i++)
-               kfree_skb(olds[i]);
+               dev_consume_skb_any(olds[i]);
 
        /* Update stats. */
        u64_stats_update_begin(&stats->syncp);
@@ -2005,7 +2008,7 @@ busy:
 
        /* Handle completions. */
        for (i = 0; i < nolds; i++)
-               kfree_skb(olds[i]);
+               dev_consume_skb_any(olds[i]);
 
        /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
        u64_stats_update_begin(&stats->syncp);
@@ -2068,14 +2071,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
                cpu_stats = &priv->cpu[i]->stats;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        trx_packets = cpu_stats->rx_packets;
                        ttx_packets = cpu_stats->tx_packets;
                        trx_bytes   = cpu_stats->rx_bytes;
                        ttx_bytes   = cpu_stats->tx_bytes;
                        trx_errors  = cpu_stats->rx_errors;
                        trx_dropped = cpu_stats->rx_dropped;
-               } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
                rx_packets += trx_packets;
                tx_packets += ttx_packets;
index 3f4a32e39d276f6fcf9c9d9d16c4aec440b9daf6..0282d01618595aa2e4b690dea15e1b82d625b705 100644 (file)
@@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
                if (skb) {
                        pci_unmap_single(card->pdev, buf_addr, skb->len,
                                        PCI_DMA_TODEVICE);
-                       dev_kfree_skb(skb);
+                       dev_consume_skb_any(skb);
                }
        }
        return 0;
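
Several hunks above (stmmac, sungem, tilegx/tilepro, spider_net) replace dev_kfree_skb()/kfree_skb() on the TX completion path with dev_consume_skb_any(). A minimal sketch of the distinction, using a hypothetical completion handler:

#include <linux/skbuff.h>

/* Hedged sketch: dev_consume_skb_any() frees an skb that was transmitted
 * successfully (so drop monitors stay quiet), while dev_kfree_skb_any()
 * records a drop; the *_any variants are safe in both hard-IRQ and
 * process context.
 */
static void example_tx_done(struct sk_buff *skb, bool sent)
{
	if (sent)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}
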
index 88e9c73cebc015b9a31b169b973911f92ef8a9d8..fef5573dbfcac0f6db7c44be4124fd04c1fe6587 100644 (file)
@@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
        int received = 0, handled;
        u32 status;
 
+       if (budget <= 0)
+               return received;
+
        spin_lock(&lp->rx_lock);
        status = tc_readl(&tr->Int_Src);
        do {
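
The budget checks added to tile_net_poll() and tc35815_poll() above follow the NAPI rule that a poll routine may be called with a zero budget (for example from netpoll, which only wants TX work done) and must not process or complete any RX packets in that case. A minimal sketch of the pattern, with the RX loop elided:

#include <linux/netdevice.h>

/* Hedged sketch: bail out before any RX work when no budget was granted;
 * otherwise receive at most 'budget' packets and only signal completion
 * when the quota was not exhausted.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	if (budget <= 0)
		return done;

	/* ... process up to 'budget' RX descriptors, incrementing 'done' ... */

	if (done < budget)
		napi_complete(napi);
	return done;
}
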
index 6ac20a6738f4df5862b49aea71955a4a51ee4842..f61dc2b72bb2f43780ace58a503bd2e3b89f61a9 100644 (file)
@@ -1022,7 +1022,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &rhine_netdev_ops;
-       dev->ethtool_ops = &netdev_ethtool_ops,
+       dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
@@ -1678,7 +1678,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                /* Must use alignment buffer. */
                if (skb->len > PKT_BUF_SZ) {
                        /* packet too long, drop it */
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        rp->tx_skbuff[entry] = NULL;
                        dev->stats.tx_dropped++;
                        return NETDEV_TX_OK;
@@ -1698,7 +1698,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                        pci_map_single(rp->pdev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
                if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
-                       dev_kfree_skb(skb);
+                       dev_kfree_skb_any(skb);
                        rp->tx_skbuff_dma[entry] = 0;
                        dev->stats.tx_dropped++;
                        return NETDEV_TX_OK;
@@ -1836,7 +1836,7 @@ static void rhine_tx(struct net_device *dev)
                                         rp->tx_skbuff[entry]->len,
                                         PCI_DMA_TODEVICE);
                }
-               dev_kfree_skb(rp->tx_skbuff[entry]);
+               dev_consume_skb_any(rp->tx_skbuff[entry]);
                rp->tx_skbuff[entry] = NULL;
                entry = (++rp->dirty_tx) % TX_RING_SIZE;
        }
@@ -2072,16 +2072,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        netdev_stats_to_stats64(stats, &dev->stats);
 
        do {
-               start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
                stats->rx_packets = rp->rx_stats.packets;
                stats->rx_bytes = rp->rx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
 
        do {
-               start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+               start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
                stats->tx_packets = rp->tx_stats.packets;
                stats->tx_bytes = rp->tx_stats.bytes;
-       } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+       } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
 
        return stats;
 }
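
The via-rhine and tilegx hunks above move from the _bh to the _irq flavour of the u64_stats fetch helpers, matching writers that update the counters from (soft)irq context. A reader-side sketch, assuming a per-ring stats block like the driver's:

#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Hedged sketch: retry the snapshot until the writer's sequence count is
 * stable, using the _irq variants introduced above.
 */
static void example_read_stats(struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
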
index ad61d26a44f31d26fc3b6d96f8430a5264e28e6f..de08e86db209fdd21aee55bea8b2827b2af48fe7 100644 (file)
@@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
        /* The hardware can handle at most 7 memory segments, so merge
         * the skb if there are more */
        if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
index 0df36c6ec7f4618c753db429d8c6c793455e0594..104d46f37969f990c920d4ee42c61e7258579f8a 100644 (file)
@@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev)
        if (!mem)
                return -ENXIO;
        mem_size = resource_size(mem);
-       if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
-               return -EBUSY;
-       priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
-       if (!priv->base)
-               return -EBUSY;
+
+       priv->base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
 
        spin_lock_init(&priv->reg_lock);
        priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
index 71c27b3292f1318a30a26dcd25d277dfcc5bd119..1f33c4c86c207f6a4fecf52bca8645c5e114be9e 100644 (file)
@@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev)
        if (!mem)
                return -ENXIO;
        mem_size = resource_size(mem);
-       if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
-               return -EBUSY;
-       priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
-       if (!priv->base)
-               return -EBUSY;
+
+       priv->base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
 
        spin_lock_init(&priv->reg_lock);
        priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
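
Both w5100 and w5300 replace the devm_request_mem_region() + devm_ioremap() pair with devm_ioremap_resource(), which requests and maps the region in one managed call and reports failure via an ERR_PTR() value instead of NULL. A minimal probe-side sketch (hypothetical helper):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Hedged sketch: devm_ioremap_resource() also copes with a NULL resource,
 * so callers only need the IS_ERR()/PTR_ERR() check shown above.
 */
static void __iomem *example_map(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	return devm_ioremap_resource(&pdev->dev, mem);
}
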
index a4347508031ce4cff516c8d5dce32a4ba9e2efa8..fa193c4688da78719257ac982af8be1f81b270c1 100644 (file)
@@ -771,8 +771,8 @@ static void ll_temac_recv(struct net_device *ndev)
 
                /* if we're doing rx csum offload, set it up */
                if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
-                       (skb->protocol == __constant_htons(ETH_P_IP)) &&
-                       (skb->len > 64)) {
+                   (skb->protocol == htons(ETH_P_IP)) &&
+                   (skb->len > 64)) {
 
                        skb->csum = cur_p->app3 & 0xFFFF;
                        skb->ip_summed = CHECKSUM_COMPLETE;
index 4bfdf8c7ada033cf964f1da66daba0137b499e6f..7b0a735562645cf042cfb312ace5018180d94559 100644 (file)
@@ -756,7 +756,7 @@ static void axienet_recv(struct net_device *ndev)
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        }
                } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
-                          skb->protocol == __constant_htons(ETH_P_IP) &&
+                          skb->protocol == htons(ETH_P_IP) &&
                           skb->len > 64) {
                        skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
                        skb->ip_summed = CHECKSUM_COMPLETE;
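
The ll_temac and axienet hunks drop __constant_htons() in favour of plain htons(); with a compile-time constant argument the byte swap is folded by the compiler anyway, so the __constant_ spelling adds nothing. A one-line sketch:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Hedged sketch: htons(ETH_P_IP) is evaluated at compile time here. */
static bool example_is_ipv4(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP);
}
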
index 36052b98b3fcb20c8955a575c66d38fb8e6fc2ad..0d87c67a5ff7208e807a980c406a934214c9d4a6 100644 (file)
@@ -794,18 +794,6 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
        return 0;
 }
 
-/**
- * xemaclite_mdio_reset - Reset the mdio bus.
- * @bus:       Pointer to the MII bus
- *
- * This function is required(?) as per Documentation/networking/phy.txt.
- * There is no reset in this device; this function always returns 0.
- */
-static int xemaclite_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 /**
  * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
  * @lp:                Pointer to the Emaclite device private data
@@ -861,7 +849,6 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
        bus->name = "Xilinx Emaclite MDIO";
        bus->read = xemaclite_mdio_read;
        bus->write = xemaclite_mdio_write;
-       bus->reset = xemaclite_mdio_reset;
        bus->parent = dev;
        bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
 
@@ -1037,7 +1024,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
        skb_tx_timestamp(new_skb);
 
        dev->stats.tx_bytes += len;
-       dev_kfree_skb(new_skb);
+       dev_consume_skb_any(new_skb);
 
        return 0;
 }
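
xemaclite removes its stub mii_bus reset callback; the hook is optional and mdiobus_register() simply skips it when it is NULL. A minimal registration sketch, assuming the read/write accessors are assigned by the caller beforehand:

#include <linux/phy.h>

/* Hedged sketch: no .reset callback is installed; bus->read and bus->write
 * are assumed to be set before registration.
 */
static int example_mdio_register(struct mii_bus *bus, struct device *parent)
{
	bus->name = "example MDIO";
	bus->parent = parent;
	/* bus->reset left NULL on purpose */
	return mdiobus_register(bus);
}
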
index 25283f17d82f34e093e8b41fe195f82ebb66db26..f7e0f0f7c2e27dd19b2cbc674644cd4678074c2c 100644 (file)
@@ -256,10 +256,6 @@ static int ports_open;
 static struct port *npe_port_tab[MAX_NPES];
 static struct dma_pool *dma_pool;
 
-static struct sock_filter ptp_filter[] = {
-       PTP_FILTER
-};
-
 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 {
        u8 *data = skb->data;
@@ -267,7 +263,7 @@ static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
        u16 *hi, *id;
        u32 lo;
 
-       if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
+       if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
                return 0;
 
        offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
@@ -1413,11 +1409,6 @@ static int eth_init_one(struct platform_device *pdev)
        char phy_id[MII_BUS_ID_SIZE + 3];
        int err;
 
-       if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
-               pr_err("ixp4xx_eth: bad ptp filter\n");
-               return -EINVAL;
-       }
-
        if (!(dev = alloc_etherdev(sizeof(struct port))))
                return -ENOMEM;
 
index 61dd2447e1bb4eedb5d8e6b72abcf21e9f273afb..81901659cc9ea1126e22f02e427dab1b9a729039 100644 (file)
@@ -1184,7 +1184,7 @@ static void __exit yam_cleanup_driver(void)
        struct yam_mcs *p;
        int i;
 
-       del_timer(&yam_timer);
+       del_timer_sync(&yam_timer);
        for (i = 0; i < NR_PORTS; i++) {
                struct net_device *dev = yam_devs[i];
                if (dev) {
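
The yam cleanup switches del_timer() to del_timer_sync(), which additionally waits for a handler still running on another CPU; on a module-exit path this prevents the handler from touching memory that is about to be freed. A minimal sketch (must not be called from the timer handler itself, nor with locks the handler takes held):

#include <linux/timer.h>

/* Hedged sketch: deactivate the timer and wait out any concurrent handler
 * before tearing down the data it uses.
 */
static void example_shutdown(struct timer_list *timer)
{
	del_timer_sync(timer);
}
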
index 7b594ce3f21db2102139e8802d4d9e69bb61a3e8..13010b4dae5b7ddea9b3a48cf28dc8425c5b3e6c 100644 (file)
@@ -30,6 +30,7 @@
 
 /* Fwd declaration */
 struct hv_netvsc_packet;
+struct ndis_tcp_ip_checksum_info;
 
 /* Represent the xfer page packet which contains 1 or more netvsc packet */
 struct xferpage_packet {
@@ -73,7 +74,7 @@ struct hv_netvsc_packet {
        } completion;
 
        /* This points to the memory after page_buf */
-       void *extension;
+       struct rndis_message *rndis_msg;
 
        u32 total_data_buflen;
        /* Points to the send/receive buffer where the ethernet frame is */
@@ -117,7 +118,8 @@ int netvsc_send(struct hv_device *device,
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                unsigned int status);
 int netvsc_recv_callback(struct hv_device *device_obj,
-                       struct hv_netvsc_packet *packet);
+                       struct hv_netvsc_packet *packet,
+                       struct ndis_tcp_ip_checksum_info *csum_info);
 int rndis_filter_open(struct hv_device *dev);
 int rndis_filter_close(struct hv_device *dev);
 int rndis_filter_device_add(struct hv_device *dev,
@@ -126,11 +128,6 @@ void rndis_filter_device_remove(struct hv_device *dev);
 int rndis_filter_receive(struct hv_device *dev,
                        struct hv_netvsc_packet *pkt);
 
-
-
-int rndis_filter_send(struct hv_device *dev,
-                       struct hv_netvsc_packet *pkt);
-
 int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
@@ -139,6 +136,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
 
 #define NVSP_PROTOCOL_VERSION_1                2
 #define NVSP_PROTOCOL_VERSION_2                0x30002
+#define NVSP_PROTOCOL_VERSION_4                0x40000
+#define NVSP_PROTOCOL_VERSION_5                0x50000
 
 enum {
        NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +192,23 @@ enum {
 
        NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
        NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+       NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+       /* Version 4 messages */
+       NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+       NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+       NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+       NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+       /* Version 5 messages */
+       NVSP_MSG5_TYPE_OID_QUERY_EX,
+       NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+       NVSP_MSG5_TYPE_SUBCHANNEL,
+       NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+       NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
 };
 
 enum {
@@ -447,10 +463,44 @@ union nvsp_2_message_uber {
        struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+enum nvsp_subchannel_operation {
+       NVSP_SUBCHANNEL_NONE = 0,
+       NVSP_SUBCHANNEL_ALLOCATE,
+       NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+       u32 op;
+       u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+       u32 status;
+       u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+       /* The number of entries in the send indirection table */
+       u32 count;
+
+       /* The offset of the send indirection table from the top of this struct.
+        * The send indirection table tells which channel to put the send
+        * traffic on. Each entry is a channel number.
+        */
+       u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+       struct nvsp_5_subchannel_request subchn_req;
+       struct nvsp_5_subchannel_complete subchn_comp;
+       struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
 union nvsp_all_messages {
        union nvsp_message_init_uber init_msg;
        union nvsp_1_message_uber v1_msg;
        union nvsp_2_message_uber v2_msg;
+       union nvsp_5_message_uber v5_msg;
 } __packed;
 
 /* ALL Messages */
@@ -463,6 +513,7 @@ struct nvsp_message {
 #define NETVSC_MTU 65536
 
 #define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY      (1024*1024*15)  /* 15MB */
 
 #define NETVSC_RECEIVE_BUFFER_ID               0xcafe
 
@@ -506,6 +557,8 @@ struct netvsc_device {
 
        /* Holds rndis device info */
        void *extension;
+       /* The receive buffer for this device */
+       unsigned char cb_buffer[NETVSC_PACKET_SIZE];
 };
 
 /* NdisInitialize message */
@@ -671,9 +724,133 @@ struct ndis_pkt_8021q_info {
        };
 };
 
+struct ndis_oject_header {
+       u8 type;
+       u8 revision;
+       u16 size;
+};
+
+#define NDIS_OBJECT_TYPE_DEFAULT       0x80
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED  2
+#define NDIS_OFFLOAD_PARAMETERS_LSOV1_ENABLED  2
+#define NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED 1
+#define NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED 2
+#define NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED 3
+#define NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED 4
+
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE    1
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4       0
+#define NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6       1
+
+/*
+ * New offload OIDs for NDIS 6
+ */
+#define OID_TCP_OFFLOAD_CURRENT_CONFIG 0xFC01020B /* query only */
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C          /* set only */
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D/* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_CURRENT_CONFIG 0xFC01020E /* query only */
+#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
+#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+
+struct ndis_offload_params {
+       struct ndis_oject_header header;
+       u8 ip_v4_csum;
+       u8 tcp_ip_v4_csum;
+       u8 udp_ip_v4_csum;
+       u8 tcp_ip_v6_csum;
+       u8 udp_ip_v6_csum;
+       u8 lso_v1;
+       u8 ip_sec_v1;
+       u8 lso_v2_ipv4;
+       u8 lso_v2_ipv6;
+       u8 tcp_connection_ip_v4;
+       u8 tcp_connection_ip_v6;
+       u32 flags;
+       u8 ip_sec_v2;
+       u8 ip_sec_v2_ip_v4;
+       struct {
+               u8 rsc_ip_v4;
+               u8 rsc_ip_v6;
+       };
+       struct {
+               u8 encapsulated_packet_task_offload;
+               u8 encapsulation_types;
+       };
+};
+
+struct ndis_tcp_ip_checksum_info {
+       union {
+               struct {
+                       u32 is_ipv4:1;
+                       u32 is_ipv6:1;
+                       u32 tcp_checksum:1;
+                       u32 udp_checksum:1;
+                       u32 ip_header_checksum:1;
+                       u32 reserved:11;
+                       u32 tcp_header_offset:10;
+               } transmit;
+               struct {
+                       u32 tcp_checksum_failed:1;
+                       u32 udp_checksum_failed:1;
+                       u32 ip_checksum_failed:1;
+                       u32 tcp_checksum_succeeded:1;
+                       u32 udp_checksum_succeeded:1;
+                       u32 ip_checksum_succeeded:1;
+                       u32 loopback:1;
+                       u32 tcp_checksum_value_invalid:1;
+                       u32 ip_checksum_value_invalid:1;
+               } receive;
+               u32  value;
+       };
+};
+
+struct ndis_tcp_lso_info {
+       union {
+               struct {
+                       u32 unused:30;
+                       u32 type:1;
+                       u32 reserved2:1;
+               } transmit;
+               struct {
+                       u32 mss:20;
+                       u32 tcp_header_offset:10;
+                       u32 type:1;
+                       u32 reserved2:1;
+               } lso_v1_transmit;
+               struct {
+                       u32 tcp_payload:30;
+                       u32 type:1;
+                       u32 reserved2:1;
+               } lso_v1_transmit_complete;
+               struct {
+                       u32 mss:20;
+                       u32 tcp_header_offset:10;
+                       u32 type:1;
+                       u32 ip_version:1;
+               } lso_v2_transmit;
+               struct {
+                       u32 reserved:30;
+                       u32 type:1;
+                       u32 reserved2:1;
+               } lso_v2_transmit_complete;
+               u32  value;
+       };
+};
+
 #define NDIS_VLAN_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
                sizeof(struct ndis_pkt_8021q_info))
 
+#define NDIS_CSUM_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+               sizeof(struct ndis_tcp_ip_checksum_info))
+
+#define NDIS_LSO_PPI_SIZE (sizeof(struct rndis_per_packet_info) + \
+               sizeof(struct ndis_tcp_lso_info))
+
 /* Format of Information buffer passed in a SetRequest for the OID */
 /* OID_GEN_RNDIS_CONFIG_PARAMETER. */
 struct rndis_config_parameter_info {
@@ -846,12 +1023,6 @@ struct rndis_message {
 };
 
 
-struct rndis_filter_packet {
-       void *completion_ctx;
-       void (*completion)(void *context);
-       struct rndis_message msg;
-};
-
 /* Handy macros */
 
 /* get the size of an RNDIS message. Pass in the message type, */
@@ -905,6 +1076,16 @@ struct rndis_filter_packet {
 #define NDIS_PACKET_TYPE_FUNCTIONAL    0x00000400
 #define NDIS_PACKET_TYPE_MAC_FRAME     0x00000800
 
+#define INFO_IPV4       2
+#define INFO_IPV6       4
+#define INFO_TCP        2
+#define INFO_UDP        4
+
+#define TRANSPORT_INFO_NOT_IP   0
+#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
+#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
 
 
 #endif /* _HYPERV_NET_H */
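
The new TRANSPORT_INFO_* values in hyperv_net.h pack the L3 family into the upper 16 bits and the L4 protocol into the lower 16 bits, which is how netvsc_start_xmit() later tests them. A decoding sketch reusing those macros (the helper itself is hypothetical):

#include <linux/types.h>

/* Hedged sketch: INFO_IPV4/INFO_IPV6 occupy the upper half-word and
 * INFO_TCP/INFO_UDP the lower half-word of the combined value.
 */
static void example_decode(u32 info, bool *is_ipv4, bool *is_tcp)
{
	*is_ipv4 = info & (INFO_IPV4 << 16);
	*is_tcp = info & INFO_TCP;
}
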
index 03a2c6e171584ff5f639d686458fbb75b7dcfb47..daddea2654ce4ec6ee3e3db66d45c9edb9ceb35f 100644 (file)
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
            NVSP_STAT_SUCCESS)
                return -EINVAL;
 
-       if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+       if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
                return 0;
 
        /* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
        struct nvsp_message *init_packet;
        int ndis_version;
        struct net_device *ndev;
+       u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+               NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
+       int i, num_ver = 4; /* number of different NVSP versions */
 
        net_device = get_outbound_net_device(device);
        if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
        init_packet = &net_device->channel_init_pkt;
 
        /* Negotiate the latest NVSP protocol supported */
-       if (negotiate_nvsp_ver(device, net_device, init_packet,
-                              NVSP_PROTOCOL_VERSION_2) == 0) {
-               net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
-       } else if (negotiate_nvsp_ver(device, net_device, init_packet,
-                                   NVSP_PROTOCOL_VERSION_1) == 0) {
-               net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
-       } else {
+       for (i = num_ver - 1; i >= 0; i--)
+               if (negotiate_nvsp_ver(device, net_device, init_packet,
+                                      ver_list[i])  == 0) {
+                       net_device->nvsp_version = ver_list[i];
+                       break;
+               }
+
+       if (i < 0) {
                ret = -EPROTO;
                goto cleanup;
        }
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
        /* Send the ndis version */
        memset(init_packet, 0, sizeof(struct nvsp_message));
 
-       ndis_version = 0x00050001;
+       if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+               ndis_version = 0x00050001;
+       else
+               ndis_version = 0x0006001e;
 
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
        init_packet->msg.v1_msg.
@@ -358,6 +365,11 @@ static int netvsc_connect_vsp(struct hv_device *device)
                goto cleanup;
 
        /* Post the big receive buffer to NetVSP */
+       if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
+               net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
+       else
+               net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
+
        ret = netvsc_init_recv_buf(device);
 
 cleanup:
@@ -432,17 +444,14 @@ static inline u32 hv_ringbuf_avail_percent(
        return avail_write * 100 / ring_info->ring_datasize;
 }
 
-static void netvsc_send_completion(struct hv_device *device,
+static void netvsc_send_completion(struct netvsc_device *net_device,
+                                  struct hv_device *device,
                                   struct vmpacket_descriptor *packet)
 {
-       struct netvsc_device *net_device;
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet *nvsc_packet;
        struct net_device *ndev;
 
-       net_device = get_inbound_net_device(device);
-       if (!net_device)
-               return;
        ndev = net_device->ndev;
 
        nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +570,13 @@ int netvsc_send(struct hv_device *device,
 }
 
 static void netvsc_send_recv_completion(struct hv_device *device,
+                                       struct netvsc_device *net_device,
                                        u64 transaction_id, u32 status)
 {
        struct nvsp_message recvcompMessage;
        int retries = 0;
        int ret;
        struct net_device *ndev;
-       struct netvsc_device *net_device = hv_get_drvdata(device);
 
        ndev = net_device->ndev;
 
@@ -653,14 +662,15 @@ static void netvsc_receive_completion(void *context)
 
        /* Send a receive completion for the xfer page packet */
        if (fsend_receive_comp)
-               netvsc_send_recv_completion(device, transaction_id, status);
+               netvsc_send_recv_completion(device, net_device, transaction_id,
+                                       status);
 
 }
 
-static void netvsc_receive(struct hv_device *device,
-                           struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct netvsc_device *net_device,
+                       struct hv_device *device,
+                       struct vmpacket_descriptor *packet)
 {
-       struct netvsc_device *net_device;
        struct vmtransfer_page_packet_header *vmxferpage_packet;
        struct nvsp_message *nvsp_packet;
        struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +683,6 @@ static void netvsc_receive(struct hv_device *device,
 
        LIST_HEAD(listHead);
 
-       net_device = get_inbound_net_device(device);
-       if (!net_device)
-               return;
        ndev = net_device->ndev;
 
        /*
@@ -741,7 +748,7 @@ static void netvsc_receive(struct hv_device *device,
                spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
                                       flags);
 
-               netvsc_send_recv_completion(device,
+               netvsc_send_recv_completion(device, net_device,
                                            vmxferpage_packet->d.trans_id,
                                            NVSP_STAT_FAIL);
 
@@ -800,22 +807,16 @@ static void netvsc_channel_cb(void *context)
        struct netvsc_device *net_device;
        u32 bytes_recvd;
        u64 request_id;
-       unsigned char *packet;
        struct vmpacket_descriptor *desc;
        unsigned char *buffer;
        int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;
 
-       packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
-                        GFP_ATOMIC);
-       if (!packet)
-               return;
-       buffer = packet;
-
        net_device = get_inbound_net_device(device);
        if (!net_device)
-               goto out;
+               return;
        ndev = net_device->ndev;
+       buffer = net_device->cb_buffer;
 
        do {
                ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +826,13 @@ static void netvsc_channel_cb(void *context)
                                desc = (struct vmpacket_descriptor *)buffer;
                                switch (desc->type) {
                                case VM_PKT_COMP:
-                                       netvsc_send_completion(device, desc);
+                                       netvsc_send_completion(net_device,
+                                                               device, desc);
                                        break;
 
                                case VM_PKT_DATA_USING_XFER_PAGES:
-                                       netvsc_receive(device, desc);
+                                       netvsc_receive(net_device,
+                                                       device, desc);
                                        break;
 
                                default:
@@ -841,23 +844,16 @@ static void netvsc_channel_cb(void *context)
                                        break;
                                }
 
-                               /* reset */
-                               if (bufferlen > NETVSC_PACKET_SIZE) {
-                                       kfree(buffer);
-                                       buffer = packet;
-                                       bufferlen = NETVSC_PACKET_SIZE;
-                               }
                        } else {
-                               /* reset */
-                               if (bufferlen > NETVSC_PACKET_SIZE) {
-                                       kfree(buffer);
-                                       buffer = packet;
-                                       bufferlen = NETVSC_PACKET_SIZE;
-                               }
-
+                               /*
+                                * We are done for this pass.
+                                */
                                break;
                        }
+
                } else if (ret == -ENOBUFS) {
+                       if (bufferlen > NETVSC_PACKET_SIZE)
+                               kfree(buffer);
                        /* Handle large packet */
                        buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
                        if (buffer == NULL) {
@@ -872,8 +868,8 @@ static void netvsc_channel_cb(void *context)
                }
        } while (1);
 
-out:
-       kfree(buffer);
+       if (bufferlen > NETVSC_PACKET_SIZE)
+               kfree(buffer);
        return;
 }
 
@@ -907,7 +903,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
        ndev = net_device->ndev;
 
        /* Initialize the NetVSC channel extension */
-       net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
        spin_lock_init(&net_device->recv_pkt_list_lock);
 
        INIT_LIST_HEAD(&net_device->recv_pkt_list);
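
netvsc_connect_vsp() now walks a version table from newest to oldest instead of hard-coding two negotiate calls. A generic sketch of that loop; the try_version() callback is hypothetical and stands in for negotiate_nvsp_ver():

#include <linux/errno.h>
#include <linux/types.h>

/* Hedged sketch: accept the highest version the host agrees to, or fail
 * with -EPROTO if none is accepted.
 */
static int example_negotiate(const u32 *vers, int nvers,
			     int (*try_version)(u32 ver), u32 *chosen)
{
	int i;

	for (i = nvers - 1; i >= 0; i--) {
		if (try_version(vers[i]) == 0) {
			*chosen = vers[i];
			return 0;
		}
	}
	return -EPROTO;
}
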
index d6fce9750b9553b221759f18fd4ec48158af4e73..4e4cf9e0c8d7a6c6472f2dee357b8b31a814c2b8 100644 (file)
@@ -128,6 +128,27 @@ static int netvsc_close(struct net_device *net)
        return ret;
 }
 
+static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
+                               int pkt_type)
+{
+       struct rndis_packet *rndis_pkt;
+       struct rndis_per_packet_info *ppi;
+
+       rndis_pkt = &msg->msg.pkt;
+       rndis_pkt->data_offset += ppi_size;
+
+       ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
+               rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
+
+       ppi->size = ppi_size;
+       ppi->type = pkt_type;
+       ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
+
+       rndis_pkt->per_pkt_info_len += ppi_size;
+
+       return ppi;
+}
+
 static void netvsc_xmit_completion(void *context)
 {
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
@@ -140,22 +161,164 @@ static void netvsc_xmit_completion(void *context)
                dev_kfree_skb_any(skb);
 }
 
+static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
+                       struct hv_page_buffer *pb)
+{
+       int j = 0;
+
+       /* Deal with compound pages by ignoring the unused part
+        * of the page.
+        */
+       page += (offset >> PAGE_SHIFT);
+       offset &= ~PAGE_MASK;
+
+       while (len > 0) {
+               unsigned long bytes;
+
+               bytes = PAGE_SIZE - offset;
+               if (bytes > len)
+                       bytes = len;
+               pb[j].pfn = page_to_pfn(page);
+               pb[j].offset = offset;
+               pb[j].len = bytes;
+
+               offset += bytes;
+               len -= bytes;
+
+               if (offset == PAGE_SIZE && len) {
+                       page++;
+                       offset = 0;
+                       j++;
+               }
+       }
+
+       return j + 1;
+}
+
+static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
+                          struct hv_page_buffer *pb)
+{
+       u32 slots_used = 0;
+       char *data = skb->data;
+       int frags = skb_shinfo(skb)->nr_frags;
+       int i;
+
+       /* The packet is laid out thus:
+        * 1. hdr
+        * 2. skb linear data
+        * 3. skb fragment data
+        */
+       if (hdr != NULL)
+               slots_used += fill_pg_buf(virt_to_page(hdr),
+                                       offset_in_page(hdr),
+                                       len, &pb[slots_used]);
+
+       slots_used += fill_pg_buf(virt_to_page(data),
+                               offset_in_page(data),
+                               skb_headlen(skb), &pb[slots_used]);
+
+       for (i = 0; i < frags; i++) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+
+               slots_used += fill_pg_buf(skb_frag_page(frag),
+                                       frag->page_offset,
+                                       skb_frag_size(frag), &pb[slots_used]);
+       }
+       return slots_used;
+}
+
+static int count_skb_frag_slots(struct sk_buff *skb)
+{
+       int i, frags = skb_shinfo(skb)->nr_frags;
+       int pages = 0;
+
+       for (i = 0; i < frags; i++) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+               unsigned long size = skb_frag_size(frag);
+               unsigned long offset = frag->page_offset;
+
+               /* Skip unused frames from start of page */
+               offset &= ~PAGE_MASK;
+               pages += PFN_UP(offset + size);
+       }
+       return pages;
+}
+
+static int netvsc_get_slots(struct sk_buff *skb)
+{
+       char *data = skb->data;
+       unsigned int offset = offset_in_page(data);
+       unsigned int len = skb_headlen(skb);
+       int slots;
+       int frag_slots;
+
+       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
+       frag_slots = count_skb_frag_slots(skb);
+       return slots + frag_slots;
+}
+
+static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+{
+       u32 ret_val = TRANSPORT_INFO_NOT_IP;
+
+       if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
+               (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
+               goto not_ip;
+       }
+
+       *trans_off = skb_transport_offset(skb);
+
+       if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
+               struct iphdr *iphdr = ip_hdr(skb);
+
+               if (iphdr->protocol == IPPROTO_TCP)
+                       ret_val = TRANSPORT_INFO_IPV4_TCP;
+               else if (iphdr->protocol == IPPROTO_UDP)
+                       ret_val = TRANSPORT_INFO_IPV4_UDP;
+       } else {
+               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                       ret_val = TRANSPORT_INFO_IPV6_TCP;
+               else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+                       ret_val = TRANSPORT_INFO_IPV6_UDP;
+       }
+
+not_ip:
+       return ret_val;
+}
+
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet;
        int ret;
-       unsigned int i, num_pages, npg_data;
-
-       /* Add multipages for skb->data and additional 2 for RNDIS */
-       npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
-               >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
-       num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
+       unsigned int num_data_pgs;
+       struct rndis_message *rndis_msg;
+       struct rndis_packet *rndis_pkt;
+       u32 rndis_msg_size;
+       bool isvlan;
+       struct rndis_per_packet_info *ppi;
+       struct ndis_tcp_ip_checksum_info *csum_info;
+       struct ndis_tcp_lso_info *lso_info;
+       int  hdr_offset;
+       u32 net_trans_info;
+
+
+       /* We will need at most two pages to describe the rndis
+        * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
+        * of pages in a single packet.
+        */
+       num_data_pgs = netvsc_get_slots(skb) + 2;
+       if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
+               netdev_err(net, "Packet too big: %u\n", skb->len);
+               dev_kfree_skb(skb);
+               net->stats.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
 
        /* Allocate a netvsc packet based on # of frags. */
        packet = kzalloc(sizeof(struct hv_netvsc_packet) +
-                        (num_pages * sizeof(struct hv_page_buffer)) +
-                        sizeof(struct rndis_filter_packet) +
+                        (num_data_pgs * sizeof(struct hv_page_buffer)) +
+                        sizeof(struct rndis_message) +
                         NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
        if (!packet) {
                /* out of memory, drop packet */
@@ -168,53 +331,111 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 
        packet->vlan_tci = skb->vlan_tci;
 
-       packet->extension = (void *)(unsigned long)packet +
+       packet->is_data_pkt = true;
+       packet->total_data_buflen = skb->len;
+
+       packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
                                sizeof(struct hv_netvsc_packet) +
-                                   (num_pages * sizeof(struct hv_page_buffer));
+                               (num_data_pgs * sizeof(struct hv_page_buffer)));
+
+       /* Set the completion routine */
+       packet->completion.send.send_completion = netvsc_xmit_completion;
+       packet->completion.send.send_completion_ctx = packet;
+       packet->completion.send.send_completion_tid = (unsigned long)skb;
 
-       /* If the rndis msg goes beyond 1 page, we will add 1 later */
-       packet->page_buf_cnt = num_pages - 1;
+       isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;
+
+       /* Add the rndis header */
+       rndis_msg = packet->rndis_msg;
+       rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
+       rndis_msg->msg_len = packet->total_data_buflen;
+       rndis_pkt = &rndis_msg->msg.pkt;
+       rndis_pkt->data_offset = sizeof(struct rndis_packet);
+       rndis_pkt->data_len = packet->total_data_buflen;
+       rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
+
+       rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
+
+       if (isvlan) {
+               struct ndis_pkt_8021q_info *vlan;
+
+               rndis_msg_size += NDIS_VLAN_PPI_SIZE;
+               ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
+                                       IEEE_8021Q_INFO);
+               vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
+                                               ppi->ppi_offset);
+               vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
+               vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
+                               VLAN_PRIO_SHIFT;
+       }
 
-       /* Initialize it from the skb */
-       packet->total_data_buflen = skb->len;
+       net_trans_info = get_net_transport_info(skb, &hdr_offset);
+       if (net_trans_info == TRANSPORT_INFO_NOT_IP)
+               goto do_send;
+
+       /*
+        * Setup the sendside checksum offload only if this is not a
+        * GSO packet.
+        */
+       if (skb_is_gso(skb))
+               goto do_lso;
+
+       rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+       ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+                           TCPIP_CHKSUM_PKTINFO);
+
+       csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+                       ppi->ppi_offset);
 
-       /* Start filling in the page buffers starting after RNDIS buffer. */
-       packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
-       packet->page_buf[1].offset
-               = (unsigned long)skb->data & (PAGE_SIZE - 1);
-       if (npg_data == 1)
-               packet->page_buf[1].len = skb_headlen(skb);
+       if (net_trans_info & (INFO_IPV4 << 16))
+               csum_info->transmit.is_ipv4 = 1;
        else
-               packet->page_buf[1].len = PAGE_SIZE
-                       - packet->page_buf[1].offset;
-
-       for (i = 2; i <= npg_data; i++) {
-               packet->page_buf[i].pfn = virt_to_phys(skb->data
-                       + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
-               packet->page_buf[i].offset = 0;
-               packet->page_buf[i].len = PAGE_SIZE;
+               csum_info->transmit.is_ipv6 = 1;
+
+       if (net_trans_info & INFO_TCP) {
+               csum_info->transmit.tcp_checksum = 1;
+               csum_info->transmit.tcp_header_offset = hdr_offset;
+       } else if (net_trans_info & INFO_UDP) {
+               csum_info->transmit.udp_checksum = 1;
        }
-       if (npg_data > 1)
-               packet->page_buf[npg_data].len = (((unsigned long)skb->data
-                       + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
-
-       /* Additional fragments are after SKB data */
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-
-               packet->page_buf[i+npg_data+1].pfn =
-                       page_to_pfn(skb_frag_page(f));
-               packet->page_buf[i+npg_data+1].offset = f->page_offset;
-               packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
+       goto do_send;
+
+do_lso:
+       rndis_msg_size += NDIS_LSO_PPI_SIZE;
+       ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
+                           TCP_LARGESEND_PKTINFO);
+
+       lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
+                       ppi->ppi_offset);
+
+       lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
+       if (net_trans_info & (INFO_IPV4 << 16)) {
+               lso_info->lso_v2_transmit.ip_version =
+                       NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
+               ip_hdr(skb)->tot_len = 0;
+               ip_hdr(skb)->check = 0;
+               tcp_hdr(skb)->check =
+               ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                  ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
+       } else {
+               lso_info->lso_v2_transmit.ip_version =
+                       NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
+               ipv6_hdr(skb)->payload_len = 0;
+               tcp_hdr(skb)->check =
+               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                               &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }
+       lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+       lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
 
-       /* Set the completion routine */
-       packet->completion.send.send_completion = netvsc_xmit_completion;
-       packet->completion.send.send_completion_ctx = packet;
-       packet->completion.send.send_completion_tid = (unsigned long)skb;
+do_send:
+       /* Start filling in the page buffers with the rndis hdr */
+       rndis_msg->msg_len += rndis_msg_size;
+       packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
+                                       skb, &packet->page_buf[0]);
+
+       ret = netvsc_send(net_device_ctx->device_ctx, packet);
 
-       ret = rndis_filter_send(net_device_ctx->device_ctx,
-                                 packet);
        if (ret == 0) {
                net->stats.tx_bytes += skb->len;
                net->stats.tx_packets++;
@@ -264,7 +485,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
  * "wire" on the specified device.
  */
 int netvsc_recv_callback(struct hv_device *device_obj,
-                               struct hv_netvsc_packet *packet)
+                               struct hv_netvsc_packet *packet,
+                               struct ndis_tcp_ip_checksum_info *csum_info)
 {
        struct net_device *net;
        struct sk_buff *skb;
@@ -291,7 +513,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                packet->total_data_buflen);
 
        skb->protocol = eth_type_trans(skb, net);
-       skb->ip_summed = CHECKSUM_NONE;
+       if (csum_info) {
+               /* We only look at the IP checksum here.
+                * Should we be dropping the packet if checksum
+                * failed? How do we deal with other checksums - TCP/UDP?
+                */
+               if (csum_info->receive.ip_checksum_succeeded)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb->ip_summed = CHECKSUM_NONE;
+       }
+
        if (packet->vlan_tci & VLAN_TAG_PRESENT)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       packet->vlan_tci);
@@ -327,7 +559,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        if (nvdev == NULL || nvdev->destroy)
                return -ENODEV;
 
-       if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+       if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                limit = NETVSC_MTU;
 
        if (mtu < 68 || mtu > limit)
@@ -452,9 +684,10 @@ static int netvsc_probe(struct hv_device *dev,
 
        net->netdev_ops = &device_ops;
 
-       /* TODO: Add GSO and Checksum offload */
-       net->hw_features = 0;
-       net->features = NETIF_F_HW_VLAN_CTAG_TX;
+       net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
+                               NETIF_F_TSO;
+       net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
+                       NETIF_F_IP_CSUM | NETIF_F_TSO;
 
        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);
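
netvsc_start_xmit() now builds the RNDIS header and the page-buffer list itself via init_page_array()/fill_pg_buf(). The core of that walk is turning a virtually contiguous region into per-page (pfn, offset, len) descriptors; a sketch under the assumption of a simplified descriptor type:

#include <linux/kernel.h>
#include <linux/mm.h>

struct example_pgdesc {
	unsigned long pfn;
	u32 offset;
	u32 len;
};

/* Hedged sketch mirroring fill_pg_buf() above: normalise the offset into
 * the first page, then emit one descriptor per page touched.
 */
static u32 example_fill(struct page *page, u32 offset, u32 len,
			struct example_pgdesc *pb)
{
	u32 j = 0;

	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len > 0) {
		u32 bytes = min_t(u32, PAGE_SIZE - offset, len);

		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;
		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}
	return j + 1;
}
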
index b54fd257652bb629b8228162712bb557e9023943..4a37e3db9e32f06c1c820c66cb08d79920bf02c0 100644 (file)
@@ -58,9 +58,6 @@ struct rndis_request {
        u8 request_ext[RNDIS_EXT_LEN];
 };
 
-static void rndis_filter_send_completion(void *ctx);
-
-
 static struct rndis_device *get_rndis_device(void)
 {
        struct rndis_device *device;
@@ -297,7 +294,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
                                "rndis response buffer overflow "
                                "detected (size %u max %zu)\n",
                                resp->msg_len,
-                               sizeof(struct rndis_filter_packet));
+                               sizeof(struct rndis_message));
 
                        if (resp->ndis_msg_type ==
                            RNDIS_MSG_RESET_C) {
@@ -373,6 +370,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
        struct rndis_packet *rndis_pkt;
        u32 data_offset;
        struct ndis_pkt_8021q_info *vlan;
+       struct ndis_tcp_ip_checksum_info *csum_info;
 
        rndis_pkt = &msg->msg.pkt;
 
@@ -411,7 +409,8 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
                pkt->vlan_tci = 0;
        }
 
-       netvsc_recv_callback(dev->net_dev->dev, pkt);
+       csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
+       netvsc_recv_callback(dev->net_dev->dev, pkt, csum_info);
 }
 
 int rndis_filter_receive(struct hv_device *dev,
@@ -630,6 +629,61 @@ cleanup:
        return ret;
 }
 
+int rndis_filter_set_offload_params(struct hv_device *hdev,
+                               struct ndis_offload_params *req_offloads)
+{
+       struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+       struct rndis_device *rdev = nvdev->extension;
+       struct net_device *ndev = nvdev->ndev;
+       struct rndis_request *request;
+       struct rndis_set_request *set;
+       struct ndis_offload_params *offload_params;
+       struct rndis_set_complete *set_complete;
+       u32 extlen = sizeof(struct ndis_offload_params);
+       int ret, t;
+
+       request = get_rndis_request(rdev, RNDIS_MSG_SET,
+               RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+       if (!request)
+               return -ENOMEM;
+
+       set = &request->request_msg.msg.set_req;
+       set->oid = OID_TCP_OFFLOAD_PARAMETERS;
+       set->info_buflen = extlen;
+       set->info_buf_offset = sizeof(struct rndis_set_request);
+       set->dev_vc_handle = 0;
+
+       offload_params = (struct ndis_offload_params *)((ulong)set +
+                               set->info_buf_offset);
+       *offload_params = *req_offloads;
+       offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
+       offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+       offload_params->header.size = extlen;
+
+       ret = rndis_filter_send_request(rdev, request);
+       if (ret != 0)
+               goto cleanup;
+
+       t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+       if (t == 0) {
+               netdev_err(ndev, "timeout before we got an OFFLOAD set response...\n");
+               /* can't put_rndis_request, since we may still receive a
+                * send-completion.
+                */
+               return -EBUSY;
+       } else {
+               set_complete = &request->response_msg.msg.set_complete;
+               if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+                       netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
+                                  set_complete->status);
+                       ret = -EINVAL;
+               }
+       }
+
+cleanup:
+       put_rndis_request(rdev, request);
+       return ret;
+}
 
 static int rndis_filter_query_device_link_status(struct rndis_device *dev)
 {
@@ -829,6 +883,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        struct netvsc_device *net_device;
        struct rndis_device *rndis_device;
        struct netvsc_device_info *device_info = additional_info;
+       struct ndis_offload_params offloads;
 
        rndis_device = get_rndis_device();
        if (!rndis_device)
@@ -868,6 +923,26 @@ int rndis_filter_device_add(struct hv_device *dev,
 
        memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
 
+       /* Turn on the offloads; the host supports all of the relevant
+        * offloads.
+        */
+       memset(&offloads, 0, sizeof(struct ndis_offload_params));
+       /* A value of zero means "no change"; now turn on what we
+        * want.
+        */
+       offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+       offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+       offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+       offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+       offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+       offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+
+       ret = rndis_filter_set_offload_params(dev, &offloads);
+       if (ret)
+               goto err_dev_remv;
+
+
        rndis_filter_query_device_link_status(rndis_device);
 
        device_info->link_state = rndis_device->link_state;
@@ -877,6 +952,10 @@ int rndis_filter_device_add(struct hv_device *dev,
                 device_info->link_state ? "down" : "up");
 
        return ret;
+
+err_dev_remv:
+       rndis_filter_device_remove(dev);
+       return ret;
 }
 
 void rndis_filter_device_remove(struct hv_device *dev)
@@ -913,101 +992,3 @@ int rndis_filter_close(struct hv_device *dev)
 
        return rndis_filter_close_device(nvdev->extension);
 }
-
-int rndis_filter_send(struct hv_device *dev,
-                            struct hv_netvsc_packet *pkt)
-{
-       int ret;
-       struct rndis_filter_packet *filter_pkt;
-       struct rndis_message *rndis_msg;
-       struct rndis_packet *rndis_pkt;
-       u32 rndis_msg_size;
-       bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
-
-       /* Add the rndis header */
-       filter_pkt = (struct rndis_filter_packet *)pkt->extension;
-
-       rndis_msg = &filter_pkt->msg;
-       rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
-       if (isvlan)
-               rndis_msg_size += NDIS_VLAN_PPI_SIZE;
-
-       rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
-       rndis_msg->msg_len = pkt->total_data_buflen +
-                                     rndis_msg_size;
-
-       rndis_pkt = &rndis_msg->msg.pkt;
-       rndis_pkt->data_offset = sizeof(struct rndis_packet);
-       if (isvlan)
-               rndis_pkt->data_offset += NDIS_VLAN_PPI_SIZE;
-       rndis_pkt->data_len = pkt->total_data_buflen;
-
-       if (isvlan) {
-               struct rndis_per_packet_info *ppi;
-               struct ndis_pkt_8021q_info *vlan;
-
-               rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
-               rndis_pkt->per_pkt_info_len = NDIS_VLAN_PPI_SIZE;
-
-               ppi = (struct rndis_per_packet_info *)((ulong)rndis_pkt +
-                       rndis_pkt->per_pkt_info_offset);
-               ppi->size = NDIS_VLAN_PPI_SIZE;
-               ppi->type = IEEE_8021Q_INFO;
-               ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
-
-               vlan = (struct ndis_pkt_8021q_info *)((ulong)ppi +
-                       ppi->ppi_offset);
-               vlan->vlanid = pkt->vlan_tci & VLAN_VID_MASK;
-               vlan->pri = (pkt->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-       }
-
-       pkt->is_data_pkt = true;
-       pkt->page_buf[0].pfn = virt_to_phys(rndis_msg) >> PAGE_SHIFT;
-       pkt->page_buf[0].offset =
-                       (unsigned long)rndis_msg & (PAGE_SIZE-1);
-       pkt->page_buf[0].len = rndis_msg_size;
-
-       /* Add one page_buf if the rndis msg goes beyond page boundary */
-       if (pkt->page_buf[0].offset + rndis_msg_size > PAGE_SIZE) {
-               int i;
-               for (i = pkt->page_buf_cnt; i > 1; i--)
-                       pkt->page_buf[i] = pkt->page_buf[i-1];
-               pkt->page_buf_cnt++;
-               pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
-               pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
-                       rndis_msg + pkt->page_buf[0].len)) >> PAGE_SHIFT;
-               pkt->page_buf[1].offset = 0;
-               pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
-       }
-
-       /* Save the packet send completion and context */
-       filter_pkt->completion = pkt->completion.send.send_completion;
-       filter_pkt->completion_ctx =
-                               pkt->completion.send.send_completion_ctx;
-
-       /* Use ours */
-       pkt->completion.send.send_completion = rndis_filter_send_completion;
-       pkt->completion.send.send_completion_ctx = filter_pkt;
-
-       ret = netvsc_send(dev, pkt);
-       if (ret != 0) {
-               /*
-                * Reset the completion to originals to allow retries from
-                * above
-                */
-               pkt->completion.send.send_completion =
-                               filter_pkt->completion;
-               pkt->completion.send.send_completion_ctx =
-                               filter_pkt->completion_ctx;
-       }
-
-       return ret;
-}
-
-static void rndis_filter_send_completion(void *ctx)
-{
-       struct rndis_filter_packet *filter_pkt = ctx;
-
-       /* Pass it back to the original handler */
-       filter_pkt->completion(filter_pkt->completion_ctx);
-}
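
Note: the rndis_filter_set_offload_params() helper added above treats a zeroed
field in struct ndis_offload_params as "no change", so a caller does not have to
program every offload at once. As a rough sketch (not part of this patch; the
surrounding hv_device *dev and net_device *ndev context is assumed), enabling
only the IPv4 checksum offloads would look like:

	struct ndis_offload_params offloads;
	int ret;

	/* Zero everything first: zero means "leave this offload unchanged". */
	memset(&offloads, 0, sizeof(offloads));
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
	offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
	offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

	ret = rndis_filter_set_offload_params(dev, &offloads);
	if (ret)
		netdev_err(ndev, "setting offload parameters failed: %d\n", ret);
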
index 08ae4655423a6b8f7deeee92d588fa38f0ec5cf4..3e89beab64fdc87559a2b1b9e27a7e6c6ba2b04c 100644 (file)
@@ -15,9 +15,9 @@ config IEEE802154_FAKEHARD
        depends on  IEEE802154_DRIVERS
        ---help---
          Say Y here to enable the fake driver that serves as an example
-          of HardMAC device driver.
+         of a HardMAC device driver.
 
-          This driver can also be built as a module. To do so say M here.
+         This driver can also be built as a module. To do so say M here.
          The module will be called 'fakehard'.
 
 config IEEE802154_FAKELB
@@ -31,17 +31,23 @@ config IEEE802154_FAKELB
          The module will be called 'fakelb'.
 
 config IEEE802154_AT86RF230
-        depends on IEEE802154_DRIVERS && MAC802154
-        tristate "AT86RF230/231 transceiver driver"
-        depends on SPI
+       depends on IEEE802154_DRIVERS && MAC802154
+       tristate "AT86RF230/231/233/212 transceiver driver"
+       depends on SPI
+       ---help---
+         Say Y here to enable the at86rf230/231/233/212 SPI 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         The module will be called 'at86rf230'.
 
 config IEEE802154_MRF24J40
-       tristate "Microchip MRF24J40 transceiver driver"
-       depends on IEEE802154_DRIVERS && MAC802154
-       depends on SPI
-       ---help---
-         Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
-         controller.
-
-         This driver can also be built as a module. To do so, say M here.
-         the module will be called 'mrf24j40'.
+       tristate "Microchip MRF24J40 transceiver driver"
+       depends on IEEE802154_DRIVERS && MAC802154
+       depends on SPI
+       ---help---
+         Say Y here to enable the MRF24J40 SPI 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         The module will be called 'mrf24j40'.
index fbb83d175da31b4491e2d375ae7d6f0e846005ea..89417ac41083e59c2c44512ef804a7861ef1d1cc 100644 (file)
 #include <linux/spi/spi.h>
 #include <linux/spi/at86rf230.h>
 #include <linux/skbuff.h>
+#include <linux/of_gpio.h>
 
 #include <net/mac802154.h>
 #include <net/wpan-phy.h>
 
 struct at86rf230_local {
        struct spi_device *spi;
-       int rstn, slp_tr, dig2;
 
        u8 part;
        u8 vers;
@@ -53,8 +53,16 @@ struct at86rf230_local {
        spinlock_t lock;
        bool irq_busy;
        bool is_tx;
+       bool tx_aret;
+
+       int rssi_base_val;
 };
 
+static bool is_rf212(struct at86rf230_local *local)
+{
+       return local->part == 7;
+}
+
 #define        RG_TRX_STATUS   (0x01)
 #define        SR_TRX_STATUS           0x01, 0x1f, 0
 #define        SR_RESERVED_01_3        0x01, 0x20, 5
@@ -100,7 +108,10 @@ struct at86rf230_local {
 #define        SR_SFD_VALUE            0x0b, 0xff, 0
 #define        RG_TRX_CTRL_2   (0x0c)
 #define        SR_OQPSK_DATA_RATE      0x0c, 0x03, 0
-#define        SR_RESERVED_0c_2        0x0c, 0x7c, 2
+#define        SR_SUB_MODE             0x0c, 0x04, 2
+#define        SR_BPSK_QPSK            0x0c, 0x08, 3
+#define        SR_OQPSK_SUB1_RC_EN     0x0c, 0x10, 4
+#define        SR_RESERVED_0c_5        0x0c, 0x60, 5
 #define        SR_RX_SAFE_MODE         0x0c, 0x80, 7
 #define        RG_ANT_DIV      (0x0d)
 #define        SR_ANT_CTRL             0x0d, 0x03, 0
@@ -145,7 +156,7 @@ struct at86rf230_local {
 #define        SR_RESERVED_17_5        0x17, 0x08, 3
 #define        SR_AACK_UPLD_RES_FT     0x17, 0x10, 4
 #define        SR_AACK_FLTR_RES_FT     0x17, 0x20, 5
-#define        SR_RESERVED_17_2        0x17, 0x40, 6
+#define        SR_CSMA_LBT_MODE        0x17, 0x40, 6
 #define        SR_RESERVED_17_1        0x17, 0x80, 7
 #define        RG_FTN_CTRL     (0x18)
 #define        SR_RESERVED_18_2        0x18, 0x7f, 0
@@ -234,6 +245,7 @@ struct at86rf230_local {
 #define STATE_TX_ON            0x09
 /* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
 #define STATE_SLEEP            0x0F
+#define STATE_PREP_DEEP_SLEEP  0x10
 #define STATE_BUSY_RX_AACK     0x11
 #define STATE_BUSY_TX_ARET     0x12
 #define STATE_RX_AACK_ON       0x16
@@ -243,6 +255,57 @@ struct at86rf230_local {
 #define STATE_BUSY_RX_AACK_NOCLK 0x1E
 #define STATE_TRANSITION_IN_PROGRESS 0x1F
 
+static int
+__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
+               u8 *version)
+{
+       u8 data[4];
+       u8 *buf = kmalloc(2, GFP_KERNEL);
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len    = 2,
+               .tx_buf = buf,
+               .rx_buf = buf,
+       };
+       u8 reg;
+
+       if (!buf)
+               return -ENOMEM;
+
+       for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
+               buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+               buf[1] = 0xff;
+               dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+               spi_message_init(&msg);
+               spi_message_add_tail(&xfer, &msg);
+
+               status = spi_sync(spi, &msg);
+               dev_vdbg(&spi->dev, "status = %d\n", status);
+               if (msg.status)
+                       status = msg.status;
+
+               dev_vdbg(&spi->dev, "status = %d\n", status);
+               dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+               dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+
+               if (status == 0)
+                       data[reg - RG_PART_NUM] = buf[1];
+               else
+                       break;
+       }
+
+       if (status == 0) {
+               *part = data[0];
+               *version = data[1];
+               *man_id = (data[3] << 8) | data[2];
+       }
+
+       kfree(buf);
+
+       return status;
+}
+
 static int
 __at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
 {
@@ -489,7 +552,9 @@ at86rf230_state(struct ieee802154_dev *dev, int state)
        } while (val == STATE_TRANSITION_IN_PROGRESS);
 
 
-       if (val == desired_status)
+       if (val == desired_status ||
+           (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
+           (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
                return 0;
 
        pr_err("unexpected state change: %d, asked for %d\n", val, state);
@@ -510,7 +575,11 @@ at86rf230_start(struct ieee802154_dev *dev)
        if (rc)
                return rc;
 
-       return at86rf230_state(dev, STATE_RX_ON);
+       rc = at86rf230_state(dev, STATE_TX_ON);
+       if (rc)
+               return rc;
+
+       return at86rf230_state(dev, STATE_RX_AACK_ON);
 }
 
 static void
@@ -519,6 +588,39 @@ at86rf230_stop(struct ieee802154_dev *dev)
        at86rf230_state(dev, STATE_FORCE_TRX_OFF);
 }
 
+static int
+at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+       lp->rssi_base_val = -91;
+
+       return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
+at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+       int rc;
+
+       if (channel == 0)
+               rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0);
+       else
+               rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1);
+       if (rc < 0)
+               return rc;
+
+       if (page == 0) {
+               rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
+               lp->rssi_base_val = -100;
+       } else {
+               rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
+               lp->rssi_base_val = -98;
+       }
+       if (rc < 0)
+               return rc;
+
+       return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
 static int
 at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
 {
@@ -527,14 +629,22 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
 
        might_sleep();
 
-       if (page != 0 || channel < 11 || channel > 26) {
+       if (page < 0 || page > 31 ||
+           !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
                WARN_ON(1);
                return -EINVAL;
        }
 
-       rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+       if (is_rf212(lp))
+               rc = at86rf212_set_channel(lp, page, channel);
+       else
+               rc = at86rf230_set_channel(lp, page, channel);
+       if (rc < 0)
+               return rc;
+
        msleep(1); /* Wait for PLL */
        dev->phy->current_channel = channel;
+       dev->phy->current_page = page;
 
        return 0;
 }
@@ -568,6 +678,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
        if (rc)
                goto err_rx;
 
+       if (lp->tx_aret) {
+               rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
+               if (rc)
+                       goto err_rx;
+       }
+
        rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
        if (rc)
                goto err_rx;
@@ -630,30 +746,31 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
        struct at86rf230_local *lp = dev->priv;
 
        if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+               u16 addr = le16_to_cpu(filt->short_addr);
+
                dev_vdbg(&lp->spi->dev,
                        "at86rf230_set_hw_addr_filt called for saddr\n");
-               __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr);
-               __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8);
+               __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
+               __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
        }
 
        if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+               u16 pan = le16_to_cpu(filt->pan_id);
+
                dev_vdbg(&lp->spi->dev,
                        "at86rf230_set_hw_addr_filt called for pan id\n");
-               __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id);
-               __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8);
+               __at86rf230_write(lp, RG_PAN_ID_0, pan);
+               __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
        }
 
        if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+               u8 i, addr[8];
+
+               memcpy(addr, &filt->ieee_addr, 8);
                dev_vdbg(&lp->spi->dev,
                        "at86rf230_set_hw_addr_filt called for IEEE addr\n");
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
-               at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
+               for (i = 0; i < 8; i++)
+                       __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
        }
 
        if (changed & IEEE802515_AFILT_PANC_CHANGED) {
@@ -668,6 +785,93 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
        return 0;
 }
 
+static int
+at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+{
+       struct at86rf230_local *lp = dev->priv;
+
+       /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
+        * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
+        * 0dB.
+        * Thus, supported values for db range from -26 to 5, going from
+        * 31 dB of reduction down to 0 dB of reduction.
+        */
+       if (db > 5 || db < -26)
+               return -EINVAL;
+
+       db = -(db - 5);
+
+       return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+}
+
+static int
+at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+{
+       struct at86rf230_local *lp = dev->priv;
+
+       return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
+}
+
+static int
+at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+{
+       struct at86rf230_local *lp = dev->priv;
+
+       return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+}
+
+static int
+at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+{
+       struct at86rf230_local *lp = dev->priv;
+       int desens_steps;
+
+       if (level < lp->rssi_base_val || level > 30)
+               return -EINVAL;
+
+       desens_steps = (level - lp->rssi_base_val) * 100 / 207;
+
+       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+}
+
+static int
+at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+                         u8 retries)
+{
+       struct at86rf230_local *lp = dev->priv;
+       int rc;
+
+       if (min_be > max_be || max_be > 8 || retries > 5)
+               return -EINVAL;
+
+       rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
+       if (rc)
+               return rc;
+
+       rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be);
+       if (rc)
+               return rc;
+
+       return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, retries);
+}
+
+static int
+at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+{
+       struct at86rf230_local *lp = dev->priv;
+       int rc = 0;
+
+       if (retries < -1 || retries > 15)
+               return -EINVAL;
+
+       lp->tx_aret = retries >= 0;
+
+       if (retries >= 0)
+               rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
+
+       return rc;
+}
+
 static struct ieee802154_ops at86rf230_ops = {
        .owner = THIS_MODULE,
        .xmit = at86rf230_xmit,
@@ -678,6 +882,22 @@ static struct ieee802154_ops at86rf230_ops = {
        .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
 };
 
+static struct ieee802154_ops at86rf212_ops = {
+       .owner = THIS_MODULE,
+       .xmit = at86rf230_xmit,
+       .ed = at86rf230_ed,
+       .set_channel = at86rf230_channel,
+       .start = at86rf230_start,
+       .stop = at86rf230_stop,
+       .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+       .set_txpower = at86rf212_set_txpower,
+       .set_lbt = at86rf212_set_lbt,
+       .set_cca_mode = at86rf212_set_cca_mode,
+       .set_cca_ed_level = at86rf212_set_cca_ed_level,
+       .set_csma_params = at86rf212_set_csma_params,
+       .set_frame_retries = at86rf212_set_frame_retries,
+};
+
 static void at86rf230_irqwork(struct work_struct *work)
 {
        struct at86rf230_local *lp =
@@ -695,8 +915,8 @@ static void at86rf230_irqwork(struct work_struct *work)
        status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
 
        if (status & IRQ_TRX_END) {
-               spin_lock_irqsave(&lp->lock, flags);
                status &= ~IRQ_TRX_END;
+               spin_lock_irqsave(&lp->lock, flags);
                if (lp->is_tx) {
                        lp->is_tx = 0;
                        spin_unlock_irqrestore(&lp->lock, flags);
@@ -753,22 +973,15 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
        int rc, irq_pol;
        u8 status;
+       u8 csma_seed[2];
 
        rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
        if (rc)
                return rc;
 
-       dev_info(&lp->spi->dev, "Status: %02x\n", status);
-       if (status == STATE_P_ON) {
-               rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
-               if (rc)
-                       return rc;
-               msleep(1);
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-               if (rc)
-                       return rc;
-               dev_info(&lp->spi->dev, "Status: %02x\n", status);
-       }
+       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+       if (rc)
+               return rc;
 
        /* configure irq polarity, defaults to high active */
        if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
@@ -784,6 +997,14 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        if (rc)
                return rc;
 
+       get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
+       rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
+       if (rc)
+               return rc;
+       rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
+       if (rc)
+               return rc;
+
        /* CLKM changes are applied immediately */
        rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
        if (rc)
@@ -796,16 +1017,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        /* Wait the next SLEEP cycle */
        msleep(100);
 
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
-       if (rc)
-               return rc;
-       msleep(1);
-
-       rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
-       if (rc)
-               return rc;
-       dev_info(&lp->spi->dev, "Status: %02x\n", status);
-
        rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
        if (rc)
                return rc;
@@ -825,14 +1036,38 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        return 0;
 }
 
-static void at86rf230_fill_data(struct spi_device *spi)
+static struct at86rf230_platform_data *
+at86rf230_get_pdata(struct spi_device *spi)
 {
-       struct at86rf230_local *lp = spi_get_drvdata(spi);
-       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+       struct at86rf230_platform_data *pdata;
+       const char *irq_type = "edge-rising";   /* default if "irq-type" is absent */
+
+       if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node)
+               return spi->dev.platform_data;
+
+       pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               goto done;
+
+       pdata->rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
+       pdata->slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
+
+       pdata->irq_type = IRQF_TRIGGER_RISING;
+       of_property_read_string(spi->dev.of_node, "irq-type", &irq_type);
+       if (!strcmp(irq_type, "level-high"))
+               pdata->irq_type = IRQF_TRIGGER_HIGH;
+       else if (!strcmp(irq_type, "level-low"))
+               pdata->irq_type = IRQF_TRIGGER_LOW;
+       else if (!strcmp(irq_type, "edge-rising"))
+               pdata->irq_type = IRQF_TRIGGER_RISING;
+       else if (!strcmp(irq_type, "edge-falling"))
+               pdata->irq_type = IRQF_TRIGGER_FALLING;
+       else
+               dev_warn(&spi->dev, "wrong irq-type specified, using edge-rising\n");
 
-       lp->rstn = pdata->rstn;
-       lp->slp_tr = pdata->slp_tr;
-       lp->dig2 = pdata->dig2;
+       spi->dev.platform_data = pdata;
+done:
+       return pdata;
 }
 
 static int at86rf230_probe(struct spi_device *spi)
@@ -840,133 +1075,146 @@ static int at86rf230_probe(struct spi_device *spi)
        struct at86rf230_platform_data *pdata;
        struct ieee802154_dev *dev;
        struct at86rf230_local *lp;
-       u8 man_id_0, man_id_1, status;
+       u16 man_id = 0;
+       u8 part = 0, version = 0, status;
        irq_handler_t irq_handler;
        work_func_t irq_worker;
-       int rc, supported = 0;
+       int rc;
        const char *chip;
+       struct ieee802154_ops *ops = NULL;
 
        if (!spi->irq) {
                dev_err(&spi->dev, "no IRQ specified\n");
                return -EINVAL;
        }
 
-       pdata = spi->dev.platform_data;
+       pdata = at86rf230_get_pdata(spi);
        if (!pdata) {
                dev_err(&spi->dev, "no platform_data\n");
                return -EINVAL;
        }
 
-       dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
-       if (!dev)
-               return -ENOMEM;
-
-       lp = dev->priv;
-       lp->dev = dev;
-
-       lp->spi = spi;
-
-       dev->parent = &spi->dev;
-       dev->extra_tx_headroom = 0;
-       /* We do support only 2.4 Ghz */
-       dev->phy->channels_supported[0] = 0x7FFF800;
-       dev->flags = IEEE802154_HW_OMIT_CKSUM;
-
-       if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
-               irq_worker = at86rf230_irqwork;
-               irq_handler = at86rf230_isr;
-       } else {
-               irq_worker = at86rf230_irqwork_level;
-               irq_handler = at86rf230_isr_level;
+       if (gpio_is_valid(pdata->rstn)) {
+               rc = gpio_request(pdata->rstn, "rstn");
+               if (rc)
+                       return rc;
        }
 
-       mutex_init(&lp->bmux);
-       INIT_WORK(&lp->irqwork, irq_worker);
-       spin_lock_init(&lp->lock);
-       init_completion(&lp->tx_complete);
-
-       spi_set_drvdata(spi, lp);
-
-       at86rf230_fill_data(spi);
-
-       rc = gpio_request(lp->rstn, "rstn");
-       if (rc)
-               goto err_rstn;
-
-       if (gpio_is_valid(lp->slp_tr)) {
-               rc = gpio_request(lp->slp_tr, "slp_tr");
+       if (gpio_is_valid(pdata->slp_tr)) {
+               rc = gpio_request(pdata->slp_tr, "slp_tr");
                if (rc)
                        goto err_slp_tr;
        }
 
-       rc = gpio_direction_output(lp->rstn, 1);
-       if (rc)
-               goto err_gpio_dir;
+       if (gpio_is_valid(pdata->rstn)) {
+               rc = gpio_direction_output(pdata->rstn, 1);
+               if (rc)
+                       goto err_gpio_dir;
+       }
 
-       if (gpio_is_valid(lp->slp_tr)) {
-               rc = gpio_direction_output(lp->slp_tr, 0);
+       if (gpio_is_valid(pdata->slp_tr)) {
+               rc = gpio_direction_output(pdata->slp_tr, 0);
                if (rc)
                        goto err_gpio_dir;
        }
 
        /* Reset */
-       msleep(1);
-       gpio_set_value(lp->rstn, 0);
-       msleep(1);
-       gpio_set_value(lp->rstn, 1);
-       msleep(1);
+       if (gpio_is_valid(pdata->rstn)) {
+               udelay(1);
+               gpio_set_value(pdata->rstn, 0);
+               udelay(1);
+               gpio_set_value(pdata->rstn, 1);
+               usleep_range(120, 240);
+       }
 
-       rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
-       if (rc)
-               goto err_gpio_dir;
-       rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
-       if (rc)
+       rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
+       if (rc < 0)
                goto err_gpio_dir;
 
-       if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+       if (man_id != 0x001f) {
                dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
-                       man_id_1, man_id_0);
+                       man_id >> 8, man_id & 0xFF);
                rc = -EINVAL;
                goto err_gpio_dir;
        }
 
-       rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
-       if (rc)
-               goto err_gpio_dir;
-
-       rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
-       if (rc)
-               goto err_gpio_dir;
-
-       switch (lp->part) {
+       switch (part) {
        case 2:
                chip = "at86rf230";
-               /* supported = 1;  FIXME: should be easy to support; */
+               /* FIXME: should be easy to support; */
                break;
        case 3:
                chip = "at86rf231";
-               supported = 1;
+               ops = &at86rf230_ops;
+               break;
+       case 7:
+               chip = "at86rf212";
+               if (version == 1)
+                       ops = &at86rf212_ops;
+               break;
+       case 11:
+               chip = "at86rf233";
+               ops = &at86rf230_ops;
                break;
        default:
                chip = "UNKNOWN";
                break;
        }
 
-       dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
-       if (!supported) {
+       dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
+       if (!ops) {
                rc = -ENOTSUPP;
                goto err_gpio_dir;
        }
 
+       dev = ieee802154_alloc_device(sizeof(*lp), ops);
+       if (!dev) {
+               rc = -ENOMEM;
+               goto err_gpio_dir;
+       }
+
+       lp = dev->priv;
+       lp->dev = dev;
+       lp->part = part;
+       lp->vers = version;
+
+       lp->spi = spi;
+
+       dev->parent = &spi->dev;
+       dev->extra_tx_headroom = 0;
+       dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+       if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+               irq_worker = at86rf230_irqwork;
+               irq_handler = at86rf230_isr;
+       } else {
+               irq_worker = at86rf230_irqwork_level;
+               irq_handler = at86rf230_isr_level;
+       }
+
+       mutex_init(&lp->bmux);
+       INIT_WORK(&lp->irqwork, irq_worker);
+       spin_lock_init(&lp->lock);
+       init_completion(&lp->tx_complete);
+
+       spi_set_drvdata(spi, lp);
+
+       if (is_rf212(lp)) {
+               dev->phy->channels_supported[0] = 0x00007FF;
+               dev->phy->channels_supported[2] = 0x00007FF;
+       } else {
+               dev->phy->channels_supported[0] = 0x7FFF800;
+       }
+
        rc = at86rf230_hw_init(lp);
        if (rc)
-               goto err_gpio_dir;
+               goto err_hw_init;
 
        rc = request_irq(spi->irq, irq_handler,
                         IRQF_SHARED | pdata->irq_type,
                         dev_name(&spi->dev), lp);
        if (rc)
-               goto err_gpio_dir;
+               goto err_hw_init;
 
        /* Read irq status register to reset irq line */
        rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
@@ -981,21 +1229,25 @@ static int at86rf230_probe(struct spi_device *spi)
 
 err_irq:
        free_irq(spi->irq, lp);
+err_hw_init:
        flush_work(&lp->irqwork);
-err_gpio_dir:
-       if (gpio_is_valid(lp->slp_tr))
-               gpio_free(lp->slp_tr);
-err_slp_tr:
-       gpio_free(lp->rstn);
-err_rstn:
+       spi_set_drvdata(spi, NULL);
        mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
+
+err_gpio_dir:
+       if (gpio_is_valid(pdata->slp_tr))
+               gpio_free(pdata->slp_tr);
+err_slp_tr:
+       if (gpio_is_valid(pdata->rstn))
+               gpio_free(pdata->rstn);
        return rc;
 }
 
 static int at86rf230_remove(struct spi_device *spi)
 {
        struct at86rf230_local *lp = spi_get_drvdata(spi);
+       struct at86rf230_platform_data *pdata = spi->dev.platform_data;
 
        /* mask all at86rf230 irq's */
        at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
@@ -1004,9 +1256,10 @@ static int at86rf230_remove(struct spi_device *spi)
        free_irq(spi->irq, lp);
        flush_work(&lp->irqwork);
 
-       if (gpio_is_valid(lp->slp_tr))
-               gpio_free(lp->slp_tr);
-       gpio_free(lp->rstn);
+       if (gpio_is_valid(pdata->slp_tr))
+               gpio_free(pdata->slp_tr);
+       if (gpio_is_valid(pdata->rstn))
+               gpio_free(pdata->rstn);
 
        mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
@@ -1015,8 +1268,19 @@ static int at86rf230_remove(struct spi_device *spi)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_OF)
+static struct of_device_id at86rf230_of_match[] = {
+       { .compatible = "atmel,at86rf230", },
+       { .compatible = "atmel,at86rf231", },
+       { .compatible = "atmel,at86rf233", },
+       { .compatible = "atmel,at86rf212", },
+       { },
+};
+#endif
+
 static struct spi_driver at86rf230_driver = {
        .driver = {
+               .of_match_table = of_match_ptr(at86rf230_of_match),
                .name   = "at86rf230",
                .owner  = THIS_MODULE,
        },
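
Note: with the reworked probe path above, boards that do not wire up the RSTN or
SLP_TR lines can simply pass invalid GPIO numbers, while DT-based boards use the
"reset-gpio", "sleep-gpio" and "irq-type" properties parsed by
at86rf230_get_pdata(). A rough board-file sketch for the platform-data case
(bus, chip-select and IRQ numbers are made up for illustration):

	static struct at86rf230_platform_data rf231_pdata = {
		.rstn		= -1,	/* reset line not wired up */
		.slp_tr		= -1,	/* SLP_TR line not wired up */
		.irq_type	= IRQF_TRIGGER_RISING,
	};

	static struct spi_board_info rf231_board_info[] __initdata = {
		{
			.modalias	= "at86rf230",
			.max_speed_hz	= 5000000,
			.bus_num	= 0,
			.chip_select	= 0,
			.irq		= 42,	/* board-specific IRQ line */
			.platform_data	= &rf231_pdata,
		},
	};

	/* registered from board init code via spi_register_board_info() */
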
index bf0d55e2dd635613c3712b5b73ab528217e5cff7..78f18be3bbf2a76b7c245abef4aabd34faf3c72f 100644 (file)
@@ -63,11 +63,11 @@ static struct wpan_phy *fake_get_phy(const struct net_device *dev)
  *
  * Return the ID of the PAN from the PIB.
  */
-static u16 fake_get_pan_id(const struct net_device *dev)
+static __le16 fake_get_pan_id(const struct net_device *dev)
 {
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
-       return 0xeba1;
+       return cpu_to_le16(0xeba1);
 }
 
 /**
@@ -78,11 +78,11 @@ static u16 fake_get_pan_id(const struct net_device *dev)
  * device. If the device has not yet had a short address assigned
  * then this should return 0xFFFF to indicate a lack of association.
  */
-static u16 fake_get_short_addr(const struct net_device *dev)
+static __le16 fake_get_short_addr(const struct net_device *dev)
 {
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
-       return 0x1;
+       return cpu_to_le16(0x1);
 }
 
 /**
@@ -149,7 +149,7 @@ static int fake_assoc_req(struct net_device *dev,
  *       802.15.4-2006 document.
  */
 static int fake_assoc_resp(struct net_device *dev,
-               struct ieee802154_addr *addr, u16 short_addr, u8 status)
+               struct ieee802154_addr *addr, __le16 short_addr, u8 status)
 {
        return 0;
 }
@@ -191,10 +191,10 @@ static int fake_disassoc_req(struct net_device *dev,
  * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006
  * document, with 7.3.8 describing coordinator realignment.
  */
-static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
-                               u8 channel, u8 page,
-                               u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
-                               u8 coord_realign)
+static int fake_start_req(struct net_device *dev,
+                         struct ieee802154_addr *addr, u8 channel, u8 page,
+                         u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
+                         u8 coord_realign)
 {
        struct wpan_phy *phy = fake_to_phy(dev);
 
@@ -281,8 +281,8 @@ static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr,
        switch (cmd) {
        case SIOCGIFADDR:
                /* FIXME: fixed here, get from device IRL */
-               pan_id = fake_get_pan_id(dev);
-               short_addr = fake_get_short_addr(dev);
+               pan_id = le16_to_cpu(fake_get_pan_id(dev));
+               short_addr = le16_to_cpu(fake_get_short_addr(dev));
                if (pan_id == IEEE802154_PANID_BROADCAST ||
                    short_addr == IEEE802154_ADDR_BROADCAST)
                        return -EADDRNOTAVAIL;
index 246befa4ba0571383f13cbdef2a701156b0326d0..78a6552ed7072d04672b98d7731c0ffd4af6117b 100644 (file)
@@ -465,8 +465,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
        if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
                /* Short Addr */
                u8 addrh, addrl;
-               addrh = filt->short_addr >> 8 & 0xff;
-               addrl = filt->short_addr & 0xff;
+               addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff;
+               addrl = le16_to_cpu(filt->short_addr) & 0xff;
 
                write_short_reg(devrec, REG_SADRH, addrh);
                write_short_reg(devrec, REG_SADRL, addrl);
@@ -476,15 +476,16 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
 
        if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
                /* Device Address */
-               int i;
+               u8 i, addr[8];
+
+               memcpy(addr, &filt->ieee_addr, 8);
                for (i = 0; i < 8; i++)
-                       write_short_reg(devrec, REG_EADR0+i,
-                                       filt->ieee_addr[7-i]);
+                       write_short_reg(devrec, REG_EADR0 + i, addr[i]);
 
 #ifdef DEBUG
                printk(KERN_DEBUG "Set long addr to: ");
                for (i = 0; i < 8; i++)
-                       printk("%02hhx ", filt->ieee_addr[i]);
+                       printk("%02hhx ", addr[7 - i]);
                printk(KERN_DEBUG "\n");
 #endif
        }
@@ -492,8 +493,8 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
        if (changed & IEEE802515_AFILT_PANID_CHANGED) {
                /* PAN ID */
                u8 panidl, panidh;
-               panidh = filt->pan_id >> 8 & 0xff;
-               panidl = filt->pan_id & 0xff;
+               panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff;
+               panidl = le16_to_cpu(filt->pan_id) & 0xff;
                write_short_reg(devrec, REG_PANIDH, panidh);
                write_short_reg(devrec, REG_PANIDL, panidl);
 
index d7b2e947184b549a5034e4a54423ecde9050d721..46a7790be004a7653d391ce96d51ae48975baa8c 100644 (file)
@@ -136,18 +136,18 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
        unsigned int start;
 
        do {
-               start = u64_stats_fetch_begin_bh(&dp->rsync);
+               start = u64_stats_fetch_begin_irq(&dp->rsync);
                stats->rx_packets = dp->rx_packets;
                stats->rx_bytes = dp->rx_bytes;
-       } while (u64_stats_fetch_retry_bh(&dp->rsync, start));
+       } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
 
        do {
-               start = u64_stats_fetch_begin_bh(&dp->tsync);
+               start = u64_stats_fetch_begin_irq(&dp->tsync);
 
                stats->tx_packets = dp->tx_packets;
                stats->tx_bytes = dp->tx_bytes;
 
-       } while (u64_stats_fetch_retry_bh(&dp->tsync, start));
+       } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
 
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->tx_dropped = dev->stats.tx_dropped;
index c5011e078e1b48f4181464009afc0fe7a1ebbd10..bb96409f8c056b85b77255f1c397edb4b28b0511 100644 (file)
@@ -111,10 +111,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
 
                lb_stats = per_cpu_ptr(dev->lstats, i);
                do {
-                       start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&lb_stats->syncp);
                        tbytes = lb_stats->bytes;
                        tpackets = lb_stats->packets;
-               } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&lb_stats->syncp, start));
                bytes   += tbytes;
                packets += tpackets;
        }
@@ -136,16 +136,9 @@ static const struct ethtool_ops loopback_ethtool_ops = {
 
 static int loopback_dev_init(struct net_device *dev)
 {
-       int i;
-       dev->lstats = alloc_percpu(struct pcpu_lstats);
+       dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
        if (!dev->lstats)
                return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct pcpu_lstats *lb_stats;
-               lb_stats = per_cpu_ptr(dev->lstats, i);
-               u64_stats_init(&lb_stats->syncp);
-       }
        return 0;
 }
 
@@ -160,6 +153,7 @@ static const struct net_device_ops loopback_ops = {
        .ndo_init      = loopback_dev_init,
        .ndo_start_xmit= loopback_xmit,
        .ndo_get_stats64 = loopback_get_stats64,
+       .ndo_set_mac_address = eth_mac_addr,
 };
 
 /*
@@ -174,6 +168,7 @@ static void loopback_setup(struct net_device *dev)
        dev->tx_queue_len       = 0;
        dev->type               = ARPHRD_LOOPBACK;      /* 0x0001*/
        dev->flags              = IFF_LOOPBACK;
+       dev->priv_flags         |= IFF_LIVE_ADDR_CHANGE;
        dev->priv_flags        &= ~IFF_XMIT_DST_RELEASE;
        dev->hw_features        = NETIF_F_ALL_TSO | NETIF_F_UFO;
        dev->features           = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -181,6 +176,7 @@ static void loopback_setup(struct net_device *dev)
                | NETIF_F_UFO
                | NETIF_F_HW_CSUM
                | NETIF_F_RXCSUM
+               | NETIF_F_SCTP_CSUM
                | NETIF_F_HIGHDMA
                | NETIF_F_LLTX
                | NETIF_F_NETNS_LOCAL
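
Note: the ifb, loopback, nlmon and macvlan hunks in this series all switch to the
same pattern: netdev_alloc_pcpu_stats() allocates the per-CPU counters and runs
u64_stats_init() on each CPU's syncp, and readers use the _irq variants of the
u64_stats fetch helpers. For reference, the matching update side (not shown in
these diffs) typically looks like the following sketch, assuming a pcpu_lstats
style counter as used by loopback and nlmon:

	/* per-CPU counter update on the hot path */
	struct pcpu_lstats *lb_stats = this_cpu_ptr(dev->lstats);

	u64_stats_update_begin(&lb_stats->syncp);
	lb_stats->bytes += skb->len;
	lb_stats->packets++;
	u64_stats_update_end(&lb_stats->syncp);
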
index 1831fb7cd0174d9afd6696bf029d37a657b07222..753a8c23d15d9af1e138ec6fda410aeda32d288b 100644 (file)
@@ -537,7 +537,6 @@ static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        const struct net_device *lowerdev = vlan->lowerdev;
-       int i;
 
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
                                  (lowerdev->state & MACVLAN_STATE_MASK);
@@ -549,16 +548,10 @@ static int macvlan_init(struct net_device *dev)
 
        macvlan_set_lockdep_class(dev);
 
-       vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->pcpu_stats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct vlan_pcpu_stats *mvlstats;
-               mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
-               u64_stats_init(&mvlstats->syncp);
-       }
-
        return 0;
 }
 
@@ -589,13 +582,13 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
                for_each_possible_cpu(i) {
                        p = per_cpu_ptr(vlan->pcpu_stats, i);
                        do {
-                               start = u64_stats_fetch_begin_bh(&p->syncp);
+                               start = u64_stats_fetch_begin_irq(&p->syncp);
                                rx_packets      = p->rx_packets;
                                rx_bytes        = p->rx_bytes;
                                rx_multicast    = p->rx_multicast;
                                tx_packets      = p->tx_packets;
                                tx_bytes        = p->tx_bytes;
-                       } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+                       } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
                        stats->rx_packets       += rx_packets;
                        stats->rx_bytes         += rx_bytes;
index d2bb12bfabd5501055dfd52760c19c598b254bc9..34924dfadd0097608dac20dcf0aced6bb1c9805e 100644 (file)
@@ -47,16 +47,7 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
 
 static int nlmon_dev_init(struct net_device *dev)
 {
-       int i;
-
-       dev->lstats = alloc_percpu(struct pcpu_lstats);
-
-       for_each_possible_cpu(i) {
-               struct pcpu_lstats *nlmstats;
-               nlmstats = per_cpu_ptr(dev->lstats, i);
-               u64_stats_init(&nlmstats->syncp);
-       }
-
+       dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
        return dev->lstats == NULL ? -ENOMEM : 0;
 }
 
@@ -99,10 +90,10 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                nl_stats = per_cpu_ptr(dev->lstats, i);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&nl_stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&nl_stats->syncp);
                        tbytes = nl_stats->bytes;
                        tpackets = nl_stats->packets;
-               } while (u64_stats_fetch_retry_bh(&nl_stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&nl_stats->syncp, start));
 
                packets += tpackets;
                bytes += tbytes;
@@ -145,7 +136,8 @@ static void nlmon_setup(struct net_device *dev)
        dev->ethtool_ops = &nlmon_ethtool_ops;
        dev->destructor = free_netdev;
 
-       dev->features = NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+       dev->features = NETIF_F_SG | NETIF_F_FRAGLIST |
+                       NETIF_F_HIGHDMA | NETIF_F_LLTX;
        dev->flags = IFF_NOARP;
 
        /* That's rather a softlimit here, which, of course,
index 9b5d46c03eed34d244bf279ea13bf38ffebea009..6a17f92153b31b3b2fe95a5d7e087eeb3373032b 100644 (file)
@@ -71,6 +71,12 @@ config BCM63XX_PHY
        ---help---
          Currently supports the 6348 and 6358 PHYs.
 
+config BCM7XXX_PHY
+       tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+       ---help---
+         Currently supports the BCM7366, BCM7439, BCM7445, and the
+         40nm and 65nm generations of BCM7xxx Set Top Box SoCs.
+
 config BCM87XX_PHY
        tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
        help
index 9013dfa12aa39ac7fe504734e6b3e62d7865b0d7..07d24024863e0805af4127c72e4663174a47babb 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY)                += smsc.o
 obj-$(CONFIG_VITESSE_PHY)      += vitesse.o
 obj-$(CONFIG_BROADCOM_PHY)     += broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)      += bcm63xx.o
+obj-$(CONFIG_BCM7XXX_PHY)      += bcm7xxx.o
 obj-$(CONFIG_BCM87XX_PHY)      += bcm87xx.o
 obj-$(CONFIG_ICPLUS_PHY)       += icplus.o
 obj-$(CONFIG_REALTEK_PHY)      += realtek.o
index bc71947b1ec329f2eacd52b915b37ce56a4f16bb..643464d5a727b3b937fbe803072861de15a1414f 100644 (file)
@@ -27,6 +27,9 @@
 #define AT803X_MMD_ACCESS_CONTROL              0x0D
 #define AT803X_MMD_ACCESS_CONTROL_DATA         0x0E
 #define AT803X_FUNC_DATA                       0x4003
+#define AT803X_INER                            0x0012
+#define AT803X_INER_INIT                       0xec00
+#define AT803X_INSR                            0x0013
 #define AT803X_DEBUG_ADDR                      0x1D
 #define AT803X_DEBUG_DATA                      0x1E
 #define AT803X_DEBUG_SYSTEM_MODE_CTRL          0x05
@@ -191,6 +194,31 @@ static int at803x_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int at803x_ack_interrupt(struct phy_device *phydev)
+{
+       int err;
+
+       err = phy_read(phydev, AT803X_INSR);
+
+       return (err < 0) ? err : 0;
+}
+
+static int at803x_config_intr(struct phy_device *phydev)
+{
+       int err;
+       int value;
+
+       value = phy_read(phydev, AT803X_INER);
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+               err = phy_write(phydev, AT803X_INER,
+                               value | AT803X_INER_INIT);
+       else
+               err = phy_write(phydev, AT803X_INER, 0);
+
+       return err;
+}
+
 static struct phy_driver at803x_driver[] = {
 {
        /* ATHEROS 8035 */
@@ -240,6 +268,8 @@ static struct phy_driver at803x_driver[] = {
        .flags          = PHY_HAS_INTERRUPT,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
+       .ack_interrupt  = &at803x_ack_interrupt,
+       .config_intr    = &at803x_config_intr,
        .driver         = {
                .owner = THIS_MODULE,
        },
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
new file mode 100644 (file)
index 0000000..526b94c
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * Broadcom BCM7xxx internal transceivers support.
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/brcmphy.h>
+
+/* Broadcom BCM7xxx internal PHY registers */
+#define MII_BCM7XXX_CHANNEL_WIDTH      0x2000
+
+/* 40nm only register definitions */
+#define MII_BCM7XXX_100TX_AUX_CTL      0x10
+#define MII_BCM7XXX_100TX_FALSE_CAR    0x13
+#define MII_BCM7XXX_100TX_DISC         0x14
+#define MII_BCM7XXX_AUX_MODE           0x1d
+#define  MII_BCM7XX_64CLK_MDIO         BIT(12)
+#define MII_BCM7XXX_CORE_BASE1E                0x1e
+#define MII_BCM7XXX_TEST               0x1f
+#define  MII_BCM7XXX_SHD_MODE_2                BIT(2)
+
+/* 28nm only register definitions */
+#define MISC_ADDR(base, channel)       base, channel
+
+#define DSP_TAP10                      MISC_ADDR(0x0a, 0)
+#define PLL_PLLCTRL_1                  MISC_ADDR(0x32, 1)
+#define PLL_PLLCTRL_2                  MISC_ADDR(0x32, 2)
+#define PLL_PLLCTRL_4                  MISC_ADDR(0x33, 0)
+
+#define AFE_RXCONFIG_0                 MISC_ADDR(0x38, 0)
+#define AFE_RXCONFIG_1                 MISC_ADDR(0x38, 1)
+#define AFE_RX_LP_COUNTER              MISC_ADDR(0x38, 3)
+#define AFE_TX_CONFIG                  MISC_ADDR(0x39, 0)
+#define AFE_HPF_TRIM_OTHERS            MISC_ADDR(0x3a, 0)
+
+#define CORE_EXPB0                     0xb0
+
+static int bcm7445_config_init(struct phy_device *phydev)
+{
+       int ret;
+       const struct bcm7445_regs {
+               int reg;
+               u16 value;
+       } bcm7445_regs_cfg[] = {
+               /* increases ADC latency by 24ns */
+               { MII_BCM54XX_EXP_SEL, 0x0038 },
+               { MII_BCM54XX_EXP_DATA, 0xAB95 },
+               /* increases internal 1V LDO voltage by 5% */
+               { MII_BCM54XX_EXP_SEL, 0x2038 },
+               { MII_BCM54XX_EXP_DATA, 0xBB22 },
+               /* reduce RX low pass filter corner frequency */
+               { MII_BCM54XX_EXP_SEL, 0x6038 },
+               { MII_BCM54XX_EXP_DATA, 0xFFC5 },
+               /* reduce RX high pass filter corner frequency */
+               { MII_BCM54XX_EXP_SEL, 0x003a },
+               { MII_BCM54XX_EXP_DATA, 0x2002 },
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
+               ret = phy_write(phydev,
+                               bcm7445_regs_cfg[i].reg,
+                               bcm7445_regs_cfg[i].value);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void phy_write_exp(struct phy_device *phydev,
+                                       u16 reg, u16 value)
+{
+       phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
+       phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static void phy_write_misc(struct phy_device *phydev,
+                                       u16 reg, u16 chl, u16 value)
+{
+       int tmp;
+
+       phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+
+       tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+       tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+       phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+
+       tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
+       phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
+
+       phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+{
+       /* Increase VCO range to prevent unlocking problem of PLL at low
+        * temp
+        */
+       phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+
+       /* Change Ki to 011 */
+       phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+
+       /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+        * to 111
+        */
+       phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+
+       /* Adjust bias current trim by -3 */
+       phy_write_misc(phydev, DSP_TAP10, 0x690b);
+
+       /* Switch to CORE_BASE1E */
+       phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+
+       /* Reset R_CAL/RC_CAL Engine */
+       phy_write_exp(phydev, CORE_EXPB0, 0x0010);
+
+       /* Disable Reset R_CAL/RC_CAL Engine */
+       phy_write_exp(phydev, CORE_EXPB0, 0x0000);
+
+       /* write AFE_RXCONFIG_0 */
+       phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+
+       /* write AFE_RXCONFIG_1 */
+       phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+
+       /* write AFE_RX_LP_COUNTER */
+       phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+       /* write AFE_HPF_TRIM_OTHERS */
+       phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+
+       /* write AFE_TX_CONFIG */
+       phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+
+       return 0;
+}
+
+static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = bcm7445_config_init(phydev);
+       if (ret)
+               return ret;
+
+       return bcm7xxx_28nm_afe_config_init(phydev);
+}
+
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+                                       int set_mask, int clr_mask)
+{
+       int v, ret;
+
+       v = phy_read(dev, location);
+       if (v < 0)
+               return v;
+
+       v &= ~clr_mask;
+       v |= set_mask;
+
+       ret = phy_write(dev, location, v);
+       if (ret < 0)
+               return ret;
+
+       return v;
+}
+
+static int bcm7xxx_config_init(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Enable 64 clock MDIO */
+       phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
+       phy_read(phydev, MII_BCM7XXX_AUX_MODE);
+
+       /* Workaround only required for 100Mbits/sec */
+       if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR))
+               return 0;
+
+       /* set shadow mode 2 */
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+                       MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
+       if (ret < 0)
+               return ret;
+
+       /* set iddq_clkbias */
+       phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0F00);
+       udelay(10);
+
+       /* reset iddq_clkbias */
+       phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0C00);
+
+       phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
+
+       /* reset shadow mode 2 */
+       ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* Workaround for putting the PHY in IDDQ mode, required
+ * for all BCM7XXX PHYs
+ */
+static int bcm7xxx_suspend(struct phy_device *phydev)
+{
+       int ret;
+       const struct bcm7xxx_regs {
+               int reg;
+               u16 value;
+       } bcm7xxx_suspend_cfg[] = {
+               { MII_BCM7XXX_TEST, 0x008b },
+               { MII_BCM7XXX_100TX_AUX_CTL, 0x01c0 },
+               { MII_BCM7XXX_100TX_DISC, 0x7000 },
+               { MII_BCM7XXX_TEST, 0x000f },
+               { MII_BCM7XXX_100TX_AUX_CTL, 0x20d0 },
+               { MII_BCM7XXX_TEST, 0x000b },
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(bcm7xxx_suspend_cfg); i++) {
+               ret = phy_write(phydev,
+                               bcm7xxx_suspend_cfg[i].reg,
+                               bcm7xxx_suspend_cfg[i].value);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
+{
+       return 0;
+}
+
+static struct phy_driver bcm7xxx_driver[] = {
+{
+       .phy_id         = PHY_ID_BCM7366,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Broadcom BCM7366",
+       .features       = PHY_GBIT_FEATURES |
+                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+       .flags          = PHY_IS_INTERNAL,
+       .config_init    = bcm7xxx_28nm_afe_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .suspend        = bcm7xxx_suspend,
+       .resume         = bcm7xxx_28nm_afe_config_init,
+       .driver         = { .owner = THIS_MODULE },
+}, {
+       .phy_id         = PHY_ID_BCM7439,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Broadcom BCM7439",
+       .features       = PHY_GBIT_FEATURES |
+                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+       .flags          = PHY_IS_INTERNAL,
+       .config_init    = bcm7xxx_28nm_afe_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .suspend        = bcm7xxx_suspend,
+       .resume         = bcm7xxx_28nm_afe_config_init,
+       .driver         = { .owner = THIS_MODULE },
+}, {
+       .phy_id         = PHY_ID_BCM7445,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Broadcom BCM7445",
+       .features       = PHY_GBIT_FEATURES |
+                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+       .flags          = PHY_IS_INTERNAL,
+       .config_init    = bcm7xxx_28nm_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .suspend        = bcm7xxx_suspend,
+       .resume         = bcm7xxx_28nm_config_init,
+       .driver         = { .owner = THIS_MODULE },
+}, {
+       .name           = "Broadcom BCM7XXX 28nm",
+       .phy_id         = PHY_ID_BCM7XXX_28,
+       .phy_id_mask    = PHY_BCM_OUI_MASK,
+       .features       = PHY_GBIT_FEATURES |
+                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+       .flags          = PHY_IS_INTERNAL,
+       .config_init    = bcm7xxx_28nm_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .suspend        = bcm7xxx_suspend,
+       .resume         = bcm7xxx_28nm_config_init,
+       .driver         = { .owner = THIS_MODULE },
+}, {
+       .phy_id         = PHY_BCM_OUI_4,
+       .phy_id_mask    = 0xffff0000,
+       .name           = "Broadcom BCM7XXX 40nm",
+       .features       = PHY_GBIT_FEATURES |
+                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+       .flags          = PHY_IS_INTERNAL,
+       .config_init    = bcm7xxx_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .suspend        = bcm7xxx_suspend,
+       .resume         = bcm7xxx_config_init,
+       .driver         = { .owner = THIS_MODULE },
+}, {
+       .phy_id         = PHY_BCM_OUI_5,
+       .phy_id_mask    = 0xffffff00,
+       .name           = "Broadcom BCM7XXX 65nm",
+       .features       = PHY_BASIC_FEATURES |
+                         SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+       .flags          = PHY_IS_INTERNAL,
+       .config_init    = bcm7xxx_dummy_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .suspend        = bcm7xxx_suspend,
+       .resume         = bcm7xxx_config_init,
+       .driver         = { .owner = THIS_MODULE },
+} };
+
+static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+       { PHY_ID_BCM7366, 0xfffffff0, },
+       { PHY_ID_BCM7439, 0xfffffff0, },
+       { PHY_ID_BCM7445, 0xfffffff0, },
+       { PHY_ID_BCM7XXX_28, 0xfffffc00 },
+       { PHY_BCM_OUI_4, 0xffff0000 },
+       { PHY_BCM_OUI_5, 0xffffff00 },
+       { }
+};
+
+static int __init bcm7xxx_phy_init(void)
+{
+       return phy_drivers_register(bcm7xxx_driver,
+                       ARRAY_SIZE(bcm7xxx_driver));
+}
+
+static void __exit bcm7xxx_phy_exit(void)
+{
+       phy_drivers_unregister(bcm7xxx_driver,
+                       ARRAY_SIZE(bcm7xxx_driver));
+}
+
+module_init(bcm7xxx_phy_init);
+module_exit(bcm7xxx_phy_exit);
+
+MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
+
+MODULE_DESCRIPTION("Broadcom BCM7xxx internal PHY driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
index f8c90ea751083456e7e07135aaa16a215e23f43b..34088d60da74613cd007916ddd4844ee5420f90a 100644 (file)
 #define BRCM_PHY_REV(phydev) \
        ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
 
-
-#define MII_BCM54XX_ECR                0x10    /* BCM54xx extended control register */
-#define MII_BCM54XX_ECR_IM     0x1000  /* Interrupt mask */
-#define MII_BCM54XX_ECR_IF     0x0800  /* Interrupt force */
-
-#define MII_BCM54XX_ESR                0x11    /* BCM54xx extended status register */
-#define MII_BCM54XX_ESR_IS     0x1000  /* Interrupt status */
-
-#define MII_BCM54XX_EXP_DATA   0x15    /* Expansion register data */
-#define MII_BCM54XX_EXP_SEL    0x17    /* Expansion register select */
-#define MII_BCM54XX_EXP_SEL_SSD        0x0e00  /* Secondary SerDes select */
-#define MII_BCM54XX_EXP_SEL_ER 0x0f00  /* Expansion register select */
-
-#define MII_BCM54XX_AUX_CTL    0x18    /* Auxiliary control register */
-#define MII_BCM54XX_ISR                0x1a    /* BCM54xx interrupt status register */
-#define MII_BCM54XX_IMR                0x1b    /* BCM54xx interrupt mask register */
-#define MII_BCM54XX_INT_CRCERR 0x0001  /* CRC error */
-#define MII_BCM54XX_INT_LINK   0x0002  /* Link status changed */
-#define MII_BCM54XX_INT_SPEED  0x0004  /* Link speed change */
-#define MII_BCM54XX_INT_DUPLEX 0x0008  /* Duplex mode changed */
-#define MII_BCM54XX_INT_LRS    0x0010  /* Local receiver status changed */
-#define MII_BCM54XX_INT_RRS    0x0020  /* Remote receiver status changed */
-#define MII_BCM54XX_INT_SSERR  0x0040  /* Scrambler synchronization error */
-#define MII_BCM54XX_INT_UHCD   0x0080  /* Unsupported HCD negotiated */
-#define MII_BCM54XX_INT_NHCD   0x0100  /* No HCD */
-#define MII_BCM54XX_INT_NHCDL  0x0200  /* No HCD link */
-#define MII_BCM54XX_INT_ANPR   0x0400  /* Auto-negotiation page received */
-#define MII_BCM54XX_INT_LC     0x0800  /* All counters below 128 */
-#define MII_BCM54XX_INT_HC     0x1000  /* Counter above 32768 */
-#define MII_BCM54XX_INT_MDIX   0x2000  /* MDIX status change */
-#define MII_BCM54XX_INT_PSERR  0x4000  /* Pair swap error */
-
-#define MII_BCM54XX_SHD                0x1c    /* 0x1c shadow registers */
-#define MII_BCM54XX_SHD_WRITE  0x8000
-#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
-#define MII_BCM54XX_SHD_DATA(x)        ((x & 0x3ff) << 0)
-
-/*
- * AUXILIARY CONTROL SHADOW ACCESS REGISTERS.  (PHY REG 0x18)
- */
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL      0x0000
-#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB         0x0400
-#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA      0x0800
-
-#define MII_BCM54XX_AUXCTL_MISC_WREN   0x8000
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX    0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC     0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC        0x0007
-
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL      0x0000
-
-
 /*
  * Broadcom LED source encodings.  These are used in BCM5461, BCM5481,
  * BCM5482, and possibly some others.
index 98e7cbf720a5859b8639ebe0653c6ccfd9c51f02..352c5e45fe9cc4477286970612a15b72a1e9dd81 100644 (file)
@@ -47,6 +47,7 @@
 #define CAL_EVENT      7
 #define CAL_TRIGGER    7
 #define PER_TRIGGER    6
+#define DP83640_N_PINS 12
 
 #define MII_DP83640_MICR 0x11
 #define MII_DP83640_MISR 0x12
@@ -173,6 +174,37 @@ MODULE_PARM_DESC(chosen_phy, \
 MODULE_PARM_DESC(gpio_tab, \
        "Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
 
+static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
+{
+       int i, index;
+
+       for (i = 0; i < DP83640_N_PINS; i++) {
+               snprintf(pd[i].name, sizeof(pd[i].name), "GPIO%d", 1 + i);
+               pd[i].index = i;
+       }
+
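+       /* gpio_tab[] holds 1-based pin numbers, so reject any entry that does
+        * not map onto one of the DP83640_N_PINS GPIO pins.
+        */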
+       for (i = 0; i < GPIO_TABLE_SIZE; i++) {
+               if (gpio_tab[i] < 1 || gpio_tab[i] > DP83640_N_PINS) {
+                       pr_err("gpio_tab[%d]=%hu out of range\n", i, gpio_tab[i]);
+                       return;
+               }
+       }
+
+       index = gpio_tab[CALIBRATE_GPIO] - 1;
+       pd[index].func = PTP_PF_PHYSYNC;
+       pd[index].chan = 0;
+
+       index = gpio_tab[PEROUT_GPIO] - 1;
+       pd[index].func = PTP_PF_PEROUT;
+       pd[index].chan = 0;
+
+       for (i = EXTTS0_GPIO; i < GPIO_TABLE_SIZE; i++) {
+               index = gpio_tab[i] - 1;
+               pd[index].func = PTP_PF_EXTTS;
+               pd[index].chan = i - EXTTS0_GPIO;
+       }
+}
+
 /* a list of clocks and a mutex to protect it */
 static LIST_HEAD(phyter_clocks);
 static DEFINE_MUTEX(phyter_clocks_lock);
@@ -266,15 +298,22 @@ static u64 phy2txts(struct phy_txts *p)
        return ns;
 }
 
-static void periodic_output(struct dp83640_clock *clock,
-                           struct ptp_clock_request *clkreq, bool on)
+static int periodic_output(struct dp83640_clock *clock,
+                          struct ptp_clock_request *clkreq, bool on)
 {
        struct dp83640_private *dp83640 = clock->chosen;
        struct phy_device *phydev = dp83640->phydev;
-       u32 sec, nsec, period;
+       u32 sec, nsec, pwidth;
        u16 gpio, ptp_trig, trigger, val;
 
-       gpio = on ? gpio_tab[PEROUT_GPIO] : 0;
+       if (on) {
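+               /* ptp_find_pin() returns a zero-based pin index, or -1 when no
+                * pin is currently assigned to the periodic output function.
+                */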
+               gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT, 0);
+               if (gpio < 1)
+                       return -EINVAL;
+       } else {
+               gpio = 0;
+       }
+
        trigger = PER_TRIGGER;
 
        ptp_trig = TRIG_WR |
@@ -291,13 +330,14 @@ static void periodic_output(struct dp83640_clock *clock,
                ext_write(0, phydev, PAGE5, PTP_TRIG, ptp_trig);
                ext_write(0, phydev, PAGE4, PTP_CTL, val);
                mutex_unlock(&clock->extreg_lock);
-               return;
+               return 0;
        }
 
        sec = clkreq->perout.start.sec;
        nsec = clkreq->perout.start.nsec;
-       period = clkreq->perout.period.sec * 1000000000UL;
-       period += clkreq->perout.period.nsec;
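+       /* The trigger is programmed with a pulse width, so use half of the
+        * requested period to get a 50% duty cycle.
+        */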
+       pwidth = clkreq->perout.period.sec * 1000000000UL;
+       pwidth += clkreq->perout.period.nsec;
+       pwidth /= 2;
 
        mutex_lock(&clock->extreg_lock);
 
@@ -310,8 +350,8 @@ static void periodic_output(struct dp83640_clock *clock,
        ext_write(0, phydev, PAGE4, PTP_TDR, nsec >> 16);      /* ns[31:16] */
        ext_write(0, phydev, PAGE4, PTP_TDR, sec & 0xffff);    /* sec[15:0] */
        ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
-       ext_write(0, phydev, PAGE4, PTP_TDR, period & 0xffff); /* ns[15:0] */
-       ext_write(0, phydev, PAGE4, PTP_TDR, period >> 16);    /* ns[31:16] */
+       ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
+       ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);    /* ns[31:16] */
 
        /*enable trigger*/
        val &= ~TRIG_LOAD;
@@ -319,6 +359,7 @@ static void periodic_output(struct dp83640_clock *clock,
        ext_write(0, phydev, PAGE4, PTP_CTL, val);
 
        mutex_unlock(&clock->extreg_lock);
+       return 0;
 }
 
 /* ptp clock methods */
@@ -424,18 +465,21 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
        struct dp83640_clock *clock =
                container_of(ptp, struct dp83640_clock, caps);
        struct phy_device *phydev = clock->chosen->phydev;
-       int index;
+       unsigned int index;
        u16 evnt, event_num, gpio_num;
 
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                index = rq->extts.index;
-               if (index < 0 || index >= N_EXT_TS)
+               if (index >= N_EXT_TS)
                        return -EINVAL;
                event_num = EXT_EVENT + index;
                evnt = EVNT_WR | (event_num & EVNT_SEL_MASK) << EVNT_SEL_SHIFT;
                if (on) {
-                       gpio_num = gpio_tab[EXTTS0_GPIO + index];
+                       gpio_num = 1 + ptp_find_pin(clock->ptp_clock,
+                                                   PTP_PF_EXTTS, index);
+                       if (gpio_num < 1)
+                               return -EINVAL;
                        evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
                        if (rq->extts.flags & PTP_FALLING_EDGE)
                                evnt |= EVNT_FALL;
@@ -448,8 +492,7 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
        case PTP_CLK_REQ_PEROUT:
                if (rq->perout.index != 0)
                        return -EINVAL;
-               periodic_output(clock, rq, on);
-               return 0;
+               return periodic_output(clock, rq, on);
 
        default:
                break;
@@ -458,6 +501,12 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
        return -EOPNOTSUPP;
 }
 
+static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
+                             enum ptp_pin_function func, unsigned int chan)
+{
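+       /* Accept every pin/function combination requested from user space. */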
+       return 0;
+}
+
 static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 };
 static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
 
@@ -875,6 +924,7 @@ static void dp83640_free_clocks(void)
                mutex_destroy(&clock->extreg_lock);
                mutex_destroy(&clock->clock_lock);
                put_device(&clock->bus->dev);
+               kfree(clock->caps.pin_config);
                kfree(clock);
        }
 
@@ -894,12 +944,18 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
        clock->caps.n_alarm     = 0;
        clock->caps.n_ext_ts    = N_EXT_TS;
        clock->caps.n_per_out   = 1;
+       clock->caps.n_pins      = DP83640_N_PINS;
        clock->caps.pps         = 0;
        clock->caps.adjfreq     = ptp_dp83640_adjfreq;
        clock->caps.adjtime     = ptp_dp83640_adjtime;
        clock->caps.gettime     = ptp_dp83640_gettime;
        clock->caps.settime     = ptp_dp83640_settime;
        clock->caps.enable      = ptp_dp83640_enable;
+       clock->caps.verify      = ptp_dp83640_verify;
+       /*
+        * Convert the module param defaults into a dynamic pin configuration.
+        */
+       dp83640_gpio_defaults(clock->caps.pin_config);
        /*
         * Get a reference to this bus instance.
         */
@@ -950,6 +1006,13 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
        if (!clock)
                goto out;
 
+       clock->caps.pin_config = kzalloc(sizeof(struct ptp_pin_desc) *
+                                        DP83640_N_PINS, GFP_KERNEL);
+       if (!clock->caps.pin_config) {
+               kfree(clock);
+               clock = NULL;
+               goto out;
+       }
        dp83640_clock_init(clock, bus);
        list_add_tail(&phyter_clocks, &clock->list);
 out:
@@ -1363,7 +1426,7 @@ static void __exit dp83640_exit(void)
 }
 
 MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver");
-MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.at>");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
 MODULE_LICENSE("GPL");
 
 module_init(dp83640_init);
index 9367acc84fbb2e54f3d058864d820bf8974d21c7..15bc7f9ea224b44daab666cab2ff62c4e61019b5 100644 (file)
@@ -90,11 +90,6 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        return 0;
 }
 
-static int sun4i_mdio_reset(struct mii_bus *bus)
-{
-       return 0;
-}
-
 static int sun4i_mdio_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -110,7 +105,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
        bus->name = "sun4i_mii_bus";
        bus->read = &sun4i_mdio_read;
        bus->write = &sun4i_mdio_write;
-       bus->reset = &sun4i_mdio_reset;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
        bus->parent = &pdev->dev;
 
index 71e49000fbf33fc50d08e3c6979014c89fdb4478..76f54b32a120832f2ce212c129592a6f30ab83df 100644 (file)
@@ -432,8 +432,28 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR_RO(phy_id);
 
+static ssize_t
+phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct phy_device *phydev = to_phy_device(dev);
+
+       return sprintf(buf, "%s\n", phy_modes(phydev->interface));
+}
+static DEVICE_ATTR_RO(phy_interface);
+
+static ssize_t
+phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct phy_device *phydev = to_phy_device(dev);
+
+       return sprintf(buf, "%d\n", phydev->has_fixups);
+}
+static DEVICE_ATTR_RO(phy_has_fixups);
+
 static struct attribute *mdio_dev_attrs[] = {
        &dev_attr_phy_id.attr,
+       &dev_attr_phy_interface.attr,
+       &dev_attr_phy_has_fixups.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(mdio_dev);
index 5a8993b0cafc374c5c0f114ab01dc7618263570d..5ad971a55c5d9f21ffb3ded8e9d5704534095d21 100644 (file)
@@ -148,15 +148,52 @@ static int ks8737_config_intr(struct phy_device *phydev)
        return rc < 0 ? rc : 0;
 }
 
+static int kszphy_setup_led(struct phy_device *phydev,
+                           unsigned int reg, unsigned int shift)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *of_node = dev->of_node;
+       int rc, temp;
+       u32 val;
+
+       if (!of_node && dev->parent->of_node)
+               of_node = dev->parent->of_node;
+
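+       /* The "micrel,led-mode" property is optional; leave the chip default
+        * untouched when it is absent.
+        */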
+       if (of_property_read_u32(of_node, "micrel,led-mode", &val))
+               return 0;
+
+       temp = phy_read(phydev, reg);
+       if (temp < 0)
+               return temp;
+
+       temp &= ~(3 << shift);
+       temp |= val << shift;
+       rc = phy_write(phydev, reg, temp);
+
+       return rc < 0 ? rc : 0;
+}
+
 static int kszphy_config_init(struct phy_device *phydev)
 {
        return 0;
 }
 
+static int kszphy_config_init_led8041(struct phy_device *phydev)
+{
+       /* single led control, register 0x1e bits 15..14 */
+       return kszphy_setup_led(phydev, 0x1e, 14);
+}
+
 static int ksz8021_config_init(struct phy_device *phydev)
 {
-       int rc;
        const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+       int rc;
+
+       rc = kszphy_setup_led(phydev, 0x1f, 4);
+       if (rc)
+               dev_err(&phydev->dev, "failed to set led mode\n");
+
        phy_write(phydev, MII_KSZPHY_OMSO, val);
        rc = ksz_config_flags(phydev);
        return rc < 0 ? rc : 0;
@@ -166,6 +203,10 @@ static int ks8051_config_init(struct phy_device *phydev)
 {
        int rc;
 
+       rc = kszphy_setup_led(phydev, 0x1f, 4);
+       if (rc)
+               dev_err(&phydev->dev, "failed to set led mode\n");
+
        rc = ksz_config_flags(phydev);
        return rc < 0 ? rc : 0;
 }
@@ -327,7 +368,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = kszphy_config_init_led8041,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
@@ -342,7 +383,7 @@ static struct phy_driver ksphy_driver[] = {
        .features       = PHY_BASIC_FEATURES |
                          SUPPORTED_Pause | SUPPORTED_Asym_Pause,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = kszphy_config_init_led8041,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
@@ -371,7 +412,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id_mask    = 0x00ffffff,
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-       .config_init    = kszphy_config_init,
+       .config_init    = kszphy_config_init_led8041,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
index 76d96b9ebcdb94d2e57c3f52d5b49a41e7397ab4..1d788f19135bdf5e59e71a11eed935981a172ca8 100644 (file)
 
 #include <asm/irq.h>
 
+static const char *phy_speed_to_str(int speed)
+{
+       switch (speed) {
+       case SPEED_10:
+               return "10Mbps";
+       case SPEED_100:
+               return "100Mbps";
+       case SPEED_1000:
+               return "1Gbps";
+       case SPEED_2500:
+               return "2.5Gbps";
+       case SPEED_10000:
+               return "10Gbps";
+       case SPEED_UNKNOWN:
+               return "Unknown";
+       default:
+               return "Unsupported (update phy.c)";
+       }
+}
+
 /**
  * phy_print_status - Convenience function to print out the current phy status
  * @phydev: the phy_device struct
 void phy_print_status(struct phy_device *phydev)
 {
        if (phydev->link) {
-               pr_info("%s - Link is Up - %d/%s\n",
-                       dev_name(&phydev->dev),
-                       phydev->speed,
-                       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+               netdev_info(phydev->attached_dev,
+                       "Link is Up - %s/%s - flow control %s\n",
+                       phy_speed_to_str(phydev->speed),
+                       DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
+                       phydev->pause ? "rx/tx" : "off");
        } else  {
-               pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+               netdev_info(phydev->attached_dev, "Link is Down\n");
        }
 }
 EXPORT_SYMBOL(phy_print_status);
@@ -62,7 +83,7 @@ EXPORT_SYMBOL(phy_print_status);
  * If the @phydev driver has an ack_interrupt function, call it to
  * ack and clear the phy device's interrupt.
  *
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
  */
 static int phy_clear_interrupt(struct phy_device *phydev)
 {
@@ -77,7 +98,7 @@ static int phy_clear_interrupt(struct phy_device *phydev)
  * @phydev: the phy_device struct
  * @interrupts: interrupt flags to configure for this @phydev
  *
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
  */
 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
 {
@@ -93,15 +114,16 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
  * phy_aneg_done - return auto-negotiation status
  * @phydev: target phy_device struct
  *
- * Description: Reads the status register and returns 0 either if
- *   auto-negotiation is incomplete, or if there was an error.
- *   Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ * Description: Return the auto-negotiation status of this @phydev.
+ * Returns > 0 when auto-negotiation is complete, 0 while it is still
+ * pending, or < 0 on error.
  */
 static inline int phy_aneg_done(struct phy_device *phydev)
 {
-       int retval = phy_read(phydev, MII_BMSR);
+       if (phydev->drv->aneg_done)
+               return phydev->drv->aneg_done(phydev);
 
-       return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+       return genphy_aneg_done(phydev);
 }
 
 /* A structure for mapping a particular speed and duplex
@@ -283,7 +305,10 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 
        ethtool_cmd_speed_set(cmd, phydev->speed);
        cmd->duplex = phydev->duplex;
-       cmd->port = PORT_MII;
+       if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+               cmd->port = PORT_BNC;
+       else
+               cmd->port = PORT_MII;
        cmd->phy_address = phydev->addr;
        cmd->transceiver = phy_is_internal(phydev) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
index 2f6989b1e0dc801c9adea4d5e145a5dcf4d8ad26..0ce606624296a80492b18d89a27634614aad5497 100644 (file)
@@ -139,6 +139,7 @@ static int phy_scan_fixups(struct phy_device *phydev)
                                mutex_unlock(&phy_fixup_lock);
                                return err;
                        }
+                       phydev->has_fixups = true;
                }
        }
        mutex_unlock(&phy_fixup_lock);
@@ -534,16 +535,16 @@ static int phy_poll_reset(struct phy_device *phydev)
 
 int phy_init_hw(struct phy_device *phydev)
 {
-       int ret;
+       int ret = 0;
 
        if (!phydev->drv || !phydev->drv->config_init)
                return 0;
 
-       ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
-       if (ret < 0)
-               return ret;
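+       /* Prefer a driver-specific soft_reset(), falling back to the generic
+        * BMCR_RESET based reset with completion polling.
+        */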
+       if (phydev->drv->soft_reset)
+               ret = phydev->drv->soft_reset(phydev);
+       else
+               ret = genphy_soft_reset(phydev);
 
-       ret = phy_poll_reset(phydev);
        if (ret < 0)
                return ret;
 
@@ -864,6 +865,22 @@ int genphy_config_aneg(struct phy_device *phydev)
 }
 EXPORT_SYMBOL(genphy_config_aneg);
 
+/**
+ * genphy_aneg_done - return auto-negotiation status
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the status register and returns BMSR_ANEGCOMPLETE if
+ *   auto-negotiation is done, 0 if it is still in progress, or a negative
+ *   errno if the register read fails.
+ */
+int genphy_aneg_done(struct phy_device *phydev)
+{
+       int retval = phy_read(phydev, MII_BMSR);
+
+       return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+EXPORT_SYMBOL(genphy_aneg_done);
+
 static int gen10g_config_aneg(struct phy_device *phydev)
 {
        return 0;
@@ -1029,6 +1046,27 @@ static int gen10g_read_status(struct phy_device *phydev)
        return 0;
 }
 
+/**
+ * genphy_soft_reset - software reset the PHY via BMCR_RESET bit
+ * @phydev: target phy_device struct
+ *
+ * Description: Perform a software PHY reset using the standard
+ * BMCR_RESET bit and poll for the reset bit to be cleared.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_soft_reset(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+       if (ret < 0)
+               return ret;
+
+       return phy_poll_reset(phydev);
+}
+EXPORT_SYMBOL(genphy_soft_reset);
+
 static int genphy_config_init(struct phy_device *phydev)
 {
        int val;
@@ -1075,6 +1113,12 @@ static int genphy_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int gen10g_soft_reset(struct phy_device *phydev)
+{
+       /* Do nothing for now */
+       return 0;
+}
+
 static int gen10g_config_init(struct phy_device *phydev)
 {
        /* Temporarily just say we support everything */
@@ -1249,9 +1293,11 @@ static struct phy_driver genphy_driver[] = {
        .phy_id         = 0xffffffff,
        .phy_id_mask    = 0xffffffff,
        .name           = "Generic PHY",
+       .soft_reset     = genphy_soft_reset,
        .config_init    = genphy_config_init,
        .features       = 0,
        .config_aneg    = genphy_config_aneg,
+       .aneg_done      = genphy_aneg_done,
        .read_status    = genphy_read_status,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
@@ -1260,6 +1306,7 @@ static struct phy_driver genphy_driver[] = {
        .phy_id         = 0xffffffff,
        .phy_id_mask    = 0xffffffff,
        .name           = "Generic 10G PHY",
+       .soft_reset     = gen10g_soft_reset,
        .config_init    = gen10g_config_init,
        .features       = 0,
        .config_aneg    = gen10g_config_aneg,
index 72ff14b811c621c3a6694a2e4941d2a4c41651ed..e3923ebb693fccc276db45de7c7d9551ee6bc208 100644 (file)
@@ -143,9 +143,8 @@ struct ppp {
        struct sk_buff_head mrq;        /* MP: receive reconstruction queue */
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-       struct sock_filter *pass_filter;        /* filter for packets to pass */
-       struct sock_filter *active_filter;/* filter for pkts to reset idle */
-       unsigned pass_len, active_len;
+       struct sk_filter *pass_filter;  /* filter for packets to pass */
+       struct sk_filter *active_filter;/* filter for pkts to reset idle */
 #endif /* CONFIG_PPP_FILTER */
        struct net      *ppp_net;       /* the net we belong to */
        struct ppp_link_stats stats64;  /* 64 bit network stats */
@@ -755,28 +754,42 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case PPPIOCSPASS:
        {
                struct sock_filter *code;
+
                err = get_filter(argp, &code);
                if (err >= 0) {
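+                       /* Wrap the user-supplied program so the core filter
+                        * code can validate and attach it below.
+                        */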
+                       struct sock_fprog fprog = {
+                               .len = err,
+                               .filter = code,
+                       };
+
                        ppp_lock(ppp);
-                       kfree(ppp->pass_filter);
-                       ppp->pass_filter = code;
-                       ppp->pass_len = err;
+                       if (ppp->pass_filter)
+                               sk_unattached_filter_destroy(ppp->pass_filter);
+                       err = sk_unattached_filter_create(&ppp->pass_filter,
+                                                         &fprog);
+                       kfree(code);
                        ppp_unlock(ppp);
-                       err = 0;
                }
                break;
        }
        case PPPIOCSACTIVE:
        {
                struct sock_filter *code;
+
                err = get_filter(argp, &code);
                if (err >= 0) {
+                       struct sock_fprog fprog = {
+                               .len = err,
+                               .filter = code,
+                       };
+
                        ppp_lock(ppp);
-                       kfree(ppp->active_filter);
-                       ppp->active_filter = code;
-                       ppp->active_len = err;
+                       if (ppp->active_filter)
+                               sk_unattached_filter_destroy(ppp->active_filter);
+                       err = sk_unattached_filter_create(&ppp->active_filter,
+                                                         &fprog);
+                       kfree(code);
                        ppp_unlock(ppp);
-                       err = 0;
                }
                break;
        }
@@ -1184,7 +1197,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                   a four-byte PPP header on each packet */
                *skb_push(skb, 2) = 1;
                if (ppp->pass_filter &&
-                   sk_run_filter(skb, ppp->pass_filter) == 0) {
+                   SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
                        if (ppp->debug & 1)
                                netdev_printk(KERN_DEBUG, ppp->dev,
                                              "PPP: outbound frame "
@@ -1194,7 +1207,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                }
                /* if this packet passes the active filter, record the time */
                if (!(ppp->active_filter &&
-                     sk_run_filter(skb, ppp->active_filter) == 0))
+                     SK_RUN_FILTER(ppp->active_filter, skb) == 0))
                        ppp->last_xmit = jiffies;
                skb_pull(skb, 2);
 #else
@@ -1818,7 +1831,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
                        *skb_push(skb, 2) = 0;
                        if (ppp->pass_filter &&
-                           sk_run_filter(skb, ppp->pass_filter) == 0) {
+                           SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
                                if (ppp->debug & 1)
                                        netdev_printk(KERN_DEBUG, ppp->dev,
                                                      "PPP: inbound frame "
@@ -1827,7 +1840,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                                return;
                        }
                        if (!(ppp->active_filter &&
-                             sk_run_filter(skb, ppp->active_filter) == 0))
+                             SK_RUN_FILTER(ppp->active_filter, skb) == 0))
                                ppp->last_recv = jiffies;
                        __skb_pull(skb, 2);
                } else
@@ -2672,6 +2685,10 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ppp->minseq = -1;
        skb_queue_head_init(&ppp->mrq);
 #endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+       ppp->pass_filter = NULL;
+       ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
 
        /*
         * drum roll: don't forget to set
@@ -2802,10 +2819,15 @@ static void ppp_destroy_interface(struct ppp *ppp)
        skb_queue_purge(&ppp->mrq);
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-       kfree(ppp->pass_filter);
-       ppp->pass_filter = NULL;
-       kfree(ppp->active_filter);
-       ppp->active_filter = NULL;
+       if (ppp->pass_filter) {
+               sk_unattached_filter_destroy(ppp->pass_filter);
+               ppp->pass_filter = NULL;
+       }
+
+       if (ppp->active_filter) {
+               sk_unattached_filter_destroy(ppp->active_filter);
+               ppp->active_filter = NULL;
+       }
 #endif /* CONFIG_PPP_FILTER */
 
        kfree_skb(ppp->xmit_pending);
index c8624a8235ab2f1020c4c387be290b631ed1b0dc..33008c1d1d678756ae8fbae13238763c24cc603e 100644 (file)
@@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-                                   gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
        struct netpoll *np;
        int err;
@@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
        if (!team->dev->npinfo)
                return 0;
 
-       np = kzalloc(sizeof(*np), gfp);
+       np = kzalloc(sizeof(*np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;
 
-       err = __netpoll_setup(np, port->dev, gfp);
+       err = __netpoll_setup(np, port->dev);
        if (err) {
                kfree(np);
                return err;
@@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)
        kfree(np);
 }
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-                                   gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
        return 0;
 }
@@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
                goto err_vids_add;
        }
 
-       err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+       err = team_port_enable_netpoll(team, port);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
@@ -1540,16 +1538,10 @@ static int team_init(struct net_device *dev)
        mutex_init(&team->lock);
        team_set_no_mode(team);
 
-       team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
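+       /* netdev_alloc_pcpu_stats() initialises the u64_stats syncp for every
+        * per-CPU entry, so no explicit u64_stats_init() loop is needed.
+        */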
+       team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
        if (!team->pcpu_stats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct team_pcpu_stats *team_stats;
-               team_stats = per_cpu_ptr(team->pcpu_stats, i);
-               u64_stats_init(&team_stats->syncp);
-       }
-
        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
@@ -1767,13 +1759,13 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        for_each_possible_cpu(i) {
                p = per_cpu_ptr(team->pcpu_stats, i);
                do {
-                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
                        rx_packets      = p->rx_packets;
                        rx_bytes        = p->rx_bytes;
                        rx_multicast    = p->rx_multicast;
                        tx_packets      = p->tx_packets;
                        tx_bytes        = p->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
                stats->rx_packets       += rx_packets;
                stats->rx_bytes         += rx_bytes;
@@ -1856,7 +1848,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
 }
 
 static int team_netpoll_setup(struct net_device *dev,
-                             struct netpoll_info *npifo, gfp_t gfp)
+                             struct netpoll_info *npifo)
 {
        struct team *team = netdev_priv(dev);
        struct team_port *port;
@@ -1864,7 +1856,7 @@ static int team_netpoll_setup(struct net_device *dev,
 
        mutex_lock(&team->lock);
        list_for_each_entry(port, &team->port_list, list) {
-               err = team_port_enable_netpoll(team, port, gfp);
+               err = team_port_enable_netpoll(team, port);
                if (err) {
                        __team_netpoll_cleanup(team);
                        break;
index d671fc3ac5ac26ad2b7666617fab5adc1042e569..dbde3412ee5eafdeb39cd238e8af4f19368917c6 100644 (file)
@@ -432,9 +432,9 @@ static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
        struct lb_stats tmp;
 
        do {
-               start = u64_stats_fetch_begin_bh(syncp);
+               start = u64_stats_fetch_begin_irq(syncp);
                tmp.tx_bytes = cpu_stats->tx_bytes;
-       } while (u64_stats_fetch_retry_bh(syncp, start));
+       } while (u64_stats_fetch_retry_irq(syncp, start));
        acc_stats->tx_bytes += tmp.tx_bytes;
 }
 
index 26f8635b027d3fb44ab12223000badc214098df0..ee328ba101e72a9e3f150d8c24068182be86abc5 100644 (file)
@@ -452,7 +452,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
                --tun->numqueues;
                if (clean) {
-                       rcu_assign_pointer(tfile->tun, NULL);
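+                       /* Publishing NULL needs no memory barrier, so
+                        * RCU_INIT_POINTER() is sufficient here.
+                        */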
+                       RCU_INIT_POINTER(tfile->tun, NULL);
                        sock_put(&tfile->sk);
                } else
                        tun_disable_queue(tun, tfile);
@@ -499,12 +499,12 @@ static void tun_detach_all(struct net_device *dev)
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
                wake_up_all(&tfile->wq.wait);
-               rcu_assign_pointer(tfile->tun, NULL);
+               RCU_INIT_POINTER(tfile->tun, NULL);
                --tun->numqueues;
        }
        list_for_each_entry(tfile, &tun->disabled, next) {
                wake_up_all(&tfile->wq.wait);
-               rcu_assign_pointer(tfile->tun, NULL);
+               RCU_INIT_POINTER(tfile->tun, NULL);
        }
        BUG_ON(tun->numqueues != 0);
 
@@ -2194,7 +2194,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
                                            &tun_proto);
        if (!tfile)
                return -ENOMEM;
-       rcu_assign_pointer(tfile->tun, NULL);
+       RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->net = get_net(current->nsproxy->net_ns);
        tfile->flags = 0;
        tfile->ifindex = 0;
index bd363b27e8540e3bfe2acc6173adcc8a1952f4b7..9ea4bfe5d31804ed7413ade40ec568a777b9739d 100644 (file)
@@ -625,6 +625,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Novatel Expedite E371 - handled by qmi_wwan */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(NOVATEL_VENDOR_ID, 0x9011, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* AnyDATA ADU960S - handled by qmi_wwan */
 {
        USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
index d350d2795e1029cc71f197b2ca863586942d4d3f..549dbac710ed5f576f84cedf375df8588e5a7dc5 100644 (file)
@@ -73,6 +73,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
        u8 iface_no;
        int err;
        int eth_hlen;
+       u16 mbim_mtu;
        u16 ntb_fmt_supported;
        __le16 max_datagram_size;
 
@@ -252,6 +253,14 @@ out:
        /* set MTU to max supported by the device if necessary */
        if (dev->net->mtu > ctx->max_datagram_size - eth_hlen)
                dev->net->mtu = ctx->max_datagram_size - eth_hlen;
+
+       /* do not exceed the operator-preferred MTU */
+       if (ctx->mbim_extended_desc) {
+               mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
+               if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
+                       dev->net->mtu = mbim_mtu;
+       }
+
        return 0;
 }
 
@@ -390,6 +399,14 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
                        ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
                        break;
 
+               case USB_CDC_MBIM_EXTENDED_TYPE:
+                       if (buf[0] < sizeof(*(ctx->mbim_extended_desc)))
+                               break;
+
+                       ctx->mbim_extended_desc =
+                               (const struct usb_cdc_mbim_extended_desc *)buf;
+                       break;
+
                default:
                        break;
                }
index acfcc32b323d87c1b6f4e64788802af932f74319..8f37efd2d2fbb3ec05fcde896b8b9e88136c2f35 100644 (file)
@@ -210,7 +210,7 @@ static int vl600_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                         * (0x86dd) so Linux can understand it.
                         */
                        if ((buf->data[sizeof(*ethhdr)] & 0xf0) == 0x60)
-                               ethhdr->h_proto = __constant_htons(ETH_P_IPV6);
+                               ethhdr->h_proto = htons(ETH_P_IPV6);
                }
 
                if (count) {
index 313cb6cd4848e033bab664788c24c0a88ce55996..e3458e3c44f146653048aba99295670caabd4db5 100644 (file)
@@ -500,6 +500,13 @@ static const struct usb_device_id products[] = {
                                              USB_CDC_PROTO_NONE),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* Novatel Expedite E371 */
+               USB_DEVICE_AND_INTERFACE_INFO(0x1410, 0x9011,
+                                             USB_CLASS_COMM,
+                                             USB_CDC_SUBCLASS_ETHERNET,
+                                             USB_CDC_PROTO_NONE),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
        {       /* Dell Wireless 5800 (Novatel E362) */
                USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x8195,
                                              USB_CLASS_COMM,
index adb12f349a61fc58582d2092c49db5a9ceb7b7cf..18e12a3f7fc302399167bd32712a5ed66fe8fd36 100644 (file)
 #include <linux/list.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
+#define DRIVER_VERSION "v1.06.0 (2014/03/03)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
 #define PLA_TCR0               0xe610
 #define PLA_TCR1               0xe612
 #define PLA_TXFIFO_CTRL                0xe618
-#define PLA_RSTTELLY           0xe800
+#define PLA_RSTTALLY           0xe800
 #define PLA_CR                 0xe813
 #define PLA_CRWECR             0xe81c
+#define PLA_CONFIG12           0xe81e  /* CONFIG1, CONFIG2 */
+#define PLA_CONFIG34           0xe820  /* CONFIG3, CONFIG4 */
 #define PLA_CONFIG5            0xe822
 #define PLA_PHY_PWR            0xe84c
 #define PLA_OOB_CTRL           0xe84f
@@ -69,7 +72,7 @@
 #define PLA_MISC_0             0xe858
 #define PLA_MISC_1             0xe85a
 #define PLA_OCP_GPHY_BASE      0xe86c
-#define PLA_TELLYCNT           0xe890
+#define PLA_TALLYCNT           0xe890
 #define PLA_SFF_STS_7          0xe8de
 #define PLA_PHYSTATUS          0xe908
 #define PLA_BP_BA              0xfc26
 /* PLA_TCR1 */
 #define VERSION_MASK           0x7cf0
 
+/* PLA_RSTTALLY */
+#define TALLY_RESET            0x0001
+
 /* PLA_CR */
 #define CR_RST                 0x10
 #define CR_RE                  0x08
 /* PAL_BDC_CR */
 #define ALDPS_PROXY_MODE       0x0001
 
+/* PLA_CONFIG34 */
+#define LINK_ON_WAKE_EN                0x0010
+#define LINK_OFF_WAKE_EN       0x0008
+
 /* PLA_CONFIG5 */
+#define BWF_EN                 0x0040
+#define MWF_EN                 0x0020
+#define UWF_EN                 0x0010
 #define LAN_WAKE_EN            0x0002
 
 /* PLA_LED_FEATURE */
@@ -436,6 +449,9 @@ enum rtl8152_flags {
        RTL8152_SET_RX_MODE,
        WORK_ENABLE,
        RTL8152_LINK_CHG,
+       SELECTIVE_SUSPEND,
+       PHY_RESET,
+       SCHEDULE_TASKLET,
 };
 
 /* Define these values to match your device */
@@ -449,11 +465,40 @@ enum rtl8152_flags {
 #define MCU_TYPE_PLA                   0x0100
 #define MCU_TYPE_USB                   0x0000
 
+#define REALTEK_USB_DEVICE(vend, prod) \
+       USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
+
+struct tally_counter {
+       __le64  tx_packets;
+       __le64  rx_packets;
+       __le64  tx_errors;
+       __le32  rx_errors;
+       __le16  rx_missed;
+       __le16  align_errors;
+       __le32  tx_one_collision;
+       __le32  tx_multi_collision;
+       __le64  rx_unicast;
+       __le64  rx_broadcast;
+       __le32  rx_multicast;
+       __le16  tx_aborted;
+       __le16  tx_underun;
+};
+
 struct rx_desc {
        __le32 opts1;
 #define RX_LEN_MASK                    0x7fff
+
        __le32 opts2;
+#define RD_UDP_CS                      (1 << 23)
+#define RD_TCP_CS                      (1 << 22)
+#define RD_IPV6_CS                     (1 << 20)
+#define RD_IPV4_CS                     (1 << 19)
+
        __le32 opts3;
+#define IPF                            (1 << 23) /* IP checksum fail */
+#define UDPF                           (1 << 22) /* UDP checksum fail */
+#define TCPF                           (1 << 21) /* TCP checksum fail */
+
        __le32 opts4;
        __le32 opts5;
        __le32 opts6;
@@ -463,13 +508,21 @@ struct tx_desc {
        __le32 opts1;
 #define TX_FS                  (1 << 31) /* First segment of a packet */
 #define TX_LS                  (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK            0x3ffff
+#define GTSENDV4               (1 << 28)
+#define GTSENDV6               (1 << 27)
+#define GTTCPHO_SHIFT          18
+#define GTTCPHO_MAX            0x7fU
+#define TX_LEN_MAX             0x3ffffU
 
        __le32 opts2;
 #define UDP_CS                 (1 << 31) /* Calculate UDP/IP checksum */
 #define TCP_CS                 (1 << 30) /* Calculate TCP/IP checksum */
 #define IPV4_CS                        (1 << 29) /* Calculate IPv4 checksum */
 #define IPV6_CS                        (1 << 28) /* Calculate IPv6 checksum */
+#define MSS_SHIFT              17
+#define MSS_MAX                        0x7ffU
+#define TCPHO_SHIFT            17
+#define TCPHO_MAX              0x7ffU
 };
 
 struct r8152;
@@ -511,11 +564,13 @@ struct r8152 {
                void (*init)(struct r8152 *);
                int (*enable)(struct r8152 *);
                void (*disable)(struct r8152 *);
+               void (*up)(struct r8152 *);
                void (*down)(struct r8152 *);
                void (*unload)(struct r8152 *);
        } rtl_ops;
 
        int intr_interval;
+       u32 saved_wolopts;
        u32 msg_enable;
        u32 tx_qlen;
        u16 ocp_base;
@@ -534,12 +589,21 @@ enum rtl_version {
        RTL_VER_MAX
 };
 
+enum tx_csum_stat {
+       TX_CSUM_SUCCESS = 0,
+       TX_CSUM_TSO,
+       TX_CSUM_NONE
+};
+
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
  * The RTL chips use a 64 element hash table based on the Ethernet CRC.
  */
 static const int multicast_filter_limit = 32;
 static unsigned int rx_buf_sz = 16384;
 
+#define RTL_LIMITED_TSO_SIZE   (rx_buf_sz - sizeof(struct tx_desc) - \
+                                VLAN_ETH_HLEN - VLAN_HLEN)
+
 static
 int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
 {
@@ -577,6 +641,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
                               value, index, tmp, size, 500);
 
        kfree(tmp);
+
        return ret;
 }
 
@@ -862,11 +927,21 @@ static u16 sram_read(struct r8152 *tp, u16 addr)
 static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
 {
        struct r8152 *tp = netdev_priv(netdev);
+       int ret;
 
        if (phy_id != R8152_PHY_ID)
                return -EINVAL;
 
-       return r8152_mdio_read(tp, reg);
+       ret = usb_autopm_get_interface(tp->intf);
+       if (ret < 0)
+               goto out;
+
+       ret = r8152_mdio_read(tp, reg);
+
+       usb_autopm_put_interface(tp->intf);
+
+out:
+       return ret;
 }
 
 static
@@ -877,7 +952,12 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
        if (phy_id != R8152_PHY_ID)
                return;
 
+       if (usb_autopm_get_interface(tp->intf) < 0)
+               return;
+
        r8152_mdio_write(tp, reg, val);
+
+       usb_autopm_put_interface(tp->intf);
 }
 
 static
@@ -886,11 +966,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
 static inline void set_ethernet_addr(struct r8152 *tp)
 {
        struct net_device *dev = tp->netdev;
+       int ret;
        u8 node_id[8] = {0};
 
-       if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
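+       /* Chip versions other than RTL_VER_01 keep the MAC address in
+        * PLA_BACKUP; read it from there and program it back into PLA_IDR.
+        */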
+       if (tp->version == RTL_VER_01)
+               ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
+       else
+               ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
+
+       if (ret < 0) {
                netif_notice(tp, probe, dev, "inet addr fail\n");
-       else {
+       } else {
+               if (tp->version != RTL_VER_01) {
+                       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+                                      CRWECR_CONFIG);
+                       pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
+                                     sizeof(node_id), node_id);
+                       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+                                      CRWECR_NORAML);
+               }
+
                memcpy(dev->dev_addr, node_id, dev->addr_len);
                memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        }
@@ -913,15 +1008,9 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
        return 0;
 }
 
-static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
-{
-       return &dev->stats;
-}
-
 static void read_bulk_callback(struct urb *urb)
 {
        struct net_device *netdev;
-       unsigned long flags;
        int status = urb->status;
        struct rx_agg *agg;
        struct r8152 *tp;
@@ -948,14 +1037,16 @@ static void read_bulk_callback(struct urb *urb)
        if (!netif_carrier_ok(netdev))
                return;
 
+       usb_mark_last_busy(tp->udev);
+
        switch (status) {
        case 0:
                if (urb->actual_length < ETH_ZLEN)
                        break;
 
-               spin_lock_irqsave(&tp->rx_lock, flags);
+               spin_lock(&tp->rx_lock);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               spin_unlock(&tp->rx_lock);
                tasklet_schedule(&tp->tl);
                return;
        case -ESHUTDOWN:
@@ -978,9 +1069,9 @@ static void read_bulk_callback(struct urb *urb)
        if (result == -ENODEV) {
                netif_device_detach(tp->netdev);
        } else if (result) {
-               spin_lock_irqsave(&tp->rx_lock, flags);
+               spin_lock(&tp->rx_lock);
                list_add_tail(&agg->list, &tp->rx_done);
-               spin_unlock_irqrestore(&tp->rx_lock, flags);
+               spin_unlock(&tp->rx_lock);
                tasklet_schedule(&tp->tl);
        }
 }
@@ -988,7 +1079,7 @@ static void read_bulk_callback(struct urb *urb)
 static void write_bulk_callback(struct urb *urb)
 {
        struct net_device_stats *stats;
-       unsigned long flags;
+       struct net_device *netdev;
        struct tx_agg *agg;
        struct r8152 *tp;
        int status = urb->status;
@@ -1001,21 +1092,24 @@ static void write_bulk_callback(struct urb *urb)
        if (!tp)
                return;
 
-       stats = rtl8152_get_stats(tp->netdev);
+       netdev = tp->netdev;
+       stats = &netdev->stats;
        if (status) {
                if (net_ratelimit())
-                       netdev_warn(tp->netdev, "Tx status %d\n", status);
+                       netdev_warn(netdev, "Tx status %d\n", status);
                stats->tx_errors += agg->skb_num;
        } else {
                stats->tx_packets += agg->skb_num;
                stats->tx_bytes += agg->skb_len;
        }
 
-       spin_lock_irqsave(&tp->tx_lock, flags);
+       spin_lock(&tp->tx_lock);
        list_add_tail(&agg->list, &tp->tx_free);
-       spin_unlock_irqrestore(&tp->tx_lock, flags);
+       spin_unlock(&tp->tx_lock);
 
-       if (!netif_carrier_ok(tp->netdev))
+       usb_autopm_put_interface_async(tp->intf);
+
+       if (!netif_carrier_ok(netdev))
                return;
 
        if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1220,6 +1314,9 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
        struct tx_agg *agg = NULL;
        unsigned long flags;
 
+       if (list_empty(&tp->tx_free))
+               return NULL;
+
        spin_lock_irqsave(&tp->tx_lock, flags);
        if (!list_empty(&tp->tx_free)) {
                struct list_head *cursor;
@@ -1233,24 +1330,138 @@ static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
        return agg;
 }
 
-static void
-r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+static inline __be16 get_protocol(struct sk_buff *skb)
+{
+       __be16 protocol;
+
+       if (skb->protocol == htons(ETH_P_8021Q))
+               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+       else
+               protocol = skb->protocol;
+
+       return protocol;
+}
+
+/*
+ * r8152_csum_workaround()
+ * The hardware limits the value of the transport offset.  When the offset is
+ * out of range, calculate the checksum in software instead.
+ */
+static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
+                                 struct sk_buff_head *list)
+{
+       if (skb_shinfo(skb)->gso_size) {
+               netdev_features_t features = tp->netdev->features;
+               struct sk_buff_head seg_list;
+               struct sk_buff *segs, *nskb;
+
+               features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
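+               /* Segment the GSO packet in software and queue the resulting
+                * skbs so each one is transmitted on its own.
+                */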
+               segs = skb_gso_segment(skb, features);
+               if (IS_ERR(segs) || !segs)
+                       goto drop;
+
+               __skb_queue_head_init(&seg_list);
+
+               do {
+                       nskb = segs;
+                       segs = segs->next;
+                       nskb->next = NULL;
+                       __skb_queue_tail(&seg_list, nskb);
+               } while (segs);
+
+               skb_queue_splice(&seg_list, list);
+               dev_kfree_skb(skb);
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (skb_checksum_help(skb) < 0)
+                       goto drop;
+
+               __skb_queue_head(list, skb);
+       } else {
+               struct net_device_stats *stats;
+
+drop:
+               stats = &tp->netdev->stats;
+               stats->tx_dropped++;
+               dev_kfree_skb(skb);
+       }
+}
+
+/*
+ * msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP pseudo header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       struct tcphdr *th;
+       int ret;
+
+       ret = skb_cow_head(skb, 0);
+       if (ret)
+               return ret;
+
+       ipv6h = ipv6_hdr(skb);
+       th = tcp_hdr(skb);
+
+       th->check = 0;
+       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+       return ret;
+}
+
+static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,
+                        struct sk_buff *skb, u32 len, u32 transport_offset)
 {
-       memset(desc, 0, sizeof(*desc));
+       u32 mss = skb_shinfo(skb)->gso_size;
+       u32 opts1, opts2 = 0;
+       int ret = TX_CSUM_SUCCESS;
+
+       WARN_ON_ONCE(len > TX_LEN_MAX);
+
+       opts1 = len | TX_FS | TX_LS;
+
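+       /* TSO path: pass the transport header offset and the MSS to the
+        * hardware via the descriptor.
+        */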
+       if (mss) {
+               if (transport_offset > GTTCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->netdev,
+                                  "Invalid transport offset 0x%x for TSO\n",
+                                  transport_offset);
+                       ret = TX_CSUM_TSO;
+                       goto unavailable;
+               }
 
-       desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+               switch (get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       opts1 |= GTSENDV4;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       if (msdn_giant_send_check(skb)) {
+                               ret = TX_CSUM_TSO;
+                               goto unavailable;
+                       }
+                       opts1 |= GTSENDV6;
+                       break;
+
+               default:
+                       WARN_ON_ONCE(1);
+                       break;
+               }
 
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               __be16 protocol;
+               opts1 |= transport_offset << GTTCPHO_SHIFT;
+               opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 ip_protocol;
-               u32 opts2 = 0;
 
-               if (skb->protocol == htons(ETH_P_8021Q))
-                       protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
-               else
-                       protocol = skb->protocol;
+               if (transport_offset > TCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->netdev,
+                                  "Invalid transport offset 0x%x\n",
+                                  transport_offset);
+                       ret = TX_CSUM_NONE;
+                       goto unavailable;
+               }
 
-               switch (protocol) {
+               switch (get_protocol(skb)) {
                case htons(ETH_P_IP):
                        opts2 |= IPV4_CS;
                        ip_protocol = ip_hdr(skb)->protocol;
@@ -1266,24 +1477,34 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
                        break;
                }
 
-               if (ip_protocol == IPPROTO_TCP) {
+               if (ip_protocol == IPPROTO_TCP)
                        opts2 |= TCP_CS;
-                       opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
-               } else if (ip_protocol == IPPROTO_UDP) {
+               else if (ip_protocol == IPPROTO_UDP)
                        opts2 |= UDP_CS;
-               } else {
+               else
                        WARN_ON_ONCE(1);
-               }
 
-               desc->opts2 = cpu_to_le32(opts2);
+               opts2 |= transport_offset << TCPHO_SHIFT;
        }
+
+       desc->opts2 = cpu_to_le32(opts2);
+       desc->opts1 = cpu_to_le32(opts1);
+
+unavailable:
+       return ret;
 }
 
 static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 {
-       int remain;
+       struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+       int remain, ret;
        u8 *tx_data;
 
+       __skb_queue_head_init(&skb_head);
+       spin_lock(&tx_queue->lock);
+       skb_queue_splice_init(tx_queue, &skb_head);
+       spin_unlock(&tx_queue->lock);
+
        tx_data = agg->head;
        agg->skb_num = agg->skb_len = 0;
        remain = rx_buf_sz;
@@ -1292,32 +1513,56 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
                struct tx_desc *tx_desc;
                struct sk_buff *skb;
                unsigned int len;
+               u32 offset;
 
-               skb = skb_dequeue(&tp->tx_queue);
+               skb = __skb_dequeue(&skb_head);
                if (!skb)
                        break;
 
-               remain -= sizeof(*tx_desc);
-               len = skb->len;
-               if (remain < len) {
-                       skb_queue_head(&tp->tx_queue, skb);
+               len = skb->len + sizeof(*tx_desc);
+
+               if (len > remain) {
+                       __skb_queue_head(&skb_head, skb);
                        break;
                }
 
                tx_data = tx_agg_align(tx_data);
                tx_desc = (struct tx_desc *)tx_data;
+
+               offset = (u32)skb_transport_offset(skb);
+
+               if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) {
+                       r8152_csum_workaround(tp, skb, &skb_head);
+                       continue;
+               }
+
                tx_data += sizeof(*tx_desc);
 
-               r8152_tx_csum(tp, tx_desc, skb);
-               memcpy(tx_data, skb->data, len);
-               agg->skb_num++;
+               len = skb->len;
+               if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
+                       struct net_device_stats *stats = &tp->netdev->stats;
+
+                       stats->tx_dropped++;
+                       dev_kfree_skb_any(skb);
+                       tx_data -= sizeof(*tx_desc);
+                       continue;
+               }
+
+               tx_data += len;
                agg->skb_len += len;
+               agg->skb_num++;
+
                dev_kfree_skb_any(skb);
 
-               tx_data += len;
                remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
        }
 
+       if (!skb_queue_empty(&skb_head)) {
+               spin_lock(&tx_queue->lock);
+               skb_queue_splice(&skb_head, tx_queue);
+               spin_unlock(&tx_queue->lock);
+       }
+
        netif_tx_lock(tp->netdev);
 
        if (netif_queue_stopped(tp->netdev) &&
@@ -1326,20 +1571,67 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
 
        netif_tx_unlock(tp->netdev);
 
+       ret = usb_autopm_get_interface_async(tp->intf);
+       if (ret < 0)
+               goto out_tx_fill;
+
        usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
                          agg->head, (int)(tx_data - (u8 *)agg->head),
                          (usb_complete_t)write_bulk_callback, agg);
 
-       return usb_submit_urb(agg->urb, GFP_ATOMIC);
+       ret = usb_submit_urb(agg->urb, GFP_ATOMIC);
+       if (ret < 0)
+               usb_autopm_put_interface_async(tp->intf);
+
+out_tx_fill:
+       return ret;
+}
+
+static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
+{
+       u8 checksum = CHECKSUM_NONE;
+       u32 opts2, opts3;
+
+       if (tp->version == RTL_VER_01)
+               goto return_result;
+
+       opts2 = le32_to_cpu(rx_desc->opts2);
+       opts3 = le32_to_cpu(rx_desc->opts3);
+
+       if (opts2 & RD_IPV4_CS) {
+               if (opts3 & IPF)
+                       checksum = CHECKSUM_NONE;
+               else if ((opts2 & RD_UDP_CS) && (opts3 & UDPF))
+                       checksum = CHECKSUM_NONE;
+               else if ((opts2 & RD_TCP_CS) && (opts3 & TCPF))
+                       checksum = CHECKSUM_NONE;
+               else
+                       checksum = CHECKSUM_UNNECESSARY;
+       } else if (opts2 & RD_IPV6_CS) {
+               if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF))
+                       checksum = CHECKSUM_UNNECESSARY;
+               else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF))
+                       checksum = CHECKSUM_UNNECESSARY;
+       }
+
+return_result:
+       return checksum;
 }
 
 static void rx_bottom(struct r8152 *tp)
 {
        unsigned long flags;
-       struct list_head *cursor, *next;
+       struct list_head *cursor, *next, rx_queue;
+
+       if (list_empty(&tp->rx_done))
+               return;
 
+       INIT_LIST_HEAD(&rx_queue);
        spin_lock_irqsave(&tp->rx_lock, flags);
-       list_for_each_safe(cursor, next, &tp->rx_done) {
+       list_splice_init(&tp->rx_done, &rx_queue);
+       spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+       list_for_each_safe(cursor, next, &rx_queue) {
                struct rx_desc *rx_desc;
                struct rx_agg *agg;
                int len_used = 0;
@@ -1348,7 +1640,6 @@ static void rx_bottom(struct r8152 *tp)
                int ret;
 
                list_del_init(cursor);
-               spin_unlock_irqrestore(&tp->rx_lock, flags);
 
                agg = list_entry(cursor, struct rx_agg, list);
                urb = agg->urb;
@@ -1361,7 +1652,7 @@ static void rx_bottom(struct r8152 *tp)
 
                while (urb->actual_length > len_used) {
                        struct net_device *netdev = tp->netdev;
-                       struct net_device_stats *stats;
+                       struct net_device_stats *stats = &netdev->stats;
                        unsigned int pkt_len;
                        struct sk_buff *skb;
 
@@ -1373,23 +1664,24 @@ static void rx_bottom(struct r8152 *tp)
                        if (urb->actual_length < len_used)
                                break;
 
-                       stats = rtl8152_get_stats(netdev);
-
                        pkt_len -= CRC_SIZE;
                        rx_data += sizeof(struct rx_desc);
 
                        skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
                        if (!skb) {
                                stats->rx_dropped++;
-                               break;
+                               goto find_next_rx;
                        }
+
+                       skb->ip_summed = r8152_rx_csum(tp, rx_desc);
                        memcpy(skb->data, rx_data, pkt_len);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, netdev);
-                       netif_rx(skb);
+                       netif_receive_skb(skb);
                        stats->rx_packets++;
                        stats->rx_bytes += pkt_len;
 
+find_next_rx:
                        rx_data = rx_agg_align(rx_data + pkt_len + CRC_SIZE);
                        rx_desc = (struct rx_desc *)rx_data;
                        len_used = (int)(rx_data - (u8 *)agg->head);
@@ -1398,13 +1690,13 @@ static void rx_bottom(struct r8152 *tp)
 
 submit:
                ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
-               spin_lock_irqsave(&tp->rx_lock, flags);
                if (ret && ret != -ENODEV) {
-                       list_add_tail(&agg->list, next);
+                       spin_lock_irqsave(&tp->rx_lock, flags);
+                       list_add_tail(&agg->list, &tp->rx_done);
+                       spin_unlock_irqrestore(&tp->rx_lock, flags);
                        tasklet_schedule(&tp->tl);
                }
        }
-       spin_unlock_irqrestore(&tp->rx_lock, flags);
 }
 
 static void tx_bottom(struct r8152 *tp)
@@ -1423,19 +1715,18 @@ static void tx_bottom(struct r8152 *tp)
 
                res = r8152_tx_agg_fill(tp, agg);
                if (res) {
-                       struct net_device_stats *stats;
-                       struct net_device *netdev;
-                       unsigned long flags;
-
-                       netdev = tp->netdev;
-                       stats = rtl8152_get_stats(netdev);
+                       struct net_device *netdev = tp->netdev;
 
                        if (res == -ENODEV) {
                                netif_device_detach(netdev);
                        } else {
+                               struct net_device_stats *stats = &netdev->stats;
+                               unsigned long flags;
+
                                netif_warn(tp, tx_err, netdev,
                                           "failed tx_urb %d\n", res);
                                stats->tx_dropped += agg->skb_num;
+
                                spin_lock_irqsave(&tp->tx_lock, flags);
                                list_add_tail(&agg->list, &tp->tx_free);
                                spin_unlock_irqrestore(&tp->tx_lock, flags);
@@ -1475,6 +1766,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
        return usb_submit_urb(agg->urb, mem_flags);
 }
 
+static void rtl_drop_queued_tx(struct r8152 *tp)
+{
+       struct net_device_stats *stats = &tp->netdev->stats;
+       struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+       struct sk_buff *skb;
+
+       if (skb_queue_empty(tx_queue))
+               return;
+
+       __skb_queue_head_init(&skb_head);
+       spin_lock_bh(&tx_queue->lock);
+       skb_queue_splice_init(tx_queue, &skb_head);
+       spin_unlock_bh(&tx_queue->lock);
+
+       while ((skb = __skb_dequeue(&skb_head))) {
+               dev_kfree_skb(skb);
+               stats->tx_dropped++;
+       }
+}
+
 static void rtl8152_tx_timeout(struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
@@ -1538,7 +1849,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
 }
 
 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
-                                           struct net_device *netdev)
+                                       struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
 
@@ -1546,13 +1857,17 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
 
        skb_queue_tail(&tp->tx_queue, skb);
 
-       if (list_empty(&tp->tx_free) &&
-           skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
+       if (!list_empty(&tp->tx_free)) {
+               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       set_bit(SCHEDULE_TASKLET, &tp->flags);
+                       schedule_delayed_work(&tp->schedule, 0);
+               } else {
+                       usb_mark_last_busy(tp->udev);
+                       tasklet_schedule(&tp->tl);
+               }
+       } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen)
                netif_stop_queue(netdev);
 
-       if (!list_empty(&tp->tx_free))
-               tasklet_schedule(&tp->tl);
-
        return NETDEV_TX_OK;
 }
 
@@ -1610,6 +1925,18 @@ static void rtl_set_eee_plus(struct r8152 *tp)
        }
 }
 
+static void rxdy_gated_en(struct r8152 *tp, bool enable)
+{
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+       if (enable)
+               ocp_data |= RXDY_GATED_EN;
+       else
+               ocp_data &= ~RXDY_GATED_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+}
+
 static int rtl_enable(struct r8152 *tp)
 {
        u32 ocp_data;
@@ -1621,9 +1948,7 @@ static int rtl_enable(struct r8152 *tp)
        ocp_data |= CR_RE | CR_TE;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
-       ocp_data &= ~RXDY_GATED_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+       rxdy_gated_en(tp, false);
 
        INIT_LIST_HEAD(&tp->rx_done);
        ret = 0;
@@ -1678,8 +2003,6 @@ static int rtl8153_enable(struct r8152 *tp)
 
 static void rtl8152_disable(struct r8152 *tp)
 {
-       struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
-       struct sk_buff *skb;
        u32 ocp_data;
        int i;
 
@@ -1687,17 +2010,12 @@ static void rtl8152_disable(struct r8152 *tp)
        ocp_data &= ~RCR_ACPT_ALL;
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 
-       while ((skb = skb_dequeue(&tp->tx_queue))) {
-               dev_kfree_skb(skb);
-               stats->tx_dropped++;
-       }
+       rtl_drop_queued_tx(tp);
 
        for (i = 0; i < RTL8152_MAX_TX; i++)
                usb_kill_urb(tp->tx_info[i].urb);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
-       ocp_data |= RXDY_GATED_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+       rxdy_gated_en(tp, true);
 
        for (i = 0; i < 1000; i++) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -1718,80 +2036,271 @@ static void rtl8152_disable(struct r8152 *tp)
        rtl8152_nic_reset(tp);
 }
 
-static void r8152b_exit_oob(struct r8152 *tp)
+static void r8152_power_cut_en(struct r8152 *tp, bool enable)
 {
-       u32     ocp_data;
-       int     i;
+       u32 ocp_data;
 
-       ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
-       ocp_data &= ~RCR_ACPT_ALL;
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+       if (enable)
+               ocp_data |= POWER_CUT;
+       else
+               ocp_data &= ~POWER_CUT;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
-       ocp_data |= RXDY_GATED_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+       ocp_data &= ~RESUME_INDICATE;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+}
 
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
-       ocp_data &= ~NOW_IS_OOB;
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+static u32 __rtl_get_wol(struct r8152 *tp)
+{
+       u32 ocp_data;
+       u32 wolopts = 0;
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
-       ocp_data &= ~MCU_BORW_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+       if (!(ocp_data & LAN_WAKE_EN))
+               return 0;
 
-       for (i = 0; i < 1000; i++) {
-               ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
-               if (ocp_data & LINK_LIST_READY)
-                       break;
-               mdelay(1);
-       }
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+       if (ocp_data & LINK_ON_WAKE_EN)
+               wolopts |= WAKE_PHY;
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
-       ocp_data |= RE_INIT_LL;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+       if (ocp_data & UWF_EN)
+               wolopts |= WAKE_UCAST;
+       if (ocp_data & BWF_EN)
+               wolopts |= WAKE_BCAST;
+       if (ocp_data & MWF_EN)
+               wolopts |= WAKE_MCAST;
 
-       for (i = 0; i < 1000; i++) {
-               ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
-               if (ocp_data & LINK_LIST_READY)
-                       break;
-               mdelay(1);
-       }
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+       if (ocp_data & MAGIC_EN)
+               wolopts |= WAKE_MAGIC;
 
-       rtl8152_nic_reset(tp);
+       return wolopts;
+}
 
-       /* rx share fifo credit full threshold */
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
+{
+       u32 ocp_data;
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-       ocp_data &= STAT_SPEED_MASK;
-       if (ocp_data == STAT_SPEED_FULL) {
-               /* rx share fifo credit near full threshold */
-               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
-                               RXFIFO_THR2_FULL);
-               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
-                               RXFIFO_THR3_FULL);
-       } else {
-               /* rx share fifo credit near full threshold */
-               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
-                               RXFIFO_THR2_HIGH);
-               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
-                               RXFIFO_THR3_HIGH);
-       }
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
 
-       /* TX share fifo free credit full threshold */
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+       ocp_data &= ~LINK_ON_WAKE_EN;
+       if (wolopts & WAKE_PHY)
+               ocp_data |= LINK_ON_WAKE_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+       if (wolopts & WAKE_UCAST)
+               ocp_data |= UWF_EN;
+       if (wolopts & WAKE_BCAST)
+               ocp_data |= BWF_EN;
+       if (wolopts & WAKE_MCAST)
+               ocp_data |= MWF_EN;
+       if (wolopts & WAKE_ANY)
+               ocp_data |= LAN_WAKE_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
 
-       ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
-                       TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
-       ocp_data &= ~CPCR_RX_VLAN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+       ocp_data &= ~MAGIC_EN;
+       if (wolopts & WAKE_MAGIC)
+               ocp_data |= MAGIC_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+       if (wolopts & WAKE_ANY)
+               device_set_wakeup_enable(&tp->udev->dev, true);
+       else
+               device_set_wakeup_enable(&tp->udev->dev, false);
+}
+
+static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
+{
+       if (enable) {
+               u32 ocp_data;
+
+               __rtl_set_wol(tp, WAKE_ANY);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+               ocp_data |= LINK_OFF_WAKE_EN;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+       } else {
+               __rtl_set_wol(tp, tp->saved_wolopts);
+       }
+}
+
+static void rtl_phy_reset(struct r8152 *tp)
+{
+       u16 data;
+       int i;
+
+       clear_bit(PHY_RESET, &tp->flags);
+
+       data = r8152_mdio_read(tp, MII_BMCR);
+
+       /* don't reset again before the previous reset completes */
+       if (data & BMCR_RESET)
+               return;
+
+       data |= BMCR_RESET;
+       r8152_mdio_write(tp, MII_BMCR, data);
+
+       for (i = 0; i < 50; i++) {
+               msleep(20);
+               if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+                       break;
+       }
+}
+
+static void rtl_clear_bp(struct r8152 *tp)
+{
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
+       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
+       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
+       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
+       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
+       mdelay(3);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
+       ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
+}
+
+static void r8153_clear_bp(struct r8152 *tp)
+{
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+       rtl_clear_bp(tp);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+       ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8152b_disable_aldps(struct r8152 *tp)
+{
+       ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
+       msleep(20);
+}
+
+static inline void r8152b_enable_aldps(struct r8152 *tp)
+{
+       ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
+                                           LINKENA | DIS_SDSAVE);
+}
+
+static void r8152b_hw_phy_cfg(struct r8152 *tp)
+{
+       u16 data;
+
+       data = r8152_mdio_read(tp, MII_BMCR);
+       if (data & BMCR_PDOWN) {
+               data &= ~BMCR_PDOWN;
+               r8152_mdio_write(tp, MII_BMCR, data);
+       }
+
+       r8152b_disable_aldps(tp);
+
+       rtl_clear_bp(tp);
+
+       r8152b_enable_aldps(tp);
+       set_bit(PHY_RESET, &tp->flags);
+}
+
+static void r8152b_exit_oob(struct r8152 *tp)
+{
+       u32 ocp_data;
+       int i;
+
+       ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+       ocp_data &= ~RCR_ACPT_ALL;
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+
+       rxdy_gated_en(tp, true);
+       r8153_teredo_off(tp);
+       r8152b_hw_phy_cfg(tp);
+
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
+
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+       ocp_data &= ~NOW_IS_OOB;
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+       ocp_data &= ~MCU_BORW_EN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+       for (i = 0; i < 1000; i++) {
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+               if (ocp_data & LINK_LIST_READY)
+                       break;
+               mdelay(1);
+       }
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7);
+       ocp_data |= RE_INIT_LL;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data);
+
+       for (i = 0; i < 1000; i++) {
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
+               if (ocp_data & LINK_LIST_READY)
+                       break;
+               mdelay(1);
+       }
+
+       rtl8152_nic_reset(tp);
+
+       /* rx share fifo credit full threshold */
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
+       ocp_data &= STAT_SPEED_MASK;
+       if (ocp_data == STAT_SPEED_FULL) {
+               /* rx share fifo credit near full threshold */
+               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
+                               RXFIFO_THR2_FULL);
+               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
+                               RXFIFO_THR3_FULL);
+       } else {
+               /* rx share fifo credit near full threshold */
+               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
+                               RXFIFO_THR2_HIGH);
+               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2,
+                               RXFIFO_THR3_HIGH);
+       }
+
+       /* TX share fifo free credit full threshold */
+       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL);
+
+       ocp_write_byte(tp, MCU_TYPE_USB, USB_TX_AGG, TX_AGG_MAX_THRESHOLD);
+       ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_HIGH);
+       ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA,
+                       TEST_MODE_DISABLE | TX_SIZE_ADJUST1);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
+       ocp_data &= ~CPCR_RX_VLAN;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
 
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
 
@@ -1835,10 +2344,6 @@ static void r8152b_enter_oob(struct r8152 *tp)
 
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
-       ocp_data |= MAGIC_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
        ocp_data |= CPCR_RX_VLAN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
@@ -1851,36 +2356,26 @@ static void r8152b_enter_oob(struct r8152 *tp)
        ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
-       ocp_data &= ~RXDY_GATED_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+       rxdy_gated_en(tp, false);
 
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
        ocp_data |= RCR_APM | RCR_AM | RCR_AB;
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
 }
 
-static void r8152b_disable_aldps(struct r8152 *tp)
-{
-       ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
-       msleep(20);
-}
-
-static inline void r8152b_enable_aldps(struct r8152 *tp)
-{
-       ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
-                                           LINKENA | DIS_SDSAVE);
-}
-
 static void r8153_hw_phy_cfg(struct r8152 *tp)
 {
        u32 ocp_data;
        u16 data;
 
        ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
-       r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+       data = r8152_mdio_read(tp, MII_BMCR);
+       if (data & BMCR_PDOWN) {
+               data &= ~BMCR_PDOWN;
+               r8152_mdio_write(tp, MII_BMCR, data);
+       }
+
+       r8153_clear_bp(tp);
 
        if (tp->version == RTL_VER_03) {
                data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -1916,9 +2411,11 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        data = sram_read(tp, SRAM_10M_AMP2);
        data |= AMP_DN;
        sram_write(tp, SRAM_10M_AMP2, data);
+
+       set_bit(PHY_RESET, &tp->flags);
 }
 
-static void r8153_u1u2en(struct r8152 *tp, int enable)
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
 {
        u8 u1u2[8];
 
@@ -1930,7 +2427,7 @@ static void r8153_u1u2en(struct r8152 *tp, int enable)
        usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
 }
 
-static void r8153_u2p3en(struct r8152 *tp, int enable)
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
 {
        u32 ocp_data;
 
@@ -1942,7 +2439,7 @@ static void r8153_u2p3en(struct r8152 *tp, int enable)
        ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
 }
 
-static void r8153_power_cut_en(struct r8152 *tp, int enable)
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
 {
        u32 ocp_data;
 
@@ -1958,28 +2455,12 @@ static void r8153_power_cut_en(struct r8152 *tp, int enable)
        ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
 }
 
-static void r8153_teredo_off(struct r8152 *tp)
-{
-       u32 ocp_data;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
-       ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
-
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
-}
-
 static void r8153_first_init(struct r8152 *tp)
 {
        u32 ocp_data;
        int i;
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
-       ocp_data |= RXDY_GATED_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
-
+       rxdy_gated_en(tp, true);
        r8153_teredo_off(tp);
 
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -2072,10 +2553,6 @@ static void r8153_enter_oob(struct r8152 *tp)
 
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
-       ocp_data |= MAGIC_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
        ocp_data &= ~TEREDO_WAKE_MASK;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
@@ -2092,11 +2569,7 @@ static void r8153_enter_oob(struct r8152 *tp)
        ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
 
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
-       ocp_data &= ~RXDY_GATED_EN;
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+       rxdy_gated_en(tp, false);
 
        ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
        ocp_data |= RCR_APM | RCR_AM | RCR_AB;
@@ -2187,12 +2660,26 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
                bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
        }
 
+       if (test_bit(PHY_RESET, &tp->flags))
+               bmcr |= BMCR_RESET;
+
        if (tp->mii.supports_gmii)
                r8152_mdio_write(tp, MII_CTRL1000, gbcr);
 
        r8152_mdio_write(tp, MII_ADVERTISE, anar);
        r8152_mdio_write(tp, MII_BMCR, bmcr);
 
+       if (test_bit(PHY_RESET, &tp->flags)) {
+               int i;
+
+               clear_bit(PHY_RESET, &tp->flags);
+               for (i = 0; i < 50; i++) {
+                       msleep(20);
+                       if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+                               break;
+               }
+       }
+
 out:
 
        return ret;
@@ -2200,12 +2687,7 @@ out:
 
 static void rtl8152_down(struct r8152 *tp)
 {
-       u32     ocp_data;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
-       ocp_data &= ~POWER_CUT;
-       ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
+       r8152_power_cut_en(tp, false);
        r8152b_disable_aldps(tp);
        r8152b_enter_oob(tp);
        r8152b_enable_aldps(tp);
@@ -2213,8 +2695,8 @@ static void rtl8152_down(struct r8152 *tp)
 
 static void rtl8153_down(struct r8152 *tp)
 {
-       r8153_u1u2en(tp, 0);
-       r8153_power_cut_en(tp, 0);
+       r8153_u1u2en(tp, false);
+       r8153_power_cut_en(tp, false);
        r8153_disable_aldps(tp);
        r8153_enter_oob(tp);
        r8153_enable_aldps(tp);
@@ -2249,6 +2731,9 @@ static void rtl_work_func_t(struct work_struct *work)
 {
        struct r8152 *tp = container_of(work, struct r8152, schedule.work);
 
+       if (usb_autopm_get_interface(tp->intf) < 0)
+               return;
+
        if (!test_bit(WORK_ENABLE, &tp->flags))
                goto out1;
 
@@ -2261,8 +2746,17 @@ static void rtl_work_func_t(struct work_struct *work)
        if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
                _rtl8152_set_rx_mode(tp->netdev);
 
+       if (test_bit(SCHEDULE_TASKLET, &tp->flags) &&
+           (tp->speed & LINK_STATUS)) {
+               clear_bit(SCHEDULE_TASKLET, &tp->flags);
+               tasklet_schedule(&tp->tl);
+       }
+
+       if (test_bit(PHY_RESET, &tp->flags))
+               rtl_phy_reset(tp);
+
 out1:
-       return;
+       usb_autopm_put_interface(tp->intf);
 }
 
 static int rtl8152_open(struct net_device *netdev)
@@ -2270,6 +2764,27 @@ static int rtl8152_open(struct net_device *netdev)
        struct r8152 *tp = netdev_priv(netdev);
        int res = 0;
 
+       res = alloc_all_mem(tp);
+       if (res)
+               goto out;
+
+       res = usb_autopm_get_interface(tp->intf);
+       if (res < 0) {
+               free_all_mem(tp);
+               goto out;
+       }
+
+       /* WORK_ENABLE may be set when autoresume occurs */
+       if (test_bit(WORK_ENABLE, &tp->flags)) {
+               clear_bit(WORK_ENABLE, &tp->flags);
+               usb_kill_urb(tp->intr_urb);
+               cancel_delayed_work_sync(&tp->schedule);
+               if (tp->speed & LINK_STATUS)
+                       tp->rtl_ops.disable(tp);
+       }
+
+       tp->rtl_ops.up(tp);
+
        rtl8152_set_speed(tp, AUTONEG_ENABLE,
                          tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
                          DUPLEX_FULL);
@@ -2277,15 +2792,19 @@ static int rtl8152_open(struct net_device *netdev)
        netif_carrier_off(netdev);
        netif_start_queue(netdev);
        set_bit(WORK_ENABLE, &tp->flags);
+
        res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
        if (res) {
                if (res == -ENODEV)
                        netif_device_detach(tp->netdev);
                netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
                           res);
+               free_all_mem(tp);
        }
 
+       usb_autopm_put_interface(tp->intf);
 
+out:
        return res;
 }
 
@@ -2298,33 +2817,30 @@ static int rtl8152_close(struct net_device *netdev)
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
        netif_stop_queue(netdev);
-       tasklet_disable(&tp->tl);
-       tp->rtl_ops.disable(tp);
-       tasklet_enable(&tp->tl);
 
-       return res;
-}
+       res = usb_autopm_get_interface(tp->intf);
+       if (res < 0) {
+               rtl_drop_queued_tx(tp);
+       } else {
+               /*
+                * Autosuspend may have been enabled and wouldn't be
+                * disabled when autoresume occurs, because
+                * netif_running() would be false.
+                */
+               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       rtl_runtime_suspend_enable(tp, false);
+                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+               }
 
-static void rtl_clear_bp(struct r8152 *tp)
-{
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
-       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
-       ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
-       mdelay(3);
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
-       ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
+               tasklet_disable(&tp->tl);
+               tp->rtl_ops.down(tp);
+               tasklet_enable(&tp->tl);
+               usb_autopm_put_interface(tp->intf);
+       }
 
-static void r8153_clear_bp(struct r8152 *tp)
-{
-       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
-       ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
-       rtl_clear_bp(tp);
+       free_all_mem(tp);
+
+       return res;
 }
 
 static void r8152b_enable_eee(struct r8152 *tp)
@@ -2375,18 +2891,18 @@ static void r8152b_enable_fc(struct r8152 *tp)
        r8152_mdio_write(tp, MII_ADVERTISE, anar);
 }
 
-static void r8152b_hw_phy_cfg(struct r8152 *tp)
+static void rtl_tally_reset(struct r8152 *tp)
 {
-       r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
-       r8152b_disable_aldps(tp);
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY);
+       ocp_data |= TALLY_RESET;
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RSTTALLY, ocp_data);
 }
 
 static void r8152b_init(struct r8152 *tp)
 {
        u32 ocp_data;
-       int i;
-
-       rtl_clear_bp(tp);
 
        if (tp->version == RTL_VER_01) {
                ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
@@ -2394,17 +2910,7 @@ static void r8152b_init(struct r8152 *tp)
                ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
        }
 
-       r8152b_hw_phy_cfg(tp);
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
-       ocp_data &= ~POWER_CUT;
-       ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
-       ocp_data &= ~RESUME_INDICATE;
-       ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-
-       r8152b_exit_oob(tp);
+       r8152_power_cut_en(tp, false);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
        ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
@@ -2420,14 +2926,7 @@ static void r8152b_init(struct r8152 *tp)
        r8152b_enable_eee(tp);
        r8152b_enable_aldps(tp);
        r8152b_enable_fc(tp);
-
-       r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
-                                      BMCR_ANRESTART);
-       for (i = 0; i < 100; i++) {
-               udelay(100);
-               if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
-                       break;
-       }
+       rtl_tally_reset(tp);
 
        /* enable rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
@@ -2440,7 +2939,7 @@ static void r8153_init(struct r8152 *tp)
        u32 ocp_data;
        int i;
 
-       r8153_u1u2en(tp, 0);
+       r8153_u1u2en(tp, false);
 
        for (i = 0; i < 500; i++) {
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
@@ -2456,14 +2955,12 @@ static void r8153_init(struct r8152 *tp)
                msleep(20);
        }
 
-       r8153_u2p3en(tp, 0);
+       r8153_u2p3en(tp, false);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
        ocp_data &= ~TIMER11_EN;
        ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
 
-       r8153_clear_bp(tp);
-
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
        ocp_data &= ~LED_MODE_MASK;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
@@ -2481,10 +2978,8 @@ static void r8153_init(struct r8152 *tp)
        ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
        ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
 
-       r8153_power_cut_en(tp, 0);
-       r8153_u1u2en(tp, 1);
-
-       r8153_first_init(tp);
+       r8153_power_cut_en(tp, false);
+       r8153_u1u2en(tp, true);
 
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
@@ -2499,26 +2994,31 @@ static void r8153_init(struct r8152 *tp)
        r8153_enable_eee(tp);
        r8153_enable_aldps(tp);
        r8152b_enable_fc(tp);
-
-       r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
-                                      BMCR_ANRESTART);
+       rtl_tally_reset(tp);
 }
 
 static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct r8152 *tp = usb_get_intfdata(intf);
 
-       netif_device_detach(tp->netdev);
+       if (PMSG_IS_AUTO(message))
+               set_bit(SELECTIVE_SUSPEND, &tp->flags);
+       else
+               netif_device_detach(tp->netdev);
 
        if (netif_running(tp->netdev)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                cancel_delayed_work_sync(&tp->schedule);
-               tasklet_disable(&tp->tl);
+               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       rtl_runtime_suspend_enable(tp, true);
+               } else {
+                       tasklet_disable(&tp->tl);
+                       tp->rtl_ops.down(tp);
+                       tasklet_enable(&tp->tl);
+               }
        }
 
-       tp->rtl_ops.down(tp);
-
        return 0;
 }
 
@@ -2526,22 +3026,77 @@ static int rtl8152_resume(struct usb_interface *intf)
 {
        struct r8152 *tp = usb_get_intfdata(intf);
 
-       tp->rtl_ops.init(tp);
-       netif_device_attach(tp->netdev);
+       if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+               tp->rtl_ops.init(tp);
+               netif_device_attach(tp->netdev);
+       }
+
        if (netif_running(tp->netdev)) {
-               rtl8152_set_speed(tp, AUTONEG_ENABLE,
+               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+                       rtl_runtime_suspend_enable(tp, false);
+                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       if (tp->speed & LINK_STATUS)
+                               tp->rtl_ops.disable(tp);
+               } else {
+                       tp->rtl_ops.up(tp);
+                       rtl8152_set_speed(tp, AUTONEG_ENABLE,
                                tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
                                DUPLEX_FULL);
+               }
                tp->speed = 0;
                netif_carrier_off(tp->netdev);
                set_bit(WORK_ENABLE, &tp->flags);
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
-               tasklet_enable(&tp->tl);
        }
 
        return 0;
 }
 
+static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct r8152 *tp = netdev_priv(dev);
+
+       if (usb_autopm_get_interface(tp->intf) < 0)
+               return;
+
+       wol->supported = WAKE_ANY;
+       wol->wolopts = __rtl_get_wol(tp);
+
+       usb_autopm_put_interface(tp->intf);
+}
+
+static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct r8152 *tp = netdev_priv(dev);
+       int ret;
+
+       ret = usb_autopm_get_interface(tp->intf);
+       if (ret < 0)
+               goto out_set_wol;
+
+       __rtl_set_wol(tp, wol->wolopts);
+       tp->saved_wolopts = wol->wolopts & WAKE_ANY;
+
+       usb_autopm_put_interface(tp->intf);
+
+out_set_wol:
+       return ret;
+}
+
+static u32 rtl8152_get_msglevel(struct net_device *dev)
+{
+       struct r8152 *tp = netdev_priv(dev);
+
+       return tp->msg_enable;
+}
+
+static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
+{
+       struct r8152 *tp = netdev_priv(dev);
+
+       tp->msg_enable = value;
+}
+
 static void rtl8152_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *info)
 {
@@ -2566,8 +3121,76 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
 static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct r8152 *tp = netdev_priv(dev);
+       int ret;
+
+       ret = usb_autopm_get_interface(tp->intf);
+       if (ret < 0)
+               goto out;
+
+       ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+
+       usb_autopm_put_interface(tp->intf);
+
+out:
+       return ret;
+}
+
+static const char rtl8152_gstrings[][ETH_GSTRING_LEN] = {
+       "tx_packets",
+       "rx_packets",
+       "tx_errors",
+       "rx_errors",
+       "rx_missed",
+       "align_errors",
+       "tx_single_collisions",
+       "tx_multi_collisions",
+       "rx_unicast",
+       "rx_broadcast",
+       "rx_multicast",
+       "tx_aborted",
+       "tx_underrun",
+};
 
-       return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+static int rtl8152_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(rtl8152_gstrings);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void rtl8152_get_ethtool_stats(struct net_device *dev,
+                                     struct ethtool_stats *stats, u64 *data)
+{
+       struct r8152 *tp = netdev_priv(dev);
+       struct tally_counter tally;
+
+       generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
+
+       data[0] = le64_to_cpu(tally.tx_packets);
+       data[1] = le64_to_cpu(tally.rx_packets);
+       data[2] = le64_to_cpu(tally.tx_errors);
+       data[3] = le32_to_cpu(tally.rx_errors);
+       data[4] = le16_to_cpu(tally.rx_missed);
+       data[5] = le16_to_cpu(tally.align_errors);
+       data[6] = le32_to_cpu(tally.tx_one_collision);
+       data[7] = le32_to_cpu(tally.tx_multi_collision);
+       data[8] = le64_to_cpu(tally.rx_unicast);
+       data[9] = le64_to_cpu(tally.rx_broadcast);
+       data[10] = le32_to_cpu(tally.rx_multicast);
+       data[11] = le16_to_cpu(tally.tx_aborted);
+       data[12] = le16_to_cpu(tally.tx_underun);
+}
+
+static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+       switch (stringset) {
+       case ETH_SS_STATS:
+               memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+               break;
+       }
 }
 
 static struct ethtool_ops ops = {
@@ -2575,13 +3198,24 @@ static struct ethtool_ops ops = {
        .get_settings = rtl8152_get_settings,
        .set_settings = rtl8152_set_settings,
        .get_link = ethtool_op_get_link,
+       .get_msglevel = rtl8152_get_msglevel,
+       .set_msglevel = rtl8152_set_msglevel,
+       .get_wol = rtl8152_get_wol,
+       .set_wol = rtl8152_set_wol,
+       .get_strings = rtl8152_get_strings,
+       .get_sset_count = rtl8152_get_sset_count,
+       .get_ethtool_stats = rtl8152_get_ethtool_stats,
 };
 
 static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 {
        struct r8152 *tp = netdev_priv(netdev);
        struct mii_ioctl_data *data = if_mii(rq);
-       int res = 0;
+       int res;
+
+       res = usb_autopm_get_interface(tp->intf);
+       if (res < 0)
+               goto out;
 
        switch (cmd) {
        case SIOCGMIIPHY:
@@ -2604,6 +3238,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
                res = -EOPNOTSUPP;
        }
 
+       usb_autopm_put_interface(tp->intf);
+
+out:
        return res;
 }
 
@@ -2656,22 +3293,13 @@ static void r8152b_get_version(struct r8152 *tp)
 
 static void rtl8152_unload(struct r8152 *tp)
 {
-       u32     ocp_data;
-
-       if (tp->version != RTL_VER_01) {
-               ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
-               ocp_data |= POWER_CUT;
-               ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-       }
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
-       ocp_data &= ~RESUME_INDICATE;
-       ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+       if (tp->version != RTL_VER_01)
+               r8152_power_cut_en(tp, true);
 }
 
 static void rtl8153_unload(struct r8152 *tp)
 {
-       r8153_power_cut_en(tp, 1);
+       r8153_power_cut_en(tp, true);
 }
 
 static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -2686,6 +3314,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                        ops->init               = r8152b_init;
                        ops->enable             = rtl8152_enable;
                        ops->disable            = rtl8152_disable;
+                       ops->up                 = r8152b_exit_oob;
                        ops->down               = rtl8152_down;
                        ops->unload             = rtl8152_unload;
                        ret = 0;
@@ -2694,6 +3323,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
                        ops->disable            = rtl8152_disable;
+                       ops->up                 = r8153_first_init;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
@@ -2709,6 +3339,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
                        ops->init               = r8153_init;
                        ops->enable             = rtl8153_enable;
                        ops->disable            = rtl8152_disable;
+                       ops->up                 = r8153_first_init;
                        ops->down               = rtl8153_down;
                        ops->unload             = rtl8153_unload;
                        ret = 0;
@@ -2766,9 +3397,15 @@ static int rtl8152_probe(struct usb_interface *intf,
        netdev->netdev_ops = &rtl8152_netdev_ops;
        netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
 
-       netdev->features |= NETIF_F_IP_CSUM;
-       netdev->hw_features = NETIF_F_IP_CSUM;
+       netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+                           NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM |
+                           NETIF_F_TSO6;
+       netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+                             NETIF_F_TSO | NETIF_F_FRAGLIST |
+                             NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
        SET_ETHTOOL_OPS(netdev, &ops);
+       netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
        tp->mii.dev = netdev;
        tp->mii.mdio_read = read_mii_word;
@@ -2778,14 +3415,12 @@ static int rtl8152_probe(struct usb_interface *intf,
        tp->mii.phy_id = R8152_PHY_ID;
        tp->mii.supports_gmii = 0;
 
+       intf->needs_remote_wakeup = 1;
+
        r8152b_get_version(tp);
        tp->rtl_ops.init(tp);
        set_ethernet_addr(tp);
 
-       ret = alloc_all_mem(tp);
-       if (ret)
-               goto out;
-
        usb_set_intfdata(intf, tp);
 
        ret = register_netdev(netdev);
@@ -2794,6 +3429,12 @@ static int rtl8152_probe(struct usb_interface *intf,
                goto out1;
        }
 
+       tp->saved_wolopts = __rtl_get_wol(tp);
+       if (tp->saved_wolopts)
+               device_set_wakeup_enable(&udev->dev, true);
+       else
+               device_set_wakeup_enable(&udev->dev, false);
+
        netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
 
        return 0;
@@ -2815,7 +3456,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
                tasklet_kill(&tp->tl);
                unregister_netdev(tp->netdev);
                tp->rtl_ops.unload(tp);
-               free_all_mem(tp);
                free_netdev(tp->netdev);
        }
 }
@@ -2838,6 +3478,8 @@ static struct usb_driver rtl8152_driver = {
        .suspend =      rtl8152_suspend,
        .resume =       rtl8152_resume,
        .reset_resume = rtl8152_resume,
+       .supports_autosuspend = 1,
+       .disable_hub_initiated_lpm = 1,
 };
 
 module_usb_driver(rtl8152_driver);
index c0e7c64765abd449070a7bce6826a994c674b8c7..b4a10bcb66a0f62be1606fa34629d120913fc74d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/etherdevice.h>
 #include <linux/u64_stats_sync.h>
 
+#include <net/rtnetlink.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <linux/veth.h>
@@ -155,10 +156,10 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&stats->syncp);
                        packets = stats->packets;
                        bytes = stats->bytes;
-               } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
                result->packets += packets;
                result->bytes += bytes;
        }
@@ -235,18 +236,9 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
 
 static int veth_dev_init(struct net_device *dev)
 {
-       int i;
-
-       dev->vstats = alloc_percpu(struct pcpu_vstats);
+       dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
        if (!dev->vstats)
                return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct pcpu_vstats *veth_stats;
-               veth_stats = per_cpu_ptr(dev->vstats, i);
-               u64_stats_init(&veth_stats->syncp);
-       }
-
        return 0;
 }
 
@@ -336,10 +328,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 
                nla_peer = data[VETH_INFO_PEER];
                ifmp = nla_data(nla_peer);
-               err = nla_parse(peer_tb, IFLA_MAX,
-                               nla_data(nla_peer) + sizeof(struct ifinfomsg),
-                               nla_len(nla_peer) - sizeof(struct ifinfomsg),
-                               ifla_policy);
+               err = rtnl_nla_parse_ifla(peer_tb,
+                                         nla_data(nla_peer) + sizeof(struct ifinfomsg),
+                                         nla_len(nla_peer) - sizeof(struct ifinfomsg));
                if (err < 0)
                        return err;
 
index 841b60831df1b2e83c12f55ee5e8fc90874b9475..6f15d9b315a11ceb8d46b40062a62d73379a8390 100644 (file)
@@ -882,7 +882,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                        dev_warn(&dev->dev,
                                 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
                dev->stats.tx_dropped++;
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        virtqueue_kick(sq->vq);
@@ -1000,16 +1000,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
                u64 tpackets, tbytes, rpackets, rbytes;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
+                       start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
                        tpackets = stats->tx_packets;
                        tbytes   = stats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
+               } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
+                       start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
                        rpackets = stats->rx_packets;
                        rbytes   = stats->rx_bytes;
-               } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
+               } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
 
                tot->rx_packets += rpackets;
                tot->tx_packets += tpackets;
index 0fa3b44f7342dc0cf979cf69b6b1d6c3444411f3..97394345e5dd223fd77ede415ced81e6fb26f545 100644 (file)
@@ -1078,7 +1078,7 @@ unlock_drop_pkt:
        spin_unlock_irqrestore(&tq->tx_lock, flags);
 drop_pkt:
        tq->stats.drop_total++;
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
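
Both conversions above swap kfree_skb()/dev_kfree_skb() for dev_kfree_skb_any(), which picks the hardirq-safe variant at runtime; that matters on transmit paths that netpoll can enter with interrupts disabled. A minimal drop-path sketch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        bool ring_full = true;  /* stands in for a real descriptor-ring check */

        if (ring_full) {
                dev->stats.tx_dropped++;
                /* safe whether we were called in softirq or hardirq context */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* ... otherwise map the skb and post it to the hardware ... */
        return NETDEV_TX_OK;
}
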
 
@@ -2738,47 +2738,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
 /*
  * Enable MSIx vectors.
  * Returns :
- *     0 on successful enabling of required vectors,
  *     VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
- *      could be enabled.
- *     number of vectors which can be enabled otherwise (this number is smaller
+ *      were enabled.
+ *     number of vectors which were enabled otherwise (this number is greater
  *      than VMXNET3_LINUX_MIN_MSIX_VECT)
  */
 
 static int
-vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
-                            int vectors)
-{
-       int err = 0, vector_threshold;
-       vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
-
-       while (vectors >= vector_threshold) {
-               err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
-                                     vectors);
-               if (!err) {
-                       adapter->intr.num_intrs = vectors;
-                       return 0;
-               } else if (err < 0) {
-                       dev_err(&adapter->netdev->dev,
-                                  "Failed to enable MSI-X, error: %d\n", err);
-                       vectors = 0;
-               } else if (err < vector_threshold) {
-                       break;
-               } else {
-                       /* If fails to enable required number of MSI-x vectors
-                        * try enabling minimum number of vectors required.
-                        */
-                       dev_err(&adapter->netdev->dev,
-                               "Failed to enable %d MSI-X, trying %d instead\n",
-                                   vectors, vector_threshold);
-                       vectors = vector_threshold;
-               }
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
+{
+       int ret = pci_enable_msix_range(adapter->pdev,
+                                       adapter->intr.msix_entries, nvec, nvec);
+
+       if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
+               dev_err(&adapter->netdev->dev,
+                       "Failed to enable %d MSI-X, trying %d\n",
+                       nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
+
+               ret = pci_enable_msix_range(adapter->pdev,
+                                           adapter->intr.msix_entries,
+                                           VMXNET3_LINUX_MIN_MSIX_VECT,
+                                           VMXNET3_LINUX_MIN_MSIX_VECT);
        }
 
-       dev_info(&adapter->pdev->dev,
-                "Number of MSI-X interrupts which can be allocated "
-                "is lower than min threshold required.\n");
-       return err;
+       if (ret < 0) {
+               dev_err(&adapter->netdev->dev,
+                       "Failed to enable MSI-X, error: %d\n", ret);
+       }
+
+       return ret;
 }
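
pci_enable_msix_range() replaces the old retry loop around pci_enable_msix(): it enables some vector count within [minvec, maxvec] and returns it, or a negative error (-ENOSPC when even minvec cannot be satisfied). A hedged sketch of the same exact-count-then-fallback pattern; the minimum below is an illustrative constant standing in for VMXNET3_LINUX_MIN_MSIX_VECT:

#include <linux/pci.h>

#define EXAMPLE_MIN_MSIX_VECT 2 /* assumed minimum vector count */

static int example_acquire_msix(struct pci_dev *pdev,
                                struct msix_entry *entries, int nvec)
{
        /* exact-count request: min == max == nvec */
        int ret = pci_enable_msix_range(pdev, entries, nvec, nvec);

        if (ret == -ENOSPC && nvec > EXAMPLE_MIN_MSIX_VECT)
                ret = pci_enable_msix_range(pdev, entries,
                                            EXAMPLE_MIN_MSIX_VECT,
                                            EXAMPLE_MIN_MSIX_VECT);

        /* > 0: number of vectors enabled, < 0: error */
        return ret;
}
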
 
 
@@ -2805,56 +2793,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 
 #ifdef CONFIG_PCI_MSI
        if (adapter->intr.type == VMXNET3_IT_MSIX) {
-               int vector, err = 0;
-
-               adapter->intr.num_intrs = (adapter->share_intr ==
-                                          VMXNET3_INTR_TXSHARE) ? 1 :
-                                          adapter->num_tx_queues;
-               adapter->intr.num_intrs += (adapter->share_intr ==
-                                          VMXNET3_INTR_BUDDYSHARE) ? 0 :
-                                          adapter->num_rx_queues;
-               adapter->intr.num_intrs += 1;           /* for link event */
-
-               adapter->intr.num_intrs = (adapter->intr.num_intrs >
-                                          VMXNET3_LINUX_MIN_MSIX_VECT
-                                          ? adapter->intr.num_intrs :
-                                          VMXNET3_LINUX_MIN_MSIX_VECT);
-
-               for (vector = 0; vector < adapter->intr.num_intrs; vector++)
-                       adapter->intr.msix_entries[vector].entry = vector;
-
-               err = vmxnet3_acquire_msix_vectors(adapter,
-                                                  adapter->intr.num_intrs);
+               int i, nvec;
+
+               nvec  = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+                       1 : adapter->num_tx_queues;
+               nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
+                       0 : adapter->num_rx_queues;
+               nvec += 1;      /* for link event */
+               nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
+                      nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
+
+               for (i = 0; i < nvec; i++)
+                       adapter->intr.msix_entries[i].entry = i;
+
+               nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
+               if (nvec < 0)
+                       goto msix_err;
+
                /* If we cannot allocate one MSIx vector per queue
                 * then limit the number of rx queues to 1
                 */
-               if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+               if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
                        if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
                            || adapter->num_rx_queues != 1) {
                                adapter->share_intr = VMXNET3_INTR_TXSHARE;
                                netdev_err(adapter->netdev,
                                           "Number of rx queues : 1\n");
                                adapter->num_rx_queues = 1;
-                               adapter->intr.num_intrs =
-                                               VMXNET3_LINUX_MIN_MSIX_VECT;
                        }
-                       return;
                }
-               if (!err)
-                       return;
 
+               adapter->intr.num_intrs = nvec;
+               return;
+
+msix_err:
                /* If we cannot allocate MSIx vectors use only one rx queue */
                dev_info(&adapter->pdev->dev,
                         "Failed to enable MSI-X, error %d. "
-                        "Limiting #rx queues to 1, try MSI.\n", err);
+                        "Limiting #rx queues to 1, try MSI.\n", nvec);
 
                adapter->intr.type = VMXNET3_IT_MSI;
        }
 
        if (adapter->intr.type == VMXNET3_IT_MSI) {
-               int err;
-               err = pci_enable_msi(adapter->pdev);
-               if (!err) {
+               if (!pci_enable_msi(adapter->pdev)) {
                        adapter->num_rx_queues = 1;
                        adapter->intr.num_intrs = 1;
                        return;
index 1236812c7be69975487e7956c24d39718997ec2b..0d862a5077ab53d01d7c4b6ffb6e4d5f9ed85725 100644 (file)
@@ -1132,7 +1132,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
-       __be16 port;
 
        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1150,8 +1149,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
                goto drop;
 
-       port = inet_sk(sk)->inet_sport;
-
        vs = rcu_dereference_sk_user_data(sk);
        if (!vs)
                goto drop;
@@ -2080,19 +2077,11 @@ static int vxlan_init(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_sock *vs;
-       int i;
 
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *vxlan_stats;
-               vxlan_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&vxlan_stats->syncp);
-       }
-
-
        spin_lock(&vn->sock_lock);
        vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
        if (vs) {
index 48896138418f21e206782bc259e8fa0b5da9ace7..a9970f1af976a5b0c34d41be8df61ed47365713b 100644 (file)
@@ -374,8 +374,7 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
 
        d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
 
-       if (skb_header_cloned(skb) && 
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_cow_head(skb, 0))
                goto drop;
 
        if (i2400m->state == I2400M_SS_IDLE)
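
skb_cow_head(skb, headroom) bundles the clone check and head reallocation into one call that returns 0 on success, replacing the open-coded skb_header_cloned() plus pskb_expand_head() pair. A small sketch of the usual call site before a driver rewrites headers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ensure the header area is writable and not shared with a clone */
        if (skb_cow_head(skb, 0))
                goto drop;

        /* ... edit the headers and hand the skb to the hardware ... */
        return NETDEV_TX_OK;

drop:
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
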
index 200020eb30059d73767a8cda2540b4f9af9326cf..b2137e8f7ca63dbd4b2d7b88e3c19ea5153d327f 100644 (file)
@@ -53,7 +53,7 @@ config LIBERTAS_THINFIRM_USB
 
 config AIRO
        tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
-       depends on ISA_DMA_API && (PCI || BROKEN)
+       depends on CFG80211 && ISA_DMA_API && (PCI || BROKEN)
        select WIRELESS_EXT
        select CRYPTO
        select WEXT_SPY
@@ -73,7 +73,7 @@ config AIRO
 
 config ATMEL
       tristate "Atmel at76c50x chipset  802.11b support"
-      depends on (PCI || PCMCIA)
+      depends on CFG80211 && (PCI || PCMCIA)
       select WIRELESS_EXT
       select WEXT_PRIV
       select FW_LOADER
@@ -116,7 +116,7 @@ config AT76C50X_USB
 
 config AIRO_CS
        tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
-       depends on PCMCIA && (BROKEN || !M32R)
+       depends on CFG80211 && PCMCIA && (BROKEN || !M32R)
        select WIRELESS_EXT
        select WEXT_SPY
        select WEXT_PRIV
@@ -138,7 +138,7 @@ config AIRO_CS
 
 config PCMCIA_WL3501
        tristate "Planet WL3501 PCMCIA cards"
-       depends on PCMCIA
+       depends on CFG80211 && PCMCIA
        select WIRELESS_EXT
        select WEXT_SPY
        help
@@ -168,7 +168,7 @@ config PRISM54
 
 config USB_ZD1201
        tristate "USB ZD1201 based Wireless device support"
-       depends on USB
+       depends on CFG80211 && USB
        select WIRELESS_EXT
        select WEXT_PRIV
        select FW_LOADER
@@ -281,5 +281,6 @@ source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
 source "drivers/net/wireless/mwifiex/Kconfig"
 source "drivers/net/wireless/cw1200/Kconfig"
+source "drivers/net/wireless/rsi/Kconfig"
 
 endif # WLAN
index 0fab227025be33bbd399fd29b3ea2693a6d21000..0c88916867187817ea08f43c4d69cbaf86d28246 100644 (file)
@@ -59,3 +59,4 @@ obj-$(CONFIG_BRCMFMAC)        += brcm80211/
 obj-$(CONFIG_BRCMSMAC) += brcm80211/
 
 obj-$(CONFIG_CW1200)   += cw1200/
+obj-$(CONFIG_RSI_91X)  += rsi/
index edf4b57c4aaa306ebbf12beea5100d6f6a7f5e2e..64747d457bb3a1879f9cb0b478836dea4cb3099e 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/bitops.h>
 #include <linux/scatterlist.h>
 #include <linux/crypto.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/unaligned.h>
 
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
 #include <net/iw_handler.h>
 
 #include "airo.h"
@@ -5797,7 +5797,7 @@ static int airo_set_freq(struct net_device *dev,
 
                /* Hack to fall through... */
                fwrq->e = 0;
-               fwrq->m = ieee80211_freq_to_dsss_chan(f);
+               fwrq->m = ieee80211_frequency_to_channel(f);
        }
        /* Setting by channel number */
        if((fwrq->m > 1000) || (fwrq->e > 0))
@@ -5841,7 +5841,8 @@ static int airo_get_freq(struct net_device *dev,
 
        ch = le16_to_cpu(status_rid.channel);
        if((ch > 0) && (ch < 15)) {
-               fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000;
+               fwrq->m = 100000 *
+                       ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
                fwrq->e = 1;
        } else {
                fwrq->m = ch;
@@ -6898,7 +6899,8 @@ static int airo_get_range(struct net_device *dev,
        k = 0;
        for(i = 0; i < 14; i++) {
                range->freq[k].i = i + 1; /* List index */
-               range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
+               range->freq[k].m = 100000 *
+                    ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
                range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
        }
        range->num_frequency = k;
@@ -7297,7 +7299,8 @@ static inline char *airo_translate_scan(struct net_device *dev,
        /* Add frequency */
        iwe.cmd = SIOCGIWFREQ;
        iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
-       iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000;
+       iwe.u.freq.m = 100000 *
+             ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
        iwe.u.freq.e = 1;
        current_ev = iwe_stream_add_event(info, current_ev, end_buf,
                                          &iwe, IW_EV_FREQ_LEN);
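
The conversions above move from the removed DSSS-only helpers to the band-aware ieee80211_channel_to_frequency(), which returns MHz; wireless-extensions frequencies are encoded as m * 10^e, so multiplying by 100000 with e = 1 yields Hz. A short sketch of that encoding, assuming the 2 GHz band:

#include <linux/wireless.h>
#include <net/cfg80211.h>

static void example_fill_iw_freq(struct iw_freq *fwrq, int chan)
{
        /* returns the channel center frequency in MHz for the given band */
        int mhz = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);

        /* wireless extensions encode the value as m * 10^e; this yields Hz */
        fwrq->m = 100000 * mhz;
        fwrq->e = 1;
}
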
index b59cfbe0276b830680bcc172b4393836333618d8..a889fd66fc63190ec7f393ae8e64a6d117281267 100644 (file)
@@ -56,6 +56,15 @@ enum ath_device_state {
        ATH_HW_INITIALIZED,
 };
 
+enum ath_op_flags {
+       ATH_OP_INVALID,
+       ATH_OP_BEACONS,
+       ATH_OP_ANI_RUN,
+       ATH_OP_PRIM_STA_VIF,
+       ATH_OP_HW_RESET,
+       ATH_OP_SCANNING,
+};
+
 enum ath_bus_type {
        ATH_PCI,
        ATH_AHB,
@@ -63,7 +72,7 @@ enum ath_bus_type {
 };
 
 struct reg_dmn_pair_mapping {
-       u16 regDmnEnum;
+       u16 reg_domain;
        u16 reg_5ghz_ctl;
        u16 reg_2ghz_ctl;
 };
@@ -130,6 +139,7 @@ struct ath_common {
        struct ieee80211_hw *hw;
        int debug_mask;
        enum ath_device_state state;
+       unsigned long op_flags;
 
        struct ath_ani ani;
 
@@ -161,6 +171,9 @@ struct ath_common {
        bool btcoex_enabled;
        bool disable_ani;
        bool bt_ant_diversity;
+
+       int last_rssi;
+       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 };
 
 struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
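
ath_op_flags defines bit positions for the new op_flags word, intended for use with the standard atomic bitops. A hedged sketch of how callers might use it; the call sites below are illustrative and not part of this patch:

/* assumes the ath_op_flags enum and struct ath_common declared above */
static void example_scan_start(struct ath_common *common)
{
        set_bit(ATH_OP_SCANNING, &common->op_flags);
}

static bool example_ani_allowed(struct ath_common *common)
{
        /* e.g. only run ANI while beacons are up and no reset is in flight */
        return test_bit(ATH_OP_BEACONS, &common->op_flags) &&
               !test_bit(ATH_OP_HW_RESET, &common->op_flags);
}

static void example_scan_done(struct ath_common *common)
{
        clear_bit(ATH_OP_SCANNING, &common->op_flags);
}
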
index d44d618b05f91f17defaddbeed4946fe27b6aa17..a79499c8235009f701073c83b3974d66d183e001 100644 (file)
@@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * ath10k_ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-                                void *per_transfer_context,
-                                u32 buffer,
-                                unsigned int nbytes,
-                                unsigned int transfer_id,
-                                unsigned int flags)
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+                         void *per_transfer_context,
+                         u32 buffer,
+                         unsigned int nbytes,
+                         unsigned int transfer_id,
+                         unsigned int flags)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
         *
         * For the lack of a better place do the check here.
         */
-       BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+       BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-       BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+       BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
                     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
 
        ret = ath10k_pci_wake(ar);
index 67dbde6a5c7430fbd4507355755e479d58d4d289..8eb7f99ed992277b0efb3e7ae4f971b8e4eb7557 100644 (file)
@@ -23,7 +23,7 @@
 
 /* Maximum number of Copy Engine's supported */
 #define CE_COUNT_MAX 8
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN     8
@@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   unsigned int transfer_id,
                   unsigned int flags);
 
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+                         void *per_transfer_context,
+                         u32 buffer,
+                         unsigned int nbytes,
+                         unsigned int transfer_id,
+                         unsigned int flags);
+
 void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
                                void (*send_cb)(struct ath10k_ce_pipe *),
                                int disable_interrupts);
index 3b59af3bddf4a6e4506f027472c41832f41c43c9..ebc5fc2ede75cbac75da3a2f789a946d4e1274ad 100644 (file)
@@ -55,8 +55,7 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
        ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
 
-       ar->is_target_paused = true;
-       wake_up(&ar->event_queue);
+       complete(&ar->target_suspend);
 }
 
 static int ath10k_init_connect_htc(struct ath10k *ar)
@@ -470,8 +469,12 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                                if (index == ie_len)
                                        break;
 
-                               if (data[index] & (1 << bit))
+                               if (data[index] & (1 << bit)) {
+                                       ath10k_dbg(ATH10K_DBG_BOOT,
+                                                  "Enabling feature bit: %i\n",
+                                                  i);
                                        __set_bit(i, ar->fw_features);
+                               }
                        }
 
                        ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
@@ -699,6 +702,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
        init_completion(&ar->scan.started);
        init_completion(&ar->scan.completed);
        init_completion(&ar->scan.on_channel);
+       init_completion(&ar->target_suspend);
 
        init_completion(&ar->install_key_done);
        init_completion(&ar->vdev_setup_done);
@@ -722,8 +726,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
        INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
        skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
 
-       init_waitqueue_head(&ar->event_queue);
-
        INIT_WORK(&ar->restart_work, ath10k_core_restart);
 
        return ar;
@@ -856,10 +858,34 @@ err:
 }
 EXPORT_SYMBOL(ath10k_core_start);
 
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+       int ret;
+
+       reinit_completion(&ar->target_suspend);
+
+       ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+       if (ret) {
+               ath10k_warn("could not suspend target (%d)\n", ret);
+               return ret;
+       }
+
+       ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
+       if (ret == 0) {
+               ath10k_warn("suspend timed out - target pause event never came\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
 void ath10k_core_stop(struct ath10k *ar)
 {
        lockdep_assert_held(&ar->conf_mutex);
 
+       /* try to suspend target */
+       ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
        ath10k_debug_stop(ar);
        ath10k_htc_stop(&ar->htc);
        ath10k_htt_detach(&ar->htt);
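
The wait queue plus is_target_paused flag gives way to a struct completion: the suspend path re-initializes it, issues the WMI suspend command, and blocks in wait_for_completion_timeout(), while the firmware's suspend-complete event simply calls complete(). A generic sketch of the pattern with illustrative names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct example_dev {
        struct completion target_suspend;
};

/* event/IRQ path: firmware reports that it has paused */
static void example_on_suspend_complete(struct example_dev *dev)
{
        complete(&dev->target_suspend);
}

static int example_wait_for_suspend(struct example_dev *dev)
{
        unsigned long time_left;

        reinit_completion(&dev->target_suspend);

        /* ... send the suspend command to the firmware here ... */

        time_left = wait_for_completion_timeout(&dev->target_suspend, 1 * HZ);
        if (!time_left)
                return -ETIMEDOUT;

        return 0;
}
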
index ade1781c7186c5d033a0d5bc5dda2c36b4c6a4ac..0e71979d837cf90888c74e4d85035c2a4d6fd4ef 100644 (file)
 
 #define ATH10K_MAX_NUM_MGMT_PENDING 128
 
+/* number of failed packets */
+#define ATH10K_KICKOUT_THRESHOLD 50
+
+/*
+ * Use insanely high numbers to make sure that the firmware implementation
+ * never kicks in; we already have the same functionality in hostapd. Unit
+ * is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
 struct ath10k;
 
 struct ath10k_skb_cb {
        dma_addr_t paddr;
-       bool is_mapped;
-       bool is_aborted;
        u8 vdev_id;
 
        struct {
                u8 tid;
                bool is_offchan;
-
-               u8 frag_len;
-               u8 pad_len;
+               struct ath10k_htt_txbuf *txbuf;
+               u32 txbuf_paddr;
        } __packed htt;
+
+       struct {
+               bool dtim_zero;
+               bool deliver_cab;
+       } bcn;
 } __packed;
 
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -70,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
        return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
 }
 
-static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
-{
-       if (ATH10K_SKB_CB(skb)->is_mapped)
-               return -EINVAL;
-
-       ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
-                                                  DMA_TO_DEVICE);
-
-       if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
-               return -EIO;
-
-       ATH10K_SKB_CB(skb)->is_mapped = true;
-       return 0;
-}
-
-static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
-{
-       if (!ATH10K_SKB_CB(skb)->is_mapped)
-               return -EINVAL;
-
-       dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
-                        DMA_TO_DEVICE);
-       ATH10K_SKB_CB(skb)->is_mapped = false;
-       return 0;
-}
-
 static inline u32 host_interest_item_address(u32 item_offset)
 {
        return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -211,6 +199,18 @@ struct ath10k_peer {
        struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
 };
 
+struct ath10k_sta {
+       struct ath10k_vif *arvif;
+
+       /* the following are protected by ar->data_lock */
+       u32 changed; /* IEEE80211_RC_* */
+       u32 bw;
+       u32 nss;
+       u32 smps;
+
+       struct work_struct update_wk;
+};
+
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
 
 struct ath10k_vif {
@@ -222,10 +222,17 @@ struct ath10k_vif {
        u32 beacon_interval;
        u32 dtim_period;
        struct sk_buff *beacon;
+       /* protected by data_lock */
+       bool beacon_sent;
 
        struct ath10k *ar;
        struct ieee80211_vif *vif;
 
+       bool is_started;
+       bool is_up;
+       u32 aid;
+       u8 bssid[ETH_ALEN];
+
        struct work_struct wep_key_work;
        struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
        u8 def_wep_key_idx;
@@ -235,7 +242,6 @@ struct ath10k_vif {
 
        union {
                struct {
-                       u8 bssid[ETH_ALEN];
                        u32 uapsd;
                } sta;
                struct {
@@ -249,13 +255,11 @@ struct ath10k_vif {
                        u32 noa_len;
                        u8 *noa_data;
                } ap;
-               struct {
-                       u8 bssid[ETH_ALEN];
-               } ibss;
        } u;
 
        u8 fixed_rate;
        u8 fixed_nss;
+       u8 force_sgi;
 };
 
 struct ath10k_vif_iter {
@@ -355,8 +359,7 @@ struct ath10k {
                const struct ath10k_hif_ops *ops;
        } hif;
 
-       wait_queue_head_t event_queue;
-       bool is_target_paused;
+       struct completion target_suspend;
 
        struct ath10k_bmi bmi;
        struct ath10k_wmi wmi;
@@ -412,6 +415,9 @@ struct ath10k {
        /* valid during scan; needed for mgmt rx during scan */
        struct ieee80211_channel *scan_channel;
 
+       /* current operating channel definition */
+       struct cfg80211_chan_def chandef;
+
        int free_vdev_map;
        int monitor_vdev_id;
        bool monitor_enabled;
@@ -470,6 +476,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
 void ath10k_core_destroy(struct ath10k *ar);
 
 int ath10k_core_start(struct ath10k *ar);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
 void ath10k_core_stop(struct ath10k *ar);
 int ath10k_core_register(struct ath10k *ar, u32 chip_id);
 void ath10k_core_unregister(struct ath10k *ar);
index 1773c36c71a01a179177b887a3485c9f05c505cf..a5824990bd2a8c789e69d18bb2e3f12f26548248 100644 (file)
@@ -92,7 +92,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
 
 #ifdef CONFIG_ATH10K_DEBUG
 __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
-                                     const char *fmt, ...);
+                              const char *fmt, ...);
 void ath10k_dbg_dump(enum ath10k_debug_mask mask,
                     const char *msg, const char *prefix,
                     const void *buf, size_t len);
index dcdea68bcc0a0ffd091eee16e0b74654ef3339b7..2ac7beacddca4b44705fbe2cb6af3fe8f67a7461 100644 (file)
 #include <linux/kernel.h>
 #include "core.h"
 
+struct ath10k_hif_sg_item {
+       u16 transfer_id;
+       void *transfer_context; /* NULL = tx completion callback not called */
+       void *vaddr; /* for debugging mostly */
+       u32 paddr;
+       u16 len;
+};
+
 struct ath10k_hif_cb {
        int (*tx_completion)(struct ath10k *ar,
                             struct sk_buff *wbuf,
@@ -31,11 +39,9 @@ struct ath10k_hif_cb {
 };
 
 struct ath10k_hif_ops {
-       /* Send the head of a buffer to HIF for transmission to the target. */
-       int (*send_head)(struct ath10k *ar, u8 pipe_id,
-                        unsigned int transfer_id,
-                        unsigned int nbytes,
-                        struct sk_buff *buf);
+       /* send a scatter-gather list to the target */
+       int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+                    struct ath10k_hif_sg_item *items, int n_items);
 
        /*
         * API to handle HIF-specific BMI message exchanges, this API is
@@ -86,12 +92,11 @@ struct ath10k_hif_ops {
 };
 
 
-static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
-                                      unsigned int transfer_id,
-                                      unsigned int nbytes,
-                                      struct sk_buff *buf)
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+                                  struct ath10k_hif_sg_item *items,
+                                  int n_items)
 {
-       return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+       return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
 }
 
 static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
index edc57ab505c893c3dd295d01a3f44fd2126e3dee..7f1bccd3597f1bb2a3b40d5a482f308e99e9e27c 100644 (file)
@@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
 static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
                                             struct sk_buff *skb)
 {
-       ath10k_skb_unmap(htc->ar->dev, skb);
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+       dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
 }
 
@@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
                    struct sk_buff *skb)
 {
        struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+       struct ath10k_hif_sg_item sg_item;
+       struct device *dev = htc->ar->dev;
        int credits = 0;
        int ret;
 
@@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 
        ath10k_htc_prepare_tx_skb(ep, skb);
 
-       ret = ath10k_skb_map(htc->ar->dev, skb);
+       skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+       ret = dma_mapping_error(dev, skb_cb->paddr);
        if (ret)
                goto err_credits;
 
-       ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
-                                  skb->len, skb);
+       sg_item.transfer_id = ep->eid;
+       sg_item.transfer_context = skb;
+       sg_item.vaddr = skb->data;
+       sg_item.paddr = skb_cb->paddr;
+       sg_item.len = skb->len;
+
+       ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
        if (ret)
                goto err_unmap;
 
        return 0;
 
 err_unmap:
-       ath10k_skb_unmap(htc->ar->dev, skb);
+       dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 err_credits:
        if (ep->tx_credit_flow_enabled) {
                spin_lock_bh(&htc->tx_lock);
@@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
        struct ath10k_htc *htc = &ar->htc;
        struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 
-       if (!skb) {
-               ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+       if (WARN_ON_ONCE(!skb))
                return 0;
-       }
 
        ath10k_htc_notify_tx_completion(ep, skb);
        /* the skb now belongs to the completion handler */
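
With the ath10k_skb_map()/ath10k_skb_unmap() wrappers gone, callers map the skb directly, must check the result with dma_mapping_error() before using it, and unmap on the error path. A minimal sketch of that sequence; the submit hook is hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* hypothetical hardware submit hook, not part of the patch */
static int example_hw_submit(dma_addr_t paddr, unsigned int len)
{
        return 0;
}

static int example_map_and_send(struct device *dev, struct sk_buff *skb)
{
        dma_addr_t paddr;
        int ret;

        paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, paddr))
                return -EIO;

        ret = example_hw_submit(paddr, skb->len);
        if (ret)
                dma_unmap_single(dev, paddr, skb->len, DMA_TO_DEVICE);

        return ret;
}
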
index b93ae355bc08aab513e6535c4d5dd48268d55d6b..654867fc1ae73bbd7a13cf4dc61f8ac89a0b7823 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/bug.h>
 #include <linux/interrupt.h>
+#include <linux/dmapool.h>
 
 #include "htc.h"
 #include "rx_desc.h"
@@ -1181,11 +1182,20 @@ struct htt_rx_info {
                u32 info1;
                u32 info2;
        } rate;
+
+       u32 tsf;
        bool fcs_err;
        bool amsdu_more;
        bool mic_err;
 };
 
+struct ath10k_htt_txbuf {
+       struct htt_data_tx_desc_frag frags[2];
+       struct ath10k_htc_hdr htc_hdr;
+       struct htt_cmd_hdr cmd_hdr;
+       struct htt_data_tx_desc cmd_tx;
+} __packed;
+
 struct ath10k_htt {
        struct ath10k *ar;
        enum ath10k_htc_ep_id eid;
@@ -1267,11 +1277,18 @@ struct ath10k_htt {
        struct sk_buff **pending_tx;
        unsigned long *used_msdu_ids; /* bitmap */
        wait_queue_head_t empty_tx_wq;
+       struct dma_pool *tx_pool;
 
        /* set if host-fw communication goes haywire
         * used to avoid further failures */
        bool rx_confused;
        struct tasklet_struct rx_replenish_task;
+
+       /* This is used to group tx/rx completions separately and process them
+        * in batches to reduce cache stalls */
+       struct tasklet_struct txrx_compl_task;
+       struct sk_buff_head tx_compl_q;
+       struct sk_buff_head rx_compl_q;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
 #endif
index fe8bd1b59f0e2651ee27b742087aedeb5e96a56c..cdcbe2de95f97d602cb086c301f0778aad5bc49c 100644 (file)
@@ -43,7 +43,7 @@
 
 
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
 static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
 {
@@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
        ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
-{
-       return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
-               htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
-}
-
 void ath10k_htt_rx_detach(struct ath10k_htt *htt)
 {
        int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
        tasklet_kill(&htt->rx_replenish_task);
+       tasklet_kill(&htt->txrx_compl_task);
+
+       skb_queue_purge(&htt->tx_compl_q);
+       skb_queue_purge(&htt->rx_compl_q);
 
        while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
                struct sk_buff *skb =
@@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
        int idx;
        struct sk_buff *msdu;
 
-       spin_lock_bh(&htt->rx_ring.lock);
+       lockdep_assert_held(&htt->rx_ring.lock);
 
-       if (ath10k_htt_rx_ring_elems(htt) == 0)
-               ath10k_warn("htt rx ring is empty!\n");
+       if (htt->rx_ring.fill_cnt == 0) {
+               ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+               return NULL;
+       }
 
        idx = htt->rx_ring.sw_rd_idx.msdu_payld;
        msdu = htt->rx_ring.netbufs_ring[idx];
@@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
        htt->rx_ring.sw_rd_idx.msdu_payld = idx;
        htt->rx_ring.fill_cnt--;
 
-       spin_unlock_bh(&htt->rx_ring.lock);
        return msdu;
 }
 
@@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;
 
-       if (ath10k_htt_rx_ring_elems(htt) == 0)
-               ath10k_warn("htt rx ring is empty!\n");
+       lockdep_assert_held(&htt->rx_ring.lock);
 
        if (htt->rx_confused) {
                ath10k_warn("htt is confused. refusing rx\n");
@@ -324,7 +322,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                 msdu->len + skb_tailroom(msdu),
                                 DMA_FROM_DEVICE);
 
-               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
+               ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
                                msdu->data, msdu->len + skb_tailroom(msdu));
 
                rx_desc = (struct htt_rx_desc *)msdu->data;
@@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;
+               msdu_chaining = msdu_chained;
 
                if (msdu_len_invalid)
                        msdu_len = 0;
@@ -417,8 +416,8 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                                         next->len + skb_tailroom(next),
                                         DMA_FROM_DEVICE);
 
-                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx: ",
-                                       next->data,
+                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
+                                       "htt rx chained: ", next->data,
                                        next->len + skb_tailroom(next));
 
                        skb_trim(next, 0);
@@ -427,13 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 
                        msdu->next = next;
                        msdu = next;
-                       msdu_chaining = 1;
-               }
-
-               if (msdu_len > 0) {
-                       /* This may suggest FW bug? */
-                       ath10k_warn("htt rx msdu len not consumed (%d)\n",
-                                   msdu_len);
                }
 
                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
@@ -535,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
        tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
                     (unsigned long)htt);
 
+       skb_queue_head_init(&htt->tx_compl_q);
+       skb_queue_head_init(&htt->rx_compl_q);
+
+       tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+                    (unsigned long)htt);
+
        ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;
@@ -638,6 +636,12 @@ struct amsdu_subframe_hdr {
        __be16 len;
 } __packed;
 
+static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+{
+       /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
+       return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+}
+
 static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                                struct htt_rx_info *info)
 {
@@ -687,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                case RX_MSDU_DECAP_NATIVE_WIFI:
                        /* pull decapped header and copy DA */
                        hdr = (struct ieee80211_hdr *)skb->data;
-                       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+                       hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
                        memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
                        skb_pull(skb, hdr_len);
 
@@ -751,7 +755,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
 
        /* This shouldn't happen. If it does than it may be a FW bug. */
        if (skb->next) {
-               ath10k_warn("received chained non A-MSDU frame\n");
+               ath10k_warn("htt rx received chained non A-MSDU frame\n");
                ath10k_htt_rx_free_msdu_chain(skb->next);
                skb->next = NULL;
        }
@@ -774,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
        case RX_MSDU_DECAP_NATIVE_WIFI:
                /* Pull decapped header */
                hdr = (struct ieee80211_hdr *)skb->data;
-               hdr_len = ieee80211_hdrlen(hdr->frame_control);
+               hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
                skb_pull(skb, hdr_len);
 
                /* Push original header */
@@ -852,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
        return false;
 }
 
+static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
+{
+       struct htt_rx_desc *rxd;
+       u32 flags;
+
+       rxd = (void *)skb->data - sizeof(*rxd);
+       flags = __le32_to_cpu(rxd->attention.flags);
+
+       if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
+               return true;
+
+       return false;
+}
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 {
        struct htt_rx_desc *rxd;
@@ -883,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
        return CHECKSUM_UNNECESSARY;
 }
 
+static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+{
+       struct sk_buff *next = msdu_head->next;
+       struct sk_buff *to_free = next;
+       int space;
+       int total_len = 0;
+
+       /* TODO: We might be able to optimize this by using
+        * skb_try_coalesce() or a similar method to
+        * reduce copying, or perhaps get mac80211 to
+        * provide a way to receive a list of skbs
+        * directly.
+        */
+
+       msdu_head->next = NULL;
+
+       /* Allocate total length all at once. */
+       while (next) {
+               total_len += next->len;
+               next = next->next;
+       }
+
+       space = total_len - skb_tailroom(msdu_head);
+       if ((space > 0) &&
+           (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+               /* TODO:  bump some rx-oom error stat */
+               /* put it back together so we can free the
+                * whole list at once.
+                */
+               msdu_head->next = to_free;
+               return -1;
+       }
+
+       /* Walk list again, copying contents into
+        * msdu_head
+        */
+       next = to_free;
+       while (next) {
+               skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
+                                         next->len);
+               next = next->next;
+       }
+
+       /* If here, we have consolidated skb.  Free the
+        * fragments and pass the main skb on up the
+        * stack.
+        */
+       ath10k_htt_rx_free_msdu_chain(to_free);
+       return 0;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                  struct htt_rx_indication *rx)
 {
@@ -894,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
        u8 *fw_desc;
        int i, j;
 
+       lockdep_assert_held(&htt->rx_ring.lock);
+
        memset(&info, 0, sizeof(info));
 
        fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
@@ -937,6 +1008,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        }
 
                        if (ath10k_htt_rx_has_decrypt_err(msdu_head)) {
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx dropping due to decrypt-err\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
@@ -944,13 +1017,16 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        status = info.status;
 
                        /* Skip mgmt frames while we handle this in WMI */
-                       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+                       if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+                           ath10k_htt_rx_is_mgmt(msdu_head)) {
+                               ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
                        if (status != HTT_RX_IND_MPDU_STATUS_OK &&
                            status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
+                           status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
                            !htt->ar->monitor_enabled) {
                                ath10k_dbg(ATH10K_DBG_HTT,
                                           "htt rx ignoring frame w/ status %d\n",
@@ -960,14 +1036,14 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        }
 
                        if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx CAC running\n");
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
 
-                       /* FIXME: we do not support chaining yet.
-                        * this needs investigation */
-                       if (msdu_chaining) {
-                               ath10k_warn("msdu_chaining is true\n");
+                       if (msdu_chaining &&
+                           (ath10k_unchain_msdu(msdu_head) < 0)) {
                                ath10k_htt_rx_free_msdu_chain(msdu_head);
                                continue;
                        }
@@ -975,12 +1051,22 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        info.skb     = msdu_head;
                        info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
                        info.mic_err = ath10k_htt_rx_has_mic_err(msdu_head);
+
+                       if (info.fcs_err)
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx has FCS err\n");
+
+                       if (info.mic_err)
+                               ath10k_dbg(ATH10K_DBG_HTT,
+                                          "htt rx has MIC err\n");
+
                        info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
                        info.signal += rx->ppdu.combined_rssi;
 
                        info.rate.info0 = rx->ppdu.info0;
                        info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
                        info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+                       info.tsf = __le32_to_cpu(rx->ppdu.tsf);
 
                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 
@@ -1014,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
 
        msdu_head = NULL;
        msdu_tail = NULL;
+
+       spin_lock_bh(&htt->rx_ring.lock);
        msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                                &msdu_head, &msdu_tail);
+       spin_unlock_bh(&htt->rx_ring.lock);
 
        ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
 
@@ -1095,7 +1184,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
 
        skb_trim(info.skb, info.skb->len - trim);
 
-       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt frag mpdu: ",
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
                        info.skb->data, info.skb->len);
        ath10k_process_rx(htt->ar, &info);
 
@@ -1107,6 +1196,45 @@ end:
        }
 }
 
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+                                      struct sk_buff *skb)
+{
+       struct ath10k_htt *htt = &ar->htt;
+       struct htt_resp *resp = (struct htt_resp *)skb->data;
+       struct htt_tx_done tx_done = {};
+       int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+       __le16 msdu_id;
+       int i;
+
+       lockdep_assert_held(&htt->tx_lock);
+
+       switch (status) {
+       case HTT_DATA_TX_STATUS_NO_ACK:
+               tx_done.no_ack = true;
+               break;
+       case HTT_DATA_TX_STATUS_OK:
+               break;
+       case HTT_DATA_TX_STATUS_DISCARD:
+       case HTT_DATA_TX_STATUS_POSTPONE:
+       case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+               tx_done.discard = true;
+               break;
+       default:
+               ath10k_warn("unhandled tx completion status %d\n", status);
+               tx_done.discard = true;
+               break;
+       }
+
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+                  resp->data_tx_completion.num_msdus);
+
+       for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+               msdu_id = resp->data_tx_completion.msdus[i];
+               tx_done.msdu_id = __le16_to_cpu(msdu_id);
+               ath10k_txrx_tx_unref(htt, &tx_done);
+       }
+}
+
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_htt *htt = &ar->htt;
@@ -1116,7 +1244,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn("unaligned htt message, expect trouble\n");
 
-       ath10k_dbg(ATH10K_DBG_HTT, "HTT RX, msg_type: 0x%0X\n",
+       ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);
        switch (resp->hdr.msg_type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
@@ -1125,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                complete(&htt->target_version_received);
                break;
        }
-       case HTT_T2H_MSG_TYPE_RX_IND: {
-               ath10k_htt_rx_handler(htt, &resp->rx_ind);
-               break;
-       }
+       case HTT_T2H_MSG_TYPE_RX_IND:
+               spin_lock_bh(&htt->rx_ring.lock);
+               __skb_queue_tail(&htt->rx_compl_q, skb);
+               spin_unlock_bh(&htt->rx_ring.lock);
+               tasklet_schedule(&htt->txrx_compl_task);
+               return;
        case HTT_T2H_MSG_TYPE_PEER_MAP: {
                struct htt_peer_map_event ev = {
                        .vdev_id = resp->peer_map.vdev_id,
@@ -1163,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
 
+               spin_lock_bh(&htt->tx_lock);
                ath10k_txrx_tx_unref(htt, &tx_done);
+               spin_unlock_bh(&htt->tx_lock);
                break;
        }
-       case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
-               struct htt_tx_done tx_done = {};
-               int status = MS(resp->data_tx_completion.flags,
-                               HTT_DATA_TX_STATUS);
-               __le16 msdu_id;
-               int i;
-
-               switch (status) {
-               case HTT_DATA_TX_STATUS_NO_ACK:
-                       tx_done.no_ack = true;
-                       break;
-               case HTT_DATA_TX_STATUS_OK:
-                       break;
-               case HTT_DATA_TX_STATUS_DISCARD:
-               case HTT_DATA_TX_STATUS_POSTPONE:
-               case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
-                       tx_done.discard = true;
-                       break;
-               default:
-                       ath10k_warn("unhandled tx completion status %d\n",
-                                   status);
-                       tx_done.discard = true;
-                       break;
-               }
-
-               ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
-                          resp->data_tx_completion.num_msdus);
-
-               for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
-                       msdu_id = resp->data_tx_completion.msdus[i];
-                       tx_done.msdu_id = __le16_to_cpu(msdu_id);
-                       ath10k_txrx_tx_unref(htt, &tx_done);
-               }
-               break;
-       }
+       case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+               spin_lock_bh(&htt->tx_lock);
+               __skb_queue_tail(&htt->tx_compl_q, skb);
+               spin_unlock_bh(&htt->tx_lock);
+               tasklet_schedule(&htt->txrx_compl_task);
+               return;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
                struct ath10k *ar = htt->ar;
                struct htt_security_indication *ev = &resp->security_indication;
@@ -1240,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        /* Free the indication buffer */
        dev_kfree_skb_any(skb);
 }
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+       struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+       struct htt_resp *resp;
+       struct sk_buff *skb;
+
+       spin_lock_bh(&htt->tx_lock);
+       while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+               ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+               dev_kfree_skb_any(skb);
+       }
+       spin_unlock_bh(&htt->tx_lock);
+
+       spin_lock_bh(&htt->rx_ring.lock);
+       while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+               resp = (struct htt_resp *)skb->data;
+               ath10k_htt_rx_handler(htt, &resp->rx_ind);
+               dev_kfree_skb_any(skb);
+       }
+       spin_unlock_bh(&htt->rx_ring.lock);
+}
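
The indication handlers now only queue the skb under the right lock and schedule a tasklet, which later drains both queues in one pass; that is the batching referred to by the txrx_compl_task comment in the header. A stripped-down sketch of the queue-and-drain pattern with illustrative names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_ctx {
        spinlock_t              lock;
        struct sk_buff_head     compl_q;
        struct tasklet_struct   compl_task;
};

/* hot path: queue the event and defer the heavy lifting */
static void example_on_event(struct example_ctx *ctx, struct sk_buff *skb)
{
        spin_lock_bh(&ctx->lock);
        __skb_queue_tail(&ctx->compl_q, skb);
        spin_unlock_bh(&ctx->lock);

        tasklet_schedule(&ctx->compl_task);
}

/* tasklet: drain everything queued so far in one batch */
static void example_compl_task(unsigned long ptr)
{
        struct example_ctx *ctx = (struct example_ctx *)ptr;
        struct sk_buff *skb;

        spin_lock_bh(&ctx->lock);
        while ((skb = __skb_dequeue(&ctx->compl_q)))
                dev_kfree_skb_any(skb); /* real code would process it first */
        spin_unlock_bh(&ctx->lock);
}

static void example_ctx_init(struct example_ctx *ctx)
{
        spin_lock_init(&ctx->lock);
        skb_queue_head_init(&ctx->compl_q);
        tasklet_init(&ctx->compl_task, example_compl_task, (unsigned long)ctx);
}
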
index f1d36d2d27235aeec979fed652d2925ec6e94513..7a3e2e40dd5c587215b94e859705f9a24da9acb5 100644 (file)
@@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
                return -ENOMEM;
        }
 
+       htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+                                      sizeof(struct ath10k_htt_txbuf), 4, 0);
+       if (!htt->tx_pool) {
+               kfree(htt->used_msdu_ids);
+               kfree(htt->pending_tx);
+               return -ENOMEM;
+       }
+
        return 0;
 }
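
dma_pool_create() sets up a pool of small, fixed-size coherent DMA buffers (one ath10k_htt_txbuf per in-flight frame here); dma_pool_alloc() returns a CPU pointer and fills in the DMA address, and dma_pool_free()/dma_pool_destroy() release them. A hedged lifecycle sketch reusing the names from this patch; the pool name string is illustrative:

#include <linux/dmapool.h>

static int example_tx_pool_init(struct ath10k_htt *htt)
{
        htt->tx_pool = dma_pool_create("example htt tx pool", htt->ar->dev,
                                       sizeof(struct ath10k_htt_txbuf),
                                       4 /* align */, 0 /* boundary */);
        if (!htt->tx_pool)
                return -ENOMEM;
        return 0;
}

static struct ath10k_htt_txbuf *example_txbuf_get(struct ath10k_htt *htt,
                                                  dma_addr_t *paddr)
{
        /* returns a CPU pointer and writes the DMA address to *paddr */
        return dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, paddr);
}

static void example_txbuf_put(struct ath10k_htt *htt,
                              struct ath10k_htt_txbuf *buf, dma_addr_t paddr)
{
        dma_pool_free(htt->tx_pool, buf, paddr);
}

static void example_tx_pool_fini(struct ath10k_htt *htt)
{
        dma_pool_destroy(htt->tx_pool);
}
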
 
@@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
        struct htt_tx_done tx_done = {0};
        int msdu_id;
 
-       /* No locks needed. Called after communication with the device has
-        * been stopped. */
-
+       spin_lock_bh(&htt->tx_lock);
        for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
                if (!test_bit(msdu_id, htt->used_msdu_ids))
                        continue;
@@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 
                ath10k_txrx_tx_unref(htt, &tx_done);
        }
+       spin_unlock_bh(&htt->tx_lock);
 }
 
 void ath10k_htt_tx_detach(struct ath10k_htt *htt)
@@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
        ath10k_htt_tx_cleanup_pending(htt);
        kfree(htt->pending_tx);
        kfree(htt->used_msdu_ids);
+       dma_pool_destroy(htt->tx_pool);
        return;
 }
 
@@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                goto err_free_msdu_id;
        }
 
-       res = ath10k_skb_map(dev, msdu);
+       skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+                                      DMA_TO_DEVICE);
+       res = dma_mapping_error(dev, skb_cb->paddr);
        if (res)
                goto err_free_txdesc;
 
@@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-       skb_cb->htt.frag_len = 0;
-       skb_cb->htt.pad_len = 0;
+       skb_cb->htt.txbuf = NULL;
 
        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
@@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        return 0;
 
 err_unmap_msdu:
-       ath10k_skb_unmap(dev, msdu);
+       dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_txdesc:
        dev_kfree_skb_any(txdesc);
 err_free_msdu_id:
@@ -375,19 +384,19 @@ err:
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
        struct device *dev = htt->ar->dev;
-       struct htt_cmd *cmd;
-       struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
-       struct sk_buff *txdesc = NULL;
-       bool use_frags;
-       u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
-       u8 tid;
-       int prefetch_len, desc_len;
-       int msdu_id = -1;
+       struct ath10k_hif_sg_item sg_items[2];
+       struct htt_data_tx_desc_frag *frags;
+       u8 vdev_id = skb_cb->vdev_id;
+       u8 tid = skb_cb->htt.tid;
+       int prefetch_len;
        int res;
-       u8 flags0;
-       u16 flags1;
+       u8 flags0 = 0;
+       u16 msdu_id, flags1 = 0;
+       dma_addr_t paddr;
+       u32 frags_paddr;
+       bool use_frags;
 
        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
@@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
 
-       desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-
-       txdesc = ath10k_htc_alloc_skb(desc_len);
-       if (!txdesc) {
-               res = -ENOMEM;
-               goto err_free_msdu_id;
-       }
-
        /* Since HTT 3.0 there is no separate mgmt tx command. However in case
         * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx
         * fragment list the host driver specifies the frame pointer directly. */
        use_frags = htt->target_version_major < 3 ||
                    !ieee80211_is_mgmt(hdr->frame_control);
 
-       if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
-               ath10k_warn("htt alignment check failed. dropping packet.\n");
-               res = -EIO;
-               goto err_free_txdesc;
-       }
+       skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+                                          &paddr);
+       if (!skb_cb->htt.txbuf)
+               goto err_free_msdu_id;
+       skb_cb->htt.txbuf_paddr = paddr;
 
-       if (use_frags) {
-               skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
-               skb_cb->htt.pad_len = (unsigned long)msdu->data -
-                                     round_down((unsigned long)msdu->data, 4);
+       skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+                                      DMA_TO_DEVICE);
+       res = dma_mapping_error(dev, skb_cb->paddr);
+       if (res)
+               goto err_free_txbuf;
 
-               skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-       } else {
-               skb_cb->htt.frag_len = 0;
-               skb_cb->htt.pad_len = 0;
-       }
+       if (likely(use_frags)) {
+               frags = skb_cb->htt.txbuf->frags;
 
-       res = ath10k_skb_map(dev, msdu);
-       if (res)
-               goto err_pull_txfrag;
-
-       if (use_frags) {
-               dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
-                                       DMA_TO_DEVICE);
-
-               /* tx fragment list must be terminated with zero-entry */
-               tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
-               tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
-                                                 skb_cb->htt.frag_len +
-                                                 skb_cb->htt.pad_len);
-               tx_frags[0].len   = __cpu_to_le32(msdu->len -
-                                                 skb_cb->htt.frag_len -
-                                                 skb_cb->htt.pad_len);
-               tx_frags[1].paddr = __cpu_to_le32(0);
-               tx_frags[1].len   = __cpu_to_le32(0);
-
-               dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
-                                          DMA_TO_DEVICE);
-       }
+               frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
+               frags[0].len = __cpu_to_le32(msdu->len);
+               frags[1].paddr = 0;
+               frags[1].len = 0;
 
-       ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
-                  (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
-       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
-                       msdu->data, msdu->len);
+               flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
-       skb_put(txdesc, desc_len);
-       cmd = (struct htt_cmd *)txdesc->data;
+               frags_paddr = skb_cb->htt.txbuf_paddr;
+       } else {
+               flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
-       tid = ATH10K_SKB_CB(msdu)->htt.tid;
+               frags_paddr = skb_cb->paddr;
+       }
 
-       ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
+       /* Normally all commands go through HTC which manages tx credits for
+        * each endpoint and notifies when tx is completed.
+        *
+        * The HTT endpoint is creditless, so there is no need to care about
+        * HTC flags. That makes it trivial to fill in the HTC header here.
+        *
+        * MSDU transmission is considered completed upon HTT event. This
+        * implies no relevant resources can be freed until after the event is
+        * received. That is why the HTC tx completion handler itself is
+        * ignored: transfer_context is set to NULL for all sg items.
+        *
+        * There is simply no point in pushing HTT TX_FRM through the HTC tx
+        * path, as it is a waste of resources. Bypassing HTC avoids extra
+        * memory allocations, compresses the data structures and thus
+        * improves performance. */
+
+       skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+       skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+                       sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+                       sizeof(skb_cb->htt.txbuf->cmd_tx) +
+                       prefetch_len);
+       skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
-       flags0  = 0;
        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
-       flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 
-       if (use_frags)
-               flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-       else
-               flags0 |= SM(ATH10K_HW_TXRX_MGMT,
-                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+       flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 
-       flags1  = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
-       cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
-       cmd->data_tx.flags0      = flags0;
-       cmd->data_tx.flags1      = __cpu_to_le16(flags1);
-       cmd->data_tx.len         = __cpu_to_le16(msdu->len -
-                                                skb_cb->htt.frag_len -
-                                                skb_cb->htt.pad_len);
-       cmd->data_tx.id          = __cpu_to_le16(msdu_id);
-       cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
-       cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);
-
-       memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
+       skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+       skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+       skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+       skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+       skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+       skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+       skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+       ath10k_dbg(ATH10K_DBG_HTT,
+                  "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+                  flags0, flags1, msdu->len, msdu_id, frags_paddr,
+                  (u32)skb_cb->paddr, vdev_id, tid);
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+                       msdu->data, msdu->len);
 
-       res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+       sg_items[0].transfer_id = 0;
+       sg_items[0].transfer_context = NULL;
+       sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+       sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+                           sizeof(skb_cb->htt.txbuf->frags);
+       sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+                         sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+                         sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+       sg_items[1].transfer_id = 0;
+       sg_items[1].transfer_context = NULL;
+       sg_items[1].vaddr = msdu->data;
+       sg_items[1].paddr = skb_cb->paddr;
+       sg_items[1].len = prefetch_len;
+
+       res = ath10k_hif_tx_sg(htt->ar,
+                              htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+                              sg_items, ARRAY_SIZE(sg_items));
        if (res)
                goto err_unmap_msdu;
 
        return 0;
 
 err_unmap_msdu:
-       ath10k_skb_unmap(dev, msdu);
-err_pull_txfrag:
-       skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-err_free_txdesc:
-       dev_kfree_skb_any(txdesc);
+       dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txbuf:
+       dma_pool_free(htt->tx_pool,
+                     skb_cb->htt.txbuf,
+                     skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
        spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[msdu_id] = NULL;
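
For reference while reading the reworked data-tx hunk above: the new path hands the HIF layer two scatter/gather items, one covering the small HTC/HTT command headers carved out of the per-MSDU DMA-pool buffer (skipping the frag list at its start) and one covering only the prefetch portion of the frame, with transfer_context left NULL because completion is reported via the HTT event rather than the HTC callback. The sketch below is a standalone userspace illustration of that layout only; the struct names, field sizes and the fill_sg() helper are illustrative stand-ins, not the driver's definitions.

#include <stddef.h>
#include <stdint.h>

struct hif_sg_item {                  /* illustrative, not the driver's type */
	void     *transfer_context;   /* NULL: completion comes via HTT event */
	void     *vaddr;
	uint32_t  paddr;
	uint16_t  len;
};

struct txbuf {                        /* hypothetical DMA-pool allocation */
	uint8_t frags[16];            /* fragment list read by the target */
	uint8_t htc_hdr[8];
	uint8_t cmd_hdr[4];
	uint8_t cmd_tx[24];
};

static void fill_sg(struct hif_sg_item it[2],
		    struct txbuf *buf, uint32_t buf_paddr,
		    void *msdu, uint32_t msdu_paddr, uint16_t prefetch_len)
{
	/* item 0: the command header region, skipping the frag list */
	it[0].transfer_context = NULL;
	it[0].vaddr = buf->htc_hdr;
	it[0].paddr = buf_paddr + offsetof(struct txbuf, htc_hdr);
	it[0].len   = sizeof(buf->htc_hdr) + sizeof(buf->cmd_hdr) +
		      sizeof(buf->cmd_tx);

	/* item 1: only the prefetch bytes of the payload go inline */
	it[1].transfer_context = NULL;
	it[1].vaddr = msdu;
	it[1].paddr = msdu_paddr;
	it[1].len   = prefetch_len;
}

int main(void)
{
	struct txbuf buf;
	uint8_t frame[64];
	struct hif_sg_item items[2];

	fill_sg(items, &buf, 0x1000, frame, 0x2000, sizeof(frame));
	return (int)items[0].len;     /* keep the compiler from eliding it */
}
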
index f1505a25d8109b3cf1a315c548aad9348ea0e441..35fc44e281f57968171283d7d336cce5b20eddac 100644 (file)
@@ -205,8 +205,11 @@ enum ath10k_mcast2ucast_mode {
 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS     0x0006c000
 #define PCIE_LOCAL_BASE_ADDRESS                        0x00080000
 
+#define SOC_RESET_CONTROL_ADDRESS              0x00000000
 #define SOC_RESET_CONTROL_OFFSET               0x00000000
 #define SOC_RESET_CONTROL_SI0_RST_MASK         0x00000001
+#define SOC_RESET_CONTROL_CE_RST_MASK          0x00040000
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK    0x00000040
 #define SOC_CPU_CLOCK_OFFSET                   0x00000020
 #define SOC_CPU_CLOCK_STANDARD_LSB             0
 #define SOC_CPU_CLOCK_STANDARD_MASK            0x00000003
@@ -216,6 +219,8 @@ enum ath10k_mcast2ucast_mode {
 #define SOC_LPO_CAL_OFFSET                     0x000000e0
 #define SOC_LPO_CAL_ENABLE_LSB                 20
 #define SOC_LPO_CAL_ENABLE_MASK                        0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS          0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK      0x00000004
 
 #define SOC_CHIP_ID_ADDRESS                    0x000000ec
 #define SOC_CHIP_ID_REV_LSB                    8
@@ -273,6 +278,7 @@ enum ath10k_mcast2ucast_mode {
 #define PCIE_INTR_CAUSE_ADDRESS                        0x000c
 #define PCIE_INTR_CLR_ADDRESS                  0x0014
 #define SCRATCH_3_ADDRESS                      0x0030
+#define CPU_INTR_ADDRESS                       0x0010
 
 /* Firmware indications to the Host via SCRATCH_3 register. */
 #define FW_INDICATOR_ADDRESS   (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
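
The register additions above all follow the driver's *_ADDRESS / *_LSB / *_MASK naming, and the tx hunks earlier build flags0/flags1 with an SM() helper. Below is a minimal standalone sketch of that shift-and-mask convention, assuming the usual token-pasting definitions of SM()/MS(); only the SOC_CPU_CLOCK_STANDARD values are taken from the context above, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define SOC_CPU_CLOCK_STANDARD_LSB   0           /* from the hunk above */
#define SOC_CPU_CLOCK_STANDARD_MASK  0x00000003

/* assumed convention: build or extract a field from its LSB/MASK pair */
#define SM(v, f) (((uint32_t)(v) << f##_LSB) & f##_MASK)
#define MS(v, f) (((uint32_t)(v) & f##_MASK) >> f##_LSB)

int main(void)
{
	uint32_t reg = 0;

	reg |= SM(2, SOC_CPU_CLOCK_STANDARD);    /* set the 2-bit field */
	printf("reg=0x%08x standard=%u\n", (unsigned int)reg,
	       (unsigned int)MS(reg, SOC_CPU_CLOCK_STANDARD));
	return 0;
}
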
index 776e364eadcd76c82ed37a64e20601f5bb1926ab..511a2f81e7afc190419623235cdbefe9a66e4039 100644 (file)
@@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 
        ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
        if (ret) {
-               ath10k_warn("Failed to create wmi peer: %i\n", ret);
+               ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
+                           addr, vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
        if (ret) {
-               ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
+               ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
+                           addr, vdev_id, ret);
                return ret;
        }
        spin_lock_bh(&ar->data_lock);
@@ -339,6 +341,51 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
        return 0;
 }
 
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       u32 param;
+       int ret;
+
+       param = ar->wmi.pdev_param->sta_kickout_th;
+       ret = ath10k_wmi_pdev_set_param(ar, param,
+                                       ATH10K_KICKOUT_THRESHOLD);
+       if (ret) {
+               ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+                                       ATH10K_KEEPALIVE_MIN_IDLE);
+       if (ret) {
+               ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+                                       ATH10K_KEEPALIVE_MAX_IDLE);
+       if (ret) {
+               ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+                                       ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+       if (ret) {
+               ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int  ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
 {
        struct ath10k *ar = arvif->ar;
@@ -444,8 +491,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 static int ath10k_vdev_start(struct ath10k_vif *arvif)
 {
        struct ath10k *ar = arvif->ar;
-       struct ieee80211_conf *conf = &ar->hw->conf;
-       struct ieee80211_channel *channel = conf->chandef.chan;
+       struct cfg80211_chan_def *chandef = &ar->chandef;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;
 
@@ -457,16 +503,14 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
        arg.dtim_period = arvif->dtim_period;
        arg.bcn_intval = arvif->beacon_interval;
 
-       arg.channel.freq = channel->center_freq;
-
-       arg.channel.band_center_freq1 = conf->chandef.center_freq1;
-
-       arg.channel.mode = chan_to_phymode(&conf->chandef);
+       arg.channel.freq = chandef->chan->center_freq;
+       arg.channel.band_center_freq1 = chandef->center_freq1;
+       arg.channel.mode = chan_to_phymode(chandef);
 
        arg.channel.min_power = 0;
-       arg.channel.max_power = channel->max_power * 2;
-       arg.channel.max_reg_power = channel->max_reg_power * 2;
-       arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+       arg.channel.max_power = chandef->chan->max_power * 2;
+       arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+       arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                arg.ssid = arvif->u.ap.ssid;
@@ -475,7 +519,7 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
 
                /* For now allow DFS for AP mode */
                arg.channel.chan_radar =
-                       !!(channel->flags & IEEE80211_CHAN_RADAR);
+                       !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
        } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
                arg.ssid = arvif->vif->bss_conf.ssid;
                arg.ssid_len = arvif->vif->bss_conf.ssid_len;
@@ -488,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
 
        ret = ath10k_wmi_vdev_start(ar, &arg);
        if (ret) {
-               ath10k_warn("WMI vdev start failed: ret %d\n", ret);
+               ath10k_warn("WMI vdev %i start failed: ret %d\n",
+                           arg.vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn("vdev setup failed %d\n", ret);
+               ath10k_warn("vdev %i setup failed %d\n",
+                           arg.vdev_id, ret);
                return ret;
        }
 
@@ -512,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 
        ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
        if (ret) {
-               ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
+               ath10k_warn("WMI vdev %i stop failed: ret %d\n",
+                           arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn("vdev setup failed %d\n", ret);
+               ath10k_warn("vdev %i setup sync failed %d\n",
+                           arvif->vdev_id, ret);
                return ret;
        }
 
@@ -527,7 +575,8 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
 
 static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 {
-       struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
+       struct cfg80211_chan_def *chandef = &ar->chandef;
+       struct ieee80211_channel *channel = chandef->chan;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;
 
@@ -540,11 +589,11 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
-       arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
+       arg.channel.band_center_freq1 = chandef->center_freq1;
 
        /* TODO: set this up dynamically - what if we
           don't have any vifs? */
-       arg.channel.mode = chan_to_phymode(&ar->hw->conf.chandef);
+       arg.channel.mode = chan_to_phymode(chandef);
        arg.channel.chan_radar =
                        !!(channel->flags & IEEE80211_CHAN_RADAR);
 
@@ -555,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 
        ret = ath10k_wmi_vdev_start(ar, &arg);
        if (ret) {
-               ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
+               ath10k_warn("Monitor vdev %i start failed: ret %d\n",
+                           vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret) {
-               ath10k_warn("Monitor vdev setup failed %d\n", ret);
+               ath10k_warn("Monitor vdev %i setup failed %d\n",
+                           vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
        if (ret) {
-               ath10k_warn("Monitor vdev up failed: %d\n", ret);
+               ath10k_warn("Monitor vdev %i up failed: %d\n",
+                           vdev_id, ret);
                goto vdev_stop;
        }
 
@@ -579,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 vdev_stop:
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+               ath10k_warn("Monitor vdev %i stop failed: %d\n",
+                           ar->monitor_vdev_id, ret);
 
        return ret;
 }
@@ -602,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
 
        ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev down failed: %d\n", ret);
+               ath10k_warn("Monitor vdev %i down failed: %d\n",
+                           ar->monitor_vdev_id, ret);
 
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
-               ath10k_warn("Monitor vdev stop failed: %d\n", ret);
+               ath10k_warn("Monitor vdev %i stop failed: %d\n",
+                           ar->monitor_vdev_id, ret);
 
        ret = ath10k_vdev_setup_sync(ar);
        if (ret)
-               ath10k_warn("Monitor_down sync failed: %d\n", ret);
+               ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
+                           ar->monitor_vdev_id, ret);
 
        ar->monitor_enabled = false;
        return ret;
@@ -640,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
                                     WMI_VDEV_TYPE_MONITOR,
                                     0, ar->mac_addr);
        if (ret) {
-               ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
+               ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
+                           ar->monitor_vdev_id, ret);
                goto vdev_fail;
        }
 
@@ -669,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
 
        ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
        if (ret) {
-               ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
+               ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
+                           ar->monitor_vdev_id, ret);
                return ret;
        }
 
@@ -791,6 +849,22 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 
        if (!info->enable_beacon) {
                ath10k_vdev_stop(arvif);
+
+               arvif->is_started = false;
+               arvif->is_up = false;
+
+               spin_lock_bh(&arvif->ar->data_lock);
+               if (arvif->beacon) {
+                       dma_unmap_single(arvif->ar->dev,
+                                        ATH10K_SKB_CB(arvif->beacon)->paddr,
+                                        arvif->beacon->len, DMA_TO_DEVICE);
+                       dev_kfree_skb_any(arvif->beacon);
+
+                       arvif->beacon = NULL;
+                       arvif->beacon_sent = false;
+               }
+               spin_unlock_bh(&arvif->ar->data_lock);
+
                return;
        }
 
@@ -800,12 +874,21 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
        if (ret)
                return;
 
-       ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, 0, info->bssid);
+       arvif->aid = 0;
+       memcpy(arvif->bssid, info->bssid, ETH_ALEN);
+
+       ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+                                arvif->bssid);
        if (ret) {
-               ath10k_warn("Failed to bring up VDEV: %d\n",
-                           arvif->vdev_id);
+               ath10k_warn("Failed to bring up vdev %d: %i\n",
+                           arvif->vdev_id, ret);
+               ath10k_vdev_stop(arvif);
                return;
        }
+
+       arvif->is_started = true;
+       arvif->is_up = true;
+
        ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
 }
 
@@ -824,18 +907,18 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                        ath10k_warn("Failed to delete IBSS self peer:%pM for VDEV:%d ret:%d\n",
                                    self_peer, arvif->vdev_id, ret);
 
-               if (is_zero_ether_addr(arvif->u.ibss.bssid))
+               if (is_zero_ether_addr(arvif->bssid))
                        return;
 
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
-                                        arvif->u.ibss.bssid);
+                                        arvif->bssid);
                if (ret) {
                        ath10k_warn("Failed to delete IBSS BSSID peer:%pM for VDEV:%d ret:%d\n",
-                                   arvif->u.ibss.bssid, arvif->vdev_id, ret);
+                                   arvif->bssid, arvif->vdev_id, ret);
                        return;
                }
 
-               memset(arvif->u.ibss.bssid, 0, ETH_ALEN);
+               memset(arvif->bssid, 0, ETH_ALEN);
 
                return;
        }
@@ -878,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
                                                  conf->dynamic_ps_timeout);
                if (ret) {
-                       ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
+                                   arvif->vdev_id, ret);
                        return ret;
                }
        } else {
@@ -1017,7 +1100,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                                   struct wmi_peer_assoc_complete_arg *arg)
 {
        const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       int smps;
        int i, n;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -1063,17 +1145,6 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                arg->peer_flags |= WMI_PEER_STBC;
        }
 
-       smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
-       smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
-       if (smps == WLAN_HT_CAP_SM_PS_STATIC) {
-               arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
-               arg->peer_flags |= WMI_PEER_STATIC_MIMOPS;
-       } else if (smps == WLAN_HT_CAP_SM_PS_DYNAMIC) {
-               arg->peer_flags |= WMI_PEER_SPATIAL_MUX;
-               arg->peer_flags |= WMI_PEER_DYN_MIMOPS;
-       }
-
        if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
                arg->peer_rate_caps |= WMI_RC_TS_FLAG;
        else if (ht_cap->mcs.rx_mask[1])
@@ -1083,8 +1154,23 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
                        arg->peer_ht_rates.rates[n++] = i;
 
-       arg->peer_ht_rates.num_rates = n;
-       arg->peer_num_spatial_streams = max((n+7) / 8, 1);
+       /*
+        * This is a workaround for HT-enabled STAs which break the spec
+        * and have no HT capabilities RX mask (no HT RX MCS map).
+        *
+        * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+        * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+        *
+        * Firmware asserts if such a situation occurs.
+        */
+       if (n == 0) {
+               arg->peer_ht_rates.num_rates = 8;
+               for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+                       arg->peer_ht_rates.rates[i] = i;
+       } else {
+               arg->peer_ht_rates.num_rates = n;
+               arg->peer_num_spatial_streams = sta->rx_nss;
+       }
 
        ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
                   arg->addr,
@@ -1092,27 +1178,20 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                   arg->peer_num_spatial_streams);
 }
 
-static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
-                                      struct ath10k_vif *arvif,
-                                      struct ieee80211_sta *sta,
-                                      struct ieee80211_bss_conf *bss_conf,
-                                      struct wmi_peer_assoc_complete_arg *arg)
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+                                   struct ath10k_vif *arvif,
+                                   struct ieee80211_sta *sta)
 {
        u32 uapsd = 0;
        u32 max_sp = 0;
+       int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (sta->wme)
-               arg->peer_flags |= WMI_PEER_QOS;
-
        if (sta->wme && sta->uapsd_queues) {
                ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
                           sta->uapsd_queues, sta->max_sp);
 
-               arg->peer_flags |= WMI_PEER_APSD;
-               arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
-
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
                                 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
@@ -1130,35 +1209,40 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
                if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
                        max_sp = sta->max_sp;
 
-               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
-                                          sta->addr,
-                                          WMI_AP_PS_PEER_PARAM_UAPSD,
-                                          uapsd);
+               ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                                sta->addr,
+                                                WMI_AP_PS_PEER_PARAM_UAPSD,
+                                                uapsd);
+               if (ret) {
+                       ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
 
-               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
-                                          sta->addr,
-                                          WMI_AP_PS_PEER_PARAM_MAX_SP,
-                                          max_sp);
+               ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+                                                sta->addr,
+                                                WMI_AP_PS_PEER_PARAM_MAX_SP,
+                                                max_sp);
+               if (ret) {
+                       ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
 
                /* TODO: set this up based on the STA listen interval and
                   beacon interval. We don't know sta->listen_interval yet -
                   a mac80211 patch is required. Use 10 seconds for now. */
-               ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
-                                          sta->addr,
-                                          WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
-                                          10);
+               ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+                                       WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
+               if (ret) {
+                       ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
        }
-}
 
-static void ath10k_peer_assoc_h_qos_sta(struct ath10k *ar,
-                                       struct ath10k_vif *arvif,
-                                       struct ieee80211_sta *sta,
-                                       struct ieee80211_bss_conf *bss_conf,
-                                       struct wmi_peer_assoc_complete_arg *arg)
-{
-       if (bss_conf->qos)
-               arg->peer_flags |= WMI_PEER_QOS;
+       return 0;
 }
 
 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
@@ -1211,10 +1295,17 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
 {
        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_AP:
-               ath10k_peer_assoc_h_qos_ap(ar, arvif, sta, bss_conf, arg);
+               if (sta->wme)
+                       arg->peer_flags |= WMI_PEER_QOS;
+
+               if (sta->wme && sta->uapsd_queues) {
+                       arg->peer_flags |= WMI_PEER_APSD;
+                       arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+               }
                break;
        case WMI_VDEV_TYPE_STA:
-               ath10k_peer_assoc_h_qos_sta(ar, arvif, sta, bss_conf, arg);
+               if (bss_conf->qos)
+                       arg->peer_flags |= WMI_PEER_QOS;
                break;
        default:
                break;
@@ -1293,6 +1384,33 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
        return 0;
 }
 
+static const u32 ath10k_smps_map[] = {
+       [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+       [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+       [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+       [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+                                 const u8 *addr,
+                                 const struct ieee80211_sta_ht_cap *ht_cap)
+{
+       int smps;
+
+       if (!ht_cap->ht_supported)
+               return 0;
+
+       smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+       smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+       if (smps >= ARRAY_SIZE(ath10k_smps_map))
+               return -EINVAL;
+
+       return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+                                        WMI_PEER_SMPS_STATE,
+                                        ath10k_smps_map[smps]);
+}
+
 /* can be called only in mac80211 callbacks due to `key_count` usage */
 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
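
The new ath10k_setup_peer_smps() above boils down to extracting the two-bit SM power-save subfield from the HT capability word and mapping it through a small table, with a defensive bounds check. The sketch below mirrors that logic as standalone C; the mask/shift values follow the 802.11 HT capability encoding, and the enum names are stand-ins rather than the driver's constants.

#include <stdint.h>
#include <stdio.h>

#define HT_CAP_SM_PS        0x000C   /* two-bit SM PS field, bits 2-3 */
#define HT_CAP_SM_PS_SHIFT  2

enum { SM_PS_STATIC, SM_PS_DYNAMIC, SM_PS_INVALID, SM_PS_DISABLED };
enum { WMI_SMPS_STATIC, WMI_SMPS_DYNAMIC, WMI_SMPS_NONE };   /* stand-ins */

static const int smps_map[] = {
	[SM_PS_STATIC]   = WMI_SMPS_STATIC,
	[SM_PS_DYNAMIC]  = WMI_SMPS_DYNAMIC,
	[SM_PS_INVALID]  = WMI_SMPS_NONE,
	[SM_PS_DISABLED] = WMI_SMPS_NONE,
};

static int map_smps(uint16_t ht_cap_info, int *wmi_state)
{
	unsigned int smps = (ht_cap_info & HT_CAP_SM_PS) >> HT_CAP_SM_PS_SHIFT;

	/* defensive check, mirroring the ARRAY_SIZE() test in the driver */
	if (smps >= sizeof(smps_map) / sizeof(smps_map[0]))
		return -1;

	*wmi_state = smps_map[smps];
	return 0;
}

int main(void)
{
	int state;

	if (!map_smps(0x0004, &state))   /* dynamic SM PS advertised */
		printf("wmi smps state = %d\n", state);
	return 0;
}
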
@@ -1300,6 +1418,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta_ht_cap ht_cap;
        struct wmi_peer_assoc_complete_arg peer_arg;
        struct ieee80211_sta *ap_sta;
        int ret;
@@ -1310,17 +1429,21 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
        if (!ap_sta) {
-               ath10k_warn("Failed to find station entry for %pM\n",
-                           bss_conf->bssid);
+               ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
+                           bss_conf->bssid, arvif->vdev_id);
                rcu_read_unlock();
                return;
        }
 
+       /* ap_sta must only be accessed within the RCU section, which has to
+        * be left before calling ath10k_setup_peer_smps() as it might sleep. */
+       ht_cap = ap_sta->ht_cap;
+
        ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
                                        bss_conf, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
-                           bss_conf->bssid, ret);
+               ath10k_warn("Peer assoc prepare failed for %pM vdev %i: %d\n",
+                           bss_conf->bssid, arvif->vdev_id, ret);
                rcu_read_unlock();
                return;
        }
@@ -1329,8 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for %pM\n: %d",
-                           bss_conf->bssid, ret);
+               ath10k_warn("Peer assoc failed for %pM vdev %i: %d\n",
+                           bss_conf->bssid, arvif->vdev_id, ret);
+               return;
+       }
+
+       ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+       if (ret) {
+               ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
                return;
        }
 
@@ -1338,11 +1468,17 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                   "mac vdev %d up (associated) bssid %pM aid %d\n",
                   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
 
-       ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
-                                bss_conf->bssid);
-       if (ret)
+       arvif->aid = bss_conf->aid;
+       memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN);
+
+       ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+       if (ret) {
                ath10k_warn("VDEV: %d up failed: ret %d\n",
                            arvif->vdev_id, ret);
+               return;
+       }
+
+       arvif->is_up = true;
 }
 
 /*
@@ -1382,6 +1518,9 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
 
        arvif->def_wep_key_idx = 0;
+
+       arvif->is_started = false;
+       arvif->is_up = false;
 }
 
 static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
@@ -1394,21 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
        ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
        if (ret) {
-               ath10k_warn("WMI peer assoc prepare failed for %pM\n",
-                           sta->addr);
+               ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
+                           sta->addr, arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for STA %pM\n: %d",
-                           sta->addr, ret);
+               ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
+                           sta->addr, arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
+       if (ret) {
+               ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
                return ret;
        }
 
        ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
        if (ret) {
-               ath10k_warn("could not install peer wep keys (%d)\n", ret);
+               ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+       if (ret) {
+               ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
+                           sta->addr, arvif->vdev_id, ret);
                return ret;
        }
 
@@ -1424,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
 
        ret = ath10k_clear_peer_keys(arvif, sta->addr);
        if (ret) {
-               ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
+               ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
                return ret;
        }
 
@@ -1547,9 +1701,9 @@ static void ath10k_regd_update(struct ath10k *ar)
        /* Target allows setting up per-band regdomain but ath_common provides
         * a combined one only */
        ret = ath10k_wmi_pdev_set_regdomain(ar,
-                                           regpair->regDmnEnum,
-                                           regpair->regDmnEnum, /* 2ghz */
-                                           regpair->regDmnEnum, /* 5ghz */
+                                           regpair->reg_domain,
+                                           regpair->reg_domain, /* 2ghz */
+                                           regpair->reg_domain, /* 5ghz */
                                            regpair->reg_2ghz_ctl,
                                            regpair->reg_5ghz_ctl);
        if (ret)
@@ -2100,11 +2254,29 @@ static int ath10k_start(struct ieee80211_hw *hw)
                ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
                            ret);
 
+       /*
+        * By default the FW sets the access category of ARP frames to voice
+        * (6). In that case the ARP exchange does not work properly with a
+        * UAPSD-enabled AP: ARP requests arriving with access category 0 are
+        * processed by the network stack and sent back with access category 0,
+        * but the FW changes the access category to 6. Setting the ARP frames'
+        * access category to best effort (0) solves this problem.
+        */
+
+       ret = ath10k_wmi_pdev_set_param(ar,
+                                       ar->wmi.pdev_param->arp_ac_override, 0);
+       if (ret) {
+               ath10k_warn("could not set arp ac override parameter: %d\n",
+                           ret);
+               goto exit;
+       }
+
        ath10k_regd_update(ar);
+       ret = 0;
 
 exit:
        mutex_unlock(&ar->conf_mutex);
-       return 0;
+       return ret;
 }
 
 static void ath10k_stop(struct ieee80211_hw *hw)
@@ -2145,6 +2317,98 @@ static int ath10k_config_ps(struct ath10k *ar)
        return ret;
 }
 
+static const char *chandef_get_width(enum nl80211_chan_width width)
+{
+       switch (width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               return "20 (noht)";
+       case NL80211_CHAN_WIDTH_20:
+               return "20";
+       case NL80211_CHAN_WIDTH_40:
+               return "40";
+       case NL80211_CHAN_WIDTH_80:
+               return "80";
+       case NL80211_CHAN_WIDTH_80P80:
+               return "80+80";
+       case NL80211_CHAN_WIDTH_160:
+               return "160";
+       case NL80211_CHAN_WIDTH_5:
+               return "5";
+       case NL80211_CHAN_WIDTH_10:
+               return "10";
+       }
+       return "?";
+}
+
+static void ath10k_config_chan(struct ath10k *ar)
+{
+       struct ath10k_vif *arvif;
+       bool monitor_was_enabled;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ath10k_dbg(ATH10K_DBG_MAC,
+                  "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
+                  ar->chandef.chan->center_freq,
+                  ar->chandef.center_freq1,
+                  ar->chandef.center_freq2,
+                  chandef_get_width(ar->chandef.width));
+
+       /* First stop monitor interface. Some FW versions crash if there's a
+        * lone monitor interface. */
+       monitor_was_enabled = ar->monitor_enabled;
+
+       if (ar->monitor_enabled)
+               ath10k_monitor_stop(ar);
+
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               if (!arvif->is_started)
+                       continue;
+
+               if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+                       continue;
+
+               ret = ath10k_vdev_stop(arvif);
+               if (ret) {
+                       ath10k_warn("could not stop vdev %d (%d)\n",
+                                   arvif->vdev_id, ret);
+                       continue;
+               }
+       }
+
+       /* all vdevs are now stopped - now attempt to restart them */
+
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               if (!arvif->is_started)
+                       continue;
+
+               if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+                       continue;
+
+               ret = ath10k_vdev_start(arvif);
+               if (ret) {
+                       ath10k_warn("could not start vdev %d (%d)\n",
+                                   arvif->vdev_id, ret);
+                       continue;
+               }
+
+               if (!arvif->is_up)
+                       continue;
+
+               ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+                                        arvif->bssid);
+               if (ret) {
+                       ath10k_warn("could not bring vdev up %d (%d)\n",
+                                   arvif->vdev_id, ret);
+                       continue;
+               }
+       }
+
+       if (monitor_was_enabled)
+               ath10k_monitor_start(ar, ar->monitor_vdev_id);
+}
+
 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct ath10k *ar = hw->priv;
@@ -2165,6 +2429,11 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                spin_unlock_bh(&ar->data_lock);
 
                ath10k_config_radar_detection(ar);
+
+               if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
+                       ar->chandef = conf->chandef;
+                       ath10k_config_chan(ar);
+               }
        }
 
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -2214,7 +2483,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        enum wmi_sta_powersave_param param;
        int ret = 0;
-       u32 value, param_id;
+       u32 value;
        int bit;
        u32 vdev_param;
 
@@ -2276,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
                                     arvif->vdev_subtype, vif->addr);
        if (ret) {
-               ath10k_warn("WMI vdev create failed: ret %d\n", ret);
+               ath10k_warn("WMI vdev %i create failed: ret %d\n",
+                           arvif->vdev_id, ret);
                goto err;
        }
 
@@ -2287,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
                                        arvif->def_wep_key_idx);
        if (ret) {
-               ath10k_warn("Failed to set default keyid: %d\n", ret);
+               ath10k_warn("Failed to set vdev %i default keyid: %d\n",
+                           arvif->vdev_id, ret);
                goto err_vdev_delete;
        }
 
@@ -2296,23 +2567,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                                        ATH10K_HW_TXRX_NATIVE_WIFI);
        /* 10.X firmware does not support this VDEV parameter. Do not warn */
        if (ret && ret != -EOPNOTSUPP) {
-               ath10k_warn("Failed to set TX encap: %d\n", ret);
+               ath10k_warn("Failed to set vdev %i TX encap: %d\n",
+                           arvif->vdev_id, ret);
                goto err_vdev_delete;
        }
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
                if (ret) {
-                       ath10k_warn("Failed to create peer for AP: %d\n", ret);
+                       ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
+                                   arvif->vdev_id, ret);
                        goto err_vdev_delete;
                }
 
-               param_id = ar->wmi.pdev_param->sta_kickout_th;
-
-               /* Disable STA KICKOUT functionality in FW */
-               ret = ath10k_wmi_pdev_set_param(ar, param_id, 0);
-               if (ret)
-                       ath10k_warn("Failed to disable STA KICKOUT\n");
+               ret = ath10k_mac_set_kickout(arvif);
+               if (ret) {
+                       ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
+                                   arvif->vdev_id, ret);
+                       goto err_peer_delete;
+               }
        }
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
@@ -2321,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+                       ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
+                                   arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
 
@@ -2330,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+                       ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
+                                   arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
 
@@ -2339,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
                if (ret) {
-                       ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+                       ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
+                                   arvif->vdev_id, ret);
                        goto err_peer_delete;
                }
        }
@@ -2403,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
                if (ret)
-                       ath10k_warn("Failed to remove peer for AP: %d\n", ret);
+                       ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
 
                kfree(arvif->u.ap.noa_data);
        }
 
-       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
                   arvif->vdev_id);
 
        ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
        if (ret)
-               ath10k_warn("WMI vdev delete failed: %d\n", ret);
+               ath10k_warn("WMI vdev %i delete failed: %d\n",
+                           arvif->vdev_id, ret);
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
                ar->monitor_present = false;
@@ -2502,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                           arvif->vdev_id, arvif->beacon_interval);
 
                if (ret)
-                       ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
+                                   arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_BEACON) {
@@ -2515,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
                                                WMI_BEACON_STAGGERED_MODE);
                if (ret)
-                       ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
+                                   arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_BEACON_INFO) {
@@ -2530,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                arvif->dtim_period);
                if (ret)
-                       ath10k_warn("Failed to set dtim period for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
+                                   arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_SSID &&
@@ -2551,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                        ret = ath10k_peer_create(ar, arvif->vdev_id,
                                                 info->bssid);
                        if (ret)
-                               ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
+                               ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
                                            info->bssid, arvif->vdev_id, ret);
 
                        if (vif->type == NL80211_IFTYPE_STATION) {
@@ -2559,15 +2837,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                                 * this is never erased as we use it for crypto
                                 * key clearing; this is a FW requirement
                                 */
-                               memcpy(arvif->u.sta.bssid, info->bssid,
-                                      ETH_ALEN);
+                               memcpy(arvif->bssid, info->bssid, ETH_ALEN);
 
                                ath10k_dbg(ATH10K_DBG_MAC,
                                           "mac vdev %d start %pM\n",
                                           arvif->vdev_id, info->bssid);
 
-                               /* FIXME: check return value */
                                ret = ath10k_vdev_start(arvif);
+                               if (ret) {
+                                       ath10k_warn("failed to start vdev %i: %d\n",
+                                                   arvif->vdev_id, ret);
+                                       goto exit;
+                               }
+
+                               arvif->is_started = true;
                        }
 
                        /*
@@ -2576,7 +2859,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                         * IBSS in order to remove BSSID peer.
                         */
                        if (vif->type == NL80211_IFTYPE_ADHOC)
-                               memcpy(arvif->u.ibss.bssid, info->bssid,
+                               memcpy(arvif->bssid, info->bssid,
                                       ETH_ALEN);
                }
        }
@@ -2598,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                cts_prot);
                if (ret)
-                       ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
+                                   arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2617,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                slottime);
                if (ret)
-                       ath10k_warn("Failed to set erp slot for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
+                                   arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2636,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                preamble);
                if (ret)
-                       ath10k_warn("Failed to set preamble for VDEV: %d\n",
-                                   arvif->vdev_id);
+                       ath10k_warn("Failed to set preamble for vdev %d: %i\n",
+                                   arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_ASSOC) {
@@ -2645,6 +2928,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                        ath10k_bss_assoc(hw, vif, info);
        }
 
+exit:
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -2767,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                        key->keyidx);
        if (ret)
-               ath10k_warn("failed to set group key as default key: %d\n",
-                           ret);
+               ath10k_warn("failed to set vdev %i group key as default key: %d\n",
+                           arvif->vdev_id, ret);
 }
 
 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2828,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        ret = ath10k_install_key(arvif, key, cmd, peer_addr);
        if (ret) {
-               ath10k_warn("ath10k_install_key failed (%d)\n", ret);
+               ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
+                           arvif->vdev_id, peer_addr, ret);
                goto exit;
        }
 
@@ -2850,6 +3135,69 @@ exit:
        return ret;
 }
 
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+       struct ath10k *ar;
+       struct ath10k_vif *arvif;
+       struct ath10k_sta *arsta;
+       struct ieee80211_sta *sta;
+       u32 changed, bw, nss, smps;
+       int err;
+
+       arsta = container_of(wk, struct ath10k_sta, update_wk);
+       sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+       arvif = arsta->arvif;
+       ar = arvif->ar;
+
+       spin_lock_bh(&ar->data_lock);
+
+       changed = arsta->changed;
+       arsta->changed = 0;
+
+       bw = arsta->bw;
+       nss = arsta->nss;
+       smps = arsta->smps;
+
+       spin_unlock_bh(&ar->data_lock);
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (changed & IEEE80211_RC_BW_CHANGED) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+                          sta->addr, bw);
+
+               err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+                                               WMI_PEER_CHAN_WIDTH, bw);
+               if (err)
+                       ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
+                                   sta->addr, bw, err);
+       }
+
+       if (changed & IEEE80211_RC_NSS_CHANGED) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+                          sta->addr, nss);
+
+               err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+                                               WMI_PEER_NSS, nss);
+               if (err)
+                       ath10k_warn("failed to update STA %pM nss %d: %d\n",
+                                   sta->addr, nss, err);
+       }
+
+       if (changed & IEEE80211_RC_SMPS_CHANGED) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+                          sta->addr, smps);
+
+               err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+                                               WMI_PEER_SMPS_STATE, smps);
+               if (err)
+                       ath10k_warn("failed to update STA %pM smps %d: %d\n",
+                                   sta->addr, smps, err);
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
 static int ath10k_sta_state(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta,
@@ -2858,9 +3206,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
        int max_num_peers;
        int ret = 0;
 
+       if (old_state == IEEE80211_STA_NOTEXIST &&
+           new_state == IEEE80211_STA_NONE) {
+               memset(arsta, 0, sizeof(*arsta));
+               arsta->arvif = arvif;
+               INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+       }
+
+       /* cancel must be done outside the mutex to avoid deadlock */
+       if ((old_state == IEEE80211_STA_NONE &&
+            new_state == IEEE80211_STA_NOTEXIST))
+               cancel_work_sync(&arsta->update_wk);
+
        mutex_lock(&ar->conf_mutex);
 
        if (old_state == IEEE80211_STA_NOTEXIST &&
@@ -2899,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           arvif->vdev_id, sta->addr);
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
-                       ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
-                                   sta->addr, arvif->vdev_id);
+                       ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
+                                   sta->addr, arvif->vdev_id, ret);
 
                if (vif->type == NL80211_IFTYPE_STATION)
                        ath10k_bss_disassoc(hw, vif);
@@ -2916,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ret = ath10k_station_assoc(ar, arvif, sta);
                if (ret)
-                       ath10k_warn("Failed to associate station: %pM\n",
-                                   sta->addr);
+                       ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+                                   sta->addr, arvif->vdev_id, ret);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH &&
                   (vif->type == NL80211_IFTYPE_AP ||
@@ -2930,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
                ret = ath10k_station_disassoc(ar, arvif, sta);
                if (ret)
-                       ath10k_warn("Failed to disassociate station: %pM\n",
-                                   sta->addr);
+                       ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+                                   sta->addr, arvif->vdev_id, ret);
        }
 exit:
        mutex_unlock(&ar->conf_mutex);
@@ -3212,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                }), ATH10K_FLUSH_TIMEOUT_HZ);
 
        if (ret <= 0 || skip)
-               ath10k_warn("tx not flushed\n");
+               ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+                           skip, ar->state, ret);
 
 skip:
        mutex_unlock(&ar->conf_mutex);
@@ -3234,23 +3596,14 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
        int ret;
 
-       ar->is_target_paused = false;
+       mutex_lock(&ar->conf_mutex);
 
-       ret = ath10k_wmi_pdev_suspend_target(ar);
+       ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
        if (ret) {
-               ath10k_warn("could not suspend target (%d)\n", ret);
-               return 1;
-       }
-
-       ret = wait_event_interruptible_timeout(ar->event_queue,
-                                              ar->is_target_paused == true,
-                                              1 * HZ);
-       if (ret < 0) {
-               ath10k_warn("suspend interrupted (%d)\n", ret);
-               goto resume;
-       } else if (ret == 0) {
-               ath10k_warn("suspend timed out - target pause event never came\n");
-               goto resume;
+               if (ret == -ETIMEDOUT)
+                       goto resume;
+               ret = 1;
+               goto exit;
        }
 
        ret = ath10k_hif_suspend(ar);
@@ -3259,12 +3612,17 @@ static int ath10k_suspend(struct ieee80211_hw *hw,
                goto resume;
        }
 
-       return 0;
+       ret = 0;
+       goto exit;
 resume:
        ret = ath10k_wmi_pdev_resume_target(ar);
        if (ret)
                ath10k_warn("could not resume target (%d)\n", ret);
-       return 1;
+
+       ret = 1;
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
 }
 
 static int ath10k_resume(struct ieee80211_hw *hw)
@@ -3272,19 +3630,26 @@ static int ath10k_resume(struct ieee80211_hw *hw)
        struct ath10k *ar = hw->priv;
        int ret;
 
+       mutex_lock(&ar->conf_mutex);
+
        ret = ath10k_hif_resume(ar);
        if (ret) {
                ath10k_warn("could not resume hif (%d)\n", ret);
-               return 1;
+               ret = 1;
+               goto exit;
        }
 
        ret = ath10k_wmi_pdev_resume_target(ar);
        if (ret) {
                ath10k_warn("could not resume target (%d)\n", ret);
-               return 1;
+               ret = 1;
+               goto exit;
        }
 
-       return 0;
+       ret = 0;
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
 }
 #endif
 
@@ -3575,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
 
 static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
                                       u8 fixed_rate,
-                                      u8 fixed_nss)
+                                      u8 fixed_nss,
+                                      u8 force_sgi)
 {
        struct ath10k *ar = arvif->ar;
        u32 vdev_param;
@@ -3584,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
        mutex_lock(&ar->conf_mutex);
 
        if (arvif->fixed_rate == fixed_rate &&
-           arvif->fixed_nss == fixed_nss)
+           arvif->fixed_nss == fixed_nss &&
+           arvif->force_sgi == force_sgi)
                goto exit;
 
        if (fixed_rate == WMI_FIXED_RATE_NONE)
                ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
 
+       if (force_sgi)
+               ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
+
        vdev_param = ar->wmi.vdev_param->fixed_rate;
        ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
                                        vdev_param, fixed_rate);
@@ -3615,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
 
        arvif->fixed_nss = fixed_nss;
 
+       vdev_param = ar->wmi.vdev_param->sgi;
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                       force_sgi);
+
+       if (ret) {
+               ath10k_warn("Could not set sgi param %d: %d\n",
+                           force_sgi, ret);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       arvif->force_sgi = force_sgi;
+
 exit:
        mutex_unlock(&ar->conf_mutex);
        return ret;
@@ -3629,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
        enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
        u8 fixed_rate = WMI_FIXED_RATE_NONE;
        u8 fixed_nss = ar->num_rf_chains;
+       u8 force_sgi;
+
+       force_sgi = mask->control[band].gi;
+       if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+               return -EINVAL;
 
        if (!ath10k_default_bitrate_mask(ar, band, mask)) {
                if (!ath10k_get_fixed_rate_nss(mask, band,
@@ -3637,7 +4025,113 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
                        return -EINVAL;
        }
 
-       return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+       if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+               ath10k_warn("Could not force SGI usage for default rate settings\n");
+               return -EINVAL;
+       }
+
+       return ath10k_set_fixed_rate_param(arvif, fixed_rate,
+                                          fixed_nss, force_sgi);
+}
+
+static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
+                                        struct ieee80211_vif *vif,
+                                        struct cfg80211_chan_def *chandef)
+{
+       /* there's no need to do anything here. vif->csa_active is enough */
+       return;
+}
+
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_sta *sta,
+                                u32 changed)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       u32 bw, smps;
+
+       spin_lock_bh(&ar->data_lock);
+
+       ath10k_dbg(ATH10K_DBG_MAC,
+                  "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+                  sta->addr, changed, sta->bandwidth, sta->rx_nss,
+                  sta->smps_mode);
+
+       if (changed & IEEE80211_RC_BW_CHANGED) {
+               bw = WMI_PEER_CHWIDTH_20MHZ;
+
+               switch (sta->bandwidth) {
+               case IEEE80211_STA_RX_BW_20:
+                       bw = WMI_PEER_CHWIDTH_20MHZ;
+                       break;
+               case IEEE80211_STA_RX_BW_40:
+                       bw = WMI_PEER_CHWIDTH_40MHZ;
+                       break;
+               case IEEE80211_STA_RX_BW_80:
+                       bw = WMI_PEER_CHWIDTH_80MHZ;
+                       break;
+               case IEEE80211_STA_RX_BW_160:
+                       ath10k_warn("mac sta rc update for %pM: invalid bw %d\n",
+                                   sta->addr, sta->bandwidth);
+                       bw = WMI_PEER_CHWIDTH_20MHZ;
+                       break;
+               }
+
+               arsta->bw = bw;
+       }
+
+       if (changed & IEEE80211_RC_NSS_CHANGED)
+               arsta->nss = sta->rx_nss;
+
+       if (changed & IEEE80211_RC_SMPS_CHANGED) {
+               smps = WMI_PEER_SMPS_PS_NONE;
+
+               switch (sta->smps_mode) {
+               case IEEE80211_SMPS_AUTOMATIC:
+               case IEEE80211_SMPS_OFF:
+                       smps = WMI_PEER_SMPS_PS_NONE;
+                       break;
+               case IEEE80211_SMPS_STATIC:
+                       smps = WMI_PEER_SMPS_STATIC;
+                       break;
+               case IEEE80211_SMPS_DYNAMIC:
+                       smps = WMI_PEER_SMPS_DYNAMIC;
+                       break;
+               case IEEE80211_SMPS_NUM_MODES:
+                       ath10k_warn("mac sta rc update for %pM: invalid smps: %d\n",
+                                   sta->addr, sta->smps_mode);
+                       smps = WMI_PEER_SMPS_PS_NONE;
+                       break;
+               }
+
+               arsta->smps = smps;
+       }
+
+       if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+               /* FIXME: Not implemented. Probably the only way to do it would
+                * be to re-assoc the peer. */
+               changed &= ~IEEE80211_RC_SUPP_RATES_CHANGED;
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac sta rc update for %pM: changing supported rates not implemented\n",
+                          sta->addr);
+       }
+
+       arsta->changed |= changed;
+
+       spin_unlock_bh(&ar->data_lock);
+
+       ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+       /*
+        * FIXME: Return 0 for the time being. Need to figure out whether the
+        * FW has an API to fetch the 64-bit local TSF.
+        */
+
+       return 0;
 }
 
 static const struct ieee80211_ops ath10k_ops = {
@@ -3663,6 +4157,9 @@ static const struct ieee80211_ops ath10k_ops = {
        .restart_complete               = ath10k_restart_complete,
        .get_survey                     = ath10k_get_survey,
        .set_bitrate_mask               = ath10k_set_bitrate_mask,
+       .channel_switch_beacon          = ath10k_channel_switch_beacon,
+       .sta_rc_update                  = ath10k_sta_rc_update,
+       .get_tsf                        = ath10k_get_tsf,
 #ifdef CONFIG_PM
        .suspend                        = ath10k_suspend,
        .resume                         = ath10k_resume,
@@ -3939,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
                                                   ath10k_get_arvif_iter,
                                                   &arvif_iter);
        if (!arvif_iter.arvif) {
-               ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+               ath10k_warn("No VIF found for vdev %d\n", vdev_id);
                return NULL;
        }
 
@@ -4020,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_HAS_RATE_CONTROL |
                        IEEE80211_HW_SUPPORTS_STATIC_SMPS |
                        IEEE80211_HW_WANT_MONITOR_VIF |
-                       IEEE80211_HW_AP_LINK_PS;
+                       IEEE80211_HW_AP_LINK_PS |
+                       IEEE80211_HW_SPECTRUM_MGMT;
 
        /* MSDU can have HTT TX fragment pushed in front. The additional 4
         * bytes is used for padding/alignment if necessary. */
@@ -4038,10 +4536,12 @@ int ath10k_mac_register(struct ath10k *ar)
        ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
 
        ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+       ar->hw->sta_data_size = sizeof(struct ath10k_sta);
 
        ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
 
        ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+       ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        ar->hw->wiphy->max_remain_on_channel_duration = 5000;
 
        ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
@@ -4076,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
        ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
                            ath10k_reg_notifier);
        if (ret) {
-               ath10k_err("Regulatory initialization failed\n");
+               ath10k_err("Regulatory initialization failed: %i\n", ret);
                goto err_free;
        }
 
index 29fd197d1fd8b3b7e8da66b212d0452c4e3ec3de..9d242d801d9d354f772b74257826427f0a598180 100644 (file)
@@ -58,13 +58,12 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);
 
-static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                             int num);
 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
-static void ath10k_pci_stop_ce(struct ath10k *ar);
-static int ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_warm_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 static int ath10k_pci_init_irq(struct ath10k *ar);
 static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -73,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
-static void ath10k_pci_cleanup_ce(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
@@ -678,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
        }
 }
 
-/*
- * FIXME: Handle OOM properly.
- */
-static inline
-struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
-{
-       struct ath10k_pci_compl *compl = NULL;
-
-       spin_lock_bh(&pipe_info->pipe_lock);
-       if (list_empty(&pipe_info->compl_free)) {
-               ath10k_warn("Completion buffers are full\n");
-               goto exit;
-       }
-       compl = list_first_entry(&pipe_info->compl_free,
-                                struct ath10k_pci_compl, list);
-       list_del(&compl->list);
-exit:
-       spin_unlock_bh(&pipe_info->pipe_lock);
-       return compl;
-}
-
 /* Called by lower (CE) layer when a send to Target completes. */
 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
-       struct ath10k_pci_compl *compl;
+       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
@@ -714,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
-               compl = get_free_compl(pipe_info);
-               if (!compl)
-                       break;
-
-               compl->state = ATH10K_PCI_COMPL_SEND;
-               compl->ce_state = ce_state;
-               compl->pipe_info = pipe_info;
-               compl->skb = transfer_context;
-               compl->nbytes = nbytes;
-               compl->transfer_id = transfer_id;
-               compl->flags = 0;
+               /* no need to call tx completion for NULL pointers */
+               if (transfer_context == NULL)
+                       continue;
 
-               /*
-                * Add the completion to the processing queue.
-                */
-               spin_lock_bh(&ar_pci->compl_lock);
-               list_add_tail(&compl->list, &ar_pci->compl_process);
-               spin_unlock_bh(&ar_pci->compl_lock);
+               cb->tx_completion(ar, transfer_context, transfer_id);
        }
-
-       ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
@@ -743,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
-       struct ath10k_pci_compl *compl;
+       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
-       unsigned int nbytes;
+       unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;
+       int err;
 
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
-               compl = get_free_compl(pipe_info);
-               if (!compl)
-                       break;
-
-               compl->state = ATH10K_PCI_COMPL_RECV;
-               compl->ce_state = ce_state;
-               compl->pipe_info = pipe_info;
-               compl->skb = transfer_context;
-               compl->nbytes = nbytes;
-               compl->transfer_id = transfer_id;
-               compl->flags = flags;
+               err = ath10k_pci_post_rx_pipe(pipe_info, 1);
+               if (unlikely(err)) {
+                       /* FIXME: retry */
+                       ath10k_warn("failed to replenish CE rx ring %d: %d\n",
+                                   pipe_info->pipe_num, err);
+               }
 
                skb = transfer_context;
+               max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
-                                skb->len + skb_tailroom(skb),
-                                DMA_FROM_DEVICE);
-               /*
-                * Add the completion to the processing queue.
-                */
-               spin_lock_bh(&ar_pci->compl_lock);
-               list_add_tail(&compl->list, &ar_pci->compl_process);
-               spin_unlock_bh(&ar_pci->compl_lock);
-       }
+                                max_nbytes, DMA_FROM_DEVICE);
+
+               if (unlikely(max_nbytes < nbytes)) {
+                       ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
+                                   nbytes, max_nbytes);
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
 
-       ath10k_pci_process_ce(ar);
+               skb_put(skb, nbytes);
+               cb->rx_completion(ar, skb, pipe_info->pipe_num);
+       }
 }
 
-/* Send the first nbytes bytes of the buffer */
-static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
-                                   unsigned int transfer_id,
-                                   unsigned int bytes, struct sk_buff *nbuf)
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+                               struct ath10k_hif_sg_item *items, int n_items)
 {
-       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-       struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
-       unsigned int len;
-       u32 flags = 0;
-       int ret;
+       struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+       struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+       struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+       unsigned int nentries_mask = src_ring->nentries_mask;
+       unsigned int sw_index = src_ring->sw_index;
+       unsigned int write_index = src_ring->write_index;
+       int err, i;
 
-       len = min(bytes, nbuf->len);
-       bytes -= len;
+       spin_lock_bh(&ar_pci->ce_lock);
 
-       if (len & 3)
-               ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+       if (unlikely(CE_RING_DELTA(nentries_mask,
+                                  write_index, sw_index - 1) < n_items)) {
+               err = -ENOBUFS;
+               goto unlock;
+       }
 
-       ath10k_dbg(ATH10K_DBG_PCI,
-                  "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
-                  nbuf->data, (unsigned long long) skb_cb->paddr,
-                  nbuf->len, len);
-       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
-                       "ath10k tx: data: ",
-                       nbuf->data, nbuf->len);
-
-       ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
-                            flags);
-       if (ret)
-               ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
+       for (i = 0; i < n_items - 1; i++) {
+               ath10k_dbg(ATH10K_DBG_PCI,
+                          "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+                          i, items[i].paddr, items[i].len, n_items);
+               ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+                               items[i].vaddr, items[i].len);
 
-       return ret;
+               err = ath10k_ce_send_nolock(ce_pipe,
+                                           items[i].transfer_context,
+                                           items[i].paddr,
+                                           items[i].len,
+                                           items[i].transfer_id,
+                                           CE_SEND_FLAG_GATHER);
+               if (err)
+                       goto unlock;
+       }
+
+       /* `i` is equal to `n_items - 1` after the for() loop */
+
+       ath10k_dbg(ATH10K_DBG_PCI,
+                  "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+                  i, items[i].paddr, items[i].len, n_items);
+       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+                       items[i].vaddr, items[i].len);
+
+       err = ath10k_ce_send_nolock(ce_pipe,
+                                   items[i].transfer_context,
+                                   items[i].paddr,
+                                   items[i].len,
+                                   items[i].transfer_id,
+                                   0);
+       if (err)
+               goto unlock;
+
+       err = 0;
+unlock:
+       spin_unlock_bh(&ar_pci->ce_lock);
+       return err;
 }
 
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
@@ -833,9 +817,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
-       ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
-                  ar->fw_version_minor, ar->fw_version_release,
-                  ar->fw_version_build);
+       ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
 
        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
@@ -904,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
               sizeof(ar_pci->msg_callbacks_current));
 }
 
-static int ath10k_pci_alloc_compl(struct ath10k *ar)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       const struct ce_attr *attr;
-       struct ath10k_pci_pipe *pipe_info;
-       struct ath10k_pci_compl *compl;
-       int i, pipe_num, completions;
-
-       spin_lock_init(&ar_pci->compl_lock);
-       INIT_LIST_HEAD(&ar_pci->compl_process);
-
-       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-               pipe_info = &ar_pci->pipe_info[pipe_num];
-
-               spin_lock_init(&pipe_info->pipe_lock);
-               INIT_LIST_HEAD(&pipe_info->compl_free);
-
-               /* Handle Diagnostic CE specially */
-               if (pipe_info->ce_hdl == ar_pci->ce_diag)
-                       continue;
-
-               attr = &host_ce_config_wlan[pipe_num];
-               completions = 0;
-
-               if (attr->src_nentries)
-                       completions += attr->src_nentries;
-
-               if (attr->dest_nentries)
-                       completions += attr->dest_nentries;
-
-               for (i = 0; i < completions; i++) {
-                       compl = kmalloc(sizeof(*compl), GFP_KERNEL);
-                       if (!compl) {
-                               ath10k_warn("No memory for completion state\n");
-                               ath10k_pci_cleanup_ce(ar);
-                               return -ENOMEM;
-                       }
-
-                       compl->state = ATH10K_PCI_COMPL_FREE;
-                       list_add_tail(&compl->list, &pipe_info->compl_free);
-               }
-       }
-
-       return 0;
-}
-
 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -994,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
 }
 
-static void ath10k_pci_stop_ce(struct ath10k *ar)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_compl *compl;
-       struct sk_buff *skb;
-
-       /* Mark pending completions as aborted, so that upper layers free up
-        * their associated resources */
-       spin_lock_bh(&ar_pci->compl_lock);
-       list_for_each_entry(compl, &ar_pci->compl_process, list) {
-               skb = compl->skb;
-               ATH10K_SKB_CB(skb)->is_aborted = true;
-       }
-       spin_unlock_bh(&ar_pci->compl_lock);
-}
-
-static void ath10k_pci_cleanup_ce(struct ath10k *ar)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ath10k_pci_compl *compl, *tmp;
-       struct ath10k_pci_pipe *pipe_info;
-       struct sk_buff *netbuf;
-       int pipe_num;
-
-       /* Free pending completions. */
-       spin_lock_bh(&ar_pci->compl_lock);
-       if (!list_empty(&ar_pci->compl_process))
-               ath10k_warn("pending completions still present! possible memory leaks.\n");
-
-       list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
-               list_del(&compl->list);
-               netbuf = compl->skb;
-               dev_kfree_skb_any(netbuf);
-               kfree(compl);
-       }
-       spin_unlock_bh(&ar_pci->compl_lock);
-
-       /* Free unused completions for each pipe. */
-       for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-               pipe_info = &ar_pci->pipe_info[pipe_num];
-
-               spin_lock_bh(&pipe_info->pipe_lock);
-               list_for_each_entry_safe(compl, tmp,
-                                        &pipe_info->compl_free, list) {
-                       list_del(&compl->list);
-                       kfree(compl);
-               }
-               spin_unlock_bh(&pipe_info->pipe_lock);
-       }
-}
-
-static void ath10k_pci_process_ce(struct ath10k *ar)
-{
-       struct ath10k_pci *ar_pci = ar->hif.priv;
-       struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
-       struct ath10k_pci_compl *compl;
-       struct sk_buff *skb;
-       unsigned int nbytes;
-       int ret, send_done = 0;
-
-       /* Upper layers aren't ready to handle tx/rx completions in parallel so
-        * we must serialize all completion processing. */
-
-       spin_lock_bh(&ar_pci->compl_lock);
-       if (ar_pci->compl_processing) {
-               spin_unlock_bh(&ar_pci->compl_lock);
-               return;
-       }
-       ar_pci->compl_processing = true;
-       spin_unlock_bh(&ar_pci->compl_lock);
-
-       for (;;) {
-               spin_lock_bh(&ar_pci->compl_lock);
-               if (list_empty(&ar_pci->compl_process)) {
-                       spin_unlock_bh(&ar_pci->compl_lock);
-                       break;
-               }
-               compl = list_first_entry(&ar_pci->compl_process,
-                                        struct ath10k_pci_compl, list);
-               list_del(&compl->list);
-               spin_unlock_bh(&ar_pci->compl_lock);
-
-               switch (compl->state) {
-               case ATH10K_PCI_COMPL_SEND:
-                       cb->tx_completion(ar,
-                                         compl->skb,
-                                         compl->transfer_id);
-                       send_done = 1;
-                       break;
-               case ATH10K_PCI_COMPL_RECV:
-                       ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
-                       if (ret) {
-                               ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
-                                           compl->pipe_info->pipe_num, ret);
-                               break;
-                       }
-
-                       skb = compl->skb;
-                       nbytes = compl->nbytes;
-
-                       ath10k_dbg(ATH10K_DBG_PCI,
-                                  "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
-                                  skb, nbytes);
-                       ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
-                                       "ath10k rx: ", skb->data, nbytes);
-
-                       if (skb->len + skb_tailroom(skb) >= nbytes) {
-                               skb_trim(skb, 0);
-                               skb_put(skb, nbytes);
-                               cb->rx_completion(ar, skb,
-                                                 compl->pipe_info->pipe_num);
-                       } else {
-                               ath10k_warn("rxed more than expected (nbytes %d, max %d)",
-                                           nbytes,
-                                           skb->len + skb_tailroom(skb));
-                       }
-                       break;
-               case ATH10K_PCI_COMPL_FREE:
-                       ath10k_warn("free completion cannot be processed\n");
-                       break;
-               default:
-                       ath10k_warn("invalid completion state (%d)\n",
-                                   compl->state);
-                       break;
-               }
-
-               compl->state = ATH10K_PCI_COMPL_FREE;
-
-               /*
-                * Add completion back to the pipe's free list.
-                */
-               spin_lock_bh(&compl->pipe_info->pipe_lock);
-               list_add_tail(&compl->list, &compl->pipe_info->compl_free);
-               spin_unlock_bh(&compl->pipe_info->pipe_lock);
-       }
-
-       spin_lock_bh(&ar_pci->compl_lock);
-       ar_pci->compl_processing = false;
-       spin_unlock_bh(&ar_pci->compl_lock);
-}
-
 /* TODO - temporary mapping while we have too few CE's */
 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
@@ -1306,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
 
-       ret = ath10k_pci_alloc_compl(ar);
-       if (ret) {
-               ath10k_warn("failed to allocate CE completions: %d\n", ret);
-               goto err_early_irq;
-       }
-
        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
-               goto err_free_compl;
+               goto err_early_irq;
        }
 
        ret = ath10k_pci_setup_ce_irq(ar);
@@ -1340,10 +1129,6 @@ err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
-       ath10k_pci_stop_ce(ar);
-       ath10k_pci_process_ce(ar);
-err_free_compl:
-       ath10k_pci_cleanup_ce(ar);
 err_early_irq:
        /* Though there should be no interrupts (device was reset)
         * power_down() expects the early IRQ to be installed as per the
@@ -1414,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 
        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
-               /*
-                * Indicate the completion to higer layer to free
-                * the buffer
-                */
-
-               if (!netbuf) {
-                       ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
-                                   ce_hdl->id);
+               /* no need to call tx completion for NULL pointers */
+               if (!netbuf)
                        continue;
-               }
 
-               ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
@@ -1483,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
-       ath10k_pci_stop_ce(ar);
 
        ret = ath10k_pci_request_early_irq(ar);
        if (ret)
@@ -1493,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */
 
-       ath10k_pci_process_ce(ar);
-       ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);
 
        /* Make the sure the device won't access any structures on the host by
@@ -1502,7 +1276,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
-       ath10k_pci_device_reset(ar);
+       ath10k_pci_warm_reset(ar);
 
        ar_pci->started = 0;
 }
@@ -1993,7 +1767,94 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
        ath10k_pci_sleep(ar);
 }
 
-static int ath10k_pci_hif_power_up(struct ath10k *ar)
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret = 0;
+       u32 val;
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
+
+       ret = ath10k_do_pci_wake(ar);
+       if (ret) {
+               ath10k_err("failed to wake up target: %d\n", ret);
+               return ret;
+       }
+
+       /* debug */
+       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                               PCIE_INTR_CAUSE_ADDRESS);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                               CPU_INTR_ADDRESS);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+                  val);
+
+       /* disable pending irqs */
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                          PCIE_INTR_ENABLE_ADDRESS, 0);
+
+       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                          PCIE_INTR_CLR_ADDRESS, ~0);
+
+       msleep(100);
+
+       /* clear fw indicator */
+       ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);
+
+       /* clear target LF timer interrupts */
+       val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+                               SOC_LF_TIMER_CONTROL0_ADDRESS);
+       ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+                          SOC_LF_TIMER_CONTROL0_ADDRESS,
+                          val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+
+       /* reset CE */
+       val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+                               SOC_RESET_CONTROL_ADDRESS);
+       ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+                          val | SOC_RESET_CONTROL_CE_RST_MASK);
+       val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+                               SOC_RESET_CONTROL_ADDRESS);
+       msleep(10);
+
+       /* unreset CE */
+       ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+                          val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+       val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+                               SOC_RESET_CONTROL_ADDRESS);
+       msleep(10);
+
+       /* debug */
+       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                               PCIE_INTR_CAUSE_ADDRESS);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+
+       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                               CPU_INTR_ADDRESS);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+                  val);
+
+       /* CPU warm reset */
+       val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+                               SOC_RESET_CONTROL_ADDRESS);
+       ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+                          val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+
+       val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+                               SOC_RESET_CONTROL_ADDRESS);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
+
+       msleep(100);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+       ath10k_do_pci_sleep(ar);
+       return ret;
+}
+
+static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const char *irq_mode;
@@ -2009,7 +1870,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
         * is in an unexpected state. We try to catch that here in order to
         * reset the Target and retry the probe.
         */
-       ret = ath10k_pci_device_reset(ar);
+       if (cold_reset)
+               ret = ath10k_pci_cold_reset(ar);
+       else
+               ret = ath10k_pci_warm_reset(ar);
+
        if (ret) {
                ath10k_err("failed to reset target: %d\n", ret);
                goto err;
@@ -2079,7 +1944,7 @@ err_deinit_irq:
        ath10k_pci_deinit_irq(ar);
 err_ce:
        ath10k_pci_ce_deinit(ar);
-       ath10k_pci_device_reset(ar);
+       ath10k_pci_warm_reset(ar);
 err_ps:
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
@@ -2087,6 +1952,34 @@ err:
        return ret;
 }
 
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+       int ret;
+
+       /*
+        * Hardware CUS232 version 2 has some issues with cold reset and the
+        * preferred (and safer) way to perform a device reset is through a
+        * warm reset.
+        *
+        * Warm reset doesn't always work though (notably after a firmware
+        * crash) so fall back to cold reset if necessary.
+        */
+       ret = __ath10k_pci_hif_power_up(ar, false);
+       if (ret) {
+               ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
+                           ret);
+
+               ret = __ath10k_pci_hif_power_up(ar, true);
+               if (ret) {
+                       ath10k_err("failed to power up target using cold reset too (%d)\n",
+                                  ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 static void ath10k_pci_hif_power_down(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2094,7 +1987,7 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
        ath10k_pci_free_early_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_deinit_irq(ar);
-       ath10k_pci_device_reset(ar);
+       ath10k_pci_warm_reset(ar);
 
        ath10k_pci_ce_deinit(ar);
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
@@ -2151,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
 #endif
 
 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
-       .send_head              = ath10k_pci_hif_send_head,
+       .tx_sg                  = ath10k_pci_hif_tx_sg,
        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
        .start                  = ath10k_pci_hif_start,
        .stop                   = ath10k_pci_hif_stop,
@@ -2411,11 +2304,10 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
        /* Try MSI-X */
        if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
                ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
-               ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
-               if (ret == 0)
-                       return 0;
+               ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
+                                                        ar_pci->num_msi_intrs);
                if (ret > 0)
-                       pci_disable_msi(ar_pci->pdev);
+                       return 0;
 
                /* fall-through */
        }
@@ -2482,6 +2374,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
        case MSI_NUM_REQUEST:
                pci_disable_msi(ar_pci->pdev);
                return 0;
+       default:
+               pci_disable_msi(ar_pci->pdev);
        }
 
        ath10k_warn("unknown irq configuration upon deinit\n");
@@ -2523,7 +2417,7 @@ out:
        return ret;
 }
 
-static int ath10k_pci_device_reset(struct ath10k *ar)
+static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
        int i, ret;
        u32 val;
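
Annotation: the pci.c hunks above drop the intermediate completion-list machinery (the CE callbacks now invoke tx_completion/rx_completion directly) and replace send_head with a scatter-gather ath10k_pci_hif_tx_sg(): free ring space is checked once under ce_lock, all but the last item are queued with a gather flag, and only the final item completes the transfer. Below is a minimal sketch of that ring-space check and gather loop, with hypothetical names and a stubbed enqueue, not the driver's CE API:

#include <stdio.h>

/* Free slots in a power-of-two ring: the same arithmetic as a
 * CE_RING_DELTA-style macro, i.e. (to - from) masked to the ring size. */
static unsigned ring_delta(unsigned mask, unsigned from, unsigned to)
{
	return (to - from) & mask;
}

struct sg_item { const void *vaddr; unsigned len; };

/* Stub for the hardware enqueue; `gather` marks all but the last fragment. */
static int enqueue(const struct sg_item *it, int gather)
{
	printf("enqueue len %u gather %d\n", it->len, gather);
	return 0;
}

static int tx_sg(unsigned nentries_mask, unsigned write_index,
		 unsigned sw_index, const struct sg_item *items, int n_items)
{
	int i, err;

	/* Reject the whole batch if it does not fit; a partial send would
	 * leave an incomplete gather chain in the ring. */
	if (ring_delta(nentries_mask, write_index, sw_index - 1) < (unsigned)n_items)
		return -1; /* ~ -ENOBUFS */

	for (i = 0; i < n_items - 1; i++) {
		err = enqueue(&items[i], 1);
		if (err)
			return err;
	}

	/* The last fragment goes out without the gather flag and triggers
	 * the actual transfer. */
	return enqueue(&items[i], 0);
}

int main(void)
{
	struct sg_item items[] = { { "hdr", 4 }, { "payload", 100 } };

	return tx_sg(511, 10, 8, items, 2);
}

Checking the space up front under one lock, as the hunk does, is what lets the two enqueue calls use the nolock variant safely.
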
index a4f32038c440b7d36009496cde6b5f26ea95c23b..b43fdb4f731973544e1a55f700c779e4191d76bb 100644 (file)
@@ -43,23 +43,6 @@ struct bmi_xfer {
        u32 resp_len;
 };
 
-enum ath10k_pci_compl_state {
-       ATH10K_PCI_COMPL_FREE = 0,
-       ATH10K_PCI_COMPL_SEND,
-       ATH10K_PCI_COMPL_RECV,
-};
-
-struct ath10k_pci_compl {
-       struct list_head list;
-       enum ath10k_pci_compl_state state;
-       struct ath10k_ce_pipe *ce_state;
-       struct ath10k_pci_pipe *pipe_info;
-       struct sk_buff *skb;
-       unsigned int nbytes;
-       unsigned int transfer_id;
-       unsigned int flags;
-};
-
 /*
  * PCI-specific Target state
  *
@@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
        /* protects compl_free and num_send_allowed */
        spinlock_t pipe_lock;
 
-       /* List of free CE completion slots */
-       struct list_head compl_free;
-
        struct ath10k_pci *ar_pci;
        struct tasklet_struct intr;
 };
@@ -205,14 +185,6 @@ struct ath10k_pci {
        atomic_t keep_awake_count;
        bool verified_awake;
 
-       /* List of CE completions to be processed */
-       struct list_head compl_process;
-
-       /* protects compl_processing and compl_process */
-       spinlock_t compl_lock;
-
-       bool compl_processing;
-
        struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
        struct ath10k_hif_cb msg_callbacks_current;
index 74f45fa6f428de9d297c2af388ab1acb15247b60..0541dd939ce9d8be7dc7e195ef952526abab0255 100644 (file)
@@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        struct ieee80211_tx_info *info;
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *msdu;
-       int ret;
+
+       lockdep_assert_held(&htt->tx_lock);
 
        ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
                   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        msdu = htt->pending_tx[tx_done->msdu_id];
        skb_cb = ATH10K_SKB_CB(msdu);
 
-       ret = ath10k_skb_unmap(dev, msdu);
-       if (ret)
-               ath10k_warn("data skb unmap failed (%d)\n", ret);
+       dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
-       if (skb_cb->htt.frag_len)
-               skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+       if (skb_cb->htt.txbuf)
+               dma_pool_free(htt->tx_pool,
+                             skb_cb->htt.txbuf,
+                             skb_cb->htt.txbuf_paddr);
 
        ath10k_report_offchan_tx(htt->ar, msdu);
 
@@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        /* we do not own the msdu anymore */
 
 exit:
-       spin_lock_bh(&htt->tx_lock);
        htt->pending_tx[tx_done->msdu_id] = NULL;
        ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
        __ath10k_htt_tx_dec_pending(htt);
        if (htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
-       spin_unlock_bh(&htt->tx_lock);
 }
 
 static const u8 rx_legacy_rate_idx[] = {
@@ -204,7 +203,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
                        break;
                /* 80MHZ */
                case 2:
-                       status->flag |= RX_FLAG_80MHZ;
+                       status->vht_flag |= RX_VHT_FLAG_80MHZ;
                }
 
                status->flag |= RX_FLAG_VHT;
@@ -258,20 +257,26 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
        status->band = ch->band;
        status->freq = ch->center_freq;
 
+       if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+               /* TSF available only in 32-bit */
+               status->mactime = info->tsf & 0xffffffff;
+               status->flag |= RX_FLAG_MACTIME_END;
+       }
+
        ath10k_dbg(ATH10K_DBG_DATA,
-                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
+                  "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
                   info->skb,
                   info->skb->len,
                   status->flag == 0 ? "legacy" : "",
                   status->flag & RX_FLAG_HT ? "ht" : "",
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
-                  status->flag & RX_FLAG_80MHZ ? "80" : "",
+                  status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
                   status->freq,
-                  status->band);
+                  status->band, status->flag, info->fcs_err);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
                        info->skb->data, info->skb->len);
 
@@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find_by_id(ar, ev->peer_id);
        if (!peer) {
-               ath10k_warn("unknown peer id %d\n", ev->peer_id);
+               ath10k_warn("peer-unmap-event: unknown peer id %d\n",
+                           ev->peer_id);
                goto exit;
        }
 
index 712a606a080a02617fe839737ce4c2f5084a5881..cb1f7b5bcf4cdefa774411e09fa39b80728d82e4 100644 (file)
@@ -213,7 +213,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
        .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
-       .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+       .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
@@ -420,7 +420,6 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
        .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
        .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
        .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
-       .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
        .dcs = WMI_PDEV_PARAM_DCS,
        .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
        .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -472,8 +471,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
        .bcnflt_stats_update_period =
                                WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
        .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
-       .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
-       .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+       .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
        .dcs = WMI_10X_PDEV_PARAM_DCS,
        .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
        .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
@@ -561,7 +559,6 @@ err_pull:
 
 static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
 {
-       struct wmi_bcn_tx_arg arg = {0};
        int ret;
 
        lockdep_assert_held(&arvif->ar->data_lock);
@@ -569,18 +566,16 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
        if (arvif->beacon == NULL)
                return;
 
-       arg.vdev_id = arvif->vdev_id;
-       arg.tx_rate = 0;
-       arg.tx_power = 0;
-       arg.bcn = arvif->beacon->data;
-       arg.bcn_len = arvif->beacon->len;
+       if (arvif->beacon_sent)
+               return;
 
-       ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+       ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
        if (ret)
                return;
 
-       dev_kfree_skb_any(arvif->beacon);
-       arvif->beacon = NULL;
+       /* We need to retain the arvif->beacon reference for DMA unmapping and
+        * freeing the skbuff later. */
+       arvif->beacon_sent = true;
 }
 
 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
@@ -1116,7 +1111,27 @@ static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
 static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
                                              struct sk_buff *skb)
 {
-       ath10k_dbg(ATH10K_DBG_WMI, "WMI_PEER_STA_KICKOUT_EVENTID\n");
+       struct wmi_peer_sta_kickout_event *ev;
+       struct ieee80211_sta *sta;
+
+       ev = (struct wmi_peer_sta_kickout_event *)skb->data;
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+                  ev->peer_macaddr.addr);
+
+       rcu_read_lock();
+
+       sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
+       if (!sta) {
+               ath10k_warn("Spurious quick kickout for STA %pM\n",
+                           ev->peer_macaddr.addr);
+               goto exit;
+       }
+
+       ieee80211_report_low_ack(sta, 10);
+
+exit:
+       rcu_read_unlock();
 }
 
 /*
@@ -1217,6 +1232,13 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
        tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
        memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
 
+       if (tim->dtim_count == 0) {
+               ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+
+               if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
+                       ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+       }
+
        ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
                   tim->dtim_count, tim->dtim_period,
                   tim->bitmap_ctrl, pvm_len);
@@ -1338,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
        struct wmi_bcn_info *bcn_info;
        struct ath10k_vif *arvif;
        struct sk_buff *bcn;
-       int vdev_id = 0;
+       int ret, vdev_id = 0;
 
        ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
 
@@ -1385,6 +1407,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                        continue;
                }
 
+               /* There are no completions for beacons, so wait for the next
+                * SWBA before telling mac80211 to decrement the CSA counter.
+                *
+                * Once the CSA counter has completed, stop sending beacons
+                * until the actual channel switch is done. */
+               if (arvif->vif->csa_active &&
+                   ieee80211_csa_is_complete(arvif->vif)) {
+                       ieee80211_csa_finish(arvif->vif);
+                       continue;
+               }
+
                bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
                if (!bcn) {
                        ath10k_warn("could not get mac80211 beacon\n");
@@ -1396,15 +1429,33 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
 
                spin_lock_bh(&ar->data_lock);
+
                if (arvif->beacon) {
-                       ath10k_warn("SWBA overrun on vdev %d\n",
-                                   arvif->vdev_id);
+                       if (!arvif->beacon_sent)
+                               ath10k_warn("SWBA overrun on vdev %d\n",
+                                           arvif->vdev_id);
+
+                       dma_unmap_single(arvif->ar->dev,
+                                        ATH10K_SKB_CB(arvif->beacon)->paddr,
+                                        arvif->beacon->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(arvif->beacon);
                }
 
+               ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
+                                                          bcn->data, bcn->len,
+                                                          DMA_TO_DEVICE);
+               ret = dma_mapping_error(arvif->ar->dev,
+                                       ATH10K_SKB_CB(bcn)->paddr);
+               if (ret) {
+                       ath10k_warn("failed to map beacon: %d\n", ret);
+                       goto skip;
+               }
+
                arvif->beacon = bcn;
+               arvif->beacon_sent = false;
 
                ath10k_wmi_tx_beacon_nowait(arvif);
+skip:
                spin_unlock_bh(&ar->data_lock);
        }
 }
@@ -2031,11 +2082,11 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
        memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
-                  "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+                  "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
                   __le32_to_cpu(ev->sw_version),
                   __le32_to_cpu(ev->abi_version),
                   ev->mac_addr.addr,
-                  __le32_to_cpu(ev->status));
+                  __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
 
        complete(&ar->wmi.unified_ready);
        return 0;
@@ -2403,7 +2454,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
                                   ar->wmi.cmd->pdev_set_channel_cmdid);
 }
 
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
 {
        struct wmi_pdev_suspend_cmd *cmd;
        struct sk_buff *skb;
@@ -2413,7 +2464,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
                return -ENOMEM;
 
        cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
-       cmd->suspend_opt = WMI_PDEV_SUSPEND;
+       cmd->suspend_opt = __cpu_to_le32(suspend_opt);
 
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
 }
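
With the suspend option now passed in by the caller, the old behaviour is preserved by supplying the value that used to be hard-coded. A minimal, hypothetical caller sketch (the wrapper name is illustrative; WMI_PDEV_SUSPEND is the constant removed from the body above):

static int example_pdev_suspend(struct ath10k *ar)
{
        /* Same effect as the previously hard-coded suspend_opt. */
        return ath10k_wmi_pdev_suspend_target(ar, WMI_PDEV_SUSPEND);
}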
@@ -3342,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
                ci->max_power         = ch->max_power;
                ci->reg_power         = ch->max_reg_power;
                ci->antenna_max       = ch->max_antenna_gain;
-               ci->antenna_max       = 0;
 
                /* mode & flags share storage */
                ci->mode              = ch->mode;
@@ -3411,25 +3461,41 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
        return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
 }
 
-int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
-                                 const struct wmi_bcn_tx_arg *arg)
+/* This function assumes the beacon is already DMA mapped */
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
 {
-       struct wmi_bcn_tx_cmd *cmd;
+       struct wmi_bcn_tx_ref_cmd *cmd;
        struct sk_buff *skb;
+       struct sk_buff *beacon = arvif->beacon;
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_hdr *hdr;
        int ret;
+       u16 fc;
 
-       skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->bcn_len);
+       skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
        if (!skb)
                return -ENOMEM;
 
-       cmd = (struct wmi_bcn_tx_cmd *)skb->data;
-       cmd->hdr.vdev_id  = __cpu_to_le32(arg->vdev_id);
-       cmd->hdr.tx_rate  = __cpu_to_le32(arg->tx_rate);
-       cmd->hdr.tx_power = __cpu_to_le32(arg->tx_power);
-       cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
-       memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
+       hdr = (struct ieee80211_hdr *)beacon->data;
+       fc = le16_to_cpu(hdr->frame_control);
+
+       cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+       cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+       cmd->data_len = __cpu_to_le32(beacon->len);
+       cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
+       cmd->msdu_id = 0;
+       cmd->frame_control = __cpu_to_le32(fc);
+       cmd->flags = 0;
+
+       if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
+               cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+       if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
+               cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+       ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+                                        ar->wmi.cmd->pdev_send_bcn_cmdid);
 
-       ret = ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
        if (ret)
                dev_kfree_skb(skb);
 
index 4b5e7d3d32b62ff221992c6175db1d53f718b91e..4fcc96aa9513b89a45e11fd9c2d8da79414204c9 100644 (file)
@@ -2277,7 +2277,6 @@ struct wmi_pdev_param_map {
        u32 bcnflt_stats_update_period;
        u32 pmf_qos;
        u32 arp_ac_override;
-       u32 arpdhcp_ac_override;
        u32 dcs;
        u32 ani_enable;
        u32 ani_poll_period;
@@ -3403,6 +3402,24 @@ struct wmi_bcn_tx_arg {
        const void *bcn;
 };
 
+enum wmi_bcn_tx_ref_flags {
+       WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+       WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+struct wmi_bcn_tx_ref_cmd {
+       __le32 vdev_id;
+       __le32 data_len;
+       /* physical address of the frame - dma pointer */
+       __le32 data_ptr;
+       /* id for host to track */
+       __le32 msdu_id;
+       /* frame ctrl to setup PPDU desc */
+       __le32 frame_control;
+       /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+       __le32 flags;
+} __packed;
+
 /* Beacon filter */
 #define WMI_BCN_FILTER_ALL   0 /* Filter all beacons */
 #define WMI_BCN_FILTER_NONE  1 /* Pass all beacons */
@@ -3859,6 +3876,12 @@ enum wmi_peer_smps_state {
        WMI_PEER_SMPS_DYNAMIC = 0x2
 };
 
+enum wmi_peer_chwidth {
+       WMI_PEER_CHWIDTH_20MHZ = 0,
+       WMI_PEER_CHWIDTH_40MHZ = 1,
+       WMI_PEER_CHWIDTH_80MHZ = 2,
+};
+
 enum wmi_peer_param {
        WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
        WMI_PEER_AMPDU      = 0x2,
@@ -4039,6 +4062,10 @@ struct wmi_chan_info_event {
        __le32 cycle_count;
 } __packed;
 
+struct wmi_peer_sta_kickout_event {
+       struct wmi_mac_addr peer_macaddr;
+} __packed;
+
 #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
 
 /* FIXME: empirically extrapolated */
@@ -4172,7 +4199,7 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
 int ath10k_wmi_connect_htc_service(struct ath10k *ar);
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
                                const struct wmi_channel_arg *);
-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
+int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
                                  u16 rd5g, u16 ctl2g, u16 ctl5g);
@@ -4219,8 +4246,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                               enum wmi_ap_ps_peer_param param_id, u32 value);
 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
                              const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
-                                 const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                        const struct wmi_pdev_set_wmm_params_arg *arg);
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
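
The new wmi_peer_sta_kickout_event carries only the peer MAC address. A hypothetical handler sketch showing how such an event would be parsed (the function name and length check are illustrative, not taken from this patch; ath10k_dbg and ATH10K_DBG_WMI appear elsewhere in the diff):

static void example_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
{
        struct wmi_peer_sta_kickout_event *ev;

        if (skb->len < sizeof(*ev))
                return; /* truncated event, ignore */

        ev = (struct wmi_peer_sta_kickout_event *)skb->data;
        ath10k_dbg(ATH10K_DBG_WMI, "peer sta kickout %pM\n",
                   ev->peer_macaddr.addr);
}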
index ef35da84f63bd23c107d9fa45da0f293c9afbc2c..4b18434ba697c20c048e457bcdbc1c788c41da9b 100644 (file)
@@ -751,6 +751,9 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
        bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
                        DMA_TO_DEVICE);
 
+       if (dma_mapping_error(ah->dev, bf->skbaddr))
+               return -ENOSPC;
+
        ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL, skb, bf->rates,
                               ARRAY_SIZE(bf->rates));
 
index 4ee01f654235478ea3510847d6f207593541573f..afb23b3cc7be64d6b0ae8f79d46a3a9c9d07eab3 100644 (file)
@@ -681,6 +681,7 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
        survey->channel = conf->chandef.chan;
        survey->noise = ah->ah_noise_floor;
        survey->filled = SURVEY_INFO_NOISE_DBM |
+                       SURVEY_INFO_IN_USE |
                        SURVEY_INFO_CHANNEL_TIME |
                        SURVEY_INFO_CHANNEL_TIME_BUSY |
                        SURVEY_INFO_CHANNEL_TIME_RX |
index fd4c89df67e1658ae404686a911fc62607cee1cd..c2c6f460495859ae3517c4f20daa2c15caebdde0 100644 (file)
@@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
        if (nw_type & ADHOC_NETWORK) {
                ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
                           nw_type & ADHOC_CREATOR ? "creator" : "joiner");
-               cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+               cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
                cfg80211_put_bss(ar->wiphy, bss);
                return;
        }
@@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
        }
 
        if (vif->nw_type & ADHOC_NETWORK) {
-               if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
+               if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
                        ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
                                   "%s: ath6k not in ibss mode\n", __func__);
-                       return;
-               }
-               memset(bssid, 0, ETH_ALEN);
-               cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
                return;
        }
 
@@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
        struct ath6kl_vif *vif = netdev_priv(dev);
        u16 interval;
        int ret, rssi_thold;
+       int n_match_sets = request->n_match_sets;
+
+       /*
+        * If there's a matchset w/o an SSID, then assume it's just for
+        * the RSSI (nothing else is currently supported) and ignore it.
+        * The device only supports a global RSSI filter that we set below.
+        */
+       if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+               n_match_sets = 0;
 
        if (ar->state != ATH6KL_STATE_ON)
                return -EIO;
@@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
        ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
                                      request->n_ssids,
                                      request->match_sets,
-                                     request->n_match_sets);
+                                     n_match_sets);
        if (ret < 0)
                return ret;
 
-       if (!request->n_match_sets) {
+       if (!n_match_sets) {
                ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
                                               ALL_BSS_FILTER, 0);
                if (ret < 0)
@@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
 
        if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
                     ar->fw_capabilities)) {
-               if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+               if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
                        rssi_thold = 0;
-               else if (request->rssi_thold < -127)
+               else if (request->min_rssi_thold < -127)
                        rssi_thold = -127;
                else
-                       rssi_thold = request->rssi_thold;
+                       rssi_thold = request->min_rssi_thold;
 
                ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
                                                     rssi_thold);
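
The scheduled-scan change above switches to min_rssi_thold and clamps it to what the firmware accepts. A condensed restatement of that clamping as a standalone helper (the helper name is illustrative):

/* 0 disables the filter; the firmware accepts nothing below -127 dBm. */
static int example_clamp_rssi_thold(s32 min_rssi_thold)
{
        if (min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
                return 0;
        if (min_rssi_thold < -127)
                return -127;
        return min_rssi_thold;
}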
index f38ff6a6255e7be58c4687a2ff85115c8250225c..56c3fd5cef65a07e915d63d8c0dc24727a87406b 100644 (file)
@@ -24,7 +24,7 @@
 /* constants */
 #define TX_URB_COUNT            32
 #define RX_URB_COUNT            32
-#define ATH6KL_USB_RX_BUFFER_SIZE  1700
+#define ATH6KL_USB_RX_BUFFER_SIZE  4096
 
 /* tx/rx pipes for usb */
 enum ATH6KL_USB_PIPE_ID {
@@ -481,8 +481,8 @@ static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
         *              ATH6KL_USB_RX_BUFFER_SIZE);
         */
 
-       ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
-           ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
+       ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
+
        ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
                                       ATH6KL_USB_RX_BUFFER_SIZE);
 }
index 4f16d79c9eb187566ff878e9ac42c163cd58312a..8b4ce28e3ce8f51f7cda070364394a117d5aef66 100644 (file)
@@ -914,7 +914,7 @@ ath6kl_get_regpair(u16 regdmn)
                return NULL;
 
        for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
-               if (regDomainPairs[i].regDmnEnum == regdmn)
+               if (regDomainPairs[i].reg_domain == regdmn)
                        return &regDomainPairs[i];
        }
 
@@ -954,7 +954,7 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
                country = ath6kl_regd_find_country_by_rd((u16) reg_code);
                if (regpair)
                        ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
-                                  regpair->regDmnEnum);
+                                  regpair->reg_domain);
                else
                        ath6kl_warn("Regpair not found reg_code 0x%0x\n",
                                    reg_code);
index 7b96b3e5712db1ce37da88a11360ff584ba0b924..8fcc029a76a60690ba542e49ddb94bb855bda3ba 100644 (file)
@@ -120,18 +120,6 @@ config ATH9K_WOW
          This option enables Wake on Wireless LAN support for certain cards.
          Currently, AR9462 is supported.
 
-config ATH9K_LEGACY_RATE_CONTROL
-       bool "Atheros ath9k rate control"
-       depends on ATH9K
-       default n
-       ---help---
-         Say Y, if you want to use the ath9k specific rate control
-         module instead of minstrel_ht. Be warned that there are various
-         issues with the ath9k RC and minstrel is a more robust algorithm.
-         Note that even if this option is selected, "ath9k_rate_control"
-         has to be passed to mac80211 using the module parameter,
-         ieee80211_default_rc_algo.
-
 config ATH9K_RFKILL
        bool "Atheros ath9k rfkill support" if EXPERT
        depends on ATH9K
index a40e5c5d7418c94fbde733042f2b421b968d63e6..8e1c7b0fe76c178567fb2c03e70bd8d4e303f337 100644 (file)
@@ -8,7 +8,6 @@ ath9k-y +=      beacon.o \
                antenna.o
 
 ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
 ath9k-$(CONFIG_ATH9K_PCI) += pci.o
 ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
 ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
@@ -52,7 +51,9 @@ ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \
 obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
 
 obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
-ath9k_common-y:=       common.o
+ath9k_common-y:=       common.o \
+                       common-init.o \
+                       common-beacon.o
 
 ath9k_htc-y += htc_hst.o \
                hif_usb.o \
index 2dff2765769bb339eea79b05877414b6b9bdc0f9..a0398fe3eb284f94e650f4541e18bfcd6713e5ef 100644 (file)
@@ -39,6 +39,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
                .name = "qca955x_wmac",
                .driver_data = AR9300_DEVID_QCA955X,
        },
+       {
+               .name = "qca953x_wmac",
+               .driver_data = AR9300_DEVID_AR953X,
+       },
        {},
 };
 
@@ -82,6 +86,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
        int irq;
        int ret = 0;
        struct ath_hw *ah;
+       struct ath_common *common;
        char hw_name[64];
 
        if (!dev_get_platdata(&pdev->dev)) {
@@ -124,9 +129,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
        sc->mem = mem;
        sc->irq = irq;
 
-       /* Will be cleared in ath9k_start() */
-       set_bit(SC_OP_INVALID, &sc->sc_flags);
-
        ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
        if (ret) {
                dev_err(&pdev->dev, "request_irq failed\n");
@@ -144,6 +146,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
        wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
                   hw_name, (unsigned long)mem, irq);
 
+       common = ath9k_hw_common(sc->sc_ah);
+       /* Will be cleared in ath9k_start() */
+       set_bit(ATH_OP_INVALID, &common->op_flags);
        return 0;
 
  err_irq:
index d28923b7435b257f13a91e3f8896115c30adb1c9..6d47783f2e5b7ecfd4343c29c8ad4bcbff7252aa 100644 (file)
@@ -176,16 +176,26 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
        if (ah->opmode == NL80211_IFTYPE_STATION &&
            BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_HIGH)
                weak_sig = true;
-
        /*
-        * OFDM Weak signal detection is always enabled for AP mode.
+        * Newer chipsets are better at dealing with high PHY error counts -
+        * keep weak signal detection enabled when no RSSI threshold is
+        * available to determine if it is needed (mode != STA)
         */
-       if (ah->opmode != NL80211_IFTYPE_AP &&
-           aniState->ofdmWeakSigDetect != weak_sig) {
-               ath9k_hw_ani_control(ah,
-                                    ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
-                                    entry_ofdm->ofdm_weak_signal_on);
-       }
+       else if (AR_SREV_9300_20_OR_LATER(ah) &&
+                ah->opmode != NL80211_IFTYPE_STATION)
+               weak_sig = true;
+
+       /* Older chipsets are more sensitive to high PHY error counts */
+       else if (!AR_SREV_9300_20_OR_LATER(ah) &&
+                aniState->ofdmNoiseImmunityLevel >= 8)
+               weak_sig = false;
+
+       if (aniState->ofdmWeakSigDetect != weak_sig)
+               ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
+                                    weak_sig);
+
+       if (!AR_SREV_9300_20_OR_LATER(ah))
+               return;
 
        if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
                ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
@@ -308,17 +318,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
        BUG_ON(aniState == NULL);
        ah->stats.ast_ani_reset++;
 
-       /* only allow a subset of functions in AP mode */
-       if (ah->opmode == NL80211_IFTYPE_AP) {
-               if (IS_CHAN_2GHZ(chan)) {
-                       ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
-                                           ATH9K_ANI_FIRSTEP_LEVEL);
-                       if (AR_SREV_9300_20_OR_LATER(ah))
-                               ah->ani_function |= ATH9K_ANI_MRC_CCK;
-               } else
-                       ah->ani_function = 0;
-       }
-
        ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
                         aniState->ofdmNoiseImmunityLevel);
        cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
@@ -483,10 +482,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
 
        ath_dbg(common, ANI, "Initialize ANI\n");
 
-       ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
-       ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
-       ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
-       ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+       if (AR_SREV_9300_20_OR_LATER(ah)) {
+               ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+               ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
+               ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
+               ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
+       } else {
+               ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
+               ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
+               ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
+               ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
+       }
 
        ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
        ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
index 21e7b83c3f6ac85e881b49b315256e931728ec7e..c40965b4c1e2982859cc2fff8f1e7f4747f376e7 100644 (file)
 /* units are errors per second */
 #define ATH9K_ANI_OFDM_TRIG_HIGH           3500
 #define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
+#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD       500
 
 #define ATH9K_ANI_OFDM_TRIG_LOW           400
 #define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
+#define ATH9K_ANI_OFDM_TRIG_LOW_OLD       200
 
 #define ATH9K_ANI_CCK_TRIG_HIGH           600
+#define ATH9K_ANI_CCK_TRIG_HIGH_OLD       200
 #define ATH9K_ANI_CCK_TRIG_LOW            300
+#define ATH9K_ANI_CCK_TRIG_LOW_OLD        100
 
 #define ATH9K_ANI_SPUR_IMMUNE_LVL         3
 #define ATH9K_ANI_FIRSTEP_LVL             2
index ff415e863ee9cc5140879ca008df366b8f1ad967..3b3e91057a4cbdb6860006ce0e19a22c3b835774 100644 (file)
@@ -26,10 +26,6 @@ static const int firstep_table[] =
 /* level:  0   1   2   3   4   5   6   7   8  */
        { -4, -2,  0,  2,  4,  6,  8, 10, 12 }; /* lvl 0-8, default 2 */
 
-static const int cycpwrThr1_table[] =
-/* level:  0   1   2   3   4   5   6   7   8  */
-       { -6, -4, -2,  0,  2,  4,  6,  8 };     /* lvl 0-7, default 3 */
-
 /*
  * register values to turn OFDM weak signal detection OFF
  */
@@ -921,7 +917,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_channel *chan = ah->curchan;
        struct ar5416AniState *aniState = &ah->ani;
-       s32 value, value2;
+       s32 value;
 
        switch (cmd & ah->ani_function) {
        case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
@@ -1008,42 +1004,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
        case ATH9K_ANI_FIRSTEP_LEVEL:{
                u32 level = param;
 
-               if (level >= ARRAY_SIZE(firstep_table)) {
-                       ath_dbg(common, ANI,
-                               "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
-                               level, ARRAY_SIZE(firstep_table));
-                       return false;
-               }
-
-               /*
-                * make register setting relative to default
-                * from INI file & cap value
-                */
-               value = firstep_table[level] -
-                       firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
-                       aniState->iniDef.firstep;
-               if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
-                       value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
-               if (value > ATH9K_SIG_FIRSTEP_SETTING_MAX)
-                       value = ATH9K_SIG_FIRSTEP_SETTING_MAX;
+               value = level * 2;
                REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
-                             AR_PHY_FIND_SIG_FIRSTEP,
-                             value);
-               /*
-                * we need to set first step low register too
-                * make register setting relative to default
-                * from INI file & cap value
-                */
-               value2 = firstep_table[level] -
-                        firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
-                        aniState->iniDef.firstepLow;
-               if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
-                       value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
-               if (value2 > ATH9K_SIG_FIRSTEP_SETTING_MAX)
-                       value2 = ATH9K_SIG_FIRSTEP_SETTING_MAX;
-
+                             AR_PHY_FIND_SIG_FIRSTEP, value);
                REG_RMW_FIELD(ah, AR_PHY_FIND_SIG_LOW,
-                             AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
+                             AR_PHY_FIND_SIG_FIRSTEP_LOW, value);
 
                if (level != aniState->firstepLevel) {
                        ath_dbg(common, ANI,
@@ -1060,7 +1025,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
                                aniState->firstepLevel,
                                level,
                                ATH9K_ANI_FIRSTEP_LVL,
-                               value2,
+                               value,
                                aniState->iniDef.firstepLow);
                        if (level > aniState->firstepLevel)
                                ah->stats.ast_ani_stepup++;
@@ -1073,41 +1038,13 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
        case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
                u32 level = param;
 
-               if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
-                       ath_dbg(common, ANI,
-                               "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
-                               level, ARRAY_SIZE(cycpwrThr1_table));
-                       return false;
-               }
-               /*
-                * make register setting relative to default
-                * from INI file & cap value
-                */
-               value = cycpwrThr1_table[level] -
-                       cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
-                       aniState->iniDef.cycpwrThr1;
-               if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
-                       value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
-               if (value > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
-                       value = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
+               value = (level + 1) * 2;
                REG_RMW_FIELD(ah, AR_PHY_TIMING5,
-                             AR_PHY_TIMING5_CYCPWR_THR1,
-                             value);
+                             AR_PHY_TIMING5_CYCPWR_THR1, value);
 
-               /*
-                * set AR_PHY_EXT_CCA for extension channel
-                * make register setting relative to default
-                * from INI file & cap value
-                */
-               value2 = cycpwrThr1_table[level] -
-                        cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
-                        aniState->iniDef.cycpwrThr1Ext;
-               if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
-                       value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
-               if (value2 > ATH9K_SIG_SPUR_IMM_SETTING_MAX)
-                       value2 = ATH9K_SIG_SPUR_IMM_SETTING_MAX;
-               REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
-                             AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
+               if (IS_CHAN_HT40(ah->curchan))
+                       REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
+                                     AR_PHY_EXT_TIMING5_CYCPWR_THR1, value);
 
                if (level != aniState->spurImmunityLevel) {
                        ath_dbg(common, ANI,
@@ -1124,7 +1061,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
                                aniState->spurImmunityLevel,
                                level,
                                ATH9K_ANI_SPUR_IMMUNE_LVL,
-                               value2,
+                               value,
                                aniState->iniDef.cycpwrThr1Ext);
                        if (level > aniState->spurImmunityLevel)
                                ah->stats.ast_ani_spurup++;
index a352128c40ad370600df154ad868f48c5fe7854a..ac8301ef52420a00009f4d78b82e700987ea7508 100644 (file)
 #define MAX_MEASUREMENT        MAX_IQCAL_MEASUREMENT
 #define MAX_MAG_DELTA  11
 #define MAX_PHS_DELTA  10
+#define MAXIQCAL        3
 
 struct coeff {
-       int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
-       int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
+       int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+       int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
        int iqc_coeff[2];
 };
 
@@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
        if (i2_m_q2_a0_d1 > 0x800)
                i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
 
-       if (i2_p_q2_a0_d1 > 0x1000)
-               i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
-
        if (iq_corr_a0_d1 > 0x800)
                iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
 
@@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
        if (q_q_coff > 63)
                q_q_coff = 63;
 
-       iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+       iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
 
        ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
                chain_idx, iqc_coeff[0]);
@@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
        if (q_q_coff > 63)
                q_q_coff = 63;
 
-       iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+       iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
 
        ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
                chain_idx, iqc_coeff[1]);
@@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
        return true;
 }
 
-static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
+static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
+                                    int nmeasurement,
                                     int max_delta)
 {
        int mp_max = -64, max_idx = 0;
@@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
 
        /* find min/max mismatch across all calibrated gains */
        for (i = 0; i < nmeasurement; i++) {
-               if (mp_coeff[i] > mp_max) {
-                       mp_max = mp_coeff[i];
+               if (mp_coeff[i][0] > mp_max) {
+                       mp_max = mp_coeff[i][0];
                        max_idx = i;
-               } else if (mp_coeff[i] < mp_min) {
-                       mp_min = mp_coeff[i];
+               } else if (mp_coeff[i][0] < mp_min) {
+                       mp_min = mp_coeff[i][0];
                        min_idx = i;
                }
        }
 
        /* find average (exclude max abs value) */
        for (i = 0; i < nmeasurement; i++) {
-               if ((abs(mp_coeff[i]) < abs(mp_max)) ||
-                   (abs(mp_coeff[i]) < abs(mp_min))) {
-                       mp_avg += mp_coeff[i];
+               if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
+                   (abs(mp_coeff[i][0]) < abs(mp_min))) {
+                       mp_avg += mp_coeff[i][0];
                        mp_count++;
                }
        }
@@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
        if (mp_count)
                mp_avg /= mp_count;
        else
-               mp_avg = mp_coeff[nmeasurement - 1];
+               mp_avg = mp_coeff[nmeasurement - 1][0];
 
        /* detect outlier */
        if (abs(mp_max - mp_min) > max_delta) {
@@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
                else
                        outlier_idx = min_idx;
 
-               mp_coeff[outlier_idx] = mp_avg;
+               mp_coeff[outlier_idx][0] = mp_avg;
        }
 }
 
-static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
-                                                struct coeff *coeff,
-                                                bool is_reusable)
+static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
+                                                 struct coeff *coeff,
+                                                 bool is_reusable)
 {
        int i, im, nmeasurement;
+       int magnitude, phase;
        u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
        struct ath9k_hw_cal_data *caldata = ah->caldata;
 
@@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
                if (nmeasurement > MAX_MEASUREMENT)
                        nmeasurement = MAX_MEASUREMENT;
 
-               /* detect outlier only if nmeasurement > 1 */
-               if (nmeasurement > 1) {
-                       /* Detect magnitude outlier */
-                       ar9003_hw_detect_outlier(coeff->mag_coeff[i],
-                                       nmeasurement, MAX_MAG_DELTA);
-
-                       /* Detect phase outlier */
-                       ar9003_hw_detect_outlier(coeff->phs_coeff[i],
-                                       nmeasurement, MAX_PHS_DELTA);
+               /*
+                * Skip normal outlier detection for AR9550.
+                */
+               if (!AR_SREV_9550(ah)) {
+                       /* detect outlier only if nmeasurement > 1 */
+                       if (nmeasurement > 1) {
+                               /* Detect magnitude outlier */
+                               ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+                                                        nmeasurement,
+                                                        MAX_MAG_DELTA);
+
+                               /* Detect phase outlier */
+                               ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+                                                        nmeasurement,
+                                                        MAX_PHS_DELTA);
+                       }
                }
 
                for (im = 0; im < nmeasurement; im++) {
+                       magnitude = coeff->mag_coeff[i][im][0];
+                       phase = coeff->phs_coeff[i][im][0];
 
-                       coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
-                               ((coeff->phs_coeff[i][im] & 0x7f) << 7);
+                       coeff->iqc_coeff[0] =
+                               (phase & 0x7f) | ((magnitude & 0x7f) << 7);
 
                        if ((im % 2) == 0)
                                REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
        return true;
 }
 
-static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
+                                   struct coeff *coeff,
+                                   int i, int nmeasurement)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       int im, ix, iy, temp;
+
+       for (im = 0; im < nmeasurement; im++) {
+               for (ix = 0; ix < MAXIQCAL - 1; ix++) {
+                       for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
+                               if (coeff->mag_coeff[i][im][iy] <
+                                   coeff->mag_coeff[i][im][ix]) {
+                                       temp = coeff->mag_coeff[i][im][ix];
+                                       coeff->mag_coeff[i][im][ix] =
+                                               coeff->mag_coeff[i][im][iy];
+                                       coeff->mag_coeff[i][im][iy] = temp;
+                               }
+                               if (coeff->phs_coeff[i][im][iy] <
+                                   coeff->phs_coeff[i][im][ix]) {
+                                       temp = coeff->phs_coeff[i][im][ix];
+                                       coeff->phs_coeff[i][im][ix] =
+                                               coeff->phs_coeff[i][im][iy];
+                                       coeff->phs_coeff[i][im][iy] = temp;
+                               }
+                       }
+               }
+               coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
+               coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
+
+               ath_dbg(common, CALIBRATE,
+                       "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
+                       i, im,
+                       coeff->mag_coeff[i][im][0],
+                       coeff->phs_coeff[i][im][0]);
+       }
+}
+
+static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
+                                   struct coeff *coeff,
+                                   int iqcal_idx,
+                                   int nmeasurement)
+{
+       int i;
+
+       if ((iqcal_idx + 1) != MAXIQCAL)
+               return false;
+
+       for (i = 0; i < AR9300_MAX_CHAINS; i++)
+               __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
+
+       return true;
+}
+
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
+                                         int iqcal_idx,
+                                         bool is_reusable)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
                AR_PHY_CHAN_INFO_TAB_1,
                AR_PHY_CHAN_INFO_TAB_2,
        };
-       struct coeff coeff;
+       static struct coeff coeff;
        s32 iq_res[6];
        int i, im, j;
-       int nmeasurement;
+       int nmeasurement = 0;
+       bool outlier_detect = true;
 
        for (i = 0; i < AR9300_MAX_CHAINS; i++) {
                if (!(ah->txchainmask & (1 << i)))
@@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
                                goto tx_iqcal_fail;
                        }
 
-                       coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
-                       coeff.phs_coeff[i][im] =
+                       coeff.phs_coeff[i][im][iqcal_idx] =
+                               coeff.iqc_coeff[0] & 0x7f;
+                       coeff.mag_coeff[i][im][iqcal_idx] =
                                (coeff.iqc_coeff[0] >> 7) & 0x7f;
 
-                       if (coeff.mag_coeff[i][im] > 63)
-                               coeff.mag_coeff[i][im] -= 128;
-                       if (coeff.phs_coeff[i][im] > 63)
-                               coeff.phs_coeff[i][im] -= 128;
+                       if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
+                               coeff.mag_coeff[i][im][iqcal_idx] -= 128;
+                       if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
+                               coeff.phs_coeff[i][im][iqcal_idx] -= 128;
                }
        }
-       ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
+
+       if (AR_SREV_9550(ah))
+               outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
+                                                        iqcal_idx, nmeasurement);
+       if (outlier_detect)
+               ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
 
        return;
 
@@ -1409,7 +1481,7 @@ skip_tx_iqcal:
        }
 
        if (txiqcal_done)
-               ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+               ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
        else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
                ar9003_hw_tx_iq_cal_reload(ah);
 
@@ -1455,14 +1527,38 @@ skip_tx_iqcal:
        return true;
 }
 
+static bool do_ar9003_agc_cal(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       bool status;
+
+       REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+                 REG_READ(ah, AR_PHY_AGC_CONTROL) |
+                 AR_PHY_AGC_CONTROL_CAL);
+
+       status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+                              AR_PHY_AGC_CONTROL_CAL,
+                              0, AH_WAIT_TIMEOUT);
+       if (!status) {
+               ath_dbg(common, CALIBRATE,
+                       "offset calibration failed to complete in %d ms; noisy environment?\n",
+                       AH_WAIT_TIMEOUT / 1000);
+               return false;
+       }
+
+       return true;
+}
+
 static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
                                   struct ath9k_channel *chan)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_cal_data *caldata = ah->caldata;
        bool txiqcal_done = false;
-       bool is_reusable = true, status = true;
+       bool status = true;
        bool run_agc_cal = false, sep_iq_cal = false;
+       int i = 0;
 
        /* Use chip chainmask only for calibration */
        ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
         * AGC calibration. Specifically, AR9550 in SoC chips.
         */
        if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
-               txiqcal_done = true;
+               if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+                                  AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
+                       txiqcal_done = true;
+               } else {
+                       txiqcal_done = false;
+               }
                run_agc_cal = true;
        } else {
                sep_iq_cal = true;
@@ -1512,27 +1613,37 @@ skip_tx_iqcal:
                if (AR_SREV_9330_11(ah))
                        ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
 
-               /* Calibrate the AGC */
-               REG_WRITE(ah, AR_PHY_AGC_CONTROL,
-                         REG_READ(ah, AR_PHY_AGC_CONTROL) |
-                         AR_PHY_AGC_CONTROL_CAL);
-
-               /* Poll for offset calibration complete */
-               status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
-                                      AR_PHY_AGC_CONTROL_CAL,
-                                      0, AH_WAIT_TIMEOUT);
-       }
+               /*
+                * For non-AR9550 chips, we just trigger AGC calibration
+                * in the HW, poll for completion and then process
+                * the results.
+                *
+                * For AR955x, we run it multiple times and use
+                * median IQ correction.
+                */
+               if (!AR_SREV_9550(ah)) {
+                       status = do_ar9003_agc_cal(ah);
+                       if (!status)
+                               return false;
 
-       if (!status) {
-               ath_dbg(common, CALIBRATE,
-                       "offset calibration failed to complete in %d ms; noisy environment?\n",
-                       AH_WAIT_TIMEOUT / 1000);
-               return false;
+                       if (txiqcal_done)
+                               ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
+               } else {
+                       if (!txiqcal_done) {
+                               status = do_ar9003_agc_cal(ah);
+                               if (!status)
+                                       return false;
+                       } else {
+                               for (i = 0; i < MAXIQCAL; i++) {
+                                       status = do_ar9003_agc_cal(ah);
+                                       if (!status)
+                                               return false;
+                                       ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
+                               }
+                       }
+               }
        }
 
-       if (txiqcal_done)
-               ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-
        /* Revert chainmask to runtime parameters */
        ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
 
index b8daff78b9d124ed8790223d49ee8871d831b4f1..235053ba773765392d08427916bfb398ff52d053 100644 (file)
@@ -23,8 +23,8 @@
 #define COMP_HDR_LEN 4
 #define COMP_CKSUM_LEN 2
 
-#define LE16(x) __constant_cpu_to_le16(x)
-#define LE32(x) __constant_cpu_to_le32(x)
+#define LE16(x) cpu_to_le16(x)
+#define LE32(x) cpu_to_le32(x)
 
 /* Local defines to distinguish between extension and control CTL's */
 #define EXT_ADDITIVE (0x8000)
@@ -4792,43 +4792,54 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
 
 tempslope:
        if (AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
+               u8 txmask = (eep->baseEepHeader.txrxMask & 0xf0) >> 4;
+
                /*
                 * AR955x has tempSlope register for each chain.
                 * Check whether temp_compensation feature is enabled or not.
                 */
                if (eep->baseEepHeader.featureEnable & 0x1) {
                        if (frequency < 4000) {
-                               REG_RMW_FIELD(ah, AR_PHY_TPC_19,
-                                             AR_PHY_TPC_19_ALPHA_THERM,
-                                             eep->base_ext2.tempSlopeLow);
-                               REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
-                                             AR_PHY_TPC_19_ALPHA_THERM,
-                                             temp_slope);
-                               REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
-                                             AR_PHY_TPC_19_ALPHA_THERM,
-                                             eep->base_ext2.tempSlopeHigh);
+                               if (txmask & BIT(0))
+                                       REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+                                                     AR_PHY_TPC_19_ALPHA_THERM,
+                                                     eep->base_ext2.tempSlopeLow);
+                               if (txmask & BIT(1))
+                                       REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+                                                     AR_PHY_TPC_19_ALPHA_THERM,
+                                                     temp_slope);
+                               if (txmask & BIT(2))
+                                       REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+                                                     AR_PHY_TPC_19_ALPHA_THERM,
+                                                     eep->base_ext2.tempSlopeHigh);
                        } else {
-                               REG_RMW_FIELD(ah, AR_PHY_TPC_19,
-                                             AR_PHY_TPC_19_ALPHA_THERM,
-                                             temp_slope);
-                               REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
-                                             AR_PHY_TPC_19_ALPHA_THERM,
-                                             temp_slope1);
-                               REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
-                                             AR_PHY_TPC_19_ALPHA_THERM,
-                                             temp_slope2);
+                               if (txmask & BIT(0))
+                                       REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+                                                     AR_PHY_TPC_19_ALPHA_THERM,
+                                                     temp_slope);
+                               if (txmask & BIT(1))
+                                       REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+                                                     AR_PHY_TPC_19_ALPHA_THERM,
+                                                     temp_slope1);
+                               if (txmask & BIT(2))
+                                       REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+                                                     AR_PHY_TPC_19_ALPHA_THERM,
+                                                     temp_slope2);
                        }
                } else {
                        /*
                         * If temp compensation is not enabled,
                         * set all registers to 0.
                         */
-                       REG_RMW_FIELD(ah, AR_PHY_TPC_19,
-                                     AR_PHY_TPC_19_ALPHA_THERM, 0);
-                       REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
-                                     AR_PHY_TPC_19_ALPHA_THERM, 0);
-                       REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
-                                     AR_PHY_TPC_19_ALPHA_THERM, 0);
+                       if (txmask & BIT(0))
+                               REG_RMW_FIELD(ah, AR_PHY_TPC_19,
+                                             AR_PHY_TPC_19_ALPHA_THERM, 0);
+                       if (txmask & BIT(1))
+                               REG_RMW_FIELD(ah, AR_PHY_TPC_19_B1,
+                                             AR_PHY_TPC_19_ALPHA_THERM, 0);
+                       if (txmask & BIT(2))
+                               REG_RMW_FIELD(ah, AR_PHY_TPC_19_B2,
+                                             AR_PHY_TPC_19_ALPHA_THERM, 0);
                }
        } else {
                REG_RMW_FIELD(ah, AR_PHY_TPC_19,
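
The eeprom change above gates each chain's ALPHA_THERM write on the TX chain mask, which is stored in the high nibble of txrxMask. A condensed restatement of that per-chain test (the helper name is illustrative):

static bool example_tx_chain_enabled(u8 txrxmask, int chain)
{
        u8 txmask = (txrxmask & 0xf0) >> 4;

        return !!(txmask & BIT(chain));
}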
index 09facba1dc6d8069583f0eb0fdf41e59d96a15f8..8927fc34d84c2f009e192dc9a37cb140be941e54 100644 (file)
@@ -868,10 +868,6 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
 
        if (IS_CHAN_A_FAST_CLOCK(ah, chan))
                rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
-       if (IS_CHAN_QUARTER_RATE(chan))
-               rfMode |= AR_PHY_MODE_QUARTER;
-       if (IS_CHAN_HALF_RATE(chan))
-               rfMode |= AR_PHY_MODE_HALF;
 
        if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
                REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
index b5ac32cfbeb8ec496f095c9c50baf0947635e012..44d74495c4de1465dbf42cba08b6bff03090023a 100644 (file)
@@ -30,7 +30,6 @@
 #include "spectral.h"
 
 struct ath_node;
-struct ath_rate_table;
 
 extern struct ieee80211_ops ath9k_ops;
 extern int ath9k_modparam_nohwcrypt;
@@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define IS_CCK_RATE(rate)  ((rate >= 0x18) && (rate <= 0x1e))
 #define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
 
+enum {
+       WLAN_RC_PHY_OFDM,
+       WLAN_RC_PHY_CCK,
+};
+
 struct ath_txq {
        int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
        u32 axq_qnum; /* ath9k hardware queue number */
@@ -399,21 +403,10 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
 #define        ATH_BCBUF                       8
 #define ATH_DEFAULT_BINTVAL            100 /* TU */
 #define ATH_DEFAULT_BMISS_LIMIT        10
-#define IEEE80211_MS_TO_TU(x)           (((x) * 1000) / 1024)
 
 #define TSF_TO_TU(_h,_l) \
        ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
-struct ath_beacon_config {
-       int beacon_interval;
-       u16 listen_interval;
-       u16 dtim_period;
-       u16 bmiss_timeout;
-       u8 dtim_count;
-       bool enable_beacon;
-       bool ibss_creator;
-};
-
 struct ath_beacon {
        enum {
                OK,             /* no change needed */
@@ -423,11 +416,9 @@ struct ath_beacon {
 
        u32 beaconq;
        u32 bmisscnt;
-       u32 bc_tstamp;
        struct ieee80211_vif *bslot[ATH_BCBUF];
        int slottime;
        int slotupdate;
-       struct ath9k_tx_queue_info beacon_qi;
        struct ath_descdma bdma;
        struct ath_txq *cabq;
        struct list_head bbuf;
@@ -442,7 +433,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
 void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
 void ath9k_set_beacon(struct ath_softc *sc);
-bool ath9k_csa_is_finished(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_csa_update(struct ath_softc *sc);
 
 /*******************/
 /* Link Monitoring */
@@ -693,15 +685,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
 #define MAX_GTT_CNT             5
 
-enum sc_op_flags {
-       SC_OP_INVALID,
-       SC_OP_BEACONS,
-       SC_OP_ANI_RUN,
-       SC_OP_PRIM_STA_VIF,
-       SC_OP_HW_RESET,
-       SC_OP_SCANNING,
-};
-
 /* Powersave flags */
 #define PS_WAIT_FOR_BEACON        BIT(0)
 #define PS_WAIT_FOR_CAB           BIT(1)
@@ -731,7 +714,6 @@ struct ath_softc {
        struct completion paprd_complete;
        wait_queue_head_t tx_wait;
 
-       unsigned long sc_flags;
        unsigned long driver_data;
 
        u8 gtt_cnt;
@@ -748,7 +730,6 @@ struct ath_softc {
        struct ath_rx rx;
        struct ath_tx tx;
        struct ath_beacon beacon;
-       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 
 #ifdef CONFIG_MAC80211_LEDS
        bool led_registered;
@@ -757,7 +738,6 @@ struct ath_softc {
 #endif
 
        struct ath9k_hw_cal_data caldata;
-       int last_rssi;
 
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath9k_debug debug;
@@ -774,7 +754,6 @@ struct ath_softc {
 #endif
 
        struct ath_descdma txsdma;
-       struct ieee80211_vif *csa_vif;
 
        struct ath_ant_comb ant_comb;
        u8 ant_tx, ant_rx;
index 2e8bba0eb361b5846af5666a87854e002f24b20f..471e0f624e8116e07e5bda86744ba422ca8e4fea 100644 (file)
@@ -80,7 +80,7 @@ static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
        u8 chainmask = ah->txchainmask;
        u8 rate = 0;
 
-       sband = &sc->sbands[common->hw->conf.chandef.chan->band];
+       sband = &common->sbands[common->hw->conf.chandef.chan->band];
        rate = sband->bitrates[rateidx].hw_value;
        if (vif->bss_conf.use_short_preamble)
                rate |= sband->bitrates[rateidx].hw_value_short;
@@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
                (unsigned long long)tsfadjust, avp->av_bslot);
 }
 
-bool ath9k_csa_is_finished(struct ath_softc *sc)
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
 {
-       struct ieee80211_vif *vif;
-
-       vif = sc->csa_vif;
        if (!vif || !vif->csa_active)
                return false;
 
@@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
                return false;
 
        ieee80211_csa_finish(vif);
-
-       sc->csa_vif = NULL;
        return true;
 }
 
+static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct ath_softc *sc = data;
+       ath9k_csa_is_finished(sc, vif);
+}
+
+void ath9k_csa_update(struct ath_softc *sc)
+{
+       ieee80211_iterate_active_interfaces(sc->hw,
+                                           IEEE80211_IFACE_ITER_NORMAL,
+                                           ath9k_csa_update_vif,
+                                           sc);
+}
+
 void ath9k_beacon_tasklet(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
@@ -319,7 +328,7 @@ void ath9k_beacon_tasklet(unsigned long data)
        bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
        int slot;
 
-       if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+       if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
                ath_dbg(common, RESET,
                        "reset work is pending, skip beaconing now\n");
                return;
@@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
                return;
        }
 
-       /* EDMA devices check that in the tx completion function. */
-       if (!edma && ath9k_csa_is_finished(sc))
-               return;
-
        slot = ath9k_beacon_choose_slot(sc);
        vif = sc->beacon.bslot[slot];
 
+       /* EDMA devices check that in the tx completion function. */
+       if (!edma && ath9k_csa_is_finished(sc, vif))
+               return;
+
        if (!vif || !vif->bss_conf.enable_beacon)
                return;
 
@@ -438,33 +447,6 @@ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt,
        ath9k_hw_enable_interrupts(ah);
 }
 
-/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
-static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
-{
-       u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
-
-       tsf_mod = tsf & (BIT(10) - 1);
-       tsf_hi = tsf >> 32;
-       tsf_lo = ((u32) tsf) >> 10;
-
-       mod_hi = tsf_hi % div_tu;
-       mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
-
-       return (mod_lo << 10) | tsf_mod;
-}
-
-static u32 ath9k_get_next_tbtt(struct ath_softc *sc, u64 tsf,
-                              unsigned int interval)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       unsigned int offset;
-
-       tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
-       offset = ath9k_mod_tsf64_tu(tsf, interval);
-
-       return (u32) tsf + TU_TO_USEC(interval) - offset;
-}
-
 /*
  * For multi-bss ap support beacons are either staggered evenly over N slots or
  * burst together.  For the former arrange for the SWBA to be delivered for each
@@ -474,115 +456,18 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc,
                                   struct ath_beacon_config *conf)
 {
        struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       u32 nexttbtt, intval;
-
-       /* NB: the beacon interval is kept internally in TU's */
-       intval = TU_TO_USEC(conf->beacon_interval);
-       intval /= ATH_BCBUF;
-       nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
-                                      conf->beacon_interval);
-
-       if (conf->enable_beacon)
-               ah->imask |= ATH9K_INT_SWBA;
-       else
-               ah->imask &= ~ATH9K_INT_SWBA;
-
-       ath_dbg(common, BEACON,
-               "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
-               (conf->enable_beacon) ? "Enable" : "Disable",
-               nexttbtt, intval, conf->beacon_interval);
 
-       ath9k_beacon_init(sc, nexttbtt, intval, false);
+       ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF);
+       ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false);
 }
 
-/*
- * This sets up the beacon timers according to the timestamp of the last
- * received beacon and the current TSF, configures PCF and DTIM
- * handling, programs the sleep registers so the hardware will wakeup in
- * time to receive beacons, and configures the beacon miss handling so
- * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
- * we've associated with.
- */
-static void ath9k_beacon_config_sta(struct ath_softc *sc,
+static void ath9k_beacon_config_sta(struct ath_hw *ah,
                                    struct ath_beacon_config *conf)
 {
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_beacon_state bs;
-       int dtim_intval, sleepduration;
-       u32 nexttbtt = 0, intval;
-       u64 tsf;
 
-       /* No need to configure beacon if we are not associated */
-       if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
-               ath_dbg(common, BEACON,
-                       "STA is not yet associated..skipping beacon config\n");
+       if (ath9k_cmn_beacon_config_sta(ah, conf, &bs) == -EPERM)
                return;
-       }
-
-       memset(&bs, 0, sizeof(bs));
-       intval = conf->beacon_interval;
-
-       /*
-        * Setup dtim parameters according to
-        * last beacon we received (which may be none).
-        */
-       dtim_intval = intval * conf->dtim_period;
-       sleepduration = conf->listen_interval * intval;
-
-       /*
-        * Pull nexttbtt forward to reflect the current
-        * TSF and calculate dtim state for the result.
-        */
-       tsf = ath9k_hw_gettsf64(ah);
-       nexttbtt = ath9k_get_next_tbtt(sc, tsf, intval);
-
-       bs.bs_intval = TU_TO_USEC(intval);
-       bs.bs_dtimperiod = conf->dtim_period * bs.bs_intval;
-       bs.bs_nexttbtt = nexttbtt;
-       bs.bs_nextdtim = nexttbtt;
-       if (conf->dtim_period > 1)
-               bs.bs_nextdtim = ath9k_get_next_tbtt(sc, tsf, dtim_intval);
-
-       /*
-        * Calculate the number of consecutive beacons to miss* before taking
-        * a BMISS interrupt. The configuration is specified in TU so we only
-        * need calculate based on the beacon interval.  Note that we clamp the
-        * result to at most 15 beacons.
-        */
-       if (sleepduration > intval) {
-               bs.bs_bmissthreshold = conf->listen_interval *
-                       ATH_DEFAULT_BMISS_LIMIT / 2;
-       } else {
-               bs.bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, intval);
-               if (bs.bs_bmissthreshold > 15)
-                       bs.bs_bmissthreshold = 15;
-               else if (bs.bs_bmissthreshold <= 0)
-                       bs.bs_bmissthreshold = 1;
-       }
-
-       /*
-        * Calculate sleep duration. The configuration is given in ms.
-        * We ensure a multiple of the beacon period is used. Also, if the sleep
-        * duration is greater than the DTIM period then it makes senses
-        * to make it a multiple of that.
-        *
-        * XXX fixed at 100ms
-        */
-
-       bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-                                                sleepduration));
-       if (bs.bs_sleepduration > bs.bs_dtimperiod)
-               bs.bs_sleepduration = bs.bs_dtimperiod;
-
-       /* TSF out of range threshold fixed at 1 second */
-       bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
-
-       ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
-               bs.bs_bmissthreshold, bs.bs_sleepduration);
-
-       /* Set the computed STA beacon timers */
 
        ath9k_hw_disable_interrupts(ah);
        ath9k_hw_set_sta_beacon_timers(ah, &bs);
@@ -597,36 +482,19 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       u32 intval, nexttbtt;
 
        ath9k_reset_beacon_status(sc);
 
-       intval = TU_TO_USEC(conf->beacon_interval);
-
-       if (conf->ibss_creator)
-               nexttbtt = intval;
-       else
-               nexttbtt = ath9k_get_next_tbtt(sc, ath9k_hw_gettsf64(ah),
-                                              conf->beacon_interval);
-
-       if (conf->enable_beacon)
-               ah->imask |= ATH9K_INT_SWBA;
-       else
-               ah->imask &= ~ATH9K_INT_SWBA;
-
-       ath_dbg(common, BEACON,
-               "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
-               (conf->enable_beacon) ? "Enable" : "Disable",
-               nexttbtt, intval, conf->beacon_interval);
+       ath9k_cmn_beacon_config_adhoc(ah, conf);
 
-       ath9k_beacon_init(sc, nexttbtt, intval, conf->ibss_creator);
+       ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator);
 
        /*
         * Set the global 'beacon has been configured' flag for the
         * joiner case in IBSS mode.
         */
        if (!conf->ibss_creator && conf->enable_beacon)
-               set_bit(SC_OP_BEACONS, &sc->sc_flags);
+               set_bit(ATH_OP_BEACONS, &common->op_flags);
 }
 
 static bool ath9k_allow_beacon_config(struct ath_softc *sc,
@@ -646,7 +514,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
 
        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
                if ((vif->type == NL80211_IFTYPE_STATION) &&
-                   test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
+                   test_bit(ATH_OP_BEACONS, &common->op_flags) &&
                    !avp->primary_sta_vif) {
                        ath_dbg(common, CONFIG,
                                "Beacon already configured for a station interface\n");
@@ -668,7 +536,6 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc,
 
        cur_conf->beacon_interval = bss_conf->beacon_int;
        cur_conf->dtim_period = bss_conf->dtim_period;
-       cur_conf->listen_interval = 1;
        cur_conf->dtim_count = 1;
        cur_conf->ibss_creator = bss_conf->ibss_creator;
        cur_conf->bmiss_timeout =
@@ -698,6 +565,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
 {
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        unsigned long flags;
        bool skip_beacon = false;
 
@@ -710,7 +579,7 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
        if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
                ath9k_cache_beacon_config(sc, bss_conf);
                ath9k_set_beacon(sc);
-               set_bit(SC_OP_BEACONS, &sc->sc_flags);
+               set_bit(ATH_OP_BEACONS, &common->op_flags);
                return;
        }
 
@@ -749,13 +618,13 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
                }
 
                /*
-                * Do not set the SC_OP_BEACONS flag for IBSS joiner mode
+                * Do not set the ATH_OP_BEACONS flag for IBSS joiner mode
                 * here, it is done in ath9k_beacon_config_adhoc().
                 */
                if (cur_conf->enable_beacon && !skip_beacon)
-                       set_bit(SC_OP_BEACONS, &sc->sc_flags);
+                       set_bit(ATH_OP_BEACONS, &common->op_flags);
                else
-                       clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+                       clear_bit(ATH_OP_BEACONS, &common->op_flags);
        }
 }
 
@@ -773,7 +642,7 @@ void ath9k_set_beacon(struct ath_softc *sc)
                ath9k_beacon_config_adhoc(sc, cur_conf);
                break;
        case NL80211_IFTYPE_STATION:
-               ath9k_beacon_config_sta(sc, cur_conf);
+               ath9k_beacon_config_sta(sc->sc_ah, cur_conf);
                break;
        default:
                ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
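
The AP path above now delegates to ath9k_cmn_beacon_config_ap(), which keeps the staggered multi-BSS scheme: the configured beacon interval is divided across the beacon slots so that an SWBA fires once per occupied slot. A minimal standalone sketch of that arithmetic follows; the slot count stands in for ATH_BCBUF (whose value is not shown in this patch) and the interval is an illustrative value.

#include <stdio.h>

#define TU_TO_USEC(x)  ((x) << 10)     /* 1 TU = 1024 microseconds */
#define NUM_BCN_SLOTS  8               /* stand-in for ATH_BCBUF (value assumed) */

int main(void)
{
	unsigned int beacon_interval = 100;    /* TU, a typical configuration */
	unsigned int intval = TU_TO_USEC(beacon_interval) / NUM_BCN_SLOTS;

	/* Each occupied beacon slot gets an SWBA every intval microseconds;
	 * empty slots simply generate nothing. */
	printf("per-slot SWBA interval: %u us\n", intval);
	return 0;
}
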
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
new file mode 100644 (file)
index 0000000..775d1d2
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "common.h"
+
+#define FUDGE 2
+
+/* Calculate the modulo of a 64 bit TSF snapshot with a TU divisor */
+static u32 ath9k_mod_tsf64_tu(u64 tsf, u32 div_tu)
+{
+       u32 tsf_mod, tsf_hi, tsf_lo, mod_hi, mod_lo;
+
+       tsf_mod = tsf & (BIT(10) - 1);
+       tsf_hi = tsf >> 32;
+       tsf_lo = ((u32) tsf) >> 10;
+
+       mod_hi = tsf_hi % div_tu;
+       mod_lo = ((mod_hi << 22) + tsf_lo) % div_tu;
+
+       return (mod_lo << 10) | tsf_mod;
+}
+
+static u32 ath9k_get_next_tbtt(struct ath_hw *ah, u64 tsf,
+                              unsigned int interval)
+{
+       unsigned int offset;
+
+       tsf += TU_TO_USEC(FUDGE + ah->config.sw_beacon_response_time);
+       offset = ath9k_mod_tsf64_tu(tsf, interval);
+
+       return (u32) tsf + TU_TO_USEC(interval) - offset;
+}
+
+/*
+ * This sets up the beacon timers according to the timestamp of the last
+ * received beacon and the current TSF, configures PCF and DTIM
+ * handling, programs the sleep registers so the hardware will wake up in
+ * time to receive beacons, and configures the beacon miss handling so
+ * we'll receive a BMISS interrupt when we stop seeing beacons from the AP
+ * we've associated with.
+ */
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+                                struct ath_beacon_config *conf,
+                                struct ath9k_beacon_state *bs)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       int dtim_intval;
+       u64 tsf;
+
+       /* No need to configure beacon if we are not associated */
+       if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
+               ath_dbg(common, BEACON,
+                       "STA is not yet associated..skipping beacon config\n");
+               return -EPERM;
+       }
+
+       memset(bs, 0, sizeof(*bs));
+       conf->intval = conf->beacon_interval;
+
+       /*
+        * Setup dtim parameters according to
+        * last beacon we received (which may be none).
+        */
+       dtim_intval = conf->intval * conf->dtim_period;
+
+       /*
+        * Pull nexttbtt forward to reflect the current
+        * TSF and calculate dtim state for the result.
+        */
+       tsf = ath9k_hw_gettsf64(ah);
+       conf->nexttbtt = ath9k_get_next_tbtt(ah, tsf, conf->intval);
+
+       bs->bs_intval = TU_TO_USEC(conf->intval);
+       bs->bs_dtimperiod = conf->dtim_period * bs->bs_intval;
+       bs->bs_nexttbtt = conf->nexttbtt;
+       bs->bs_nextdtim = conf->nexttbtt;
+       if (conf->dtim_period > 1)
+               bs->bs_nextdtim = ath9k_get_next_tbtt(ah, tsf, dtim_intval);
+
+       /*
+        * Calculate the number of consecutive beacons to miss before taking
+        * a BMISS interrupt. The configuration is specified in TU, so we only
+        * need to calculate based on the beacon interval.  Note that we clamp the
+        * result to at most 15 beacons.
+        */
+       bs->bs_bmissthreshold = DIV_ROUND_UP(conf->bmiss_timeout, conf->intval);
+       if (bs->bs_bmissthreshold > 15)
+               bs->bs_bmissthreshold = 15;
+       else if (bs->bs_bmissthreshold <= 0)
+               bs->bs_bmissthreshold = 1;
+
+       /*
+        * Calculate sleep duration. The configuration is given in ms.
+        * We ensure a multiple of the beacon period is used. Also, if the sleep
+        * duration is greater than the DTIM period then it makes sense
+        * to make it a multiple of that.
+        *
+        * XXX fixed at 100ms
+        */
+
+       bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
+                                                conf->intval));
+       if (bs->bs_sleepduration > bs->bs_dtimperiod)
+               bs->bs_sleepduration = bs->bs_dtimperiod;
+
+       /* TSF out of range threshold fixed at 1 second */
+       bs->bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
+
+       ath_dbg(common, BEACON, "bmiss: %u sleep: %u\n",
+               bs->bs_bmissthreshold, bs->bs_sleepduration);
+       return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_sta);
+
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+                                  struct ath_beacon_config *conf)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       conf->intval = TU_TO_USEC(conf->beacon_interval);
+
+       if (conf->ibss_creator)
+               conf->nexttbtt = conf->intval;
+       else
+               conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+                                              conf->beacon_interval);
+
+       if (conf->enable_beacon)
+               ah->imask |= ATH9K_INT_SWBA;
+       else
+               ah->imask &= ~ATH9K_INT_SWBA;
+
+       ath_dbg(common, BEACON,
+               "IBSS (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+               (conf->enable_beacon) ? "Enable" : "Disable",
+               conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_adhoc);
+
+/*
+ * For multi-bss ap support beacons are either staggered evenly over N slots or
+ * burst together.  For the former arrange for the SWBA to be delivered for each
+ * slot. Slots that are not occupied will generate nothing.
+ */
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+                               struct ath_beacon_config *conf,
+                               unsigned int bc_buf)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       /* NB: the beacon interval is kept internally in TU's */
+       conf->intval = TU_TO_USEC(conf->beacon_interval);
+       conf->intval /= bc_buf;
+       conf->nexttbtt = ath9k_get_next_tbtt(ah, ath9k_hw_gettsf64(ah),
+                                      conf->beacon_interval);
+
+       if (conf->enable_beacon)
+               ah->imask |= ATH9K_INT_SWBA;
+       else
+               ah->imask &= ~ATH9K_INT_SWBA;
+
+       ath_dbg(common, BEACON,
+               "AP (%s) nexttbtt: %u intval: %u conf_intval: %u\n",
+               (conf->enable_beacon) ? "Enable" : "Disable",
+               conf->nexttbtt, conf->intval, conf->beacon_interval);
+}
+EXPORT_SYMBOL(ath9k_cmn_beacon_config_ap);
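
The helper pair at the top of this file is worth a closer look: ath9k_mod_tsf64_tu() sidesteps a full 64-bit division by splitting the TSF into TU-sized pieces and recombining their residues, and ath9k_get_next_tbtt() then uses the result as the offset into the next interval. Below is a small userspace sketch (sample TSF and interval values are made up, and the FUDGE/sw_beacon_response_time adjustment is left out) that cross-checks the split against a plain 64-bit modulo.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define BIT(n)         (1u << (n))
#define TU_TO_USEC(x)  ((x) << 10)

/* Same decomposition as ath9k_mod_tsf64_tu(): tsf mod (div_tu * 1024 us). */
static uint32_t mod_tsf64_tu(uint64_t tsf, uint32_t div_tu)
{
	uint32_t tsf_mod = tsf & (BIT(10) - 1);      /* microseconds within a TU */
	uint32_t tsf_hi  = tsf >> 32;
	uint32_t tsf_lo  = (uint32_t)tsf >> 10;      /* bits 10..31, i.e. whole TUs */
	uint32_t mod_hi  = tsf_hi % div_tu;
	uint32_t mod_lo  = ((mod_hi << 22) + tsf_lo) % div_tu;

	return (mod_lo << 10) | tsf_mod;
}

int main(void)
{
	uint64_t tsf = 0x123456789abcdULL;   /* arbitrary TSF, in microseconds */
	uint32_t interval = 100;             /* beacon interval, in TU */

	uint32_t offset = mod_tsf64_tu(tsf, interval);
	uint32_t direct = (uint32_t)(tsf % ((uint64_t)interval << 10));
	uint32_t nexttbtt = (uint32_t)tsf + TU_TO_USEC(interval) - offset;

	printf("offset %" PRIu32 " (direct %" PRIu32 "), next TBTT %" PRIu32 "\n",
	       offset, direct, nexttbtt);
	return 0;
}
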
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.h b/drivers/net/wireless/ath/ath9k/common-beacon.h
new file mode 100644 (file)
index 0000000..3665d27
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+struct ath_beacon_config;
+
+int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
+                               struct ath_beacon_config *conf,
+                               struct ath9k_beacon_state *bs);
+void ath9k_cmn_beacon_config_adhoc(struct ath_hw *ah,
+                                  struct ath_beacon_config *conf);
+void ath9k_cmn_beacon_config_ap(struct ath_hw *ah,
+                               struct ath_beacon_config *conf,
+                               unsigned int bc_buf);
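
To make the beacon-miss threshold in ath9k_cmn_beacon_config_sta() concrete: the timeout is expressed in TU, divided by the beacon interval, and clamped to between 1 and 15 missed beacons. A quick sketch with illustrative numbers (the 10-beacon default, ATH_DEFAULT_BMISS_LIMIT, appears as context in the htc.h hunk further down):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int intval = 100;                  /* beacon interval, in TU */
	unsigned int bmiss_timeout = 10 * intval;   /* ATH_DEFAULT_BMISS_LIMIT beacons */
	unsigned int thr = DIV_ROUND_UP(bmiss_timeout, intval);

	/* Clamp to the 1..15 range, exactly as the driver does. */
	if (thr > 15)
		thr = 15;
	else if (thr < 1)
		thr = 1;

	printf("raise BMISS after %u consecutive missed beacons\n", thr);
	return 0;
}
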
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
new file mode 100644 (file)
index 0000000..a006c14
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* We use the hw_value as an index into our private channel structure */
+
+#include "common.h"
+
+#define CHAN2G(_freq, _idx)  { \
+       .band = IEEE80211_BAND_2GHZ, \
+       .center_freq = (_freq), \
+       .hw_value = (_idx), \
+       .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+       .band = IEEE80211_BAND_5GHZ, \
+       .center_freq = (_freq), \
+       .hw_value = (_idx), \
+       .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
+ * in 5 MHz steps; we only list the channels we know we have
+ * calibration data for on all cards, so that this table can
+ * stay static */
+static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
+       CHAN2G(2412, 0), /* Channel 1 */
+       CHAN2G(2417, 1), /* Channel 2 */
+       CHAN2G(2422, 2), /* Channel 3 */
+       CHAN2G(2427, 3), /* Channel 4 */
+       CHAN2G(2432, 4), /* Channel 5 */
+       CHAN2G(2437, 5), /* Channel 6 */
+       CHAN2G(2442, 6), /* Channel 7 */
+       CHAN2G(2447, 7), /* Channel 8 */
+       CHAN2G(2452, 8), /* Channel 9 */
+       CHAN2G(2457, 9), /* Channel 10 */
+       CHAN2G(2462, 10), /* Channel 11 */
+       CHAN2G(2467, 11), /* Channel 12 */
+       CHAN2G(2472, 12), /* Channel 13 */
+       CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * in 5 MHz steps; we only list the channels we know we have
+ * calibration data for on all cards, so that this table can
+ * stay static */
+static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
+       /* _We_ call this UNII 1 */
+       CHAN5G(5180, 14), /* Channel 36 */
+       CHAN5G(5200, 15), /* Channel 40 */
+       CHAN5G(5220, 16), /* Channel 44 */
+       CHAN5G(5240, 17), /* Channel 48 */
+       /* _We_ call this UNII 2 */
+       CHAN5G(5260, 18), /* Channel 52 */
+       CHAN5G(5280, 19), /* Channel 56 */
+       CHAN5G(5300, 20), /* Channel 60 */
+       CHAN5G(5320, 21), /* Channel 64 */
+       /* _We_ call this "Middle band" */
+       CHAN5G(5500, 22), /* Channel 100 */
+       CHAN5G(5520, 23), /* Channel 104 */
+       CHAN5G(5540, 24), /* Channel 108 */
+       CHAN5G(5560, 25), /* Channel 112 */
+       CHAN5G(5580, 26), /* Channel 116 */
+       CHAN5G(5600, 27), /* Channel 120 */
+       CHAN5G(5620, 28), /* Channel 124 */
+       CHAN5G(5640, 29), /* Channel 128 */
+       CHAN5G(5660, 30), /* Channel 132 */
+       CHAN5G(5680, 31), /* Channel 136 */
+       CHAN5G(5700, 32), /* Channel 140 */
+       /* _We_ call this UNII 3 */
+       CHAN5G(5745, 33), /* Channel 149 */
+       CHAN5G(5765, 34), /* Channel 153 */
+       CHAN5G(5785, 35), /* Channel 157 */
+       CHAN5G(5805, 36), /* Channel 161 */
+       CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+       ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) {              \
+       .bitrate        = (_bitrate),                   \
+       .flags          = (_flags),                     \
+       .hw_value       = (_hw_rate),                   \
+       .hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+       RATE(10, 0x1b, 0),
+       RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+       RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+       RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+       RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                       IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                       IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+       RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+                        IEEE80211_RATE_SUPPORTS_10MHZ)),
+};
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common)
+{
+       struct ath_hw *ah = (struct ath_hw *)common->ah;
+       void *channels;
+
+       BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
+                    ARRAY_SIZE(ath9k_5ghz_chantable) !=
+                    ATH9K_NUM_CHANNELS);
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
+               channels = devm_kzalloc(ah->dev,
+                       sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
+               if (!channels)
+                       return -ENOMEM;
+
+               memcpy(channels, ath9k_2ghz_chantable,
+                      sizeof(ath9k_2ghz_chantable));
+               common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
+               common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+               common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+                       ARRAY_SIZE(ath9k_2ghz_chantable);
+               common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+               common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+                       ARRAY_SIZE(ath9k_legacy_rates);
+       }
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
+               channels = devm_kzalloc(ah->dev,
+                       sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
+               if (!channels)
+                       return -ENOMEM;
+
+               memcpy(channels, ath9k_5ghz_chantable,
+                      sizeof(ath9k_5ghz_chantable));
+               common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
+               common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+               common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+                       ARRAY_SIZE(ath9k_5ghz_chantable);
+               common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+                       ath9k_legacy_rates + 4;
+               common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+                       ARRAY_SIZE(ath9k_legacy_rates) - 4;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(ath9k_cmn_init_channels_rates);
+
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+                           struct ieee80211_sta_ht_cap *ht_info)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       u8 tx_streams, rx_streams;
+       int i, max_streams;
+
+       ht_info->ht_supported = true;
+       ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                      IEEE80211_HT_CAP_SM_PS |
+                      IEEE80211_HT_CAP_SGI_40 |
+                      IEEE80211_HT_CAP_DSSSCCK40;
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
+               ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
+               ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+       ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+       if (AR_SREV_9271(ah) || AR_SREV_9330(ah) || AR_SREV_9485(ah) ||
+           AR_SREV_9565(ah))
+               max_streams = 1;
+       else if (AR_SREV_9462(ah))
+               max_streams = 2;
+       else if (AR_SREV_9300_20_OR_LATER(ah))
+               max_streams = 3;
+       else
+               max_streams = 2;
+
+       if (AR_SREV_9280_20_OR_LATER(ah)) {
+               if (max_streams >= 2)
+                       ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+               ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+       }
+
+       /* set up supported mcs set */
+       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+       tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
+       rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
+
+       ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
+               tx_streams, rx_streams);
+
+       if (tx_streams != rx_streams) {
+               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+               ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+       }
+
+       for (i = 0; i < rx_streams; i++)
+               ht_info->mcs.rx_mask[i] = 0xff;
+
+       ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+EXPORT_SYMBOL(ath9k_cmn_setup_ht_cap);
+
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->caps.hw_caps & ATH9K_HW_CAP_HT))
+               return;
+
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
+               ath9k_cmn_setup_ht_cap(ah,
+                       &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+       if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
+               ath9k_cmn_setup_ht_cap(ah,
+                       &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+}
+EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common-init.h b/drivers/net/wireless/ath/ath9k/common-init.h
new file mode 100644 (file)
index 0000000..ac03fca
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2009-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+int ath9k_cmn_init_channels_rates(struct ath_common *common);
+void ath9k_cmn_setup_ht_cap(struct ath_hw *ah,
+                           struct ieee80211_sta_ht_cap *ht_info);
+void ath9k_cmn_reload_chainmask(struct ath_hw *ah);
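
One detail of ath9k_cmn_init_channels_rates() above that is easy to miss: both bands share ath9k_legacy_rates, and the 5 GHz band simply starts four entries in, skipping the CCK rates (1, 2, 5.5 and 11 Mb/s) that only exist on 2.4 GHz. A tiny sketch of that pointer-offset idiom, using a stand-in table with the same layout:

#include <stdio.h>

/* Stand-in for ath9k_legacy_rates: 4 CCK entries followed by 8 OFDM entries,
 * bitrates in units of 100 kb/s as in the real table. */
static const int legacy_rates[] = {
	10, 20, 55, 110,                 /* CCK, 2.4 GHz only */
	60, 90, 120, 180, 240, 360, 480, 540,
};

#define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	const int *rates_2ghz = legacy_rates;
	unsigned int n_2ghz = ARRAY_SIZE(legacy_rates);
	const int *rates_5ghz = legacy_rates + 4;        /* skip the CCK rates */
	unsigned int n_5ghz = ARRAY_SIZE(legacy_rates) - 4;

	printf("2.4 GHz: %u rates, lowest %d.%d Mb/s\n",
	       n_2ghz, rates_2ghz[0] / 10, rates_2ghz[0] % 10);
	printf("5 GHz:   %u rates, lowest %d.%d Mb/s\n",
	       n_5ghz, rates_5ghz[0] / 10, rates_5ghz[0] % 10);
	return 0;
}
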
index 768c733cad31d4ef7ae1c45bd17c2e207f2ca82f..c6dd7f1fed65ed52b2ed1d0fd72c474260b5656f 100644 (file)
@@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
 MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
 MODULE_LICENSE("Dual BSD/GPL");
 
+/* Assumes you've already done the endian to CPU conversion */
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+                        struct ieee80211_hdr *hdr,
+                        struct ieee80211_rx_status *rxs,
+                        struct ath_rx_status *rx_stats,
+                        bool *decrypt_error,
+                        unsigned int rxfilter)
+{
+       struct ath_hw *ah = common->ah;
+       bool is_mc, is_valid_tkip, strip_mic, mic_error;
+       __le16 fc;
+
+       fc = hdr->frame_control;
+
+       is_mc = !!is_multicast_ether_addr(hdr->addr1);
+       is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+               test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+       strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+               ieee80211_has_protected(fc) &&
+               !(rx_stats->rs_status &
+               (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+                ATH9K_RXERR_KEYMISS));
+
+       /*
+        * Key miss events are only relevant for pairwise keys where the
+        * descriptor does contain a valid key index. This has been observed
+        * mostly with CCMP encryption.
+        */
+       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+               rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+       mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+               !ieee80211_has_morefrags(fc) &&
+               !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+               (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
+       /*
+        * The rx_stats->rs_status will not be set until the end of the
+        * chained descriptors, so it can be ignored if rs_more is set;
+        * rs_more will be false for the last element of the chained
+        * descriptors.
+        */
+       if (rx_stats->rs_status != 0) {
+               u8 status_mask;
+
+               if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
+                       rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+                       mic_error = false;
+               }
+
+               if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+                   (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
+                       *decrypt_error = true;
+                       mic_error = false;
+               }
+
+
+               /*
+                * Reject error frames with the exception of
+                * decryption and MIC failures. For monitor mode,
+                * we also ignore the CRC error.
+                */
+               status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+                             ATH9K_RXERR_KEYMISS;
+
+               if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
+                       status_mask |= ATH9K_RXERR_CRC;
+
+               if (rx_stats->rs_status & ~status_mask)
+                       return false;
+       }
+
+       /*
+        * For unicast frames the MIC error bit can have false positives,
+        * so all MIC error reports need to be validated in software.
+        * False negatives are not common, so skip software verification
+        * if the hardware considers the MIC valid.
+        */
+       if (strip_mic)
+               rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+       else if (is_mc && mic_error)
+               rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+       return true;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_accept);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+                                 struct sk_buff *skb,
+                                 struct ath_rx_status *rx_stats,
+                                 struct ieee80211_rx_status *rxs,
+                                 bool decrypt_error)
+{
+       struct ath_hw *ah = common->ah;
+       struct ieee80211_hdr *hdr;
+       int hdrlen, padpos, padsize;
+       u8 keyix;
+       __le16 fc;
+
+       /* see if any padding is done by the hw and remove it */
+       hdr = (struct ieee80211_hdr *) skb->data;
+       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+       fc = hdr->frame_control;
+       padpos = ieee80211_hdrlen(fc);
+
+       /* The MAC header is padded to have 32-bit boundary if the
+        * packet payload is non-zero. The general calculation for
+        * padsize would take into account odd header lengths:
+        * padsize = (4 - padpos % 4) % 4; However, since only
+        * even-length headers are used, padding can only be 0 or 2
+        * bytes and we can optimize this a bit. In addition, we must
+        * not try to remove padding from short control frames that do
+        * not have payload. */
+       padsize = padpos & 3;
+       if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+               memmove(skb->data + padsize, skb->data, padpos);
+               skb_pull(skb, padsize);
+       }
+
+       keyix = rx_stats->rs_keyix;
+
+       if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
+           ieee80211_has_protected(fc)) {
+               rxs->flag |= RX_FLAG_DECRYPTED;
+       } else if (ieee80211_has_protected(fc)
+                  && !decrypt_error && skb->len >= hdrlen + 4) {
+               keyix = skb->data[hdrlen + 3] >> 6;
+
+               if (test_bit(keyix, common->keymap))
+                       rxs->flag |= RX_FLAG_DECRYPTED;
+       }
+       if (ah->sw_mgmt_crypto &&
+           (rxs->flag & RX_FLAG_DECRYPTED) &&
+           ieee80211_is_mgmt(fc))
+               /* Use software decrypt for management frames. */
+               rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
+
+int ath9k_cmn_process_rate(struct ath_common *common,
+                          struct ieee80211_hw *hw,
+                          struct ath_rx_status *rx_stats,
+                          struct ieee80211_rx_status *rxs)
+{
+       struct ieee80211_supported_band *sband;
+       enum ieee80211_band band;
+       unsigned int i = 0;
+       struct ath_hw *ah = common->ah;
+
+       band = ah->curchan->chan->band;
+       sband = hw->wiphy->bands[band];
+
+       if (IS_CHAN_QUARTER_RATE(ah->curchan))
+               rxs->flag |= RX_FLAG_5MHZ;
+       else if (IS_CHAN_HALF_RATE(ah->curchan))
+               rxs->flag |= RX_FLAG_10MHZ;
+
+       if (rx_stats->rs_rate & 0x80) {
+               /* HT rate */
+               rxs->flag |= RX_FLAG_HT;
+               rxs->flag |= rx_stats->flag;
+               rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+               return 0;
+       }
+
+       for (i = 0; i < sband->n_bitrates; i++) {
+               if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+                       rxs->rate_idx = i;
+                       return 0;
+               }
+               if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+                       rxs->flag |= RX_FLAG_SHORTPRE;
+                       rxs->rate_idx = i;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rate);
+
+void ath9k_cmn_process_rssi(struct ath_common *common,
+                           struct ieee80211_hw *hw,
+                           struct ath_rx_status *rx_stats,
+                           struct ieee80211_rx_status *rxs)
+{
+       struct ath_hw *ah = common->ah;
+       int last_rssi;
+       int rssi = rx_stats->rs_rssi;
+       int i, j;
+
+       /*
+        * RSSI is not available for subframes in an A-MPDU.
+        */
+       if (rx_stats->rs_moreaggr) {
+               rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+               return;
+       }
+
+       /*
+        * Check if the RSSI for the last subframe in an A-MPDU
+        * or an unaggregated frame is valid.
+        */
+       if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+               rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+               return;
+       }
+
+       for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+               s8 rssi;
+
+               if (!(ah->rxchainmask & BIT(i)))
+                       continue;
+
+               rssi = rx_stats->rs_rssi_ctl[i];
+               if (rssi != ATH9K_RSSI_BAD) {
+                       rxs->chains |= BIT(j);
+                       rxs->chain_signal[j] = ah->noise + rssi;
+               }
+               j++;
+       }
+
+       /*
+        * Update Beacon RSSI, this is used by ANI.
+        */
+       if (rx_stats->is_mybeacon &&
+           ((ah->opmode == NL80211_IFTYPE_STATION) ||
+            (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+               ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
+               last_rssi = common->last_rssi;
+
+               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+                       rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+               if (rssi < 0)
+                       rssi = 0;
+
+               ah->stats.avgbrssi = rssi;
+       }
+
+       rxs->signal = ah->noise + rx_stats->rs_rssi;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rssi);
+
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
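
The padding handling in ath9k_cmn_rx_skb_postprocess() above reduces to: because 802.11 headers are even-length, padpos & 3 is either 0 or 2, and when it is 2 the hardware inserted two pad bytes after the header, so the header is shifted forward and the pad pulled off the front. Here is a userspace sketch of the same byte shuffling on a toy frame; the 26-byte QoS-data header length is just the usual 802.11 case, used for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FCS_LEN 4

/* Remove hardware-inserted padding between the 802.11 header and the payload,
 * the same way ath9k_cmn_rx_skb_postprocess() does with memmove() + skb_pull(). */
static size_t strip_rx_pad(uint8_t *buf, size_t len, size_t hdrlen)
{
	size_t padpos = hdrlen;
	size_t padsize = padpos & 3;          /* 0 or 2 for valid 802.11 headers */

	if (padsize && len >= padpos + padsize + FCS_LEN) {
		memmove(buf + padsize, buf, padpos);   /* shift header over the pad */
		return padsize;                        /* caller "pulls" this many bytes */
	}
	return 0;
}

int main(void)
{
	/* 26-byte QoS data header (not a multiple of 4), 2 pad bytes, a short
	 * payload and the FCS -- contents are irrelevant for the demo. */
	uint8_t frame[26 + 2 + 8 + FCS_LEN] = { 0 };
	size_t pulled = strip_rx_pad(frame, sizeof(frame), 26);

	printf("pulled %zu pad byte(s)\n", pulled);
	return 0;
}
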
index eb85e1bdca889a4e43b46261be31e691ff403363..ca38116838f00e5206c5ec35b3589c9bc4cbba58 100644 (file)
@@ -21,6 +21,9 @@
 #include "hw.h"
 #include "hw-ops.h"
 
+#include "common-init.h"
+#include "common-beacon.h"
+
 /* Common header for Atheros 802.11n base driver cores */
 
 #define WME_BA_BMP_SIZE         64
 #define ATH_EP_RND(x, mul)                                             \
        (((x) + ((mul)/2)) / (mul))
 
+#define IEEE80211_MS_TO_TU(x)   (((x) * 1000) / 1024)
+
+struct ath_beacon_config {
+       int beacon_interval;
+       u16 dtim_period;
+       u16 bmiss_timeout;
+       u8 dtim_count;
+       bool enable_beacon;
+       bool ibss_creator;
+       u32 nexttbtt;
+       u32 intval;
+};
+
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+                        struct ieee80211_hdr *hdr,
+                        struct ieee80211_rx_status *rxs,
+                        struct ath_rx_status *rx_stats,
+                        bool *decrypt_error,
+                        unsigned int rxfilter);
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+                                 struct sk_buff *skb,
+                                 struct ath_rx_status *rx_stats,
+                                 struct ieee80211_rx_status *rxs,
+                                 bool decrypt_error);
+int ath9k_cmn_process_rate(struct ath_common *common,
+                          struct ieee80211_hw *hw,
+                          struct ath_rx_status *rx_stats,
+                          struct ieee80211_rx_status *rxs);
+void ath9k_cmn_process_rssi(struct ath_common *common,
+                           struct ieee80211_hw *hw,
+                           struct ath_rx_status *rx_stats,
+                           struct ieee80211_rx_status *rxs);
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
 struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
                                            struct ath_hw *ah,
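
The IEEE80211_MS_TO_TU() macro that moves into common.h here feeds the fixed ~100 ms sleep duration in the STA beacon path: 100 ms is converted to TU and rounded up to a whole number of beacon intervals (and later capped at the DTIM period). A quick arithmetic sketch with an illustrative interval:

#include <stdio.h>

#define IEEE80211_MS_TO_TU(x)  (((x) * 1000) / 1024)
#define TU_TO_USEC(x)          ((x) << 10)
#define roundup(x, y)          ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int intval = 100;                         /* beacon interval, TU */
	unsigned int sleep_tu = roundup(IEEE80211_MS_TO_TU(100), intval);

	/* 100 ms == 97 TU, rounded up to one whole 100 TU beacon interval. */
	printf("sleep: %u TU (%u us)\n", sleep_tu, TU_TO_USEC(sleep_tu));
	return 0;
}
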
index ab7264c1d8f73904ac9005fc19ec6d6aa3e30b30..780ff1bee6f69ceac8729a9b75913c0bdad6c28b 100644 (file)
@@ -135,46 +135,45 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
        struct ath_softc *sc = file->private_data;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
-       unsigned int len = 0, size = 1024;
+       unsigned int len = 0;
+       const unsigned int size = 1024;
        ssize_t retval = 0;
        char *buf;
+       int i;
+       struct {
+               const char *name;
+               unsigned int val;
+       } ani_info[] = {
+               { "ANI RESET", ah->stats.ast_ani_reset },
+               { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel },
+               { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel },
+               { "SPUR UP", ah->stats.ast_ani_spurup },
+               { "SPUR DOWN", ah->stats.ast_ani_spurup },
+               { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon },
+               { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff },
+               { "MRC-CCK ON", ah->stats.ast_ani_ccklow },
+               { "MRC-CCK OFF", ah->stats.ast_ani_cckhigh },
+               { "FIR-STEP UP", ah->stats.ast_ani_stepup },
+               { "FIR-STEP DOWN", ah->stats.ast_ani_stepdown },
+               { "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero },
+               { "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs },
+               { "CCK ERRORS", ah->stats.ast_ani_cckerrs },
+       };
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;
 
-       if (common->disable_ani) {
-               len += scnprintf(buf + len, size - len, "%s: %s\n",
-                                "ANI", "DISABLED");
+       len += scnprintf(buf + len, size - len, "%15s: %s\n", "ANI",
+                        common->disable_ani ? "DISABLED" : "ENABLED");
+
+       if (common->disable_ani)
                goto exit;
-       }
 
-       len += scnprintf(buf + len, size - len, "%15s: %s\n",
-                        "ANI", "ENABLED");
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "ANI RESET", ah->stats.ast_ani_reset);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "SPUR UP", ah->stats.ast_ani_spurup);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "SPUR DOWN", ah->stats.ast_ani_spurup);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "MRC-CCK ON", ah->stats.ast_ani_ccklow);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "FIR-STEP UP", ah->stats.ast_ani_stepup);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
-       len += scnprintf(buf + len, size - len, "%15s: %u\n",
-                        "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+       for (i = 0; i < ARRAY_SIZE(ani_info); i++)
+               len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                                ani_info[i].name, ani_info[i].val);
+
 exit:
        if (len > size)
                len = size;
@@ -209,7 +208,7 @@ static ssize_t write_file_ani(struct file *file,
        common->disable_ani = !ani;
 
        if (common->disable_ani) {
-               clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+               clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
                ath_stop_ani(sc);
        } else {
                ath_check_ani(sc);
@@ -307,13 +306,13 @@ static ssize_t read_file_antenna_diversity(struct file *file,
        struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
        struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
        struct ath_hw_antcomb_conf div_ant_conf;
-       unsigned int len = 0, size = 1024;
+       unsigned int len = 0;
+       const unsigned int size = 1024;
        ssize_t retval = 0;
        char *buf;
-       char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
-                                "LNA2",
-                                "LNA1",
-                                "LNA1_PLUS_LNA2"};
+       static const char *lna_conf_str[4] = {
+               "LNA1_MINUS_LNA2", "LNA2", "LNA1", "LNA1_PLUS_LNA2"
+       };
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
@@ -716,10 +715,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
        struct ath_softc *sc = file->private_data;
        struct ath_txq *txq;
        char *buf;
-       unsigned int len = 0, size = 1024;
+       unsigned int len = 0;
+       const unsigned int size = 1024;
        ssize_t retval = 0;
        int i;
-       char *qname[4] = {"VO", "VI", "BE", "BK"};
+       static const char *qname[4] = {
+               "VO", "VI", "BE", "BK"
+       };
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
@@ -865,6 +867,12 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
        len += scnprintf(buf + len, sizeof(buf) - len,
                         "%17s: %2d\n", "PLL RX Hang",
                         sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "MAC Hang",
+                        sc->debug.stats.reset[RESET_TYPE_MAC_HANG]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "Stuck Beacon",
+                        sc->debug.stats.reset[RESET_TYPE_BEACON_STUCK]);
        len += scnprintf(buf + len, sizeof(buf) - len,
                         "%17s: %2d\n", "MCI Reset",
                         sc->debug.stats.reset[RESET_TYPE_MCI]);
index cc7a025d833ec454565f9da276a8b6938e8e56f8..559a68c2709cc882d5edf646417dd7348d8fffc8 100644 (file)
@@ -18,7 +18,6 @@
 #define DEBUG_H
 
 #include "hw.h"
-#include "rc.h"
 #include "dfs_debug.h"
 
 struct ath_txq;
index 0a7ddf4c88c93eb55589dedff95394e2157c3510..7936c9126a20d5dee79cae53bb2a06cdb78ed881 100644 (file)
@@ -21,6 +21,8 @@
 
 #include "hw.h"
 
+struct ath_softc;
+
 /**
  * struct ath_dfs_stats - DFS Statistics per wiphy
  * @pulses_total:     pulses reported by HW
index 6d5d716adc1b85d24782b9e38b14c7ef2b0f504a..8e7153b186ede94c4409fb422da8e7d4c58e9b0c 100644 (file)
@@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
          .driver_info = AR9280_USB },  /* SMC Networks */
        { USB_DEVICE(0x0411, 0x017f),
          .driver_info = AR9280_USB },  /* Sony UWA-BR100 */
+       { USB_DEVICE(0x0411, 0x0197),
+         .driver_info = AR9280_USB },  /* Buffalo WLI-UV-AG300P */
        { USB_DEVICE(0x04da, 0x3904),
          .driver_info = AR9280_USB },
 
index 99a203174f45a04b50248a370ca4113e256e75ca..dab1f0cab9937d17fd0df75d88c3eafb0ff8ec54 100644 (file)
@@ -39,7 +39,6 @@
 #define ATH_RESTART_CALINTERVAL   1200000 /* 20 minutes */
 
 #define ATH_DEFAULT_BMISS_LIMIT 10
-#define IEEE80211_MS_TO_TU(x)   (((x) * 1000) / 1024)
 #define TSF_TO_TU(_h, _l) \
        ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
@@ -277,7 +276,6 @@ struct ath9k_htc_rxbuf {
 };
 
 struct ath9k_htc_rx {
-       int last_rssi; /* FIXME: per-STA */
        struct list_head rxbuf;
        spinlock_t rxbuflock;
 };
@@ -407,12 +405,18 @@ static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv,
 #define DEFAULT_SWBA_RESPONSE 40 /* in TUs */
 #define MIN_SWBA_RESPONSE     10 /* in TUs */
 
-struct htc_beacon_config {
+struct htc_beacon {
+       enum {
+               OK,             /* no change needed */
+               UPDATE,         /* update pending */
+               COMMIT          /* beacon sent, commit change */
+       } updateslot;           /* slot time update fsm */
+
        struct ieee80211_vif *bslot[ATH9K_HTC_MAX_BCN_VIF];
-       u16 beacon_interval;
-       u16 dtim_period;
-       u16 bmiss_timeout;
-       u32 bmiss_cnt;
+       u32 bmisscnt;
+       u32 beaconq;
+       int slottime;
+       int slotupdate;
 };
 
 struct ath_btcoex {
@@ -440,12 +444,8 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 }
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
-#define OP_INVALID                BIT(0)
-#define OP_SCANNING               BIT(1)
-#define OP_ENABLE_BEACON           BIT(2)
 #define OP_BT_PRIORITY_DETECTED    BIT(3)
 #define OP_BT_SCAN                 BIT(4)
-#define OP_ANI_RUNNING             BIT(5)
 #define OP_TSF_RESET               BIT(6)
 
 struct ath9k_htc_priv {
@@ -488,10 +488,10 @@ struct ath9k_htc_priv {
        unsigned long op_flags;
 
        struct ath9k_hw_cal_data caldata;
-       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
 
        spinlock_t beacon_lock;
-       struct htc_beacon_config cur_beacon_conf;
+       struct ath_beacon_config cur_beacon_conf;
+       struct htc_beacon beacon;
 
        struct ath9k_htc_rx rx;
        struct ath9k_htc_tx tx;
@@ -516,7 +516,6 @@ struct ath9k_htc_priv {
        struct work_struct led_work;
 #endif
 
-       int beaconq;
        int cabq;
        int hwq_map[IEEE80211_NUM_ACS];
 
index 8b575773459677055fe6b427e8443f65796c4ae2..e8b6ec3c1dbbb01bd2ea7d122dbe747c9d8f6d96 100644 (file)
@@ -26,7 +26,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
        memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
        memset(&qi_be, 0, sizeof(struct ath9k_tx_queue_info));
 
-       ath9k_hw_get_txq_props(ah, priv->beaconq, &qi);
+       ath9k_hw_get_txq_props(ah, priv->beacon.beaconq, &qi);
 
        if (priv->ah->opmode == NL80211_IFTYPE_AP ||
            priv->ah->opmode == NL80211_IFTYPE_MESH_POINT) {
@@ -54,220 +54,78 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
 
        }
 
-       if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
+       if (!ath9k_hw_set_txq_props(ah, priv->beacon.beaconq, &qi)) {
                ath_err(ath9k_hw_common(ah),
-                       "Unable to update beacon queue %u!\n", priv->beaconq);
+                       "Unable to update beacon queue %u!\n", priv->beacon.beaconq);
        } else {
-               ath9k_hw_resettxqueue(ah, priv->beaconq);
+               ath9k_hw_resettxqueue(ah, priv->beacon.beaconq);
        }
 }
 
-
-static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
-                                       struct htc_beacon_config *bss_conf)
+/*
+ * Both nexttbtt and intval have to be in usecs.
+ */
+static void ath9k_htc_beacon_init(struct ath9k_htc_priv *priv,
+                                 struct ath_beacon_config *conf,
+                                 bool reset_tsf)
 {
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct ath9k_beacon_state bs;
-       enum ath9k_int imask = 0;
-       int dtimperiod, dtimcount, sleepduration;
-       int bmiss_timeout;
-       u32 nexttbtt = 0, intval, tsftu;
-       __be32 htc_imask = 0;
-       u64 tsf;
-       int num_beacons, offset, dtim_dec_count;
+       struct ath_hw *ah = priv->ah;
        int ret __attribute__ ((unused));
+       __be32 htc_imask = 0;
        u8 cmd_rsp;
 
-       memset(&bs, 0, sizeof(bs));
-
-       intval = bss_conf->beacon_interval;
-       bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
-
-       /*
-        * Setup dtim parameters according to
-        * last beacon we received (which may be none).
-        */
-       dtimperiod = bss_conf->dtim_period;
-       if (dtimperiod <= 0)            /* NB: 0 if not known */
-               dtimperiod = 1;
-       dtimcount = 1;
-       if (dtimcount >= dtimperiod)    /* NB: sanity check */
-               dtimcount = 0;
-
-       sleepduration = intval;
-       if (sleepduration <= 0)
-               sleepduration = intval;
-
-       /*
-        * Pull nexttbtt forward to reflect the current
-        * TSF and calculate dtim state for the result.
-        */
-       tsf = ath9k_hw_gettsf64(priv->ah);
-       tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
-
-       num_beacons = tsftu / intval + 1;
-       offset = tsftu % intval;
-       nexttbtt = tsftu - offset;
-       if (offset)
-               nexttbtt += intval;
-
-       /* DTIM Beacon every dtimperiod Beacon */
-       dtim_dec_count = num_beacons % dtimperiod;
-       dtimcount -= dtim_dec_count;
-       if (dtimcount < 0)
-               dtimcount += dtimperiod;
-
-       bs.bs_intval = TU_TO_USEC(intval);
-       bs.bs_nexttbtt = TU_TO_USEC(nexttbtt);
-       bs.bs_dtimperiod = dtimperiod * bs.bs_intval;
-       bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount * bs.bs_intval;
-
-       /*
-        * Calculate the number of consecutive beacons to miss* before taking
-        * a BMISS interrupt. The configuration is specified in TU so we only
-        * need calculate based on the beacon interval.  Note that we clamp the
-        * result to at most 15 beacons.
-        */
-       if (sleepduration > intval) {
-               bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
-       } else {
-               bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
-               if (bs.bs_bmissthreshold > 15)
-                       bs.bs_bmissthreshold = 15;
-               else if (bs.bs_bmissthreshold <= 0)
-                       bs.bs_bmissthreshold = 1;
-       }
-
-       /*
-        * Calculate sleep duration. The configuration is given in ms.
-        * We ensure a multiple of the beacon period is used. Also, if the sleep
-        * duration is greater than the DTIM period then it makes senses
-        * to make it a multiple of that.
-        *
-        * XXX fixed at 100ms
-        */
-
-       bs.bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-                                                sleepduration));
-       if (bs.bs_sleepduration > bs.bs_dtimperiod)
-               bs.bs_sleepduration = bs.bs_dtimperiod;
-
-       /* TSF out of range threshold fixed at 1 second */
-       bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
-
-       ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
-               intval, tsf, tsftu);
-       ath_dbg(common, CONFIG, "bmiss: %u sleep: %u\n",
-               bs.bs_bmissthreshold, bs.bs_sleepduration);
-
-       /* Set the computed STA beacon timers */
+       if (conf->intval >= TU_TO_USEC(DEFAULT_SWBA_RESPONSE))
+               ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
+       else
+               ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
 
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
-       ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
-       imask |= ATH9K_INT_BMISS;
-       htc_imask = cpu_to_be32(imask);
+       if (reset_tsf)
+               ath9k_hw_reset_tsf(ah);
+       ath9k_htc_beaconq_config(priv);
+       ath9k_hw_beaconinit(ah, conf->nexttbtt, conf->intval);
+       priv->beacon.bmisscnt = 0;
+       htc_imask = cpu_to_be32(ah->imask);
        WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
 }
 
-static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
-                                      struct htc_beacon_config *bss_conf)
+static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
+                                       struct ath_beacon_config *bss_conf)
 {
-       struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath9k_beacon_state bs;
        enum ath9k_int imask = 0;
-       u32 nexttbtt, intval, tsftu;
        __be32 htc_imask = 0;
        int ret __attribute__ ((unused));
        u8 cmd_rsp;
-       u64 tsf;
 
-       intval = bss_conf->beacon_interval;
-       intval /= ATH9K_HTC_MAX_BCN_VIF;
-       nexttbtt = intval;
-
-       /*
-        * To reduce beacon misses under heavy TX load,
-        * set the beacon response time to a larger value.
-        */
-       if (intval > DEFAULT_SWBA_RESPONSE)
-               priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
-       else
-               priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
-
-       if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
-               ath9k_hw_reset_tsf(priv->ah);
-               clear_bit(OP_TSF_RESET, &priv->op_flags);
-       } else {
-               /*
-                * Pull nexttbtt forward to reflect the current TSF.
-                */
-               tsf = ath9k_hw_gettsf64(priv->ah);
-               tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
-               do {
-                       nexttbtt += intval;
-               } while (nexttbtt < tsftu);
-       }
-
-       if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
-               imask |= ATH9K_INT_SWBA;
-
-       ath_dbg(common, CONFIG,
-               "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
-               bss_conf->beacon_interval, nexttbtt,
-               priv->ah->config.sw_beacon_response_time, imask);
-
-       ath9k_htc_beaconq_config(priv);
+       if (ath9k_cmn_beacon_config_sta(priv->ah, bss_conf, &bs) == -EPERM)
+               return;
 
        WMI_CMD(WMI_DISABLE_INTR_CMDID);
-       ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
-       priv->cur_beacon_conf.bmiss_cnt = 0;
+       ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
+       imask |= ATH9K_INT_BMISS;
        htc_imask = cpu_to_be32(imask);
        WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
 }
 
-static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
-                                         struct htc_beacon_config *bss_conf)
+static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
+                                      struct ath_beacon_config *conf)
 {
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-       enum ath9k_int imask = 0;
-       u32 nexttbtt, intval, tsftu;
-       __be32 htc_imask = 0;
-       int ret __attribute__ ((unused));
-       u8 cmd_rsp;
-       u64 tsf;
-
-       intval = bss_conf->beacon_interval;
-       nexttbtt = intval;
-
-       /*
-        * Pull nexttbtt forward to reflect the current TSF.
-        */
-       tsf = ath9k_hw_gettsf64(priv->ah);
-       tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE;
-       do {
-               nexttbtt += intval;
-       } while (nexttbtt < tsftu);
-
-       /*
-        * Only one IBSS interfce is allowed.
-        */
-       if (intval > DEFAULT_SWBA_RESPONSE)
-               priv->ah->config.sw_beacon_response_time = DEFAULT_SWBA_RESPONSE;
-       else
-               priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
+       struct ath_hw *ah = priv->ah;
+       ah->imask = 0;
 
-       if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
-               imask |= ATH9K_INT_SWBA;
+       ath9k_cmn_beacon_config_ap(ah, conf, ATH9K_HTC_MAX_BCN_VIF);
+       ath9k_htc_beacon_init(priv, conf, false);
+}
 
-       ath_dbg(common, CONFIG,
-               "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
-               bss_conf->beacon_interval, nexttbtt,
-               priv->ah->config.sw_beacon_response_time, imask);
+static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
+                                         struct ath_beacon_config *conf)
+{
+       struct ath_hw *ah = priv->ah;
+       ah->imask = 0;
 
-       WMI_CMD(WMI_DISABLE_INTR_CMDID);
-       ath9k_hw_beaconinit(priv->ah, TU_TO_USEC(nexttbtt), TU_TO_USEC(intval));
-       priv->cur_beacon_conf.bmiss_cnt = 0;
-       htc_imask = cpu_to_be32(imask);
-       WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
+       ath9k_cmn_beacon_config_adhoc(ah, conf);
+       ath9k_htc_beacon_init(priv, conf, conf->ibss_creator);
 }
 
 void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
@@ -287,7 +145,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
 
        spin_lock_bh(&priv->beacon_lock);
 
-       vif = priv->cur_beacon_conf.bslot[slot];
+       vif = priv->beacon.bslot[slot];
 
        skb = ieee80211_get_buffered_bc(priv->hw, vif);
 
@@ -348,10 +206,10 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
 
        spin_lock_bh(&priv->beacon_lock);
 
-       vif = priv->cur_beacon_conf.bslot[slot];
+       vif = priv->beacon.bslot[slot];
        avp = (struct ath9k_htc_vif *)vif->drv_priv;
 
-       if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
+       if (unlikely(test_bit(ATH_OP_SCANNING, &common->op_flags))) {
                spin_unlock_bh(&priv->beacon_lock);
                return;
        }
@@ -431,8 +289,8 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
        int slot;
 
        if (swba->beacon_pending != 0) {
-               priv->cur_beacon_conf.bmiss_cnt++;
-               if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
+               priv->beacon.bmisscnt++;
+               if (priv->beacon.bmisscnt > BSTUCK_THRESHOLD) {
                        ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
                        ieee80211_queue_work(priv->hw,
                                             &priv->fatal_work);
@@ -440,16 +298,16 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
                return;
        }
 
-       if (priv->cur_beacon_conf.bmiss_cnt) {
+       if (priv->beacon.bmisscnt) {
                ath_dbg(common, BSTUCK,
                        "Resuming beacon xmit after %u misses\n",
-                       priv->cur_beacon_conf.bmiss_cnt);
-               priv->cur_beacon_conf.bmiss_cnt = 0;
+                       priv->beacon.bmisscnt);
+               priv->beacon.bmisscnt = 0;
        }
 
        slot = ath9k_htc_choose_bslot(priv, swba);
        spin_lock_bh(&priv->beacon_lock);
-       if (priv->cur_beacon_conf.bslot[slot] == NULL) {
+       if (priv->beacon.bslot[slot] == NULL) {
                spin_unlock_bh(&priv->beacon_lock);
                return;
        }
@@ -468,13 +326,13 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
 
        spin_lock_bh(&priv->beacon_lock);
        for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) {
-               if (priv->cur_beacon_conf.bslot[i] == NULL) {
+               if (priv->beacon.bslot[i] == NULL) {
                        avp->bslot = i;
                        break;
                }
        }
 
-       priv->cur_beacon_conf.bslot[avp->bslot] = vif;
+       priv->beacon.bslot[avp->bslot] = vif;
        spin_unlock_bh(&priv->beacon_lock);
 
        ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
@@ -488,7 +346,7 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
        struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
 
        spin_lock_bh(&priv->beacon_lock);
-       priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
+       priv->beacon.bslot[avp->bslot] = NULL;
        spin_unlock_bh(&priv->beacon_lock);
 
        ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
@@ -504,7 +362,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *)vif->drv_priv;
-       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
        u64 tsfadjust;
 
        if (avp->bslot == 0)
@@ -536,7 +394,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
                                          struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        bool beacon_configured;
 
@@ -591,7 +449,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
                             struct ieee80211_vif *vif)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
 
@@ -627,7 +485,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
 void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
-       struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
+       struct ath_beacon_config *cur_conf = &priv->cur_beacon_conf;
 
        switch (priv->ah->opmode) {
        case NL80211_IFTYPE_STATION:
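
The hunks above fold the per-mode beacon setup into the shared ath9k_cmn_beacon_config_*() helpers plus a single local ath9k_htc_beacon_init(). The piece of removed logic most worth keeping in mind is the "pull nexttbtt forward" loop that the AP and IBSS paths ran: advance the next target beacon time in whole beacon intervals until it lies ahead of the current TSF (in TU, with the FUDGE slack added beforehand). A minimal standalone sketch of just that loop, with plain integer types standing in for the driver's TU/TSF handling:

    #include <stdint.h>

    /* Model of the removed catch-up loop: start one interval ahead and keep
     * adding whole beacon intervals until the target lies past the current
     * TSF-derived time. E.g. intval = 100 TU, tsftu = 12345 gives 12400. */
    static uint32_t ex_next_tbtt(uint32_t tsftu, uint32_t intval)
    {
            uint32_t nexttbtt = intval;

            do {
                    nexttbtt += intval;
            } while (nexttbtt < tsftu);

            return nexttbtt;
    }
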
index c57d6b859c043207a11883b7a2584241c16aa7dd..8a3bd5fe3a548f9ac438691ea57a1f583f0291af 100644 (file)
@@ -38,93 +38,6 @@ static int ath9k_ps_enable;
 module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
 MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
 
-#define CHAN2G(_freq, _idx)  { \
-       .center_freq = (_freq), \
-       .hw_value = (_idx), \
-       .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
-       .band = IEEE80211_BAND_5GHZ, \
-       .center_freq = (_freq), \
-       .hw_value = (_idx), \
-       .max_power = 20, \
-}
-
-static struct ieee80211_channel ath9k_2ghz_channels[] = {
-       CHAN2G(2412, 0), /* Channel 1 */
-       CHAN2G(2417, 1), /* Channel 2 */
-       CHAN2G(2422, 2), /* Channel 3 */
-       CHAN2G(2427, 3), /* Channel 4 */
-       CHAN2G(2432, 4), /* Channel 5 */
-       CHAN2G(2437, 5), /* Channel 6 */
-       CHAN2G(2442, 6), /* Channel 7 */
-       CHAN2G(2447, 7), /* Channel 8 */
-       CHAN2G(2452, 8), /* Channel 9 */
-       CHAN2G(2457, 9), /* Channel 10 */
-       CHAN2G(2462, 10), /* Channel 11 */
-       CHAN2G(2467, 11), /* Channel 12 */
-       CHAN2G(2472, 12), /* Channel 13 */
-       CHAN2G(2484, 13), /* Channel 14 */
-};
-
-static struct ieee80211_channel ath9k_5ghz_channels[] = {
-       /* _We_ call this UNII 1 */
-       CHAN5G(5180, 14), /* Channel 36 */
-       CHAN5G(5200, 15), /* Channel 40 */
-       CHAN5G(5220, 16), /* Channel 44 */
-       CHAN5G(5240, 17), /* Channel 48 */
-       /* _We_ call this UNII 2 */
-       CHAN5G(5260, 18), /* Channel 52 */
-       CHAN5G(5280, 19), /* Channel 56 */
-       CHAN5G(5300, 20), /* Channel 60 */
-       CHAN5G(5320, 21), /* Channel 64 */
-       /* _We_ call this "Middle band" */
-       CHAN5G(5500, 22), /* Channel 100 */
-       CHAN5G(5520, 23), /* Channel 104 */
-       CHAN5G(5540, 24), /* Channel 108 */
-       CHAN5G(5560, 25), /* Channel 112 */
-       CHAN5G(5580, 26), /* Channel 116 */
-       CHAN5G(5600, 27), /* Channel 120 */
-       CHAN5G(5620, 28), /* Channel 124 */
-       CHAN5G(5640, 29), /* Channel 128 */
-       CHAN5G(5660, 30), /* Channel 132 */
-       CHAN5G(5680, 31), /* Channel 136 */
-       CHAN5G(5700, 32), /* Channel 140 */
-       /* _We_ call this UNII 3 */
-       CHAN5G(5745, 33), /* Channel 149 */
-       CHAN5G(5765, 34), /* Channel 153 */
-       CHAN5G(5785, 35), /* Channel 157 */
-       CHAN5G(5805, 36), /* Channel 161 */
-       CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
-       ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) {             \
-       .bitrate        = (_bitrate),                   \
-       .flags          = (_flags),                     \
-       .hw_value       = (_hw_rate),                   \
-       .hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
-       RATE(10, 0x1b, 0),
-       RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp : 0x1e */
-       RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
-       RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short: 0x1c */
-       RATE(60, 0x0b, 0),
-       RATE(90, 0x0f, 0),
-       RATE(120, 0x0a, 0),
-       RATE(180, 0x0e, 0),
-       RATE(240, 0x09, 0),
-       RATE(360, 0x0d, 0),
-       RATE(480, 0x08, 0),
-       RATE(540, 0x0c, 0),
-};
-
 #ifdef CONFIG_MAC80211_LEDS
 static const struct ieee80211_tpt_blink ath9k_htc_tpt_blink[] = {
        { .throughput = 0 * 1024, .blink_time = 334 },
@@ -343,6 +256,25 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
        }
 }
 
+static void ath9k_regwrite_multi(struct ath_common *common)
+{
+       struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+       u32 rsp_status;
+       int r;
+
+       r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
+                         (u8 *) &priv->wmi->multi_write,
+                         sizeof(struct register_write) * priv->wmi->multi_write_idx,
+                         (u8 *) &rsp_status, sizeof(rsp_status),
+                         100);
+       if (unlikely(r)) {
+               ath_dbg(common, WMI,
+                       "REGISTER WRITE FAILED, multi len: %d\n",
+                       priv->wmi->multi_write_idx);
+       }
+       priv->wmi->multi_write_idx = 0;
+}
+
 static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
 {
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -369,8 +301,6 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
-       u32 rsp_status;
-       int r;
 
        mutex_lock(&priv->wmi->multi_write_mutex);
 
@@ -383,19 +313,8 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
        priv->wmi->multi_write_idx++;
 
        /* If the buffer is full, send it out. */
-       if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
-               r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
-                         (u8 *) &priv->wmi->multi_write,
-                         sizeof(struct register_write) * priv->wmi->multi_write_idx,
-                         (u8 *) &rsp_status, sizeof(rsp_status),
-                         100);
-               if (unlikely(r)) {
-                       ath_dbg(common, WMI,
-                               "REGISTER WRITE FAILED, multi len: %d\n",
-                               priv->wmi->multi_write_idx);
-               }
-               priv->wmi->multi_write_idx = 0;
-       }
+       if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER)
+               ath9k_regwrite_multi(common);
 
        mutex_unlock(&priv->wmi->multi_write_mutex);
 }
@@ -426,26 +345,13 @@ static void ath9k_regwrite_flush(void *hw_priv)
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
-       u32 rsp_status;
-       int r;
 
        atomic_dec(&priv->wmi->mwrite_cnt);
 
        mutex_lock(&priv->wmi->multi_write_mutex);
 
-       if (priv->wmi->multi_write_idx) {
-               r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
-                         (u8 *) &priv->wmi->multi_write,
-                         sizeof(struct register_write) * priv->wmi->multi_write_idx,
-                         (u8 *) &rsp_status, sizeof(rsp_status),
-                         100);
-               if (unlikely(r)) {
-                       ath_dbg(common, WMI,
-                               "REGISTER WRITE FAILED, multi len: %d\n",
-                               priv->wmi->multi_write_idx);
-               }
-               priv->wmi->multi_write_idx = 0;
-       }
+       if (priv->wmi->multi_write_idx)
+               ath9k_regwrite_multi(common);
 
        mutex_unlock(&priv->wmi->multi_write_mutex);
 }
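
The hunks above extract the WMI multi-write flush into the new ath9k_regwrite_multi(), so the buffer-full path in ath9k_regwrite_buffer() and the explicit ath9k_regwrite_flush() now share one sender instead of two copies of the ath9k_wmi_cmd() call. A small userspace model of that batching pattern; the batch size, struct names and the send callback are illustrative, not the driver's:

    #include <stddef.h>

    #define EX_MAX_CMDS 32                  /* illustrative batch size */

    struct ex_reg_write { unsigned int reg, val; };

    struct ex_batch {
            struct ex_reg_write buf[EX_MAX_CMDS];
            size_t idx;
            /* stands in for the single WMI_REG_WRITE_CMDID command */
            void (*send)(const struct ex_reg_write *writes, size_t count);
    };

    /* One flush helper, shared by both call sites below. */
    static void ex_flush(struct ex_batch *b)
    {
            if (b->idx)
                    b->send(b->buf, b->idx);
            b->idx = 0;
    }

    /* Buffered write: queue the register/value pair, push the batch when full. */
    static void ex_buffered_write(struct ex_batch *b, unsigned int reg,
                                  unsigned int val)
    {
            b->buf[b->idx].reg = reg;
            b->buf[b->idx].val = val;
            if (++b->idx == EX_MAX_CMDS)
                    ex_flush(b);
    }
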
@@ -491,51 +397,6 @@ static const struct ath_bus_ops ath9k_usb_bus_ops = {
        .eeprom_read = ath_usb_eeprom_read,
 };
 
-static void setup_ht_cap(struct ath9k_htc_priv *priv,
-                        struct ieee80211_sta_ht_cap *ht_info)
-{
-       struct ath_common *common = ath9k_hw_common(priv->ah);
-       u8 tx_streams, rx_streams;
-       int i;
-
-       ht_info->ht_supported = true;
-       ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
-                      IEEE80211_HT_CAP_SM_PS |
-                      IEEE80211_HT_CAP_SGI_40 |
-                      IEEE80211_HT_CAP_DSSSCCK40;
-
-       if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-               ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
-       ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-
-       ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-       ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
-       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
-       /* ath9k_htc supports only 1 or 2 stream devices */
-       tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
-       rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
-
-       ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
-               tx_streams, rx_streams);
-
-       if (tx_streams >= 2)
-               ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
-
-       if (tx_streams != rx_streams) {
-               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-               ht_info->mcs.tx_params |= ((tx_streams - 1) <<
-                                          IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-       }
-
-       for (i = 0; i < rx_streams; i++)
-               ht_info->mcs.rx_mask[i] = 0xff;
-
-       ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
 static int ath9k_init_queues(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
@@ -544,8 +405,8 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
        for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
                priv->hwq_map[i] = -1;
 
-       priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
-       if (priv->beaconq == -1) {
+       priv->beacon.beaconq = ath9k_hw_beaconq_setup(priv->ah);
+       if (priv->beacon.beaconq == -1) {
                ath_err(common, "Unable to setup BEACON xmit queue\n");
                goto err;
        }
@@ -580,37 +441,13 @@ err:
        return -EINVAL;
 }
 
-static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
-{
-       if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
-               priv->sbands[IEEE80211_BAND_2GHZ].channels =
-                       ath9k_2ghz_channels;
-               priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
-               priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
-                       ARRAY_SIZE(ath9k_2ghz_channels);
-               priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
-               priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
-                       ARRAY_SIZE(ath9k_legacy_rates);
-       }
-
-       if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
-               priv->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_channels;
-               priv->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
-               priv->sbands[IEEE80211_BAND_5GHZ].n_channels =
-                       ARRAY_SIZE(ath9k_5ghz_channels);
-               priv->sbands[IEEE80211_BAND_5GHZ].bitrates =
-                       ath9k_legacy_rates + 4;
-               priv->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
-                       ARRAY_SIZE(ath9k_legacy_rates) - 4;
-       }
-}
-
 static void ath9k_init_misc(struct ath9k_htc_priv *priv)
 {
        struct ath_common *common = ath9k_hw_common(priv->ah);
 
        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
 
+       common->last_rssi = ATH_RSSI_DUMMY_MARKER;
        priv->ah->opmode = NL80211_IFTYPE_STATION;
 }
 
@@ -622,12 +459,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
        struct ath_common *common;
        int i, ret = 0, csz = 0;
 
-       set_bit(OP_INVALID, &priv->op_flags);
-
        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;
 
+       ah->dev = priv->dev;
        ah->hw_version.devid = devid;
        ah->hw_version.usbdev = drv_info;
        ah->ah_flags |= AH_USE_EEPROM;
@@ -647,6 +483,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
        common->priv = priv;
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_htc_btcoex_enable == 1;
+       set_bit(ATH_OP_INVALID, &common->op_flags);
 
        spin_lock_init(&priv->beacon_lock);
        spin_lock_init(&priv->tx.tx_lock);
@@ -682,10 +519,11 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
                goto err_queues;
 
        for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++)
-               priv->cur_beacon_conf.bslot[i] = NULL;
+               priv->beacon.bslot[i] = NULL;
+       priv->beacon.slottime = ATH9K_SLOT_TIME_9;
 
+       ath9k_cmn_init_channels_rates(common);
        ath9k_cmn_init_crypto(ah);
-       ath9k_init_channels_rates(priv);
        ath9k_init_misc(priv);
        ath9k_htc_init_btcoex(priv, product);
 
@@ -721,6 +559,7 @@ static const struct ieee80211_iface_combination if_comb = {
 static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
                               struct ieee80211_hw *hw)
 {
+       struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(priv->ah);
        struct base_eep_header *pBase;
 
@@ -765,19 +604,12 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
 
        if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &priv->sbands[IEEE80211_BAND_2GHZ];
+                       &common->sbands[IEEE80211_BAND_2GHZ];
        if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &priv->sbands[IEEE80211_BAND_5GHZ];
-
-       if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
-               if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-                       setup_ht_cap(priv,
-                                    &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
-               if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-                       setup_ht_cap(priv,
-                                    &priv->sbands[IEEE80211_BAND_5GHZ].ht_cap);
-       }
+                       &common->sbands[IEEE80211_BAND_5GHZ];
+
+       ath9k_cmn_reload_chainmask(ah);
 
        pBase = ath9k_htc_get_eeprom_base(priv);
        if (pBase) {
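
With setup_ht_cap() and ath9k_init_channels_rates() gone, ath9k_htc now takes its band, rate and HT capability setup from the shared ath9k_cmn_init_channels_rates() and ath9k_cmn_reload_chainmask() and points the wiphy bands at common->sbands[]. The stream-dependent MCS handling in the removed setup_ht_cap() is the easiest part to lose sight of; a standalone sketch of just that piece follows (struct and flag constants are illustrative stand-ins, not mac80211's, and the (tx_streams - 1) max-streams encoding of the original is omitted):

    #include <stdint.h>
    #include <string.h>

    #define EX_MCS_TX_RX_DIFF      0x01    /* stand-in flag bits */
    #define EX_MCS_TX_DEFINED      0x02

    struct ex_mcs_info {
            uint8_t rx_mask[3];
            uint8_t tx_params;
    };

    /* One 0xff MCS byte per RX stream; flag a TX/RX mismatch when the
     * chain counts differ, as the removed setup_ht_cap() did. */
    static void ex_fill_mcs(struct ex_mcs_info *mcs, int tx_streams,
                            int rx_streams)
    {
            memset(mcs, 0, sizeof(*mcs));

            for (int i = 0; i < rx_streams; i++)
                    mcs->rx_mask[i] = 0xff;

            if (tx_streams != rx_streams)
                    mcs->tx_params |= EX_MCS_TX_RX_DIFF;

            mcs->tx_params |= EX_MCS_TX_DEFINED;
    }
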
index c9254a61ca52d0984c23efdf4cb01aeb6b40ae23..f46cd0250e488217ca4aa517be12924e3b61c6a8 100644 (file)
@@ -250,7 +250,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
        u8 cmd_rsp;
        int ret;
 
-       if (test_bit(OP_INVALID, &priv->op_flags))
+       if (test_bit(ATH_OP_INVALID, &common->op_flags))
                return -EIO;
 
        fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +304,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
        htc_start(priv->htc);
 
-       if (!test_bit(OP_SCANNING, &priv->op_flags) &&
+       if (!test_bit(ATH_OP_SCANNING, &common->op_flags) &&
            !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
                ath9k_htc_vif_reconfig(priv);
 
@@ -748,7 +748,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
        common->ani.shortcal_timer = timestamp;
        common->ani.checkani_timer = timestamp;
 
-       set_bit(OP_ANI_RUNNING, &priv->op_flags);
+       set_bit(ATH_OP_ANI_RUN, &common->op_flags);
 
        ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
                                     msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -756,8 +756,9 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
 
 void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
 {
+       struct ath_common *common = ath9k_hw_common(priv->ah);
        cancel_delayed_work_sync(&priv->ani_work);
-       clear_bit(OP_ANI_RUNNING, &priv->op_flags);
+       clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
 }
 
 void ath9k_htc_ani_work(struct work_struct *work)
@@ -942,7 +943,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
                ath_dbg(common, CONFIG,
                        "Failed to update capability in target\n");
 
-       clear_bit(OP_INVALID, &priv->op_flags);
+       clear_bit(ATH_OP_INVALID, &common->op_flags);
        htc_start(priv->htc);
 
        spin_lock_bh(&priv->tx.tx_lock);
@@ -971,7 +972,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 
        mutex_lock(&priv->mutex);
 
-       if (test_bit(OP_INVALID, &priv->op_flags)) {
+       if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(common, ANY, "Device not present\n");
                mutex_unlock(&priv->mutex);
                return;
@@ -1013,7 +1014,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
        ath9k_htc_ps_restore(priv);
        ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
 
-       set_bit(OP_INVALID, &priv->op_flags);
+       set_bit(ATH_OP_INVALID, &common->op_flags);
 
        ath_dbg(common, CONFIG, "Driver halt\n");
        mutex_unlock(&priv->mutex);
@@ -1087,7 +1088,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
        ath9k_htc_set_opmode(priv);
 
        if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
-           !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
+           !test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
                ath9k_hw_set_tsfadjust(priv->ah, true);
                ath9k_htc_start_ani(priv);
        }
@@ -1245,13 +1246,14 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
                                       u64 multicast)
 {
        struct ath9k_htc_priv *priv = hw->priv;
+       struct ath_common *common = ath9k_hw_common(priv->ah);
        u32 rfilt;
 
        mutex_lock(&priv->mutex);
        changed_flags &= SUPPORTED_FILTERS;
        *total_flags &= SUPPORTED_FILTERS;
 
-       if (test_bit(OP_INVALID, &priv->op_flags)) {
+       if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(ath9k_hw_common(priv->ah), ANY,
                        "Unable to configure filter on invalid state\n");
                mutex_unlock(&priv->mutex);
@@ -1474,7 +1476,9 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
        if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
                common->curaid = bss_conf->aid;
+               common->last_rssi = ATH_RSSI_DUMMY_MARKER;
                memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+               set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
        }
 }
 
@@ -1496,6 +1500,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath_hw *ah = priv->ah;
        struct ath_common *common = ath9k_hw_common(ah);
+       int slottime;
 
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
@@ -1507,6 +1512,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                bss_conf->assoc ?
                        priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
 
+               if (!bss_conf->assoc)
+                       clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
+
                if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
                        ath9k_htc_choose_set_bssid(priv);
                        if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
@@ -1528,7 +1536,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
                        bss_conf->bssid);
                ath9k_htc_set_tsfadjust(priv, vif);
-               set_bit(OP_ENABLE_BEACON, &priv->op_flags);
+               priv->cur_beacon_conf.enable_beacon = 1;
                ath9k_htc_beacon_config(priv, vif);
        }
 
@@ -1542,7 +1550,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
                        ath_dbg(common, CONFIG,
                                "Beacon disabled for BSS: %pM\n",
                                bss_conf->bssid);
-                       clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
+                       priv->cur_beacon_conf.enable_beacon = 0;
                        ath9k_htc_beacon_config(priv, vif);
                }
        }
@@ -1568,11 +1576,21 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
                if (bss_conf->use_short_slot)
-                       ah->slottime = 9;
+                       slottime = 9;
                else
-                       ah->slottime = 20;
-
-               ath9k_hw_init_global_settings(ah);
+                       slottime = 20;
+               if (vif->type == NL80211_IFTYPE_AP) {
+                       /*
+                        * Defer update, so that connected stations can adjust
+                        * their settings at the same time.
+                        * See beacon.c for more details
+                        */
+                       priv->beacon.slottime = slottime;
+                       priv->beacon.updateslot = UPDATE;
+               } else {
+                       ah->slottime = slottime;
+                       ath9k_hw_init_global_settings(ah);
+               }
        }
 
        if (changed & BSS_CHANGED_HT)
@@ -1669,10 +1687,11 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
 static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
 {
        struct ath9k_htc_priv *priv = hw->priv;
+       struct ath_common *common = ath9k_hw_common(priv->ah);
 
        mutex_lock(&priv->mutex);
        spin_lock_bh(&priv->beacon_lock);
-       set_bit(OP_SCANNING, &priv->op_flags);
+       set_bit(ATH_OP_SCANNING, &common->op_flags);
        spin_unlock_bh(&priv->beacon_lock);
        cancel_work_sync(&priv->ps_work);
        ath9k_htc_stop_ani(priv);
@@ -1682,10 +1701,11 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
 static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
 {
        struct ath9k_htc_priv *priv = hw->priv;
+       struct ath_common *common = ath9k_hw_common(priv->ah);
 
        mutex_lock(&priv->mutex);
        spin_lock_bh(&priv->beacon_lock);
-       clear_bit(OP_SCANNING, &priv->op_flags);
+       clear_bit(ATH_OP_SCANNING, &common->op_flags);
        spin_unlock_bh(&priv->beacon_lock);
        ath9k_htc_ps_wakeup(priv);
        ath9k_htc_vif_reconfig(priv);
index 12e0f32a4905b64bcfaaa7b5ae3b9972d6069c34..e8149e3dbdd58a425d4537c4fe2dee8f0d6ce881 100644 (file)
@@ -924,46 +924,43 @@ static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
 
 void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
 {
+       struct ath_common *common = ath9k_hw_common(priv->ah);
        ath9k_hw_rxena(priv->ah);
        ath9k_htc_opmode_init(priv);
-       ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
-       priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
+       ath9k_hw_startpcureceive(priv->ah, test_bit(ATH_OP_SCANNING, &common->op_flags));
 }
 
-static void ath9k_process_rate(struct ieee80211_hw *hw,
-                              struct ieee80211_rx_status *rxs,
-                              u8 rx_rate, u8 rs_flags)
+static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
+                                  struct ath_htc_rx_status *rxstatus)
 {
-       struct ieee80211_supported_band *sband;
-       enum ieee80211_band band;
-       unsigned int i = 0;
-
-       if (rx_rate & 0x80) {
-               /* HT rate */
-               rxs->flag |= RX_FLAG_HT;
-               if (rs_flags & ATH9K_RX_2040)
-                       rxs->flag |= RX_FLAG_40MHZ;
-               if (rs_flags & ATH9K_RX_GI)
-                       rxs->flag |= RX_FLAG_SHORT_GI;
-               rxs->rate_idx = rx_rate & 0x7f;
-               return;
-       }
-
-       band = hw->conf.chandef.chan->band;
-       sband = hw->wiphy->bands[band];
-
-       for (i = 0; i < sband->n_bitrates; i++) {
-               if (sband->bitrates[i].hw_value == rx_rate) {
-                       rxs->rate_idx = i;
-                       return;
-               }
-               if (sband->bitrates[i].hw_value_short == rx_rate) {
-                       rxs->rate_idx = i;
-                       rxs->flag |= RX_FLAG_SHORTPRE;
-                       return;
-               }
-       }
+       rx_stats->flag = 0;
+       if (rxstatus->rs_flags & ATH9K_RX_2040)
+               rx_stats->flag |= RX_FLAG_40MHZ;
+       if (rxstatus->rs_flags & ATH9K_RX_GI)
+               rx_stats->flag |= RX_FLAG_SHORT_GI;
+}
 
+static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
+                                struct ath_htc_rx_status *rxstatus)
+{
+       rx_stats->rs_datalen    = rxstatus->rs_datalen;
+       rx_stats->rs_status     = rxstatus->rs_status;
+       rx_stats->rs_phyerr     = rxstatus->rs_phyerr;
+       rx_stats->rs_rssi       = rxstatus->rs_rssi;
+       rx_stats->rs_keyix      = rxstatus->rs_keyix;
+       rx_stats->rs_rate       = rxstatus->rs_rate;
+       rx_stats->rs_antenna    = rxstatus->rs_antenna;
+       rx_stats->rs_more       = rxstatus->rs_more;
+
+       memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
+               sizeof(rx_stats->rs_rssi_ctl));
+       memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
+               sizeof(rx_stats->rs_rssi_ext));
+
+       rx_stats->rs_isaggr     = rxstatus->rs_isaggr;
+       rx_stats->rs_moreaggr   = rxstatus->rs_moreaggr;
+       rx_stats->rs_num_delims = rxstatus->rs_num_delims;
+       convert_htc_flag(rx_stats, rxstatus);
 }
 
 static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@@ -975,10 +972,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        struct ieee80211_hw *hw = priv->hw;
        struct sk_buff *skb = rxbuf->skb;
        struct ath_common *common = ath9k_hw_common(priv->ah);
+       struct ath_hw *ah = common->ah;
        struct ath_htc_rx_status *rxstatus;
-       int hdrlen, padsize;
-       int last_rssi = ATH_RSSI_DUMMY_MARKER;
-       __le16 fc;
+       struct ath_rx_status rx_stats;
+       bool decrypt_error;
 
        if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
                ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -999,103 +996,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
        ath9k_htc_err_stat_rx(priv, rxstatus);
 
        /* Get the RX status information */
-       memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
-       skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
-
-       hdr = (struct ieee80211_hdr *)skb->data;
-       fc = hdr->frame_control;
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-
-       padsize = hdrlen & 3;
-       if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
-               memmove(skb->data + padsize, skb->data, hdrlen);
-               skb_pull(skb, padsize);
-       }
 
        memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
 
-       if (rxbuf->rxstatus.rs_status != 0) {
-               if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
-                       rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
-               if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
-                       goto rx_next;
-
-               if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
-                       /* FIXME */
-               } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
-                       if (ieee80211_is_ctl(fc))
-                               /*
-                                * Sometimes, we get invalid
-                                * MIC failures on valid control frames.
-                                * Remove these mic errors.
-                                */
-                               rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
-                       else
-                               rx_status->flag |= RX_FLAG_MMIC_ERROR;
-               }
-
-               /*
-                * Reject error frames with the exception of
-                * decryption and MIC failures. For monitor mode,
-                * we also ignore the CRC error.
-                */
-               if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
-                       if (rxbuf->rxstatus.rs_status &
-                           ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
-                             ATH9K_RXERR_CRC))
-                               goto rx_next;
-               } else {
-                       if (rxbuf->rxstatus.rs_status &
-                           ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
-                               goto rx_next;
-                       }
-               }
-       }
-
-       if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
-               u8 keyix;
-               keyix = rxbuf->rxstatus.rs_keyix;
-               if (keyix != ATH9K_RXKEYIX_INVALID) {
-                       rx_status->flag |= RX_FLAG_DECRYPTED;
-               } else if (ieee80211_has_protected(fc) &&
-                          skb->len >= hdrlen + 4) {
-                       keyix = skb->data[hdrlen + 3] >> 6;
-                       if (test_bit(keyix, common->keymap))
-                               rx_status->flag |= RX_FLAG_DECRYPTED;
-               }
-       }
-
-       ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
-                          rxbuf->rxstatus.rs_flags);
-
-       if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
-           !rxbuf->rxstatus.rs_moreaggr)
-               ATH_RSSI_LPF(priv->rx.last_rssi,
-                            rxbuf->rxstatus.rs_rssi);
-
-       last_rssi = priv->rx.last_rssi;
+       /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
+        * After this, we can drop this part of the skb. */
+       rx_status_htc_to_ath(&rx_stats, rxstatus);
+       rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
+       skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
 
-       if (ath_is_mybeacon(common, hdr)) {
-               s8 rssi = rxbuf->rxstatus.rs_rssi;
+       /*
+        * Everything but the rate is checked here; the rate check is done
+        * separately to avoid doing two rate lookups per frame.
+        */
+       hdr = (struct ieee80211_hdr *)skb->data;
+       if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
+                       &decrypt_error, priv->rxfilter))
+               goto rx_next;
 
-               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-                       rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+       ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
+                                    rx_status, decrypt_error);
 
-               if (rssi < 0)
-                       rssi = 0;
+       if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
+               goto rx_next;
 
-               priv->ah->stats.avgbrssi = rssi;
-       }
+       rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
+       ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
 
-       rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
-       rx_status->band = hw->conf.chandef.chan->band;
-       rx_status->freq = hw->conf.chandef.chan->center_freq;
-       rx_status->signal =  rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
-       rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+       rx_status->band = ah->curchan->chan->band;
+       rx_status->freq = ah->curchan->chan->center_freq;
+       rx_status->antenna = rx_stats.rs_antenna;
        rx_status->flag |= RX_FLAG_MACTIME_END;
 
        return true;
-
 rx_next:
        return false;
 }
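
The RX rework above converts the HTC status into the common ath_rx_status and then lets the shared helpers do the work: ath9k_cmn_rx_accept() for filtering, ath9k_cmn_rx_skb_postprocess() for decryption/MIC flags, ath9k_cmn_process_rate() for rate mapping and ath9k_cmn_process_rssi() for the running RSSI average. The rate mapping that the removed ath9k_process_rate() used to do is the least obvious step; a standalone sketch of that logic (the table struct and return convention are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct ex_rate {
            uint8_t hw_value;        /* long-preamble hardware rate code */
            uint8_t hw_value_short;  /* short-preamble variant, 0 if none */
    };

    /* Bit 7 of the hardware rate code marks an HT (MCS) rate whose index is
     * in the low 7 bits; otherwise the legacy table is scanned for a match
     * on either the normal or the short-preamble code. Returns the rate or
     * MCS index, or -1 for an unknown code. */
    static int ex_map_rate(const struct ex_rate *tbl, int n, uint8_t rx_rate,
                           bool *ht, bool *short_preamble)
    {
            *ht = *short_preamble = false;

            if (rx_rate & 0x80) {
                    *ht = true;
                    return rx_rate & 0x7f;
            }

            for (int i = 0; i < n; i++) {
                    if (tbl[i].hw_value == rx_rate)
                            return i;
                    if (tbl[i].hw_value_short == rx_rate) {
                            *short_preamble = true;
                            return i;
                    }
            }

            return -1;
    }
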
index aac4a406a5134727e49fef2999e563e39169d83f..a0ff5b6370543809d8b715ec098986732d8f7809 100644 (file)
@@ -358,6 +358,36 @@ ret:
                kfree_skb(skb);
 }
 
+static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+                                     struct sk_buff *skb)
+{
+       uint32_t *pattern = (uint32_t *)skb->data;
+
+       switch (*pattern) {
+       case 0x33221199:
+               {
+               struct htc_panic_bad_vaddr *htc_panic;
+               htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
+               dev_err(htc_handle->dev, "ath: firmware panic! "
+                       "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
+                       htc_panic->exccause, htc_panic->pc,
+                       htc_panic->badvaddr);
+               break;
+               }
+       case 0x33221299:
+               {
+               struct htc_panic_bad_epid *htc_panic;
+               htc_panic = (struct htc_panic_bad_epid *) skb->data;
+               dev_err(htc_handle->dev, "ath: firmware panic! "
+                       "bad epid: 0x%08x\n", htc_panic->epid);
+               break;
+               }
+       default:
+               dev_err(htc_handle->dev, "ath: unknown panic pattern!\n");
+               break;
+       }
+}
+
 /*
  * HTC Messages are handled directly here and the obtained SKB
  * is freed.
@@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
        htc_hdr = (struct htc_frame_hdr *) skb->data;
        epid = htc_hdr->endpoint_id;
 
+       if (epid == 0x99) {
+               ath9k_htc_fw_panic_report(htc_handle, skb);
+               kfree_skb(skb);
+               return;
+       }
+
        if (epid >= ENDPOINT_MAX) {
                if (pipe_id != USB_REG_IN_PIPE)
                        dev_kfree_skb_any(skb);
index e1ffbb6bd636049686b3181705b236862004d29f..06474ccc7696597a24dcc596ed573e36f30b8694 100644 (file)
@@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
        u8 credits;
 } __packed;
 
+struct htc_panic_bad_vaddr {
+       __be32 pattern;
+       __be32 exccause;
+       __be32 pc;
+       __be32 badvaddr;
+} __packed;
+
+struct htc_panic_bad_epid {
+       __be32 pattern;
+       __be32 epid;
+} __packed;
+
 struct htc_ep_callbacks {
        void *priv;
        void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
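
These two structs pair with the 0x99-endpoint dispatcher added to htc_hst.c above: the first big-endian word of the message is a magic pattern that selects which panic record follows. A host-order model of that dispatch, with printf() standing in for dev_err() and the __be32 byte swapping left out:

    #include <inttypes.h>
    #include <stdio.h>

    #define EX_PANIC_BAD_VADDR 0x33221199u
    #define EX_PANIC_BAD_EPID  0x33221299u

    /* msg[0] is the pattern; the following words match the corresponding
     * layout (htc_panic_bad_vaddr or htc_panic_bad_epid above). */
    static void ex_panic_report(const uint32_t *msg)
    {
            switch (msg[0]) {
            case EX_PANIC_BAD_VADDR:
                    printf("firmware panic! exccause: 0x%08" PRIx32
                           " pc: 0x%08" PRIx32 " badvaddr: 0x%08" PRIx32 "\n",
                           msg[1], msg[2], msg[3]);
                    break;
            case EX_PANIC_BAD_EPID:
                    printf("firmware panic! bad epid: 0x%08" PRIx32 "\n",
                           msg[1]);
                    break;
            default:
                    printf("unknown panic pattern 0x%08" PRIx32 "\n", msg[0]);
                    break;
            }
    }
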
index 9078a6c5a74e3b340266836f7c0151b242ccbc1e..c8a9dfab1fee2ea4778046b186428ba3f014e054 100644 (file)
@@ -23,7 +23,6 @@
 
 #include "hw.h"
 #include "hw-ops.h"
-#include "rc.h"
 #include "ar9003_mac.h"
 #include "ar9003_mci.h"
 #include "ar9003_phy.h"
@@ -883,7 +882,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
                AR_IMR_RXORN |
                AR_IMR_BCNMISC;
 
-       if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
+       if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
 
        if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -3048,6 +3047,7 @@ static struct {
        { AR_SREV_VERSION_9462,         "9462" },
        { AR_SREV_VERSION_9550,         "9550" },
        { AR_SREV_VERSION_9565,         "9565" },
+       { AR_SREV_VERSION_9531,         "9531" },
 };
 
 /* For devices with external radios */
index 1fc2e5a26b525b5695be27cd3d29d71b6732118d..c0a4e866edcac9b5325a497d6e7bdedfdadb3b66 100644 (file)
@@ -62,111 +62,6 @@ module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
 MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
 
 bool is_ath9k_unloaded;
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx)  { \
-       .band = IEEE80211_BAND_2GHZ, \
-       .center_freq = (_freq), \
-       .hw_value = (_idx), \
-       .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
-       .band = IEEE80211_BAND_5GHZ, \
-       .center_freq = (_freq), \
-       .hw_value = (_idx), \
-       .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
-       CHAN2G(2412, 0), /* Channel 1 */
-       CHAN2G(2417, 1), /* Channel 2 */
-       CHAN2G(2422, 2), /* Channel 3 */
-       CHAN2G(2427, 3), /* Channel 4 */
-       CHAN2G(2432, 4), /* Channel 5 */
-       CHAN2G(2437, 5), /* Channel 6 */
-       CHAN2G(2442, 6), /* Channel 7 */
-       CHAN2G(2447, 7), /* Channel 8 */
-       CHAN2G(2452, 8), /* Channel 9 */
-       CHAN2G(2457, 9), /* Channel 10 */
-       CHAN2G(2462, 10), /* Channel 11 */
-       CHAN2G(2467, 11), /* Channel 12 */
-       CHAN2G(2472, 12), /* Channel 13 */
-       CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
-       /* _We_ call this UNII 1 */
-       CHAN5G(5180, 14), /* Channel 36 */
-       CHAN5G(5200, 15), /* Channel 40 */
-       CHAN5G(5220, 16), /* Channel 44 */
-       CHAN5G(5240, 17), /* Channel 48 */
-       /* _We_ call this UNII 2 */
-       CHAN5G(5260, 18), /* Channel 52 */
-       CHAN5G(5280, 19), /* Channel 56 */
-       CHAN5G(5300, 20), /* Channel 60 */
-       CHAN5G(5320, 21), /* Channel 64 */
-       /* _We_ call this "Middle band" */
-       CHAN5G(5500, 22), /* Channel 100 */
-       CHAN5G(5520, 23), /* Channel 104 */
-       CHAN5G(5540, 24), /* Channel 108 */
-       CHAN5G(5560, 25), /* Channel 112 */
-       CHAN5G(5580, 26), /* Channel 116 */
-       CHAN5G(5600, 27), /* Channel 120 */
-       CHAN5G(5620, 28), /* Channel 124 */
-       CHAN5G(5640, 29), /* Channel 128 */
-       CHAN5G(5660, 30), /* Channel 132 */
-       CHAN5G(5680, 31), /* Channel 136 */
-       CHAN5G(5700, 32), /* Channel 140 */
-       /* _We_ call this UNII 3 */
-       CHAN5G(5745, 33), /* Channel 149 */
-       CHAN5G(5765, 34), /* Channel 153 */
-       CHAN5G(5785, 35), /* Channel 157 */
-       CHAN5G(5805, 36), /* Channel 161 */
-       CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
-       ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) {              \
-       .bitrate        = (_bitrate),                   \
-       .flags          = (_flags),                     \
-       .hw_value       = (_hw_rate),                   \
-       .hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
-       RATE(10, 0x1b, 0),
-       RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
-       RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
-       RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
-       RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                       IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                       IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                        IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                        IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                        IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                        IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                        IEEE80211_RATE_SUPPORTS_10MHZ)),
-       RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
-                        IEEE80211_RATE_SUPPORTS_10MHZ)),
-};
 
 #ifdef CONFIG_MAC80211_LEDS
 static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
@@ -258,64 +153,6 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
 /*     Initialization     */
 /**************************/
 
-static void setup_ht_cap(struct ath_softc *sc,
-                        struct ieee80211_sta_ht_cap *ht_info)
-{
-       struct ath_hw *ah = sc->sc_ah;
-       struct ath_common *common = ath9k_hw_common(ah);
-       u8 tx_streams, rx_streams;
-       int i, max_streams;
-
-       ht_info->ht_supported = true;
-       ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
-                      IEEE80211_HT_CAP_SM_PS |
-                      IEEE80211_HT_CAP_SGI_40 |
-                      IEEE80211_HT_CAP_DSSSCCK40;
-
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
-               ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
-               ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
-
-       ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-       ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
-       if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
-               max_streams = 1;
-       else if (AR_SREV_9462(ah))
-               max_streams = 2;
-       else if (AR_SREV_9300_20_OR_LATER(ah))
-               max_streams = 3;
-       else
-               max_streams = 2;
-
-       if (AR_SREV_9280_20_OR_LATER(ah)) {
-               if (max_streams >= 2)
-                       ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
-               ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
-       }
-
-       /* set up supported mcs set */
-       memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-       tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
-       rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
-
-       ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
-               tx_streams, rx_streams);
-
-       if (tx_streams != rx_streams) {
-               ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-               ht_info->mcs.tx_params |= ((tx_streams - 1) <<
-                               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-       }
-
-       for (i = 0; i < rx_streams; i++)
-               ht_info->mcs.rx_mask[i] = 0xff;
-
-       ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
 static void ath9k_reg_notifier(struct wiphy *wiphy,
                               struct regulatory_request *request)
 {
@@ -486,51 +323,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
        return 0;
 }
 
-static int ath9k_init_channels_rates(struct ath_softc *sc)
-{
-       void *channels;
-
-       BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
-                    ARRAY_SIZE(ath9k_5ghz_chantable) !=
-                    ATH9K_NUM_CHANNELS);
-
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
-               channels = devm_kzalloc(sc->dev,
-                       sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
-               if (!channels)
-                   return -ENOMEM;
-
-               memcpy(channels, ath9k_2ghz_chantable,
-                      sizeof(ath9k_2ghz_chantable));
-               sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
-               sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
-               sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
-                       ARRAY_SIZE(ath9k_2ghz_chantable);
-               sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
-               sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
-                       ARRAY_SIZE(ath9k_legacy_rates);
-       }
-
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
-               channels = devm_kzalloc(sc->dev,
-                       sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
-               if (!channels)
-                       return -ENOMEM;
-
-               memcpy(channels, ath9k_5ghz_chantable,
-                      sizeof(ath9k_5ghz_chantable));
-               sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
-               sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
-               sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
-                       ARRAY_SIZE(ath9k_5ghz_chantable);
-               sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
-                       ath9k_legacy_rates + 4;
-               sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
-                       ARRAY_SIZE(ath9k_legacy_rates) - 4;
-       }
-       return 0;
-}
-
 static void ath9k_init_misc(struct ath_softc *sc)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -538,7 +330,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
 
        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
 
-       sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+       common->last_rssi = ATH_RSSI_DUMMY_MARKER;
        sc->config.txpowlimit = ATH_TXPOWER_MAX;
        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -793,7 +585,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        if (ret)
                goto err_btcoex;
 
-       ret = ath9k_init_channels_rates(sc);
+       ret = ath9k_cmn_init_channels_rates(common);
        if (ret)
                goto err_btcoex;
 
@@ -823,10 +615,11 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct cfg80211_chan_def chandef;
        int i;
 
-       sband = &sc->sbands[band];
+       sband = &common->sbands[band];
        for (i = 0; i < sband->n_channels; i++) {
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
@@ -849,17 +642,6 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
        ah->curchan = curchan;
 }
 
-void ath9k_reload_chainmask_settings(struct ath_softc *sc)
-{
-       if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
-               return;
-
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
-               setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
-       if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
-               setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
-}
-
 static const struct ieee80211_iface_limit if_limits[] = {
        { .max = 2048,  .types = BIT(NL80211_IFTYPE_STATION) |
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -949,6 +731,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
        hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+       hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
 
        hw->queues = 4;
        hw->max_rates = 4;
@@ -969,13 +752,13 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-                       &sc->sbands[IEEE80211_BAND_2GHZ];
+                       &common->sbands[IEEE80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-                       &sc->sbands[IEEE80211_BAND_5GHZ];
+                       &common->sbands[IEEE80211_BAND_5GHZ];
 
        ath9k_init_wow(hw);
-       ath9k_reload_chainmask_settings(sc);
+       ath9k_cmn_reload_chainmask(ah);
 
        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
 }
@@ -1106,19 +889,11 @@ static int __init ath9k_init(void)
 {
        int error;
 
-       /* Register rate control algorithm */
-       error = ath_rate_control_register();
-       if (error != 0) {
-               pr_err("Unable to register rate control algorithm: %d\n",
-                      error);
-               goto err_out;
-       }
-
        error = ath_pci_init();
        if (error < 0) {
                pr_err("No PCI devices found, driver not installed\n");
                error = -ENODEV;
-               goto err_rate_unregister;
+               goto err_out;
        }
 
        error = ath_ahb_init();
@@ -1131,9 +906,6 @@ static int __init ath9k_init(void)
 
  err_pci_exit:
        ath_pci_exit();
-
- err_rate_unregister:
-       ath_rate_control_unregister();
  err_out:
        return error;
 }
@@ -1144,7 +916,6 @@ static void __exit ath9k_exit(void)
        is_ath9k_unloaded = true;
        ath_ahb_exit();
        ath_pci_exit();
-       ath_rate_control_unregister();
        pr_info("%s: Driver unloaded\n", dev_info);
 }
 module_exit(ath9k_exit);
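
Alongside dropping the in-driver rate-control registration from module init/exit, this file loses its private ath9k_init_channels_rates() in favor of the shared ath9k_cmn_init_channels_rates(). The removed version copied the const channel templates into per-device writable memory with devm_kzalloc() plus memcpy(); a userspace sketch of that copy-the-template pattern, with malloc() standing in for devm_kzalloc():

    #include <stdlib.h>
    #include <string.h>

    /* Duplicate a const template table into writable storage so later code
     * (e.g. regulatory updates) can adjust the copy, never the template. */
    static void *ex_dup_table(const void *tmpl, size_t size)
    {
            void *copy = malloc(size);

            if (copy)
                    memcpy(copy, tmpl, size);

            return copy;    /* caller owns the copy; the driver ties it to the device */
    }
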
index 30dcef5aba100d0edfaf7171aa38726ef6e42674..72a715fe8f24e2699323e0b434cc6a4a1f894dd5 100644 (file)
@@ -115,13 +115,14 @@ void ath_hw_pll_work(struct work_struct *work)
        u32 pll_sqsum;
        struct ath_softc *sc = container_of(work, struct ath_softc,
                                            hw_pll_work.work);
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        /*
         * ensure that the PLL WAR is executed only
         * after the STA is associated (or) if the
         * beaconing had started in interfaces that
         * uses beacons.
         */
-       if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+       if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
                return;
 
        if (sc->tx99_state)
@@ -414,7 +415,7 @@ void ath_start_ani(struct ath_softc *sc)
        unsigned long timestamp = jiffies_to_msecs(jiffies);
 
        if (common->disable_ani ||
-           !test_bit(SC_OP_ANI_RUN, &sc->sc_flags) ||
+           !test_bit(ATH_OP_ANI_RUN, &common->op_flags) ||
            (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
                return;
 
@@ -438,6 +439,7 @@ void ath_stop_ani(struct ath_softc *sc)
 void ath_check_ani(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
 
        /*
@@ -453,23 +455,23 @@ void ath_check_ani(struct ath_softc *sc)
                         * Disable ANI only when there are no
                         * associated stations.
                         */
-                       if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+                       if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
                                goto stop_ani;
                }
        } else if (ah->opmode == NL80211_IFTYPE_STATION) {
-               if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+               if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
                        goto stop_ani;
        }
 
-       if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) {
-               set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+       if (!test_bit(ATH_OP_ANI_RUN, &common->op_flags)) {
+               set_bit(ATH_OP_ANI_RUN, &common->op_flags);
                ath_start_ani(sc);
        }
 
        return;
 
 stop_ani:
-       clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+       clear_bit(ATH_OP_ANI_RUN, &common->op_flags);
        ath_stop_ani(sc);
 }
 
index 5f727588ca2788b0e598ccf98450af8f396c7135..51ce36f108f9a8f0f6619e6b73e613722c2a72a8 100644 (file)
@@ -827,7 +827,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
                return;
        }
 
-       if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
+       if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
 
        async_mask = AR_INTR_MAC_IRQ;
index 10271373a0cd1081fa89b53ee35992982fd7679b..89df634e81f9a703d6dcf5eefee8881ef738570b 100644 (file)
@@ -155,12 +155,8 @@ struct ath_htc_rx_status {
        u8 rs_status;
        u8 rs_phyerr;
        int8_t rs_rssi;
-       int8_t rs_rssi_ctl0;
-       int8_t rs_rssi_ctl1;
-       int8_t rs_rssi_ctl2;
-       int8_t rs_rssi_ext0;
-       int8_t rs_rssi_ext1;
-       int8_t rs_rssi_ext2;
+       int8_t rs_rssi_ctl[3];
+       int8_t rs_rssi_ext[3];
        u8 rs_keyix;
        u8 rs_rate;
        u8 rs_antenna;
@@ -170,6 +166,7 @@ struct ath_htc_rx_status {
        u8 rs_num_delims;
        u8 rs_flags;
        u8 rs_dummy;
+       /* FIXME: evm* never used? */
        __be32 evm0;
        __be32 evm1;
        __be32 evm2;
index 5924f72dd4932c4be474f87f246643c6e49ccabb..d69853b848ce1f10167275c4e85d6026ee41c5f1 100644 (file)
@@ -229,16 +229,16 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        ath9k_cmn_update_txpow(ah, sc->curtxpow,
                               sc->config.txpowlimit, &sc->curtxpow);
 
-       clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
+       clear_bit(ATH_OP_HW_RESET, &common->op_flags);
        ath9k_hw_set_interrupts(ah);
        ath9k_hw_enable_interrupts(ah);
 
        if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
-               if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+               if (!test_bit(ATH_OP_BEACONS, &common->op_flags))
                        goto work;
 
                if (ah->opmode == NL80211_IFTYPE_STATION &&
-                   test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+                   test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
                        spin_lock_irqsave(&sc->sc_pm_lock, flags);
                        sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
                        spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -336,7 +336,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
        int old_pos = -1;
        int r;
 
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+       if (test_bit(ATH_OP_INVALID, &common->op_flags))
                return -EIO;
 
        offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -402,7 +402,7 @@ static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chand
                        chan->center_freq);
        } else {
                /* perform spectral scan if requested. */
-               if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+               if (test_bit(ATH_OP_SCANNING, &common->op_flags) &&
                        sc->spectral_mode == SPECTRAL_CHANSCAN)
                        ath9k_spectral_scan_trigger(hw);
        }
@@ -451,7 +451,7 @@ void ath9k_tasklet(unsigned long data)
                 * interrupts are enabled in the reset routine.
                 */
                atomic_inc(&ah->intr_ref_cnt);
-               ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
+               ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
                goto out;
        }
 
@@ -471,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
                         * interrupts are enabled in the reset routine.
                         */
                        atomic_inc(&ah->intr_ref_cnt);
-                       ath_dbg(common, ANY,
+                       ath_dbg(common, RESET,
                                "BB_WATCHDOG: Skipping interrupts\n");
                        goto out;
                }
@@ -484,7 +484,7 @@ void ath9k_tasklet(unsigned long data)
                        type = RESET_TYPE_TX_GTT;
                        ath9k_queue_reset(sc, type);
                        atomic_inc(&ah->intr_ref_cnt);
-                       ath_dbg(common, ANY,
+                       ath_dbg(common, RESET,
                                "GTT: Skipping interrupts\n");
                        goto out;
                }
@@ -566,6 +566,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 
        struct ath_softc *sc = dev;
        struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        enum ath9k_int status;
        u32 sync_cause = 0;
        bool sched = false;
@@ -575,7 +576,7 @@ irqreturn_t ath_isr(int irq, void *dev)
         * touch anything. Note this can happen early
         * on if the IRQ is shared.
         */
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+       if (test_bit(ATH_OP_INVALID, &common->op_flags))
                return IRQ_NONE;
 
        /* shared irq, not for us */
@@ -583,7 +584,7 @@ irqreturn_t ath_isr(int irq, void *dev)
        if (!ath9k_hw_intrpend(ah))
                return IRQ_NONE;
 
-       if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+       if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
                ath9k_hw_kill_interrupts(ah);
                return IRQ_HANDLED;
        }
@@ -684,10 +685,11 @@ int ath_reset(struct ath_softc *sc)
 
 void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
 {
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 #ifdef CONFIG_ATH9K_DEBUGFS
        RESET_STAT_INC(sc, type);
 #endif
-       set_bit(SC_OP_HW_RESET, &sc->sc_flags);
+       set_bit(ATH_OP_HW_RESET, &common->op_flags);
        ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
 }
 
@@ -768,7 +770,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
 
        ath_mci_enable(sc);
 
-       clear_bit(SC_OP_INVALID, &sc->sc_flags);
+       clear_bit(ATH_OP_INVALID, &common->op_flags);
        sc->sc_ah->is_monitoring = false;
 
        if (!ath_complete_reset(sc, false))
@@ -885,7 +887,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        ath_cancel_work(sc);
 
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+       if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(common, ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
                return;
@@ -940,7 +942,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 
        ath9k_ps_restore(sc);
 
-       set_bit(SC_OP_INVALID, &sc->sc_flags);
+       set_bit(ATH_OP_INVALID, &common->op_flags);
        sc->ps_idle = prev_idle;
 
        mutex_unlock(&sc->mutex);
@@ -1081,7 +1083,7 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
         */
        if (ah->opmode == NL80211_IFTYPE_STATION &&
            old_opmode == NL80211_IFTYPE_AP &&
-           test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+           test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
                ieee80211_iterate_active_interfaces_atomic(
                        sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
                        ath9k_sta_vif_iter, sc);
@@ -1178,9 +1180,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
 
-       if (sc->csa_vif == vif)
-               sc->csa_vif = NULL;
-
        ath9k_ps_wakeup(sc);
        ath9k_calculate_summary_state(hw, NULL);
        ath9k_ps_restore(sc);
@@ -1593,7 +1592,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
        unsigned long flags;
 
-       set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+       set_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
        avp->primary_sta_vif = true;
 
        /*
@@ -1609,7 +1608,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
        common->curaid = bss_conf->aid;
        ath9k_hw_write_associd(sc->sc_ah);
 
-       sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+       common->last_rssi = ATH_RSSI_DUMMY_MARKER;
        sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
        spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -1628,8 +1627,9 @@ static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 {
        struct ath_softc *sc = data;
        struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-       if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+       if (test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags))
                return;
 
        if (bss_conf->assoc)
@@ -1660,18 +1660,18 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
                        bss_conf->bssid, bss_conf->assoc);
 
                if (avp->primary_sta_vif && !bss_conf->assoc) {
-                       clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+                       clear_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags);
                        avp->primary_sta_vif = false;
 
                        if (ah->opmode == NL80211_IFTYPE_STATION)
-                               clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+                               clear_bit(ATH_OP_BEACONS, &common->op_flags);
                }
 
                ieee80211_iterate_active_interfaces_atomic(
                        sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
                        ath9k_bss_assoc_iter, sc);
 
-               if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
+               if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags) &&
                    ah->opmode == NL80211_IFTYPE_STATION) {
                        memset(common->curbssid, 0, ETH_ALEN);
                        common->curaid = 0;
@@ -1866,7 +1866,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 
 static bool ath9k_has_tx_pending(struct ath_softc *sc)
 {
-       int i, npend;
+       int i, npend = 0;
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
@@ -1900,7 +1900,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                return;
        }
 
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+       if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(common, ANY, "Device not present\n");
                mutex_unlock(&sc->mutex);
                return;
@@ -2056,7 +2056,7 @@ static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
                ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);
 
        ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
-       ath9k_reload_chainmask_settings(sc);
+       ath9k_cmn_reload_chainmask(ah);
 
        return 0;
 }
@@ -2073,26 +2073,23 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
 {
        struct ath_softc *sc = hw->priv;
-       set_bit(SC_OP_SCANNING, &sc->sc_flags);
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       set_bit(ATH_OP_SCANNING, &common->op_flags);
 }
 
 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
 {
        struct ath_softc *sc = hw->priv;
-       clear_bit(SC_OP_SCANNING, &sc->sc_flags);
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       clear_bit(ATH_OP_SCANNING, &common->op_flags);
 }
 
 static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct cfg80211_chan_def *chandef)
 {
-       struct ath_softc *sc = hw->priv;
-
-       /* mac80211 does not support CSA in multi-if cases (yet) */
-       if (WARN_ON(sc->csa_vif))
-               return;
-
-       sc->csa_vif = vif;
+       /* depend on vif->csa_active only */
+       return;
 }
 
 struct ieee80211_ops ath9k_ops = {
index 71799fcade543fca00ba709970c26f6ff33a948e..a0dbcc4123840cbeab93e56939ce6b095f3e885d 100644 (file)
@@ -555,7 +555,7 @@ void ath_mci_intr(struct ath_softc *sc)
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
 
                while (more_data == MCI_GPM_MORE) {
-                       if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+                       if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                                return;
 
                        pgpm = mci->gpm_buf.bf_addr;
index 55724b02316b17d44c9d7d788f0d55452c616286..25304adece571d9d4e498df9398103a49ad08a7e 100644 (file)
@@ -784,6 +784,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct ath_softc *sc;
        struct ieee80211_hw *hw;
+       struct ath_common *common;
        u8 csz;
        u32 val;
        int ret = 0;
@@ -858,9 +859,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        sc->mem = pcim_iomap_table(pdev)[0];
        sc->driver_data = id->driver_data;
 
-       /* Will be cleared in ath9k_start() */
-       set_bit(SC_OP_INVALID, &sc->sc_flags);
-
        ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
        if (ret) {
                dev_err(&pdev->dev, "request_irq failed\n");
@@ -879,6 +877,10 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
                   hw_name, (unsigned long)sc->mem, pdev->irq);
 
+       /* Will be cleared in ath9k_start() */
+       common = ath9k_hw_common(sc->sc_ah);
+       set_bit(ATH_OP_INVALID, &common->op_flags);
+
        return 0;
 
 err_init:
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
deleted file mode 100644 (file)
index d829bb6..0000000
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2004-2011 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "ath9k.h"
-
-static const struct ath_rate_table ar5416_11na_ratetable = {
-       68,
-       8, /* MCS start */
-       {
-               [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 0, 12 }, /* 6 Mb */
-               [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
-                       7800,  1, 18 }, /* 9 Mb */
-               [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10000, 2, 24 }, /* 12 Mb */
-               [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       13900, 3, 36 }, /* 18 Mb */
-               [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17300, 4, 48 }, /* 24 Mb */
-               [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23000, 5, 72 }, /* 36 Mb */
-               [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 6, 96 }, /* 48 Mb */
-               [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       29300, 7, 108 }, /* 54 Mb */
-               [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0 }, /* 6.5 Mb */
-               [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1 }, /* 13 Mb */
-               [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2 }, /* 19.5 Mb */
-               [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3 }, /* 26 Mb */
-               [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4 }, /* 39 Mb */
-               [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5 }, /* 52 Mb */
-               [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6 }, /* 58.5 Mb */
-               [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7 }, /* 65 Mb */
-               [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7 }, /* 75 Mb */
-               [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8 }, /* 13 Mb */
-               [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9 }, /* 26 Mb */
-               [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10 }, /* 39 Mb */
-               [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11 }, /* 52 Mb */
-               [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12 }, /* 78 Mb */
-               [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13 }, /* 104 Mb */
-               [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14 }, /* 117 Mb */
-               [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15 }, /* 130 Mb */
-               [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15 }, /* 144.4 Mb */
-               [26] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16 }, /* 19.5 Mb */
-               [27] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17 }, /* 39 Mb */
-               [28] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18 }, /* 58.5 Mb */
-               [29] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19 }, /* 78 Mb */
-               [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20 }, /* 117 Mb */
-               [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20 }, /* 130 Mb*/
-               [32] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21 }, /* 156 Mb */
-               [33] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21 }, /* 173.3 Mb */
-               [34] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22 }, /* 175.5 Mb */
-               [35] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22 }, /* 195 Mb*/
-               [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23 }, /* 195 Mb */
-               [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23 }, /* 216.7 Mb */
-               [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0 }, /* 13.5 Mb*/
-               [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1 }, /* 27.0 Mb*/
-               [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2 }, /* 40.5 Mb*/
-               [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3 }, /* 54 Mb */
-               [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4 }, /* 81 Mb */
-               [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5 }, /* 108 Mb */
-               [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6 }, /* 121.5 Mb*/
-               [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7 }, /* 135 Mb */
-               [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7 }, /* 150 Mb */
-               [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8 }, /* 27 Mb */
-               [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9 }, /* 54 Mb */
-               [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10 }, /* 81 Mb */
-               [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11 }, /* 108 Mb */
-               [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12 }, /* 162 Mb */
-               [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13 }, /* 216 Mb */
-               [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14 }, /* 243 Mb */
-               [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15 }, /* 270 Mb */
-               [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15 }, /* 300 Mb */
-               [56] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16 }, /* 40.5 Mb */
-               [57] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17 }, /* 81 Mb */
-               [58] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18 }, /* 121.5 Mb */
-               [59] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19 }, /*  162 Mb */
-               [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20 }, /*  243 Mb */
-               [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20 }, /*  270 Mb */
-               [62] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21 }, /*  324 Mb */
-               [63] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21 }, /*  360 Mb */
-               [64] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22 }, /* 364.5 Mb */
-               [65] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22 }, /* 405 Mb */
-               [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23 }, /* 405 Mb */
-               [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23 }, /* 450 Mb */
-       },
-       50,  /* probe interval */
-       WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
-};
-
-/* 4ms frame limit not used for NG mode.  The values filled
- * for HT are the 64K max aggregate limit */
-
-static const struct ath_rate_table ar5416_11ng_ratetable = {
-       72,
-       12, /* MCS start */
-       {
-               [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
-                       900, 0, 2 }, /* 1 Mb */
-               [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
-                       1900, 1, 4 }, /* 2 Mb */
-               [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
-                       4900, 2, 11 }, /* 5.5 Mb */
-               [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
-                       8100, 3, 22 }, /* 11 Mb */
-               [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
-                       5400, 4, 12 }, /* 6 Mb */
-               [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
-                       7800, 5, 18 }, /* 9 Mb */
-               [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
-                       10100, 6, 24 }, /* 12 Mb */
-               [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
-                       14100, 7, 36 }, /* 18 Mb */
-               [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
-                       17700, 8, 48 }, /* 24 Mb */
-               [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
-                       23700, 9, 72 }, /* 36 Mb */
-               [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
-                       27400, 10, 96 }, /* 48 Mb */
-               [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
-                       30900, 11, 108 }, /* 54 Mb */
-               [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
-                       6400, 0, 0 }, /* 6.5 Mb */
-               [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
-                       12700, 1, 1 }, /* 13 Mb */
-               [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
-                       18800, 2, 2 }, /* 19.5 Mb*/
-               [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
-                       25000, 3, 3 }, /* 26 Mb */
-               [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
-                       36700, 4, 4 }, /* 39 Mb */
-               [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
-                       48100, 5, 5 }, /* 52 Mb */
-               [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
-                       53500, 6, 6 }, /* 58.5 Mb */
-               [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
-                       59000, 7, 7 }, /* 65 Mb */
-               [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
-                       65400, 7, 7 }, /* 65 Mb*/
-               [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
-                       12700, 8, 8 }, /* 13 Mb */
-               [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
-                       24800, 9, 9 }, /* 26 Mb */
-               [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
-                       36600, 10, 10 }, /* 39 Mb */
-               [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
-                       48100, 11, 11 }, /* 52 Mb */
-               [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
-                       69500, 12, 12 }, /* 78 Mb */
-               [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
-                       89500, 13, 13 }, /* 104 Mb */
-               [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
-                       98900, 14, 14 }, /* 117 Mb */
-               [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
-                       108300, 15, 15 }, /* 130 Mb */
-               [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
-                       120000, 15, 15 }, /* 144.4 Mb */
-               [30] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
-                       17400, 16, 16 }, /* 19.5 Mb */
-               [31] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
-                       35100, 17, 17 }, /* 39 Mb */
-               [32] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
-                       52600, 18, 18 }, /* 58.5 Mb */
-               [33] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
-                       70400, 19, 19 }, /* 78 Mb */
-               [34] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
-                       104900, 20, 20 }, /* 117 Mb */
-               [35] = {  RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
-                       115800, 20, 20 }, /* 130 Mb */
-               [36] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
-                       137200, 21, 21 }, /* 156 Mb */
-               [37] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
-                       151100, 21, 21 }, /* 173.3 Mb */
-               [38] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
-                       152800, 22, 22 }, /* 175.5 Mb */
-               [39] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
-                       168400, 22, 22 }, /* 195 Mb */
-               [40] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
-                       168400, 23, 23 }, /* 195 Mb */
-               [41] = {  RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
-                       185000, 23, 23 }, /* 216.7 Mb */
-               [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
-                       13200, 0, 0 }, /* 13.5 Mb */
-               [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
-                       25900, 1, 1 }, /* 27.0 Mb */
-               [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
-                       38600, 2, 2 }, /* 40.5 Mb */
-               [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
-                       49800, 3, 3 }, /* 54 Mb */
-               [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
-                       72200, 4, 4 }, /* 81 Mb */
-               [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
-                       92900, 5, 5 }, /* 108 Mb */
-               [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
-                       102700, 6, 6 }, /* 121.5 Mb */
-               [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
-                       112000, 7, 7 }, /* 135 Mb */
-               [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
-                       122000, 7, 7 }, /* 150 Mb */
-               [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
-                       25800, 8, 8 }, /* 27 Mb */
-               [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
-                       49800, 9, 9 }, /* 54 Mb */
-               [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
-                       71900, 10, 10 }, /* 81 Mb */
-               [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
-                       92500, 11, 11 }, /* 108 Mb */
-               [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
-                       130300, 12, 12 }, /* 162 Mb */
-               [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
-                       162800, 13, 13 }, /* 216 Mb */
-               [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
-                       178200, 14, 14 }, /* 243 Mb */
-               [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
-                       192100, 15, 15 }, /* 270 Mb */
-               [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
-                       207000, 15, 15 }, /* 300 Mb */
-               [60] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
-                       36100, 16, 16 }, /* 40.5 Mb */
-               [61] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
-                       72900, 17, 17 }, /* 81 Mb */
-               [62] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
-                       108300, 18, 18 }, /* 121.5 Mb */
-               [63] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
-                       142000, 19, 19 }, /* 162 Mb */
-               [64] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
-                       205100, 20, 20 }, /* 243 Mb */
-               [65] = {  RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
-                       224700, 20, 20 }, /* 270 Mb */
-               [66] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
-                       263100, 21, 21 }, /* 324 Mb */
-               [67] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
-                       288000, 21, 21 }, /* 360 Mb */
-               [68] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
-                       290700, 22, 22 }, /* 364.5 Mb */
-               [69] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
-                       317200, 22, 22 }, /* 405 Mb */
-               [70] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
-                       317200, 23, 23 }, /* 405 Mb */
-               [71] = {  RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
-                       346400, 23, 23 }, /* 450 Mb */
-       },
-       50,  /* probe interval */
-       WLAN_RC_HT_FLAG,  /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11a_ratetable = {
-       8,
-       0,
-       {
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 0, 12},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800,  1, 18},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 2, 24},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 3, 36},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 4, 48},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 5, 72},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 6, 96},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 7, 108},
-       },
-       50,  /* probe interval */
-       0,   /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11g_ratetable = {
-       12,
-       0,
-       {
-               { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
-                       900, 0, 2},
-               { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
-                       1900, 1, 4},
-               { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
-                       4900, 2, 11},
-               { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
-                       8100, 3, 22},
-               { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
-                       5400, 4, 12},
-               { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
-                       7800, 5, 18},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
-                       10000, 6, 24},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
-                       13900, 7, 36},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
-                       17300, 8, 48},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
-                       23000, 9, 72},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
-                       27400, 10, 96},
-               { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
-                       29300, 11, 108},
-       },
-       50,  /* probe interval */
-       0,   /* Phy rates allowed initially */
-};
-
-static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
-                               struct ieee80211_tx_rate *rate)
-{
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       int rix, i, idx = 0;
-
-       if (!(rate->flags & IEEE80211_TX_RC_MCS))
-               return rate->idx;
-
-       for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
-               idx = ath_rc_priv->valid_rate_index[i];
-
-               if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
-                   rate_table->info[idx].ratecode == rate->idx)
-                       break;
-       }
-
-       rix = idx;
-
-       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
-               rix++;
-
-       return rix;
-}
-
-static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
-{
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       u8 i, j, idx, idx_next;
-
-       for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
-               for (j = 0; j <= i-1; j++) {
-                       idx = ath_rc_priv->valid_rate_index[j];
-                       idx_next = ath_rc_priv->valid_rate_index[j+1];
-
-                       if (rate_table->info[idx].ratekbps >
-                               rate_table->info[idx_next].ratekbps) {
-                               ath_rc_priv->valid_rate_index[j] = idx_next;
-                               ath_rc_priv->valid_rate_index[j+1] = idx;
-                       }
-               }
-       }
-}
-
-static inline
-int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
-                               struct ath_rate_priv *ath_rc_priv,
-                               u8 cur_valid_txrate,
-                               u8 *next_idx)
-{
-       u8 i;
-
-       for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
-               if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
-                       *next_idx = ath_rc_priv->valid_rate_index[i+1];
-                       return 1;
-               }
-       }
-
-       /* No more valid rates */
-       *next_idx = 0;
-
-       return 0;
-}
-
-/* Return true only for single stream */
-
-static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
-{
-       if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
-               return 0;
-       if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
-               return 0;
-       if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
-               return 0;
-       if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
-               return 0;
-       if (!ignore_cw && WLAN_RC_PHY_HT(phy))
-               if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
-                       return 0;
-       return 1;
-}
-
-static inline int
-ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
-                    u8 cur_valid_txrate, u8 *next_idx)
-{
-       int8_t i;
-
-       for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
-               if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
-                       *next_idx = ath_rc_priv->valid_rate_index[i-1];
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
-{
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       u8 i, hi = 0;
-
-       for (i = 0; i < rate_table->rate_cnt; i++) {
-               if (rate_table->info[i].rate_flags & RC_LEGACY) {
-                       u32 phy = rate_table->info[i].phy;
-                       u8 valid_rate_count = 0;
-
-                       if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
-                               continue;
-
-                       valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
-
-                       ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
-                       ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_priv->valid_rate_index[i] = true;
-                       hi = i;
-               }
-       }
-
-       return hi;
-}
-
-static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
-                                      u32 phy, u32 capflag)
-{
-       if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
-               return false;
-
-       if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
-               return false;
-
-       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
-               return false;
-
-       return true;
-}
-
-static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
-                                  u32 phy, u32 capflag)
-{
-       if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
-               return false;
-
-       if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
-               return false;
-
-       if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
-               return false;
-
-       return true;
-}
-
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
-{
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       struct ath_rateset *rateset;
-       u32 phy, capflag = ath_rc_priv->ht_cap;
-       u16 rate_flags;
-       u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
-
-       if (legacy)
-               rateset = &ath_rc_priv->neg_rates;
-       else
-               rateset = &ath_rc_priv->neg_ht_rates;
-
-       for (i = 0; i < rateset->rs_nrates; i++) {
-               for (j = 0; j < rate_table->rate_cnt; j++) {
-                       phy = rate_table->info[j].phy;
-                       rate_flags = rate_table->info[j].rate_flags;
-                       rate = rateset->rs_rates[i];
-                       dot11rate = rate_table->info[j].dot11rate;
-
-                       if (legacy &&
-                           !ath_rc_check_legacy(rate, dot11rate,
-                                                rate_flags, phy, capflag))
-                               continue;
-
-                       if (!legacy &&
-                           !ath_rc_check_ht(rate, dot11rate,
-                                            rate_flags, phy, capflag))
-                               continue;
-
-                       if (!ath_rc_valid_phyrate(phy, capflag, 0))
-                               continue;
-
-                       valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
-                       ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
-                       ath_rc_priv->valid_phy_ratecnt[phy] += 1;
-                       ath_rc_priv->valid_rate_index[j] = true;
-                       hi = max(hi, j);
-               }
-       }
-
-       return hi;
-}
-
-static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
-                                int *is_probing)
-{
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       u32 best_thruput, this_thruput, now_msec;
-       u8 rate, next_rate, best_rate, maxindex, minindex;
-       int8_t index = 0;
-
-       now_msec = jiffies_to_msecs(jiffies);
-       *is_probing = 0;
-       best_thruput = 0;
-       maxindex = ath_rc_priv->max_valid_rate-1;
-       minindex = 0;
-       best_rate = minindex;
-
-       /*
-        * Try the higher rate first. It will reduce memory moving time
-        * if we have very good channel characteristics.
-        */
-       for (index = maxindex; index >= minindex ; index--) {
-               u8 per_thres;
-
-               rate = ath_rc_priv->valid_rate_index[index];
-               if (rate > ath_rc_priv->rate_max_phy)
-                       continue;
-
-               /*
-                * For TCP the average collision rate is around 11%,
-                * so we ignore PERs less than this.  This is to
-                * prevent the rate we are currently using (whose
-                * PER might be in the 10-15 range because of TCP
-                * collisions) looking worse than the next lower
-                * rate whose PER has decayed close to 0.  If we
-                * used the next lower rate, its PER would grow to
-                * 10-15 and we would be worse off than staying
-                * at the current rate.
-                */
-               per_thres = ath_rc_priv->per[rate];
-               if (per_thres < 12)
-                       per_thres = 12;
-
-               this_thruput = rate_table->info[rate].user_ratekbps *
-                       (100 - per_thres);
-
-               if (best_thruput <= this_thruput) {
-                       best_thruput = this_thruput;
-                       best_rate    = rate;
-               }
-       }
-
-       rate = best_rate;
-
-       /*
-        * Must check the actual rate (ratekbps) to account for
-        * non-monotonicity of 11g's rate table
-        */
-
-       if (rate >= ath_rc_priv->rate_max_phy) {
-               rate = ath_rc_priv->rate_max_phy;
-
-               /* Probe the next allowed phy state */
-               if (ath_rc_get_nextvalid_txrate(rate_table,
-                                       ath_rc_priv, rate, &next_rate) &&
-                   (now_msec - ath_rc_priv->probe_time >
-                    rate_table->probe_interval) &&
-                   (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
-                       rate = next_rate;
-                       ath_rc_priv->probe_rate = rate;
-                       ath_rc_priv->probe_time = now_msec;
-                       ath_rc_priv->hw_maxretry_pktcnt = 0;
-                       *is_probing = 1;
-               }
-       }
-
-       if (rate > (ath_rc_priv->rate_table_size - 1))
-               rate = ath_rc_priv->rate_table_size - 1;
-
-       if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
-           (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
-               return rate;
-
-       if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
-           (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
-               return rate;
-
-       if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
-               return rate;
-
-       /* This should not happen */
-       WARN_ON_ONCE(1);
-
-       rate = ath_rc_priv->valid_rate_index[0];
-
-       return rate;
-}
-
-static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
-                                  struct ieee80211_tx_rate *rate,
-                                  struct ieee80211_tx_rate_control *txrc,
-                                  u8 tries, u8 rix, int rtsctsenable)
-{
-       rate->count = tries;
-       rate->idx = rate_table->info[rix].ratecode;
-
-       if (txrc->rts || rtsctsenable)
-               rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
-
-       if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
-               rate->flags |= IEEE80211_TX_RC_MCS;
-               if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
-                   conf_is_ht40(&txrc->hw->conf))
-                       rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-               if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
-                       rate->flags |= IEEE80211_TX_RC_SHORT_GI;
-       }
-}
-
-static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
-                                  const struct ath_rate_table *rate_table,
-                                  struct ieee80211_tx_info *tx_info)
-{
-       struct ieee80211_bss_conf *bss_conf;
-
-       if (!tx_info->control.vif)
-               return;
-       /*
-        * For legacy frames, mac80211 takes care of CTS protection.
-        */
-       if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
-               return;
-
-       bss_conf = &tx_info->control.vif->bss_conf;
-
-       if (!bss_conf->basic_rates)
-               return;
-
-       /*
-        * For now, use the lowest allowed basic rate for HT frames.
-        */
-       tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
-}
-
-static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
-                        struct ieee80211_tx_rate_control *txrc)
-{
-       struct ath_softc *sc = priv;
-       struct ath_rate_priv *ath_rc_priv = priv_sta;
-       const struct ath_rate_table *rate_table;
-       struct sk_buff *skb = txrc->skb;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_tx_rate *rates = tx_info->control.rates;
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       __le16 fc = hdr->frame_control;
-       u8 try_per_rate, i = 0, rix;
-       int is_probe = 0;
-
-       if (rate_control_send_low(sta, priv_sta, txrc))
-               return;
-
-       /*
-        * For Multi Rate Retry we use a different number of
-        * retry attempt counts. This ends up looking like this:
-        *
-        * MRR[0] = 4
-        * MRR[1] = 4
-        * MRR[2] = 4
-        * MRR[3] = 8
-        *
-        */
-       try_per_rate = 4;
-
-       rate_table = ath_rc_priv->rate_table;
-       rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
-
-       if (conf_is_ht(&sc->hw->conf) &&
-           (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
-               tx_info->flags |= IEEE80211_TX_CTL_LDPC;
-
-       if (conf_is_ht(&sc->hw->conf) &&
-           (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
-               tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
-
-       if (is_probe) {
-               /*
-                * Set one try for probe rates. For the
-                * probes don't enable RTS.
-                */
-               ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
-                                      1, rix, 0);
-               /*
-                * Get the next tried/allowed rate.
-                * No RTS for the next series after the probe rate.
-                */
-               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
-               ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
-                                      try_per_rate, rix, 0);
-
-               tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-       } else {
-               /*
-                * Set the chosen rate. No RTS for first series entry.
-                */
-               ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
-                                      try_per_rate, rix, 0);
-       }
-
-       for ( ; i < 4; i++) {
-               /*
-                * Use twice the number of tries for the last MRR segment.
-                */
-               if (i + 1 == 4)
-                       try_per_rate = 8;
-
-               ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
-
-               /*
-                * All other rates in the series have RTS enabled.
-                */
-               ath_rc_rate_set_series(rate_table, &rates[i], txrc,
-                                      try_per_rate, rix, 1);
-       }
-
-       /*
-        * NB:Change rate series to enable aggregation when operating
-        * at lower MCS rates. When first rate in series is MCS2
-        * in HT40 @ 2.4GHz, series should look like:
-        *
-        * {MCS2, MCS1, MCS0, MCS0}.
-        *
-        * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
-        * look like:
-        *
-        * {MCS3, MCS2, MCS1, MCS1}
-        *
-        * So, set fourth rate in series to be same as third one for
-        * above conditions.
-        */
-       if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
-           (conf_is_ht(&sc->hw->conf))) {
-               u8 dot11rate = rate_table->info[rix].dot11rate;
-               u8 phy = rate_table->info[rix].phy;
-               if (i == 4 &&
-                   ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
-                    (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
-                       rates[3].idx = rates[2].idx;
-                       rates[3].flags = rates[2].flags;
-               }
-       }
-
-       /*
-        * Force hardware to use computed duration for next
-        * fragment by disabling multi-rate retry, which
-        * updates duration based on the multi-rate duration table.
-        *
-        * FIXME: Fix duration
-        */
-       if (ieee80211_has_morefrags(fc) ||
-           (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
-               rates[1].count = rates[2].count = rates[3].count = 0;
-               rates[1].idx = rates[2].idx = rates[3].idx = 0;
-               rates[0].count = ATH_TXMAXTRY;
-       }
-
-       ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
-}
-
-static void ath_rc_update_per(struct ath_softc *sc,
-                             const struct ath_rate_table *rate_table,
-                             struct ath_rate_priv *ath_rc_priv,
-                                 struct ieee80211_tx_info *tx_info,
-                             int tx_rate, int xretries, int retries,
-                             u32 now_msec)
-{
-       int count, n_bad_frames;
-       u8 last_per;
-       static const u32 nretry_to_per_lookup[10] = {
-               100 * 0 / 1,
-               100 * 1 / 4,
-               100 * 1 / 2,
-               100 * 3 / 4,
-               100 * 4 / 5,
-               100 * 5 / 6,
-               100 * 6 / 7,
-               100 * 7 / 8,
-               100 * 8 / 9,
-               100 * 9 / 10
-       };
-
-       last_per = ath_rc_priv->per[tx_rate];
-       n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
-
-       if (xretries) {
-               if (xretries == 1) {
-                       ath_rc_priv->per[tx_rate] += 30;
-                       if (ath_rc_priv->per[tx_rate] > 100)
-                               ath_rc_priv->per[tx_rate] = 100;
-               } else {
-                       /* xretries == 2 */
-                       count = ARRAY_SIZE(nretry_to_per_lookup);
-                       if (retries >= count)
-                               retries = count - 1;
-
-                       /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
-                       ath_rc_priv->per[tx_rate] =
-                               (u8)(last_per - (last_per >> 3) + (100 >> 3));
-               }
-
-               /* xretries == 1 or 2 */
-
-               if (ath_rc_priv->probe_rate == tx_rate)
-                       ath_rc_priv->probe_rate = 0;
-
-       } else { /* xretries == 0 */
-               count = ARRAY_SIZE(nretry_to_per_lookup);
-               if (retries >= count)
-                       retries = count - 1;
-
-               if (n_bad_frames) {
-                       /* new_PER = 7/8*old_PER + 1/8*(currentPER)
-                        * Assuming that n_frames is not 0.  The current PER
-                        * from the retries is 100 * retries / (retries+1),
-                        * since the first retries attempts failed, and the
-                        * next one worked.  For the one that worked,
-                        * n_bad_frames subframes out of n_frames worked,
-                        * so the PER for that part is
-                        * 100 * n_bad_frames / n_frames, and it contributes
-                        * 100 * n_bad_frames / (n_frames * (retries+1)) to
-                        * the above PER.  The expression below is a
-                        * simplified version of the sum of these two terms.
-                        */
-                       if (tx_info->status.ampdu_len > 0) {
-                               int n_frames, n_bad_tries;
-                               u8 cur_per, new_per;
-
-                               n_bad_tries = retries * tx_info->status.ampdu_len +
-                                       n_bad_frames;
-                               n_frames = tx_info->status.ampdu_len * (retries + 1);
-                               cur_per = (100 * n_bad_tries / n_frames) >> 3;
-                               new_per = (u8)(last_per - (last_per >> 3) + cur_per);
-                               ath_rc_priv->per[tx_rate] = new_per;
-                       }
-               } else {
-                       ath_rc_priv->per[tx_rate] =
-                               (u8)(last_per - (last_per >> 3) +
-                                    (nretry_to_per_lookup[retries] >> 3));
-               }
-
-
-               /*
-                * If we got at most one retry then increase the max rate if
-                * this was a probe.  Otherwise, ignore the probe.
-                */
-               if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
-                       if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
-                               /*
-                                * Since we probed with just a single attempt,
-                                * any retries means the probe failed.  Also,
-                                * if the attempt worked, but more than half
-                                * the subframes were bad then also consider
-                                * the probe a failure.
-                                */
-                               ath_rc_priv->probe_rate = 0;
-                       } else {
-                               u8 probe_rate = 0;
-
-                               ath_rc_priv->rate_max_phy =
-                                       ath_rc_priv->probe_rate;
-                               probe_rate = ath_rc_priv->probe_rate;
-
-                               if (ath_rc_priv->per[probe_rate] > 30)
-                                       ath_rc_priv->per[probe_rate] = 20;
-
-                               ath_rc_priv->probe_rate = 0;
-
-                               /*
-                                * Since this probe succeeded, we allow the next
-                                * probe twice as soon.  This allows the maxRate
-                                * to move up faster if the probes are
-                                * successful.
-                                */
-                               ath_rc_priv->probe_time =
-                                       now_msec - rate_table->probe_interval / 2;
-                       }
-               }
-
-               if (retries > 0) {
-                       /*
-                        * Don't update anything.  We don't know if
-                        * this was because of collisions or poor signal.
-                        */
-                       ath_rc_priv->hw_maxretry_pktcnt = 0;
-               } else {
-                       /*
-                        * It worked with no retries: count how many packets in
-                        * a row have succeeded at the current max PHY rate.
-                        */
-                       if (tx_rate == ath_rc_priv->rate_max_phy &&
-                           ath_rc_priv->hw_maxretry_pktcnt < 255) {
-                               ath_rc_priv->hw_maxretry_pktcnt++;
-                       }
-
-               }
-       }
-}
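
The deleted ath_rc_update_per() above tracks the per-rate packet error rate as an exponentially weighted moving average, new_PER = 7/8 * old_PER + 1/8 * current_PER, where the current PER of an aggregate is derived from the bad subframes accumulated over all attempts (non-aggregated frames use the nretry_to_per_lookup table instead). A minimal standalone sketch of the aggregate case, with illustrative names, not part of the patch:

    #include <stdint.h>

    /* EWMA PER update: keep 7/8 of the old estimate and blend in 1/8 of
     * the PER observed for this transmission -- an A-MPDU of 'ampdu_len'
     * subframes that took 'retries' failed attempts plus one final
     * attempt in which 'n_bad_frames' subframes were still not acked. */
    static uint8_t rc_update_per(uint8_t old_per, int retries,
                                 int ampdu_len, int n_bad_frames)
    {
            int n_bad_tries, n_frames;
            uint8_t cur_per;

            if (ampdu_len <= 0)
                    return old_per;

            n_bad_tries = retries * ampdu_len + n_bad_frames;
            n_frames = ampdu_len * (retries + 1);
            cur_per = (100 * n_bad_tries / n_frames) >> 3;  /* 1/8 of current PER */

            return old_per - (old_per >> 3) + cur_per;
    }

On a successful probe the removed code additionally sets probe_time to now_msec - probe_interval / 2, so a station that keeps succeeding is allowed to probe upward twice as often.
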
-
-static void ath_rc_update_ht(struct ath_softc *sc,
-                            struct ath_rate_priv *ath_rc_priv,
-                            struct ieee80211_tx_info *tx_info,
-                            int tx_rate, int xretries, int retries)
-{
-       u32 now_msec = jiffies_to_msecs(jiffies);
-       int rate;
-       u8 last_per;
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       int size = ath_rc_priv->rate_table_size;
-
-       if ((tx_rate < 0) || (tx_rate >= rate_table->rate_cnt))
-               return;
-
-       last_per = ath_rc_priv->per[tx_rate];
-
-       /* Update PER first */
-       ath_rc_update_per(sc, rate_table, ath_rc_priv,
-                         tx_info, tx_rate, xretries,
-                         retries, now_msec);
-
-       /*
-        * If this rate looks bad (high PER) then stop using it for
-        * a while (except if we are probing).
-        */
-       if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
-           rate_table->info[tx_rate].ratekbps <=
-           rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
-               ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
-                                    &ath_rc_priv->rate_max_phy);
-
-               /* Don't probe for a little while. */
-               ath_rc_priv->probe_time = now_msec;
-       }
-
-       /* Make sure the rates below this have lower PER */
-       /* Monotonicity is kept only for rates below the current rate. */
-       if (ath_rc_priv->per[tx_rate] < last_per) {
-               for (rate = tx_rate - 1; rate >= 0; rate--) {
-
-                       if (ath_rc_priv->per[rate] >
-                           ath_rc_priv->per[rate+1]) {
-                               ath_rc_priv->per[rate] =
-                                       ath_rc_priv->per[rate+1];
-                       }
-               }
-       }
-
-       /* Maintain monotonicity for rates above the current rate */
-       for (rate = tx_rate; rate < size - 1; rate++) {
-               if (ath_rc_priv->per[rate+1] <
-                   ath_rc_priv->per[rate])
-                       ath_rc_priv->per[rate+1] =
-                               ath_rc_priv->per[rate];
-       }
-
-       /* Once every probe interval, age out old history by scaling
-        * every rate's PER down to 7/8 of its value. */
-       if (now_msec - ath_rc_priv->per_down_time >=
-           rate_table->probe_interval) {
-               for (rate = 0; rate < size; rate++) {
-                       ath_rc_priv->per[rate] =
-                               7 * ath_rc_priv->per[rate] / 8;
-               }
-
-               ath_rc_priv->per_down_time = now_msec;
-       }
-
-       ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
-                              ath_rc_priv->per[tx_rate]);
-
-}
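
ath_rc_update_ht() then keeps the PER table monotonic, so a lower rate never reports a worse PER than the rate just used and a higher rate never reports a better one, and it periodically decays every entry so stale failures age out. A compact sketch of those housekeeping passes, assuming a per[] array indexed by rate (illustrative only; the driver gates the downward pass on the PER having improved):

    #include <stdint.h>

    /* Keep per[] consistent across rate indices and decay it over time. */
    static void rc_per_housekeeping(uint8_t *per, int size, int tx_rate,
                                    int decay_due)
    {
            int rate;

            /* rates below the one just used must not look worse than it */
            for (rate = tx_rate - 1; rate >= 0; rate--)
                    if (per[rate] > per[rate + 1])
                            per[rate] = per[rate + 1];

            /* rates above it must not look better than it */
            for (rate = tx_rate; rate < size - 1; rate++)
                    if (per[rate + 1] < per[rate])
                            per[rate + 1] = per[rate];

            /* once per probe interval, forget 1/8 of every estimate */
            if (decay_due)
                    for (rate = 0; rate < size; rate++)
                            per[rate] = 7 * per[rate] / 8;
    }
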
-
-static void ath_rc_tx_status(struct ath_softc *sc,
-                            struct ath_rate_priv *ath_rc_priv,
-                            struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_tx_rate *rates = tx_info->status.rates;
-       struct ieee80211_tx_rate *rate;
-       int final_ts_idx = 0, xretries = 0, long_retry = 0;
-       u8 flags;
-       u32 i = 0, rix;
-
-       for (i = 0; i < sc->hw->max_rates; i++) {
-               rate = &tx_info->status.rates[i];
-               if (rate->idx < 0 || !rate->count)
-                       break;
-
-               final_ts_idx = i;
-               long_retry = rate->count - 1;
-       }
-
-       if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
-               xretries = 1;
-
-       /*
-        * If the first rate is not the final index, there
-        * are intermediate rate failures to be processed.
-        */
-       if (final_ts_idx != 0) {
-               for (i = 0; i < final_ts_idx ; i++) {
-                       if (rates[i].count != 0 && (rates[i].idx >= 0)) {
-                               flags = rates[i].flags;
-
-                               /* If HT40 and we have switched mode from
-                                * 40 to 20 => don't update */
-
-                               if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
-                                   !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
-                                       return;
-
-                               rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
-                               ath_rc_update_ht(sc, ath_rc_priv, tx_info,
-                                                rix, xretries ? 1 : 2,
-                                                rates[i].count);
-                       }
-               }
-       }
-
-       flags = rates[final_ts_idx].flags;
-
-       /* If HT40 and we have switched mode from 40 to 20 => don't update */
-       if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
-           !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
-               return;
-
-       rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
-       ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
-       ath_debug_stat_rc(ath_rc_priv, rix);
-}
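
ath_rc_tx_status() first walks the mac80211 rate entries attached to the skb to find the last rate the hardware actually tried and how many long retries it spent on it; every earlier entry is then treated as a failed intermediate rate. That scan in isolation (a sketch over the same ieee80211_tx_rate fields):

    /* Find the last rate entry the hardware used and its retry count. */
    static void find_final_rate(const struct ieee80211_tx_rate *rates,
                                int max_rates, int *final_idx, int *long_retry)
    {
            int i;

            *final_idx = 0;
            *long_retry = 0;

            for (i = 0; i < max_rates; i++) {
                    if (rates[i].idx < 0 || !rates[i].count)
                            break;
                    *final_idx = i;
                    *long_retry = rates[i].count - 1;
            }
    }
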
-
-static const
-struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
-                                            enum ieee80211_band band,
-                                            bool is_ht)
-{
-       switch(band) {
-       case IEEE80211_BAND_2GHZ:
-               if (is_ht)
-                       return &ar5416_11ng_ratetable;
-               return &ar5416_11g_ratetable;
-       case IEEE80211_BAND_5GHZ:
-               if (is_ht)
-                       return &ar5416_11na_ratetable;
-               return &ar5416_11a_ratetable;
-       default:
-               return NULL;
-       }
-}
-
-static void ath_rc_init(struct ath_softc *sc,
-                       struct ath_rate_priv *ath_rc_priv)
-{
-       const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
-       struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       u8 i, j, k, hi = 0, hthi = 0;
-
-       ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
-
-       for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
-               ath_rc_priv->per[i] = 0;
-               ath_rc_priv->valid_rate_index[i] = 0;
-       }
-
-       for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
-               for (j = 0; j < RATE_TABLE_SIZE; j++)
-                       ath_rc_priv->valid_phy_rateidx[i][j] = 0;
-               ath_rc_priv->valid_phy_ratecnt[i] = 0;
-       }
-
-       if (!rateset->rs_nrates) {
-               hi = ath_rc_init_validrates(ath_rc_priv);
-       } else {
-               hi = ath_rc_setvalid_rates(ath_rc_priv, true);
-
-               if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
-                       hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
-
-               hi = max(hi, hthi);
-       }
-
-       ath_rc_priv->rate_table_size = hi + 1;
-       ath_rc_priv->rate_max_phy = 0;
-       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-
-       for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
-               for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
-                       ath_rc_priv->valid_rate_index[k++] =
-                               ath_rc_priv->valid_phy_rateidx[i][j];
-               }
-
-               if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
-                   !ath_rc_priv->valid_phy_ratecnt[i])
-                       continue;
-
-               ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
-       }
-       WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-       WARN_ON(k > RATE_TABLE_SIZE);
-
-       ath_rc_priv->max_valid_rate = k;
-       ath_rc_sort_validrates(ath_rc_priv);
-       ath_rc_priv->rate_max_phy = (k > 4) ?
-               ath_rc_priv->valid_rate_index[k-4] :
-               ath_rc_priv->valid_rate_index[k-1];
-
-       ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
-               ath_rc_priv->ht_cap);
-}
-
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
-{
-       u8 caps = 0;
-
-       if (sta->ht_cap.ht_supported) {
-               caps = WLAN_RC_HT_FLAG;
-               if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
-                       caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
-               else if (sta->ht_cap.mcs.rx_mask[1])
-                       caps |= WLAN_RC_DS_FLAG;
-               if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
-                       caps |= WLAN_RC_40_FLAG;
-                       if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
-                               caps |= WLAN_RC_SGI_FLAG;
-               } else {
-                       if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
-                               caps |= WLAN_RC_SGI_FLAG;
-               }
-       }
-
-       return caps;
-}
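
ath_rc_build_ht_caps() infers the stream count from the HT MCS rx_mask: byte 0 covers MCS 0-7 (one stream), byte 1 MCS 8-15 (two streams), byte 2 MCS 16-23 (three streams). A rough sketch of that mapping (the rx_mask layout follows mac80211's ieee80211_mcs_info; the helper itself is illustrative):

    /* Rough spatial-stream count from an HT MCS rx_mask. */
    static int ht_rx_streams(const unsigned char rx_mask[10])
    {
            if (rx_mask[1] && rx_mask[2])   /* MCS 8-15 and 16-23 advertised */
                    return 3;
            if (rx_mask[1])                 /* MCS 8-15 advertised */
                    return 2;
            return 1;                       /* MCS 0-7 only */
    }
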
-
-static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
-                             u8 tidno)
-{
-       struct ath_node *an = (struct ath_node *)sta->drv_priv;
-       struct ath_atx_tid *txtid;
-
-       if (!sta->ht_cap.ht_supported)
-               return false;
-
-       txtid = ATH_AN_2_TID(an, tidno);
-       return !txtid->active;
-}
-
-
-/***********************************/
-/* mac80211 Rate Control callbacks */
-/***********************************/
-
-static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
-                         struct ieee80211_sta *sta, void *priv_sta,
-                         struct sk_buff *skb)
-{
-       struct ath_softc *sc = priv;
-       struct ath_rate_priv *ath_rc_priv = priv_sta;
-       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       __le16 fc = hdr->frame_control;
-
-       if (!priv_sta || !ieee80211_is_data(fc))
-               return;
-
-       /* This packet was aggregated but doesn't carry status info */
-       if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
-           !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
-               return;
-
-       if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
-               return;
-
-       ath_rc_tx_status(sc, ath_rc_priv, skb);
-
-       /* Check if aggregation has to be enabled for this tid */
-       if (conf_is_ht(&sc->hw->conf) &&
-           !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
-               if (ieee80211_is_data_qos(fc) &&
-                   skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
-                       u8 *qc, tid;
-
-                       qc = ieee80211_get_qos_ctl(hdr);
-                       tid = qc[0] & 0xf;
-
-                       if(ath_tx_aggr_check(sc, sta, tid))
-                               ieee80211_start_tx_ba_session(sta, tid, 0);
-               }
-       }
-}
-
-static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
-                         struct cfg80211_chan_def *chandef,
-                          struct ieee80211_sta *sta, void *priv_sta)
-{
-       struct ath_softc *sc = priv;
-       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_rate_priv *ath_rc_priv = priv_sta;
-       int i, j = 0;
-       u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
-
-       for (i = 0; i < sband->n_bitrates; i++) {
-               if (sta->supp_rates[sband->band] & BIT(i)) {
-                       if ((rate_flags & sband->bitrates[i].flags)
-                           != rate_flags)
-                               continue;
-
-                       ath_rc_priv->neg_rates.rs_rates[j]
-                               = (sband->bitrates[i].bitrate * 2) / 10;
-                       j++;
-               }
-       }
-       ath_rc_priv->neg_rates.rs_nrates = j;
-
-       if (sta->ht_cap.ht_supported) {
-               for (i = 0, j = 0; i < 77; i++) {
-                       if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
-                               ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
-                       if (j == ATH_RATE_MAX)
-                               break;
-               }
-               ath_rc_priv->neg_ht_rates.rs_nrates = j;
-       }
-
-       ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
-                                                       sta->ht_cap.ht_supported);
-       if (!ath_rc_priv->rate_table) {
-               ath_err(common, "No rate table chosen\n");
-               return;
-       }
-
-       ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
-       ath_rc_init(sc, priv_sta);
-}
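
The (bitrate * 2) / 10 expression in ath_rate_init() converts mac80211's bitrate field, which is in units of 100 kbps, into the 500 kbps units used by 802.11 supported-rates values. A worked check (not driver code):

    /* 54 Mbps: mac80211 bitrate = 540 (100 kbps units)
     *          (540 * 2) / 10  = 108 (500 kbps units), i.e. 0x6c,
     *          the standard dot11 rate value for 54 Mbps. */
    static unsigned char to_dot11_rate(unsigned short bitrate_100kbps)
    {
            return (bitrate_100kbps * 2) / 10;
    }
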
-
-static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
-                           struct cfg80211_chan_def *chandef,
-                           struct ieee80211_sta *sta, void *priv_sta,
-                           u32 changed)
-{
-       struct ath_softc *sc = priv;
-       struct ath_rate_priv *ath_rc_priv = priv_sta;
-
-       if (changed & IEEE80211_RC_BW_CHANGED) {
-               ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
-               ath_rc_init(sc, priv_sta);
-
-               ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
-                       "Operating Bandwidth changed to: %d\n",
-                       sc->hw->conf.chandef.width);
-       }
-}
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-       struct ath_rc_stats *stats;
-
-       stats = &rc->rcstats[final_rate];
-       stats->success++;
-}
-
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
-                           int xretries, int retries, u8 per)
-{
-       struct ath_rc_stats *stats = &rc->rcstats[rix];
-
-       stats->xretries += xretries;
-       stats->retries += retries;
-       stats->per = per;
-}
-
-static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
-                               size_t count, loff_t *ppos)
-{
-       struct ath_rate_priv *rc = file->private_data;
-       char *buf;
-       unsigned int len = 0, max;
-       int rix;
-       ssize_t retval;
-
-       if (rc->rate_table == NULL)
-               return 0;
-
-       max = 80 + rc->rate_table_size * 1024 + 1;
-       buf = kmalloc(max, GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       len += sprintf(buf, "%6s %6s %6s "
-                      "%10s %10s %10s %10s\n",
-                      "HT", "MCS", "Rate",
-                      "Success", "Retries", "XRetries", "PER");
-
-       for (rix = 0; rix < rc->max_valid_rate; rix++) {
-               u8 i = rc->valid_rate_index[rix];
-               u32 ratekbps = rc->rate_table->info[i].ratekbps;
-               struct ath_rc_stats *stats = &rc->rcstats[i];
-               char mcs[5];
-               char htmode[5];
-               int used_mcs = 0, used_htmode = 0;
-
-               if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
-                       used_mcs = scnprintf(mcs, 5, "%d",
-                                            rc->rate_table->info[i].ratecode);
-
-                       if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
-                               used_htmode = scnprintf(htmode, 5, "HT40");
-                       else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
-                               used_htmode = scnprintf(htmode, 5, "HT20");
-                       else
-                               used_htmode = scnprintf(htmode, 5, "????");
-               }
-
-               mcs[used_mcs] = '\0';
-               htmode[used_htmode] = '\0';
-
-               len += scnprintf(buf + len, max - len,
-                                "%6s %6s %3u.%d: "
-                                "%10u %10u %10u %10u\n",
-                                htmode,
-                                mcs,
-                                ratekbps / 1000,
-                                (ratekbps % 1000) / 100,
-                                stats->success,
-                                stats->retries,
-                                stats->xretries,
-                                stats->per);
-       }
-
-       if (len > max)
-               len = max;
-
-       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
-       kfree(buf);
-       return retval;
-}
-
-static const struct file_operations fops_rcstat = {
-       .read = read_file_rcstat,
-       .open = simple_open,
-       .owner = THIS_MODULE
-};
-
-static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
-                                    struct dentry *dir)
-{
-       struct ath_rate_priv *rc = priv_sta;
-       rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
-                                                 dir, rc, &fops_rcstat);
-}
-
-static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
-{
-       struct ath_rate_priv *rc = priv_sta;
-       debugfs_remove(rc->debugfs_rcstats);
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
-
-static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
-       return hw->priv;
-}
-
-static void ath_rate_free(void *priv)
-{
-       return;
-}
-
-static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
-       return kzalloc(sizeof(struct ath_rate_priv), gfp);
-}
-
-static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
-                             void *priv_sta)
-{
-       struct ath_rate_priv *rate_priv = priv_sta;
-       kfree(rate_priv);
-}
-
-static struct rate_control_ops ath_rate_ops = {
-       .module = NULL,
-       .name = "ath9k_rate_control",
-       .tx_status = ath_tx_status,
-       .get_rate = ath_get_rate,
-       .rate_init = ath_rate_init,
-       .rate_update = ath_rate_update,
-       .alloc = ath_rate_alloc,
-       .free = ath_rate_free,
-       .alloc_sta = ath_rate_alloc_sta,
-       .free_sta = ath_rate_free_sta,
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-       .add_sta_debugfs = ath_rate_add_sta_debugfs,
-       .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
-#endif
-};
-
-int ath_rate_control_register(void)
-{
-       return ieee80211_rate_control_register(&ath_rate_ops);
-}
-
-void ath_rate_control_unregister(void)
-{
-       ieee80211_rate_control_unregister(&ath_rate_ops);
-}
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
deleted file mode 100644 (file)
index b9a8738..0000000
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2004 Sam Leffler, Errno Consulting
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2008-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef RC_H
-#define RC_H
-
-#include "hw.h"
-
-struct ath_softc;
-
-#define ATH_RATE_MAX     30
-#define RATE_TABLE_SIZE  72
-
-#define RC_INVALID     0x0000
-#define RC_LEGACY      0x0001
-#define RC_SS          0x0002
-#define RC_DS          0x0004
-#define RC_TS          0x0008
-#define RC_HT_20       0x0010
-#define RC_HT_40       0x0020
-
-#define RC_STREAM_MASK 0xe
-#define RC_DS_OR_LATER(f)      ((((f) & RC_STREAM_MASK) == RC_DS) || \
-                               (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
-#define RC_TS_ONLY(f)          (((f) & RC_STREAM_MASK) == RC_TS)
-#define RC_SS_OR_LEGACY(f)     ((f) & (RC_SS | RC_LEGACY))
-
-#define RC_HT_2040             (RC_HT_20 | RC_HT_40)
-#define RC_ALL_STREAM          (RC_SS | RC_DS | RC_TS)
-#define RC_L_SD                        (RC_LEGACY | RC_SS | RC_DS)
-#define RC_L_SDT               (RC_LEGACY | RC_SS | RC_DS | RC_TS)
-#define RC_HT_S_20             (RC_HT_20 | RC_SS)
-#define RC_HT_D_20             (RC_HT_20 | RC_DS)
-#define RC_HT_T_20             (RC_HT_20 | RC_TS)
-#define RC_HT_S_40             (RC_HT_40 | RC_SS)
-#define RC_HT_D_40             (RC_HT_40 | RC_DS)
-#define RC_HT_T_40             (RC_HT_40 | RC_TS)
-
-#define RC_HT_SD_20            (RC_HT_20 | RC_SS | RC_DS)
-#define RC_HT_DT_20            (RC_HT_20 | RC_DS | RC_TS)
-#define RC_HT_SD_40            (RC_HT_40 | RC_SS | RC_DS)
-#define RC_HT_DT_40            (RC_HT_40 | RC_DS | RC_TS)
-
-#define RC_HT_SD_2040          (RC_HT_2040 | RC_SS | RC_DS)
-#define RC_HT_SDT_2040         (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
-
-#define RC_HT_SDT_20           (RC_HT_20 | RC_SS | RC_DS | RC_TS)
-#define RC_HT_SDT_40           (RC_HT_40 | RC_SS | RC_DS | RC_TS)
-
-#define RC_ALL                 (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
-
-enum {
-       WLAN_RC_PHY_OFDM,
-       WLAN_RC_PHY_CCK,
-       WLAN_RC_PHY_HT_20_SS,
-       WLAN_RC_PHY_HT_20_DS,
-       WLAN_RC_PHY_HT_20_TS,
-       WLAN_RC_PHY_HT_40_SS,
-       WLAN_RC_PHY_HT_40_DS,
-       WLAN_RC_PHY_HT_40_TS,
-       WLAN_RC_PHY_HT_20_SS_HGI,
-       WLAN_RC_PHY_HT_20_DS_HGI,
-       WLAN_RC_PHY_HT_20_TS_HGI,
-       WLAN_RC_PHY_HT_40_SS_HGI,
-       WLAN_RC_PHY_HT_40_DS_HGI,
-       WLAN_RC_PHY_HT_40_TS_HGI,
-       WLAN_RC_PHY_MAX
-};
-
-#define WLAN_RC_PHY_DS(_phy)   ((_phy == WLAN_RC_PHY_HT_20_DS)         \
-                               || (_phy == WLAN_RC_PHY_HT_40_DS)       \
-                               || (_phy == WLAN_RC_PHY_HT_20_DS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
-#define WLAN_RC_PHY_TS(_phy)   ((_phy == WLAN_RC_PHY_HT_20_TS)         \
-                               || (_phy == WLAN_RC_PHY_HT_40_TS)       \
-                               || (_phy == WLAN_RC_PHY_HT_20_TS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_20(_phy)   ((_phy == WLAN_RC_PHY_HT_20_SS)         \
-                               || (_phy == WLAN_RC_PHY_HT_20_DS)       \
-                               || (_phy == WLAN_RC_PHY_HT_20_TS)       \
-                               || (_phy == WLAN_RC_PHY_HT_20_SS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_20_DS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
-#define WLAN_RC_PHY_40(_phy)   ((_phy == WLAN_RC_PHY_HT_40_SS)         \
-                               || (_phy == WLAN_RC_PHY_HT_40_DS)       \
-                               || (_phy == WLAN_RC_PHY_HT_40_TS)       \
-                               || (_phy == WLAN_RC_PHY_HT_40_SS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_SGI(_phy)  ((_phy == WLAN_RC_PHY_HT_20_SS_HGI)      \
-                               || (_phy == WLAN_RC_PHY_HT_20_DS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_20_TS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_SS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)   \
-                               || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-
-#define WLAN_RC_PHY_HT(_phy)    (_phy >= WLAN_RC_PHY_HT_20_SS)
-
-#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ?      \
-       ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
-
-#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ?    \
-       (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
-
-/* Return TRUE if flag supports HT20 && client supports HT20 or
- * return TRUE if flag supports HT40 && client supports HT40.
- * This is used because some rates overlap between HT20/HT40.
- */
-#define WLAN_RC_PHY_HT_VALID(flag, capflag)                    \
-       (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
-        ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
-
-#define WLAN_RC_DS_FLAG         (0x01)
-#define WLAN_RC_TS_FLAG         (0x02)
-#define WLAN_RC_40_FLAG         (0x04)
-#define WLAN_RC_SGI_FLAG        (0x08)
-#define WLAN_RC_HT_FLAG         (0x10)
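
WLAN_RC_CAP_MODE() and WLAN_RC_CAP_STREAM() defined above collapse these capability flags into a mode and a stream class. A small worked example using the defines from this header (illustrative, not part of the file):

    /* An HT station on a 40 MHz channel with two spatial streams: */
    u8 capflag = WLAN_RC_HT_FLAG | WLAN_RC_40_FLAG | WLAN_RC_DS_FLAG;

    /* WLAN_RC_CAP_MODE(capflag)   evaluates to RC_HT_40 (HT + 40 MHz)   */
    /* WLAN_RC_CAP_STREAM(capflag) evaluates to RC_DS    (DS, no TS set) */
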
-
-/**
- * struct ath_rate_table - Rate Control table
- * @rate_cnt: total number of rates for the given wireless mode
- * @mcs_start: MCS rate index offset
- * @rate_flags: Rate Control flags
- * @phy: CCK/OFDM/HT20/HT40
- * @ratekbps: rate in Kbits per second
- * @user_ratekbps: user rate in Kbits per second
- * @ratecode: rate that goes into HW descriptors
- * @dot11rate: value that goes into supported
- *     rates info element of MLME
- * @ctrl_rate: Index of next lower basic rate, used for duration computation
- * @cw40index: Index of rates having 40MHz channel width
- * @sgi_index: Index of rates having Short Guard Interval
- * @ht_index: high throughput rates having 40MHz channel width and
- *     Short Guard Interval
- * @probe_interval: interval for rate control to probe for other rates
- * @initial_ratemax: initial ratemax value
- */
-struct ath_rate_table {
-       int rate_cnt;
-       int mcs_start;
-       struct {
-               u16 rate_flags;
-               u8 phy;
-               u32 ratekbps;
-               u32 user_ratekbps;
-               u8 ratecode;
-               u8 dot11rate;
-       } info[RATE_TABLE_SIZE];
-       u32 probe_interval;
-       u8 initial_ratemax;
-};
-
-struct ath_rateset {
-       u8 rs_nrates;
-       u8 rs_rates[ATH_RATE_MAX];
-};
-
-struct ath_rc_stats {
-       u32 success;
-       u32 retries;
-       u32 xretries;
-       u8 per;
-};
-
-/**
- * struct ath_rate_priv - Rate Control priv data
- * @state: RC state
- * @probe_rate: rate we are probing at
- * @probe_time: msec timestamp for last probe
- * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
- * @max_valid_rate: maximum number of valid rates
- * @per_down_time: msec timestamp for last PER down step
- * @valid_phy_ratecnt: valid rate count
- * @rate_max_phy: phy index for the max rate
- * @per: PER for every valid rate in %
- * @probe_interval: interval for ratectrl to probe for other rates
- * @ht_cap: HT capabilities
- * @neg_rates: Negotiated rates
- * @neg_ht_rates: Negotiated HT rates
- */
-struct ath_rate_priv {
-       u8 rate_table_size;
-       u8 probe_rate;
-       u8 hw_maxretry_pktcnt;
-       u8 max_valid_rate;
-       u8 valid_rate_index[RATE_TABLE_SIZE];
-       u8 ht_cap;
-       u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
-       u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
-       u8 rate_max_phy;
-       u8 per[RATE_TABLE_SIZE];
-       u32 probe_time;
-       u32 per_down_time;
-       u32 probe_interval;
-       struct ath_rateset neg_rates;
-       struct ath_rateset neg_ht_rates;
-       const struct ath_rate_table *rate_table;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-       struct dentry *debugfs_rcstats;
-       struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
-#endif
-};
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
-                           int xretries, int retries, u8 per);
-#else
-static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-}
-static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
-                                         int xretries, int retries, u8 per)
-{
-}
-#endif
-
-#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
-int ath_rate_control_register(void);
-void ath_rate_control_unregister(void);
-#else
-static inline int ath_rate_control_register(void)
-{
-       return 0;
-}
-
-static inline void ath_rate_control_unregister(void)
-{
-}
-#endif
-
-#endif /* RC_H */
index 82e340d3ec60a81cc83d2427bd9280e55ca3af3a..6c9accdb52e4140076d7378f530e975c34f68433 100644 (file)
@@ -762,204 +762,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
        return bf;
 }
 
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
-                           struct ieee80211_hdr *hdr,
-                           struct ieee80211_rx_status *rxs,
-                           struct ath_rx_status *rx_stats,
-                           bool *decrypt_error)
-{
-       struct ath_softc *sc = (struct ath_softc *) common->priv;
-       bool is_mc, is_valid_tkip, strip_mic, mic_error;
-       struct ath_hw *ah = common->ah;
-       __le16 fc;
-
-       fc = hdr->frame_control;
-
-       is_mc = !!is_multicast_ether_addr(hdr->addr1);
-       is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
-               test_bit(rx_stats->rs_keyix, common->tkip_keymap);
-       strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
-               ieee80211_has_protected(fc) &&
-               !(rx_stats->rs_status &
-               (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
-                ATH9K_RXERR_KEYMISS));
-
-       /*
-        * Key miss events are only relevant for pairwise keys where the
-        * descriptor does contain a valid key index. This has been observed
-        * mostly with CCMP encryption.
-        */
-       if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
-           !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
-               rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
-
-       mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
-               !ieee80211_has_morefrags(fc) &&
-               !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
-               (rx_stats->rs_status & ATH9K_RXERR_MIC);
-
-       /*
-        * The rx_stats->rs_status will not be set until the end of the
-        * chained descriptors so it can be ignored if rs_more is set. The
-        * rs_more will be false at the last element of the chained
-        * descriptors.
-        */
-       if (rx_stats->rs_status != 0) {
-               u8 status_mask;
-
-               if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
-                       rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
-                       mic_error = false;
-               }
-
-               if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
-                   (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
-                       *decrypt_error = true;
-                       mic_error = false;
-               }
-
-               /*
-                * Reject error frames with the exception of
-                * decryption and MIC failures. For monitor mode,
-                * we also ignore the CRC error.
-                */
-               status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
-                             ATH9K_RXERR_KEYMISS;
-
-               if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
-                       status_mask |= ATH9K_RXERR_CRC;
-
-               if (rx_stats->rs_status & ~status_mask)
-                       return false;
-       }
-
-       /*
-        * For unicast frames the MIC error bit can have false positives,
-        * so all MIC error reports need to be validated in software.
-        * False negatives are not common, so skip software verification
-        * if the hardware considers the MIC valid.
-        */
-       if (strip_mic)
-               rxs->flag |= RX_FLAG_MMIC_STRIPPED;
-       else if (is_mc && mic_error)
-               rxs->flag |= RX_FLAG_MMIC_ERROR;
-
-       return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
-                             struct ieee80211_hw *hw,
-                             struct ath_rx_status *rx_stats,
-                             struct ieee80211_rx_status *rxs)
-{
-       struct ieee80211_supported_band *sband;
-       enum ieee80211_band band;
-       unsigned int i = 0;
-       struct ath_softc __maybe_unused *sc = common->priv;
-       struct ath_hw *ah = sc->sc_ah;
-
-       band = ah->curchan->chan->band;
-       sband = hw->wiphy->bands[band];
-
-       if (IS_CHAN_QUARTER_RATE(ah->curchan))
-               rxs->flag |= RX_FLAG_5MHZ;
-       else if (IS_CHAN_HALF_RATE(ah->curchan))
-               rxs->flag |= RX_FLAG_10MHZ;
-
-       if (rx_stats->rs_rate & 0x80) {
-               /* HT rate */
-               rxs->flag |= RX_FLAG_HT;
-               rxs->flag |= rx_stats->flag;
-               rxs->rate_idx = rx_stats->rs_rate & 0x7f;
-               return 0;
-       }
-
-       for (i = 0; i < sband->n_bitrates; i++) {
-               if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
-                       rxs->rate_idx = i;
-                       return 0;
-               }
-               if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
-                       rxs->flag |= RX_FLAG_SHORTPRE;
-                       rxs->rate_idx = i;
-                       return 0;
-               }
-       }
-
-       /*
-        * No valid hardware bitrate found -- we should not get here
-        * because hardware has already validated this frame as OK.
-        */
-       ath_dbg(common, ANY,
-               "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
-               rx_stats->rs_rate);
-       RX_STAT_INC(rx_rate_err);
-       return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
-                              struct ieee80211_hw *hw,
-                              struct ath_rx_status *rx_stats,
-                              struct ieee80211_rx_status *rxs)
-{
-       struct ath_softc *sc = hw->priv;
-       struct ath_hw *ah = common->ah;
-       int last_rssi;
-       int rssi = rx_stats->rs_rssi;
-       int i, j;
-
-       /*
-        * RSSI is not available for subframes in an A-MPDU.
-        */
-       if (rx_stats->rs_moreaggr) {
-               rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
-               return;
-       }
-
-       /*
-        * Check if the RSSI for the last subframe in an A-MPDU
-        * or an unaggregated frame is valid.
-        */
-       if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
-               rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
-               return;
-       }
-
-       for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
-               s8 rssi;
-
-               if (!(ah->rxchainmask & BIT(i)))
-                       continue;
-
-               rssi = rx_stats->rs_rssi_ctl[i];
-               if (rssi != ATH9K_RSSI_BAD) {
-                       rxs->chains |= BIT(j);
-                       rxs->chain_signal[j] = ah->noise + rssi;
-               }
-               j++;
-       }
-
-       /*
-        * Update Beacon RSSI, this is used by ANI.
-        */
-       if (rx_stats->is_mybeacon &&
-           ((ah->opmode == NL80211_IFTYPE_STATION) ||
-            (ah->opmode == NL80211_IFTYPE_ADHOC))) {
-               ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
-               last_rssi = sc->last_rssi;
-
-               if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-                       rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
-               if (rssi < 0)
-                       rssi = 0;
-
-               ah->stats.avgbrssi = rssi;
-       }
-
-       rxs->signal = ah->noise + rx_stats->rs_rssi;
-}
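
ath9k_process_rssi() reports signal strength as the chip's noise floor plus the measured RSSI, both per receive chain and for the frame as a whole, skipping chains that are masked out or returned a bad reading. A sketch of the per-chain part (assumes the usual mac80211/ath9k headers; the loop is simplified):

    /* Fill mac80211 per-chain signal info from per-chain control RSSI. */
    static void fill_chain_signal(struct ieee80211_rx_status *rxs,
                                  const s8 *rssi_ctl, u8 rxchainmask,
                                  int nchains, int noise_floor)
    {
            int i, j = 0;

            for (i = 0; i < nchains; i++) {
                    if (!(rxchainmask & BIT(i)))
                            continue;
                    if (rssi_ctl[i] != ATH9K_RSSI_BAD) {
                            rxs->chains |= BIT(j);
                            rxs->chain_signal[j] = noise_floor + rssi_ctl[i];
                    }
                    j++;
            }
    }
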
-
 static void ath9k_process_tsf(struct ath_rx_status *rs,
                              struct ieee80211_rx_status *rxs,
                              u64 tsf)
@@ -1055,7 +857,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
         * everything but the rate is checked here, the rate check is done
         * separately to avoid doing two lookups for a rate for each frame.
         */
-       if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+       if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
                return -EINVAL;
 
        if (ath_is_mybeacon(common, hdr)) {
@@ -1069,10 +871,18 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
        if (WARN_ON(!ah->curchan))
                return -EINVAL;
 
-       if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+       if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
+               /*
+                * No valid hardware bitrate found -- we should not get here
+                * because hardware has already validated this frame as OK.
+                */
+               ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
+                       rx_stats->rs_rate);
+               RX_STAT_INC(rx_rate_err);
                return -EINVAL;
+       }
 
-       ath9k_process_rssi(common, hw, rx_stats, rx_status);
+       ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
 
        rx_status->band = ah->curchan->chan->band;
        rx_status->freq = ah->curchan->chan->center_freq;
@@ -1092,57 +902,6 @@ corrupt:
        return -EINVAL;
 }
 
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
-                                    struct sk_buff *skb,
-                                    struct ath_rx_status *rx_stats,
-                                    struct ieee80211_rx_status *rxs,
-                                    bool decrypt_error)
-{
-       struct ath_hw *ah = common->ah;
-       struct ieee80211_hdr *hdr;
-       int hdrlen, padpos, padsize;
-       u8 keyix;
-       __le16 fc;
-
-       /* see if any padding is done by the hw and remove it */
-       hdr = (struct ieee80211_hdr *) skb->data;
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-       fc = hdr->frame_control;
-       padpos = ieee80211_hdrlen(fc);
-
-       /* The MAC header is padded to have 32-bit boundary if the
-        * packet payload is non-zero. The general calculation for
-        * padsize would take into account odd header lengths:
-        * padsize = (4 - padpos % 4) % 4; However, since only
-        * even-length headers are used, padding can only be 0 or 2
-        * bytes and we can optimize this a bit. In addition, we must
-        * not try to remove padding from short control frames that do
-        * not have payload. */
-       padsize = padpos & 3;
-       if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
-               memmove(skb->data + padsize, skb->data, padpos);
-               skb_pull(skb, padsize);
-       }
-
-       keyix = rx_stats->rs_keyix;
-
-       if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
-           ieee80211_has_protected(fc)) {
-               rxs->flag |= RX_FLAG_DECRYPTED;
-       } else if (ieee80211_has_protected(fc)
-                  && !decrypt_error && skb->len >= hdrlen + 4) {
-               keyix = skb->data[hdrlen + 3] >> 6;
-
-               if (test_bit(keyix, common->keymap))
-                       rxs->flag |= RX_FLAG_DECRYPTED;
-       }
-       if (ah->sw_mgmt_crypto &&
-           (rxs->flag & RX_FLAG_DECRYPTED) &&
-           ieee80211_is_mgmt(fc))
-               /* Use software decrypt for management frames. */
-               rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
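
The padding logic in the removed ath9k_rx_skb_postprocess() relies on the hardware padding the 802.11 header to a 32-bit boundary; since only even header lengths occur, padsize = padpos & 3 can only be 0 or 2. A minimal sketch of the same stripping under those assumptions (uses linux/skbuff.h and the FCS_LEN define):

    /* Strip the 0- or 2-byte pad the hardware inserts after the 802.11
     * header so the payload follows the header directly again. */
    static void strip_rx_pad(struct sk_buff *skb, unsigned int hdrlen)
    {
            unsigned int padsize = hdrlen & 3;      /* 0 or 2 for even headers */

            if (padsize && skb->len >= hdrlen + padsize + FCS_LEN) {
                    memmove(skb->data + padsize, skb->data, hdrlen);
                    skb_pull(skb, padsize);
            }
    }
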
-
 /*
  * Run the LNA combining algorithm only in these cases:
  *
@@ -1292,8 +1051,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                        skb_pull(skb, ah->caps.rx_status_len);
 
                if (!rs.rs_more)
-                       ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
-                                                rxs, decrypt_error);
+                       ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
+                                                    rxs, decrypt_error);
 
                if (rs.rs_more) {
                        RX_STAT_INC(rx_frags);
index b686a749845038781e91c292f6070bb565a1da66..a65cfb91adcae12ea2d196c8e7c0f8658316f6ef 100644 (file)
@@ -108,7 +108,7 @@ static int ath9k_tx99_init(struct ath_softc *sc)
        struct ath_tx_control txctl;
        int r;
 
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+       if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_err(common,
                        "driver is in invalid state unable to use TX99");
                return -EINVAL;
index 1b3230fa36510916c740bbc62a2a1b6ec202f5a0..2879887f56912dea38e2568ddb748f25790e7bb0 100644 (file)
@@ -198,7 +198,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
        ath_cancel_work(sc);
        ath_stop_ani(sc);
 
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+       if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
                ath_dbg(common, ANY, "Device not present\n");
                ret = -EINVAL;
                goto fail_wow;
@@ -224,7 +224,7 @@ int ath9k_suspend(struct ieee80211_hw *hw,
         * STA.
         */
 
-       if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+       if (!test_bit(ATH_OP_PRIM_STA_VIF, &common->op_flags)) {
                ath_dbg(common, WOW, "None of the STA vifs are associated\n");
                ret = 1;
                goto fail_wow;
index 55897d508a76c7220d041f22c077cef2e435fff4..87cbec47fb48371403daaa70b32c1b9bc40ce1ec 100644 (file)
@@ -1040,11 +1040,11 @@ static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
        int symbols, bits;
        int bytes = 0;
 
+       usec -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
        symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
        bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
        bits -= OFDM_PLCP_BITS;
        bytes = bits / 8;
-       bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
        if (bytes > 65532)
                bytes = 65532;
 
@@ -1076,6 +1076,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_info *info, int len, bool rts)
 {
        struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
        struct ieee80211_tx_info *tx_info;
        struct ieee80211_tx_rate *rates;
@@ -1145,7 +1146,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
                }
 
                /* legacy rates */
-               rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
+               rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
                if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
                    !(rate->flags & IEEE80211_RATE_ERP_G))
                        phy = WLAN_RC_PHY_CCK;
@@ -1698,7 +1699,7 @@ int ath_cabq_update(struct ath_softc *sc)
 
        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
 
-       qi.tqi_readyTime = (cur_conf->beacon_interval *
+       qi.tqi_readyTime = (TU_TO_USEC(cur_conf->beacon_interval) *
                            ATH_CABQ_READY_TIME) / 100;
        ath_txq_update(sc, qnum, &qi);
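
This hunk fixes a unit mismatch: cur_conf->beacon_interval is in time units (TU) while tqi_readyTime is programmed in microseconds, so the interval has to go through TU_TO_USEC() first. With 1 TU = 1024 us and taking ATH_CABQ_READY_TIME as 80% for illustration, a 100 TU beacon interval gives:

    /* readyTime = TU_TO_USEC(100) * 80 / 100
     *           = 102400 * 80 / 100
     *           = 81920 us of each beacon interval reserved for the CAB queue */
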
 
@@ -1768,7 +1769,7 @@ bool ath_drain_all_txq(struct ath_softc *sc)
        int i;
        u32 npend = 0;
 
-       if (test_bit(SC_OP_INVALID, &sc->sc_flags))
+       if (test_bit(ATH_OP_INVALID, &common->op_flags))
                return true;
 
        ath9k_hw_abort_tx_dma(ah);
@@ -1816,11 +1817,12 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
  */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_atx_ac *ac, *last_ac;
        struct ath_atx_tid *tid, *last_tid;
        bool sent = false;
 
-       if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
+       if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
            list_empty(&txq->axq_acq))
                return;
 
@@ -2470,7 +2472,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
        ath_txq_lock(sc, txq);
        for (;;) {
-               if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+               if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                        break;
 
                if (list_empty(&txq->axq_q)) {
@@ -2553,7 +2555,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
        int status;
 
        for (;;) {
-               if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+               if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                        break;
 
                status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
@@ -2569,7 +2571,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
                        sc->beacon.tx_processed = true;
                        sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
 
-                       ath9k_csa_is_finished(sc);
+                       ath9k_csa_update(sc);
                        continue;
                }
 
index 536bc46a291244e8b743a63fcae41a7d30963b8c..924135b8e57543ea19060b802f122627a42ff904 100644 (file)
@@ -572,7 +572,7 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
 
 static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
 {
-       struct ieee80211_bar *bar = (void *) data;
+       struct ieee80211_bar *bar = data;
        struct carl9170_bar_list_entry *entry;
        unsigned int queue;
 
index e5e905910db49c569c8287de38141fc4a4b3d244..415393dfb6fc17b51d33f4d24e70c6700620d4b5 100644 (file)
@@ -222,7 +222,7 @@ static const struct ieee80211_regdomain *ath_default_world_regdomain(void)
 static const struct
 ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
 {
-       switch (reg->regpair->regDmnEnum) {
+       switch (reg->regpair->reg_domain) {
        case 0x60:
        case 0x61:
        case 0x62:
@@ -431,7 +431,7 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
                                      enum nl80211_reg_initiator initiator,
                                      struct ath_regulatory *reg)
 {
-       switch (reg->regpair->regDmnEnum) {
+       switch (reg->regpair->reg_domain) {
        case 0x60:
        case 0x63:
        case 0x66:
@@ -560,7 +560,7 @@ static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
                        printk(KERN_DEBUG "ath: EEPROM indicates we "
                               "should expect a direct regpair map\n");
                for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++)
-                       if (regDomainPairs[i].regDmnEnum == rd)
+                       if (regDomainPairs[i].reg_domain == rd)
                                return true;
        }
        printk(KERN_DEBUG
@@ -617,7 +617,7 @@ ath_get_regpair(int regdmn)
        if (regdmn == NO_ENUMRD)
                return NULL;
        for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) {
-               if (regDomainPairs[i].regDmnEnum == regdmn)
+               if (regDomainPairs[i].reg_domain == regdmn)
                        return &regDomainPairs[i];
        }
        return NULL;
@@ -741,7 +741,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
        printk(KERN_DEBUG "ath: Country alpha2 being used: %c%c\n",
                reg->alpha2[0], reg->alpha2[1]);
        printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
-               reg->regpair->regDmnEnum);
+               reg->regpair->reg_domain);
 
        return 0;
 }
index ee25786b44478fd094df1c3b165c9caf0d0bc73b..73f12f196f14ec6ee14368dee20673f67b8045a3 100644 (file)
@@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
        writel(data, wcn->mmio + addr);
 }
 
+#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data)                \
+do {                                                                    \
+       if (wcn->chip_version == WCN36XX_CHIP_3680)                      \
+               wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
+       else                                                             \
+               wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
+} while (0)                                                             \
+
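
The new wcn36xx_dxe_write_register_x() macro pastes a chip suffix onto the register name, so a single call site covers both the 3660 and 3680 register maps and the right address is picked at run time from wcn->chip_version. The call site changed further below in this patch then reads:

    /* expands both WCN36XX_DXE_REG_CCU_INT_3660 and _3680; the if()
     * inside the macro selects the one matching the detected chip */
    wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
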
 static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
 {
        *data = readl(wcn->mmio + addr);
@@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 
        /* Setting interrupt path */
        reg_data = WCN36XX_DXE_CCU_INT;
-       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+       wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
 
        /***************************************/
        /* Init descriptors for TX LOW channel */
index c88562f85de1c21a30c80a884cc36f85c7c6cfc9..35ee7e966bd2789f8ea9bb283598d13be4b35e62 100644 (file)
@@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
 */
 
 /* DXE registers */
-#define WCN36XX_DXE_MEM_BASE                   0x03000000
 #define WCN36XX_DXE_MEM_REG                    0x202000
 
 #define WCN36XX_DXE_CCU_INT                    0xA0011
-#define WCN36XX_DXE_REG_CCU_INT                        0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3660           0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3680           0x2050dc
 
 /* TODO This must be calculated properly and not hardcoded */
 #define WCN36XX_DXE_CTRL_TX_L                  0x328a44
index 3c2ef0c32f72c7ef37b9b032e3b66ec3ab9619d6..a1f1127d7808d739e4f6571fa0f071491ca289bc 100644 (file)
@@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
        MAX_FEATURE_SUPPORTED = 128,
 };
 
+#define WCN36XX_HAL_CAPS_SIZE 4
+
 struct wcn36xx_hal_feat_caps_msg {
 
        struct wcn36xx_hal_msg_header header;
 
-       u32 feat_caps[4];
+       u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
 } __packed;
 
 /* status codes to help debug rekey failures */
index e64a6784079e20446ad57078f607e0f01b7565a1..4ab5370ab7a6ff5df3d5e4b1525b7f852cc55cb6 100644 (file)
@@ -17,6 +17,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
+#include <linux/firmware.h>
 #include <linux/platform_device.h>
 #include "wcn36xx.h"
 
@@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
               sta_priv->sta_index;
 }
 
+static const char * const wcn36xx_caps_names[] = {
+       "MCC",                          /* 0 */
+       "P2P",                          /* 1 */
+       "DOT11AC",                      /* 2 */
+       "SLM_SESSIONIZATION",           /* 3 */
+       "DOT11AC_OPMODE",               /* 4 */
+       "SAP32STA",                     /* 5 */
+       "TDLS",                         /* 6 */
+       "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
+       "WLANACTIVE_OFFLOAD",           /* 8 */
+       "BEACON_OFFLOAD",               /* 9 */
+       "SCAN_OFFLOAD",                 /* 10 */
+       "ROAM_OFFLOAD",                 /* 11 */
+       "BCN_MISS_OFFLOAD",             /* 12 */
+       "STA_POWERSAVE",                /* 13 */
+       "STA_ADVANCED_PWRSAVE",         /* 14 */
+       "AP_UAPSD",                     /* 15 */
+       "AP_DFS",                       /* 16 */
+       "BLOCKACK",                     /* 17 */
+       "PHY_ERR",                      /* 18 */
+       "BCN_FILTER",                   /* 19 */
+       "RTT",                          /* 20 */
+       "RATECTRL",                     /* 21 */
+       "WOW"                           /* 22 */
+};
+
+static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
+{
+       if (x >= ARRAY_SIZE(wcn36xx_caps_names))
+               return "UNKNOWN";
+       return wcn36xx_caps_names[x];
+}
+
+static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
+{
+       int i;
+
+       for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+               if (get_feat_caps(wcn->fw_feat_caps, i))
+                       wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
+       }
+}
+
+static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
+{
+       if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
+               wcn36xx_info("Chip is 3680\n");
+               wcn->chip_version = WCN36XX_CHIP_3680;
+       } else {
+               wcn36xx_info("Chip is 3660\n");
+               wcn->chip_version = WCN36XX_CHIP_3660;
+       }
+}
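
wcn36xx_detect_chip_version() keys off the DOT11AC bit in the capability bitmap exchanged with the firmware. The bitmap is an array of 32-bit words, so a bit test of the kind get_feat_caps() performs looks roughly like this (a hedged sketch; the driver's actual helper may differ):

    /* Illustrative test of capability bit 'cap' in a u32 bitmap array. */
    static int caps_bit_is_set(const u32 *bitmap, int cap)
    {
            return !!(bitmap[cap / 32] & BIT(cap % 32));
    }
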
+
 static int wcn36xx_start(struct ieee80211_hw *hw)
 {
        struct wcn36xx *wcn = hw->priv;
@@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
                goto out_free_smd_buf;
        }
 
+       if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+               ret = wcn36xx_smd_feature_caps_exchange(wcn);
+               if (ret)
+                       wcn36xx_warn("Exchange feature caps failed\n");
+               else
+                       wcn36xx_feat_caps_info(wcn);
+       }
+
+       wcn36xx_detect_chip_version(wcn);
+
        /* DMA channel initialization */
        ret = wcn36xx_dxe_init(wcn);
        if (ret) {
@@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
 
        wcn36xx_debugfs_init(wcn);
 
-       if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
-               ret = wcn36xx_smd_feature_caps_exchange(wcn);
-               if (ret)
-                       wcn36xx_warn("Exchange feature caps failed\n");
-       }
        INIT_LIST_HEAD(&wcn->vif_list);
        return 0;
 
@@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
                            bss_conf->enable_beacon);
 
                if (bss_conf->enable_beacon) {
+                       vif_priv->dtim_period = bss_conf->dtim_period;
                        vif_priv->bss_index = 0xff;
                        wcn36xx_smd_config_bss(wcn, vif, NULL,
                                               vif->addr, false);
@@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
        struct wcn36xx *wcn = hw->priv;
        wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
 
+       release_firmware(wcn->nv);
        mutex_destroy(&wcn->hal_mutex);
 
        ieee80211_unregister_hw(hw);
index 750626b0e22d528d6d76fbe4ad402a2c175316be..7bf0ef8a1f56f176e14cf0a7905410a0593362a3 100644 (file)
@@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
 static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
 {
        int ret = 0;
+       unsigned long start;
        wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
 
        init_completion(&wcn->hal_rsp_compl);
+       start = jiffies;
        ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
        if (ret) {
                wcn36xx_err("HAL TX failed\n");
@@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
        }
        if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
                msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
-               wcn36xx_err("Timeout while waiting SMD response\n");
+               wcn36xx_err("Timeout! No SMD response in %dms\n",
+                           HAL_MSG_TIMEOUT);
                ret = -ETIME;
                goto out;
        }
+       wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
+                   jiffies_to_msecs(jiffies - start));
 out:
        return ret;
 }
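
A minimal sketch of the completion-plus-timeout pattern used in the hunk above, which only records jiffies before the send and converts the elapsed delta once the response completion fires. Names here are hypothetical (example_wait_rsp is not part of the driver):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* caller primes 'done' with init_completion(); the response path calls complete() */
static int example_wait_rsp(struct completion *done, unsigned int timeout_ms)
{
        unsigned long start = jiffies;

        if (wait_for_completion_timeout(done, msecs_to_jiffies(timeout_ms)) == 0)
                return -ETIME;          /* no response within timeout_ms */

        pr_debug("response after %u ms\n", jiffies_to_msecs(jiffies - start));
        return 0;
}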
@@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
 
 int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
 {
-       const struct firmware *nv;
        struct nv_data *nv_d;
        struct wcn36xx_hal_nv_img_download_req_msg msg_body;
        int fw_bytes_left;
        int ret;
        u16 fm_offset = 0;
 
-       ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
-       if (ret) {
-               wcn36xx_err("Failed to load nv file %s: %d\n",
-                             WLAN_NV_FILE, ret);
-               goto out_free_nv;
+       if (!wcn->nv) {
+               ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
+               if (ret) {
+                       wcn36xx_err("Failed to load nv file %s: %d\n",
+                                     WLAN_NV_FILE, ret);
+                       goto out;
+               }
        }
 
-       nv_d = (struct nv_data *)nv->data;
+       nv_d = (struct nv_data *)wcn->nv->data;
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
 
        msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
        mutex_lock(&wcn->hal_mutex);
 
        do {
-               fw_bytes_left = nv->size - fm_offset - 4;
+               fw_bytes_left = wcn->nv->size - fm_offset - 4;
                if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
                        msg_body.last_fragment = 0;
                        msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
 
 out_unlock:
        mutex_unlock(&wcn->hal_mutex);
-out_free_nv:
-       release_firmware(nv);
-
-       return ret;
+out:   return ret;
 }
 
 static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
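
The reworked load path above caches the NV blob in wcn->nv instead of requesting and releasing it on every start; the matching release_firmware() now sits in wcn36xx_remove(). A hedged sketch of that cache-on-first-use pattern, with illustrative names:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/printk.h>

struct example_ctx {
        struct device *dev;
        const struct firmware *nv;      /* cached blob, freed at remove time */
};

static int example_load_nv(struct example_ctx *ctx, const char *name)
{
        int ret;

        if (ctx->nv)                    /* already cached by an earlier start */
                return 0;

        ret = request_firmware(&ctx->nv, name, ctx->dev);
        if (ret)
                pr_err("failed to load %s: %d\n", name, ret);
        return ret;
}

static void example_unload_nv(struct example_ctx *ctx)
{
        release_firmware(ctx->nv);      /* NULL-safe */
        ctx->nv = NULL;
}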
@@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
 
        sta_priv->sta_index = params->sta_index;
        sta_priv->dpu_desc_index = params->dpu_index;
+       sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
 
        wcn36xx_dbg(WCN36XX_DBG_HAL,
-                   "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+                   "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
                    params->status, params->sta_index, params->bssid_index,
-                   params->p2p);
+                   params->uc_ucast_sig, params->p2p);
 
        return 0;
 }
@@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
                priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
        }
 
-       priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+       priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
 
        return 0;
 }
@@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
 
        ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
        if (ret) {
-               wcn36xx_err("Sending hal_exit_bmps failed\n");
+               wcn36xx_err("Sending hal_keep_alive failed\n");
                goto out;
        }
        ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
        if (ret) {
-               wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+               wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
                goto out;
        }
 out:
@@ -1682,8 +1686,7 @@ out:
        return ret;
 }
 
-static inline void set_feat_caps(u32 *bitmap,
-                                enum place_holder_in_cap_bitmap cap)
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 {
        int arr_idx, bit_idx;
 
@@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
        bitmap[arr_idx] |= (1 << bit_idx);
 }
 
-static inline int get_feat_caps(u32 *bitmap,
-                               enum place_holder_in_cap_bitmap cap)
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 {
        int arr_idx, bit_idx;
        int ret = 0;
@@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
        return ret;
 }
 
-static inline void clear_feat_caps(u32 *bitmap,
-                               enum place_holder_in_cap_bitmap cap)
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
 {
        int arr_idx, bit_idx;
 
@@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
 
 int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
 {
-       struct wcn36xx_hal_feat_caps_msg msg_body;
-       int ret = 0;
+       struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
+       int ret = 0, i;
 
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
                wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
                goto out;
        }
-       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
-       if (ret) {
-               wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
-                           ret);
+       if (wcn->hal_rsp_len != sizeof(*rsp)) {
+               wcn36xx_err("Invalid hal_feature_caps_exchange response");
                goto out;
        }
+
+       rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
+
+       for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
+               wcn->fw_feat_caps[i] = rsp->feat_caps[i];
 out:
        mutex_unlock(&wcn->hal_mutex);
        return ret;
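
The newly exported set_feat_caps()/get_feat_caps()/clear_feat_caps() pack capability indices into the u32 fw_feat_caps[] words that the exchange above copies out of the response. The exact word/bit split is not visible in these hunks, so the standalone sketch below assumes the conventional cap / 32, cap % 32 packing (names are illustrative):

#include <linux/types.h>

static u32 example_fw_caps[4];          /* 4 words = up to 128 capability bits */

static void example_caps_set(u32 *bitmap, unsigned int cap)
{
        bitmap[cap / 32] |= 1 << (cap % 32);
}

static bool example_caps_test(const u32 *bitmap, unsigned int cap)
{
        return bitmap[cap / 32] & (1 << (cap % 32));
}

/* e.g. capability 33 lands in word 1, bit 1: example_caps_set(example_fw_caps, 33) */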
index e7c39019c6f1aece7d149b410a77bc42ae05093d..008d03423dbf460fc3316924147a872552d47a94 100644 (file)
@@ -24,7 +24,7 @@
 
 #define WCN36XX_HAL_BUF_SIZE                           4096
 
-#define HAL_MSG_TIMEOUT 200
+#define HAL_MSG_TIMEOUT 500
 #define WCN36XX_SMSM_WLAN_TX_ENABLE                    0x00000400
 #define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY               0x00000200
 /* The PNO version info be contained in the rsp msg */
@@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
 int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
                             u32 arg3, u32 arg4, u32 arg5);
 int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
 
 int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
                struct ieee80211_sta *sta,
index b2b60e30caaf8492886467e6c8a14b32b6f95467..32bb26a0db2abf429de627c32aabac9f453b7227 100644 (file)
@@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
                       RX_FLAG_MMIC_STRIPPED |
                       RX_FLAG_DECRYPTED;
 
-       wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
-                   status.flag,  status.vendor_radiotap_len);
+       wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
 
        memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
 
@@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
                                   struct ieee80211_vif,
                                   drv_priv);
 
+               bd->dpu_sign = sta_priv->ucast_dpu_sign;
                if (vif->type == NL80211_IFTYPE_STATION) {
                        bd->sta_index = sta_priv->bss_sta_index;
                        bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
                __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
                bd->sta_index = __vif_priv->self_sta_index;
                bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+               bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
        }
 
-       bd->dpu_sign = __vif_priv->ucast_dpu_signature;
-
        if (ieee80211_is_nullfunc(hdr->frame_control) ||
           (sta_priv && !sta_priv->is_data_encrypted))
                bd->dpu_ne = 1;
index 8fa5cbace5abba1e027954a64b312bdc37f7b13c..f0fb81dfd17b9d8547f2925d85cf007207f305f8 100644 (file)
@@ -125,10 +125,10 @@ struct wcn36xx_vif {
        enum wcn36xx_power_state pw_state;
 
        u8 bss_index;
-       u8 ucast_dpu_signature;
        /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
        u8 self_sta_index;
        u8 self_dpu_desc_index;
+       u8 self_ucast_dpu_sign;
 };
 
 /**
@@ -159,6 +159,7 @@ struct wcn36xx_sta {
        u16 tid;
        u8 sta_index;
        u8 dpu_desc_index;
+       u8 ucast_dpu_sign;
        u8 bss_sta_index;
        u8 bss_dpu_desc_index;
        bool is_data_encrypted;
@@ -171,10 +172,14 @@ struct wcn36xx {
        struct device           *dev;
        struct list_head        vif_list;
 
+       const struct firmware   *nv;
+
        u8                      fw_revision;
        u8                      fw_version;
        u8                      fw_minor;
        u8                      fw_major;
+       u32                     fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
+       u32                     chip_version;
 
        /* extra byte for the NULL termination */
        u8                      crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -222,6 +227,9 @@ struct wcn36xx {
 
 };
 
+#define WCN36XX_CHIP_3660      0
+#define WCN36XX_CHIP_3680      1
+
 static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
                                         u8 major,
                                         u8 minor,
index 990dd42ae79ed312652a336f46fc1ab4c9ad7ad8..c7a3465fd02ad3b6942fd43bb1aa88bf4bc0e44f 100644 (file)
@@ -9,6 +9,7 @@ wil6210-y += wmi.o
 wil6210-y += interrupt.o
 wil6210-y += txrx.o
 wil6210-y += debug.o
+wil6210-y += rx_reorder.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 
 # for tracing framework to find trace.h
index 5b340769d5bb2196bf3d0192e007ed13f23236fc..4806a49cb61b139e0250e8152e27c91b79b50a0e 100644 (file)
@@ -104,41 +104,125 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type)
        return -EOPNOTSUPP;
 }
 
-static int wil_cfg80211_get_station(struct wiphy *wiphy,
-                                   struct net_device *ndev,
-                                   u8 *mac, struct station_info *sinfo)
+static int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
+                             struct station_info *sinfo)
 {
-       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       int rc;
        struct wmi_notify_req_cmd cmd = {
-               .cid = 0,
+               .cid = cid,
                .interval_usec = 0,
        };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_notify_req_done_event evt;
+       } __packed reply;
+       struct wil_net_stats *stats = &wil->sta[cid].stats;
+       int rc;
 
-       if (memcmp(mac, wil->dst_addr[0], ETH_ALEN))
-               return -ENOENT;
-
-       /* WMI_NOTIFY_REQ_DONE_EVENTID handler fills wil->stats.bf_mcs */
        rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd),
-                     WMI_NOTIFY_REQ_DONE_EVENTID, NULL, 0, 20);
+                     WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20);
        if (rc)
                return rc;
 
+       wil_dbg_wmi(wil, "Link status for CID %d: {\n"
+                   "  MCS %d TSF 0x%016llx\n"
+                   "  BF status 0x%08x SNR 0x%08x SQI %d%%\n"
+                   "  Tx Tpt %d goodput %d Rx goodput %d\n"
+                   "  Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+                   cid, le16_to_cpu(reply.evt.bf_mcs),
+                   le64_to_cpu(reply.evt.tsf), reply.evt.status,
+                   le32_to_cpu(reply.evt.snr_val),
+                   reply.evt.sqi,
+                   le32_to_cpu(reply.evt.tx_tpt),
+                   le32_to_cpu(reply.evt.tx_goodput),
+                   le32_to_cpu(reply.evt.rx_goodput),
+                   le16_to_cpu(reply.evt.my_rx_sector),
+                   le16_to_cpu(reply.evt.my_tx_sector),
+                   le16_to_cpu(reply.evt.other_rx_sector),
+                   le16_to_cpu(reply.evt.other_tx_sector));
+
        sinfo->generation = wil->sinfo_gen;
 
-       sinfo->filled |= STATION_INFO_TX_BITRATE;
+       sinfo->filled = STATION_INFO_RX_BYTES |
+                       STATION_INFO_TX_BYTES |
+                       STATION_INFO_RX_PACKETS |
+                       STATION_INFO_TX_PACKETS |
+                       STATION_INFO_RX_BITRATE |
+                       STATION_INFO_TX_BITRATE |
+                       STATION_INFO_RX_DROP_MISC |
+                       STATION_INFO_TX_FAILED;
+
        sinfo->txrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
-       sinfo->txrate.mcs = wil->stats.bf_mcs;
-       sinfo->filled |= STATION_INFO_RX_BITRATE;
+       sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
        sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G;
-       sinfo->rxrate.mcs = wil->stats.last_mcs_rx;
+       sinfo->rxrate.mcs = stats->last_mcs_rx;
+       sinfo->rx_bytes = stats->rx_bytes;
+       sinfo->rx_packets = stats->rx_packets;
+       sinfo->rx_dropped_misc = stats->rx_dropped;
+       sinfo->tx_bytes = stats->tx_bytes;
+       sinfo->tx_packets = stats->tx_packets;
+       sinfo->tx_failed = stats->tx_errors;
 
        if (test_bit(wil_status_fwconnected, &wil->status)) {
                sinfo->filled |= STATION_INFO_SIGNAL;
-               sinfo->signal = 12; /* TODO: provide real value */
+               sinfo->signal = reply.evt.sqi;
        }
 
-       return 0;
+       return rc;
+}
+
+static int wil_cfg80211_get_station(struct wiphy *wiphy,
+                                   struct net_device *ndev,
+                                   u8 *mac, struct station_info *sinfo)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+
+       int cid = wil_find_cid(wil, mac);
+
+       wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+       if (cid < 0)
+               return cid;
+
+       rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+       return rc;
+}
+
+/*
+ * Find @idx-th active STA for station dump.
+ */
+static int wil_find_cid_by_idx(struct wil6210_priv *wil, int idx)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+               if (wil->sta[i].status == wil_sta_unused)
+                       continue;
+               if (idx == 0)
+                       return i;
+               idx--;
+       }
+
+       return -ENOENT;
+}
+
+static int wil_cfg80211_dump_station(struct wiphy *wiphy,
+                                    struct net_device *dev, int idx,
+                                    u8 *mac, struct station_info *sinfo)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+       int cid = wil_find_cid_by_idx(wil, idx);
+
+       if (cid < 0)
+               return -ENOENT;
+
+       memcpy(mac, wil->sta[cid].addr, ETH_ALEN);
+       wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+
+       rc = wil_cid_fill_sinfo(wil, cid, sinfo);
+
+       return rc;
 }
 
 static int wil_cfg80211_change_iface(struct wiphy *wiphy,
@@ -181,6 +265,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                u16 chnl[4];
        } __packed cmd;
        uint i, n;
+       int rc;
 
        if (wil->scan_request) {
                wil_err(wil, "Already scanning\n");
@@ -198,7 +283,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
 
        /* FW don't support scan after connection attempt */
        if (test_bit(wil_status_dontscan, &wil->status)) {
-               wil_err(wil, "Scan after connect attempt not supported\n");
+               wil_err(wil, "Can't scan now\n");
                return -EBUSY;
        }
 
@@ -221,8 +306,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
                             request->channels[i]->center_freq);
        }
 
-       return wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
+       rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
                        cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+
+       if (rc)
+               wil->scan_request = NULL;
+
+       return rc;
 }
 
 static int wil_cfg80211_connect(struct wiphy *wiphy,
@@ -237,6 +327,10 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        int ch;
        int rc = 0;
 
+       if (test_bit(wil_status_fwconnecting, &wil->status) ||
+           test_bit(wil_status_fwconnected, &wil->status))
+               return -EALREADY;
+
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
                               WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
@@ -318,10 +412,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
 
        memcpy(conn.bssid, bss->bssid, ETH_ALEN);
        memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
-       /*
-        * FW don't support scan after connection attempt
-        */
-       set_bit(wil_status_dontscan, &wil->status);
+
        set_bit(wil_status_fwconnecting, &wil->status);
 
        rc = wmi_send(wil, WMI_CONNECT_CMDID, &conn, sizeof(conn));
@@ -330,7 +421,6 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                mod_timer(&wil->connect_timer,
                          jiffies + msecs_to_jiffies(2000));
        } else {
-               clear_bit(wil_status_dontscan, &wil->status);
                clear_bit(wil_status_fwconnecting, &wil->status);
        }
 
@@ -352,6 +442,40 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
        return rc;
 }
 
+static int wil_cfg80211_mgmt_tx(struct wiphy *wiphy,
+                               struct wireless_dev *wdev,
+                               struct cfg80211_mgmt_tx_params *params,
+                               u64 *cookie)
+{
+       const u8 *buf = params->buf;
+       size_t len = params->len;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+       struct ieee80211_mgmt *mgmt_frame = (void *)buf;
+       struct wmi_sw_tx_req_cmd *cmd;
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_sw_tx_complete_event evt;
+       } __packed evt;
+
+       cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN);
+       cmd->len = cpu_to_le16(len);
+       memcpy(cmd->payload, buf, len);
+
+       rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, cmd, sizeof(*cmd) + len,
+                     WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
+       if (rc == 0)
+               rc = evt.evt.status;
+
+       kfree(cmd);
+
+       return rc;
+}
+
 static int wil_cfg80211_set_channel(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef)
 {
@@ -402,6 +526,41 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_remain_on_channel(struct wiphy *wiphy,
+                                struct wireless_dev *wdev,
+                                struct ieee80211_channel *chan,
+                                unsigned int duration,
+                                u64 *cookie)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+
+       /* TODO: handle duration */
+       wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+
+       rc = wmi_set_channel(wil, chan->hw_value);
+       if (rc)
+               return rc;
+
+       rc = wmi_rxon(wil, true);
+
+       return rc;
+}
+
+static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
+                                       struct wireless_dev *wdev,
+                                       u64 cookie)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+
+       wil_info(wil, "%s()\n", __func__);
+
+       rc = wmi_rxon(wil, false);
+
+       return rc;
+}
+
 static int wil_fix_bcon(struct wil6210_priv *wil,
                        struct cfg80211_beacon_data *bcon)
 {
@@ -450,18 +609,20 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        if (wil_fix_bcon(wil, bcon))
                wil_dbg_misc(wil, "Fixed bcon\n");
 
+       mutex_lock(&wil->mutex);
+
        rc = wil_reset(wil);
        if (rc)
-               return rc;
+               goto out;
 
        /* Rx VRING. */
        rc = wil_rx_init(wil);
        if (rc)
-               return rc;
+               goto out;
 
        rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
        if (rc)
-               return rc;
+               goto out;
 
        /* MAC address - pre-requisite for other commands */
        wmi_set_mac_address(wil, ndev->dev_addr);
@@ -485,11 +646,13 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
                           channel->hw_value);
        if (rc)
-               return rc;
+               goto out;
 
 
        netif_carrier_on(ndev);
 
+out:
+       mutex_unlock(&wil->mutex);
        return rc;
 }
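
The start_ap() changes above convert the early returns into goto out so wil->mutex is always dropped on the error paths. A minimal sketch of that single-exit locking idiom; example_step() is a placeholder standing in for wil_reset()/wil_rx_init()/...:

#include <linux/mutex.h>

static int example_step(void)
{
        return 0;                       /* placeholder for a fallible setup step */
}

static int example_configure(struct mutex *lock)
{
        int rc;

        mutex_lock(lock);

        rc = example_step();
        if (rc)
                goto out;

        rc = example_step();
out:
        mutex_unlock(lock);             /* released on both success and failure */
        return rc;
}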
 
@@ -499,17 +662,36 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
        int rc = 0;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
+       mutex_lock(&wil->mutex);
+
        rc = wmi_pcp_stop(wil);
 
+       mutex_unlock(&wil->mutex);
        return rc;
 }
 
+static int wil_cfg80211_del_station(struct wiphy *wiphy,
+                                   struct net_device *dev, u8 *mac)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       mutex_lock(&wil->mutex);
+       wil6210_disconnect(wil, mac);
+       mutex_unlock(&wil->mutex);
+
+       return 0;
+}
+
 static struct cfg80211_ops wil_cfg80211_ops = {
        .scan = wil_cfg80211_scan,
        .connect = wil_cfg80211_connect,
        .disconnect = wil_cfg80211_disconnect,
        .change_virtual_intf = wil_cfg80211_change_iface,
        .get_station = wil_cfg80211_get_station,
+       .dump_station = wil_cfg80211_dump_station,
+       .remain_on_channel = wil_remain_on_channel,
+       .cancel_remain_on_channel = wil_cancel_remain_on_channel,
+       .mgmt_tx = wil_cfg80211_mgmt_tx,
        .set_monitor_channel = wil_cfg80211_set_channel,
        .add_key = wil_cfg80211_add_key,
        .del_key = wil_cfg80211_del_key,
@@ -517,6 +699,7 @@ static struct cfg80211_ops wil_cfg80211_ops = {
        /* AP mode */
        .start_ap = wil_cfg80211_start_ap,
        .stop_ap = wil_cfg80211_stop_ap,
+       .del_station = wil_cfg80211_del_station,
 };
 
 static void wil_wiphy_init(struct wiphy *wiphy)
@@ -542,7 +725,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
        wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
 
        /* TODO: figure this out */
-       wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+       wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
 
        wiphy->cipher_suites = wil_cipher_suites;
        wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
index 1caa31992a7e1ecc25b03d32d78aaf12894fcd78..ecdabe4adec3bb59b90cce9f00571edf2ca5bad6 100644 (file)
 /* Nasty hack. Better have per device instances */
 static u32 mem_addr;
 static u32 dbg_txdesc_index;
+static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */
 
 static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
-                           const char *name, struct vring *vring)
+                           const char *name, struct vring *vring,
+                           char _s, char _h)
 {
        void __iomem *x = wmi_addr(wil, vring->hwtail);
 
@@ -50,8 +52,8 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
                        volatile struct vring_tx_desc *d = &vring->va[i].tx;
                        if ((i % 64) == 0 && (i != 0))
                                seq_printf(s, "\n");
-                       seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
-                                       "S" : (vring->ctx[i].skb ? "H" : "h"));
+                       seq_printf(s, "%c", (d->dma.status & BIT(0)) ?
+                                       _s : (vring->ctx[i].skb ? _h : 'h'));
                }
                seq_printf(s, "\n");
        }
@@ -63,14 +65,19 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
        uint i;
        struct wil6210_priv *wil = s->private;
 
-       wil_print_vring(s, wil, "rx", &wil->vring_rx);
+       wil_print_vring(s, wil, "rx", &wil->vring_rx, 'S', '_');
 
        for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
                struct vring *vring = &(wil->vring_tx[i]);
                if (vring->va) {
+                       int cid = wil->vring2cid_tid[i][0];
+                       int tid = wil->vring2cid_tid[i][1];
                        char name[10];
                        snprintf(name, sizeof(name), "tx_%2d", i);
-                       wil_print_vring(s, wil, name, vring);
+
+                       seq_printf(s, "\n%pM CID %d TID %d\n",
+                                  wil->sta[cid].addr, cid, tid);
+                       wil_print_vring(s, wil, name, vring, '_', 'H');
                }
        }
 
@@ -390,25 +397,78 @@ static const struct file_operations fops_reset = {
        .write = wil_write_file_reset,
        .open  = simple_open,
 };
-/*---------Tx descriptor------------*/
 
+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+                           const char *prefix)
+{
+       char printbuf[16 * 3 + 2];
+       int i = 0;
+       while (i < len) {
+               int l = min(len - i, 16);
+               hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
+                                  sizeof(printbuf), false);
+               seq_printf(s, "%s%s\n", prefix, printbuf);
+               i += l;
+       }
+}
+
+static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
+{
+       int i = 0;
+       int len = skb_headlen(skb);
+       void *p = skb->data;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+
+       seq_printf(s, "    len = %d\n", len);
+       wil_seq_hexdump(s, p, len, "      : ");
+
+       if (nr_frags) {
+               seq_printf(s, "    nr_frags = %d\n", nr_frags);
+               for (i = 0; i < nr_frags; i++) {
+                       const struct skb_frag_struct *frag =
+                                       &skb_shinfo(skb)->frags[i];
+
+                       len = skb_frag_size(frag);
+                       p = skb_frag_address_safe(frag);
+                       seq_printf(s, "    [%2d] : len = %d\n", i, len);
+                       wil_seq_hexdump(s, p, len, "      : ");
+               }
+       }
+}
+
+/*---------Tx/Rx descriptor------------*/
 static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
 {
        struct wil6210_priv *wil = s->private;
-       struct vring *vring = &(wil->vring_tx[0]);
+       struct vring *vring;
+       bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
+       if (tx)
+               vring = &(wil->vring_tx[dbg_vring_index]);
+       else
+               vring = &wil->vring_rx;
 
        if (!vring->va) {
-               seq_printf(s, "No Tx VRING\n");
+               if (tx)
+                       seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
+               else
+                       seq_puts(s, "No Rx VRING\n");
                return 0;
        }
 
        if (dbg_txdesc_index < vring->size) {
+               /* use struct vring_tx_desc for Rx as well;
+                * the only field used, .dma.length, is the same
+                */
                volatile struct vring_tx_desc *d =
                                &(vring->va[dbg_txdesc_index].tx);
                volatile u32 *u = (volatile u32 *)d;
                struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
 
-               seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
+               if (tx)
+                       seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
+                                  dbg_txdesc_index);
+               else
+                       seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
                seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                           u[0], u[1], u[2], u[3]);
                seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -416,31 +476,19 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
                seq_printf(s, "  SKB = %p\n", skb);
 
                if (skb) {
-                       char printbuf[16 * 3 + 2];
-                       int i = 0;
-                       int len = le16_to_cpu(d->dma.length);
-                       void *p = skb->data;
-
-                       if (len != skb_headlen(skb)) {
-                               seq_printf(s, "!!! len: desc = %d skb = %d\n",
-                                          len, skb_headlen(skb));
-                               len = min_t(int, len, skb_headlen(skb));
-                       }
-
-                       seq_printf(s, "    len = %d\n", len);
-
-                       while (i < len) {
-                               int l = min(len - i, 16);
-                               hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
-                                                  sizeof(printbuf), false);
-                               seq_printf(s, "      : %s\n", printbuf);
-                               i += l;
-                       }
+                       skb_get(skb);
+                       wil_seq_print_skb(s, skb);
+                       kfree_skb(skb);
                }
                seq_printf(s, "}\n");
        } else {
-               seq_printf(s, "TxDesc index (%d) >= size (%d)\n",
-                          dbg_txdesc_index, vring->size);
+               if (tx)
+                       seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
+                                  dbg_vring_index, dbg_txdesc_index,
+                                  vring->size);
+               else
+                       seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
+                                  dbg_txdesc_index, vring->size);
        }
 
        return 0;
@@ -570,6 +618,69 @@ static const struct file_operations fops_temp = {
        .llseek         = seq_lseek,
 };
 
+/*---------Station matrix------------*/
+static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
+{
+       int i;
+       u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+       seq_printf(s, "0x%03x [", r->head_seq_num);
+       for (i = 0; i < r->buf_size; i++) {
+               if (i == index)
+                       seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|');
+               else
+                       seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
+       }
+       seq_puts(s, "]\n");
+}
+
+static int wil_sta_debugfs_show(struct seq_file *s, void *data)
+{
+       struct wil6210_priv *wil = s->private;
+       int i, tid;
+
+       for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+               struct wil_sta_info *p = &wil->sta[i];
+               char *status = "unknown";
+               switch (p->status) {
+               case wil_sta_unused:
+                       status = "unused   ";
+                       break;
+               case wil_sta_conn_pending:
+                       status = "pending  ";
+                       break;
+               case wil_sta_connected:
+                       status = "connected";
+                       break;
+               }
+               seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
+                          (p->data_port_open ? " data_port_open" : ""));
+
+               if (p->status == wil_sta_connected) {
+                       for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+                               struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+                               if (r) {
+                                       seq_printf(s, "[%2d] ", tid);
+                                       wil_print_rxtid(s, r);
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int wil_sta_seq_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wil_sta_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_sta = {
+       .open           = wil_sta_seq_open,
+       .release        = single_release,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+};
+
 /*----------------*/
 int wil6210_debugfs_init(struct wil6210_priv *wil)
 {
@@ -581,9 +692,13 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
 
        debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox);
        debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring);
-       debugfs_create_file("txdesc", S_IRUGO, dbg, wil, &fops_txdesc);
-       debugfs_create_u32("txdesc_index", S_IRUGO | S_IWUSR, dbg,
+       debugfs_create_file("stations", S_IRUGO, dbg, wil, &fops_sta);
+       debugfs_create_file("desc", S_IRUGO, dbg, wil, &fops_txdesc);
+       debugfs_create_u32("desc_index", S_IRUGO | S_IWUSR, dbg,
                           &dbg_txdesc_index);
+       debugfs_create_u32("vring_index", S_IRUGO | S_IWUSR, dbg,
+                          &dbg_vring_index);
+
        debugfs_create_file("bf", S_IRUGO, dbg, wil, &fops_bf);
        debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid);
        debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg,
index 10919f95a83c19cd744003a2a49022ecda3b2a8c..5824cd41e4bac6d387087ab84739df3225b8f799 100644 (file)
@@ -195,8 +195,12 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
        if (isr & BIT_DMA_EP_RX_ICR_RX_DONE) {
                wil_dbg_irq(wil, "RX done\n");
                isr &= ~BIT_DMA_EP_RX_ICR_RX_DONE;
-               wil_dbg_txrx(wil, "NAPI schedule\n");
-               napi_schedule(&wil->napi_rx);
+               if (test_bit(wil_status_reset_done, &wil->status)) {
+                       wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
+                       napi_schedule(&wil->napi_rx);
+               } else {
+                       wil_err(wil, "Got Rx interrupt while in reset\n");
+               }
        }
 
        if (isr)
@@ -226,10 +230,15 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
 
        if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) {
                wil_dbg_irq(wil, "TX done\n");
-               napi_schedule(&wil->napi_tx);
                isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
                /* clear also all VRING interrupts */
                isr &= ~(BIT(25) - 1UL);
+               if (test_bit(wil_status_reset_done, &wil->status)) {
+                       wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
+                       napi_schedule(&wil->napi_tx);
+               } else {
+                       wil_err(wil, "Got Tx interrupt while in reset\n");
+               }
        }
 
        if (isr)
@@ -319,6 +328,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
        if (isr & ISR_MISC_FW_ERROR) {
                wil_notify_fw_error(wil);
                isr &= ~ISR_MISC_FW_ERROR;
+               wil_fw_error_recovery(wil);
        }
 
        if (isr & ISR_MISC_MBOX_EVT) {
@@ -493,6 +503,23 @@ free0:
 
        return rc;
 }
+/* can't use wil_ioread32_and_clear because the ICC value is not set yet */
+static inline void wil_clear32(void __iomem *addr)
+{
+       u32 x = ioread32(addr);
+
+       iowrite32(x, addr);
+}
+
+void wil6210_clear_irq(struct wil6210_priv *wil)
+{
+       wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
+                   offsetof(struct RGF_ICR, ICR));
+       wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
+                   offsetof(struct RGF_ICR, ICR));
+       wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
+                   offsetof(struct RGF_ICR, ICR));
+}
 
 int wil6210_init_irq(struct wil6210_priv *wil, int irq)
 {
index fd30cddd58821f9c602a3bba0fcb91bbfc88aeef..95f4efe9ef37c652722a5ca381af4529a6ea65e9 100644 (file)
 
 #include <linux/moduleparam.h>
 #include <linux/if_arp.h>
+#include <linux/etherdevice.h>
 
 #include "wil6210.h"
+#include "txrx.h"
+
+static bool no_fw_recovery;
+module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery");
 
 /*
  * Due to a hardware issue,
@@ -52,29 +58,74 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
                __raw_writel(*s++, d++);
 }
 
-static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+static void wil_disconnect_cid(struct wil6210_priv *wil, int cid)
 {
        uint i;
-       struct net_device *ndev = wil_to_ndev(wil);
+       struct wil_sta_info *sta = &wil->sta[cid];
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       sta->data_port_open = false;
+       if (sta->status != wil_sta_unused) {
+               wmi_disconnect_sta(wil, sta->addr, WLAN_REASON_DEAUTH_LEAVING);
+               sta->status = wil_sta_unused;
+       }
 
-       wil_link_off(wil);
-       if (test_bit(wil_status_fwconnected, &wil->status)) {
-               clear_bit(wil_status_fwconnected, &wil->status);
-               cfg80211_disconnected(ndev,
-                                     WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                     NULL, 0, GFP_KERNEL);
-       } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
-               cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
-                                       WLAN_STATUS_UNSPECIFIED_FAILURE,
-                                       GFP_KERNEL);
+       for (i = 0; i < WIL_STA_TID_NUM; i++) {
+               struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
+               sta->tid_rx[i] = NULL;
+               wil_tid_ampdu_rx_free(wil, r);
+       }
+       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
+               if (wil->vring2cid_tid[i][0] == cid)
+                       wil_vring_fini_tx(wil, i);
        }
-       clear_bit(wil_status_fwconnecting, &wil->status);
-       for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++)
-               wil_vring_fini_tx(wil, i);
+       memset(&sta->stats, 0, sizeof(sta->stats));
+}
 
-       clear_bit(wil_status_dontscan, &wil->status);
+static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
+{
+       int cid = -ENOENT;
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct wireless_dev *wdev = wil->wdev;
+
+       might_sleep();
+       if (bssid) {
+               cid = wil_find_cid(wil, bssid);
+               wil_dbg_misc(wil, "%s(%pM, CID %d)\n", __func__, bssid, cid);
+       } else {
+               wil_dbg_misc(wil, "%s(all)\n", __func__);
+       }
+
+       if (cid >= 0) /* disconnect 1 peer */
+               wil_disconnect_cid(wil, cid);
+       else /* disconnect all */
+               for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+                       wil_disconnect_cid(wil, cid);
+
+       /* link state */
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               wil_link_off(wil);
+               if (test_bit(wil_status_fwconnected, &wil->status)) {
+                       clear_bit(wil_status_fwconnected, &wil->status);
+                       cfg80211_disconnected(ndev,
+                                             WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                             NULL, 0, GFP_KERNEL);
+               } else if (test_bit(wil_status_fwconnecting, &wil->status)) {
+                       cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
+                                               WLAN_STATUS_UNSPECIFIED_FAILURE,
+                                               GFP_KERNEL);
+               }
+               clear_bit(wil_status_fwconnecting, &wil->status);
+               break;
+       default:
+               /* AP-like interface and monitor:
+                * never scan, always connected
+                */
+               if (bssid)
+                       cfg80211_del_sta(ndev, bssid, GFP_KERNEL);
+               break;
+       }
 }
 
 static void wil_disconnect_worker(struct work_struct *work)
@@ -82,7 +133,9 @@ static void wil_disconnect_worker(struct work_struct *work)
        struct wil6210_priv *wil = container_of(work,
                        struct wil6210_priv, disconnect_worker);
 
+       mutex_lock(&wil->mutex);
        _wil6210_disconnect(wil, NULL);
+       mutex_unlock(&wil->mutex);
 }
 
 static void wil_connect_timer_fn(ulong x)
@@ -97,12 +150,55 @@ static void wil_connect_timer_fn(ulong x)
        schedule_work(&wil->disconnect_worker);
 }
 
+static void wil_fw_error_worker(struct work_struct *work)
+{
+       struct wil6210_priv *wil = container_of(work,
+                       struct wil6210_priv, fw_error_worker);
+       struct wireless_dev *wdev = wil->wdev;
+
+       wil_dbg_misc(wil, "fw error worker\n");
+
+       if (no_fw_recovery)
+               return;
+
+       mutex_lock(&wil->mutex);
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+       case NL80211_IFTYPE_MONITOR:
+               wil_info(wil, "fw error recovery started...\n");
+               wil_reset(wil);
+
+               /* need to re-allocate Rx ring after reset */
+               wil_rx_init(wil);
+               break;
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               /* recovery in these modes is done by upper layers */
+               break;
+       default:
+               break;
+       }
+       mutex_unlock(&wil->mutex);
+}
+
+static int wil_find_free_vring(struct wil6210_priv *wil)
+{
+       int i;
+       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+               if (!wil->vring_tx[i].va)
+                       return i;
+       }
+       return -EINVAL;
+}
+
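
wil_fw_error_worker() runs in process context because the FW-error interrupt path only schedules it (see wil_fw_error_recovery() further down) and teardown cancels it in wil_priv_deinit(). A hedged sketch of that deferred-work lifecycle, with illustrative names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
        struct work_struct recovery_work;
};

static void example_recovery_fn(struct work_struct *work)
{
        struct example_dev *d = container_of(work, struct example_dev,
                                             recovery_work);
        /* heavy reset / re-init goes here; may sleep */
        (void)d;
}

static void example_init(struct example_dev *d)
{
        INIT_WORK(&d->recovery_work, example_recovery_fn);
}

static void example_on_fw_error(struct example_dev *d)
{
        schedule_work(&d->recovery_work);       /* safe from IRQ context */
}

static void example_teardown(struct example_dev *d)
{
        cancel_work_sync(&d->recovery_work);    /* waits for a running instance */
}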
 static void wil_connect_worker(struct work_struct *work)
 {
        int rc;
        struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
                                                connect_worker);
        int cid = wil->pending_connect_cid;
+       int ringid = wil_find_free_vring(wil);
 
        if (cid < 0) {
                wil_err(wil, "No connection pending\n");
@@ -111,16 +207,22 @@ static void wil_connect_worker(struct work_struct *work)
 
        wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
 
-       rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0);
+       rc = wil_vring_init_tx(wil, ringid, WIL6210_TX_RING_SIZE, cid, 0);
        wil->pending_connect_cid = -1;
-       if (rc == 0)
+       if (rc == 0) {
+               wil->sta[cid].status = wil_sta_connected;
                wil_link_on(wil);
+       } else {
+               wil->sta[cid].status = wil_sta_unused;
+       }
 }
 
 int wil_priv_init(struct wil6210_priv *wil)
 {
        wil_dbg_misc(wil, "%s()\n", __func__);
 
+       memset(wil->sta, 0, sizeof(wil->sta));
+
        mutex_init(&wil->mutex);
        mutex_init(&wil->wmi_mutex);
 
@@ -132,6 +234,7 @@ int wil_priv_init(struct wil6210_priv *wil)
        INIT_WORK(&wil->connect_worker, wil_connect_worker);
        INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
        INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
+       INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
 
        INIT_LIST_HEAD(&wil->pending_wmi_ev);
        spin_lock_init(&wil->wmi_ev_lock);
@@ -158,7 +261,10 @@ void wil6210_disconnect(struct wil6210_priv *wil, void *bssid)
 void wil_priv_deinit(struct wil6210_priv *wil)
 {
        cancel_work_sync(&wil->disconnect_worker);
+       cancel_work_sync(&wil->fw_error_worker);
+       mutex_lock(&wil->mutex);
        wil6210_disconnect(wil, NULL);
+       mutex_unlock(&wil->mutex);
        wmi_event_flush(wil);
        destroy_workqueue(wil->wmi_wq_conn);
        destroy_workqueue(wil->wmi_wq);
@@ -166,40 +272,78 @@ void wil_priv_deinit(struct wil6210_priv *wil)
 
 static void wil_target_reset(struct wil6210_priv *wil)
 {
+       int delay = 0;
+       u32 hw_state;
+       u32 rev_id;
+
        wil_dbg_misc(wil, "Resetting...\n");
 
+       /* register read */
+#define R(a) ioread32(wil->csr + HOSTADDR(a))
        /* register write */
 #define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a))
        /* register set = read, OR, write */
-#define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \
-               wil->csr + HOSTADDR(a))
+#define S(a, v) W(a, R(a) | v)
+       /* register clear = read, AND with inverted, write */
+#define C(a, v) W(a, R(a) & ~v)
 
+       wil->hw_version = R(RGF_USER_FW_REV_ID);
+       rev_id = wil->hw_version & 0xff;
        /* hpal_perst_from_pad_src_n_mask */
        S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6));
        /* car_perst_rst_src_n_mask */
        S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7));
+       wmb(); /* order is important here */
 
        W(RGF_USER_MAC_CPU_0,  BIT(1)); /* mac_cpu_man_rst */
        W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
+       wmb(); /* order is important here */
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
+       wmb(); /* order is important here */
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+       wmb(); /* order is important here */
 
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+       if (rev_id == 1) {
+               W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
+       } else {
+               W(RGF_PCIE_LOS_COUNTER_CTL, BIT(6) | BIT(8));
+               W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
+       }
        W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+       wmb(); /* order is important here */
+
+       /* wait until device ready */
+       do {
+               msleep(1);
+               hw_state = R(RGF_USER_HW_MACHINE_STATE);
+               if (delay++ > 100) {
+                       wil_err(wil, "Reset not completed, hw_state 0x%08x\n",
+                               hw_state);
+                       return;
+               }
+       } while (hw_state != HW_MACHINE_BOOT_DONE);
+
+       if (rev_id == 2)
+               W(RGF_PCIE_LOS_COUNTER_CTL, BIT(8));
+
+       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+       wmb(); /* order is important here */
 
-       wil_dbg_misc(wil, "Reset completed\n");
+       wil_dbg_misc(wil, "Reset completed in %d ms\n", delay);
 
+#undef R
 #undef W
 #undef S
+#undef C
 }
 
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -234,11 +378,24 @@ int wil_reset(struct wil6210_priv *wil)
 {
        int rc;
 
+       WARN_ON(!mutex_is_locked(&wil->mutex));
+
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL);
 
+       wil->status = 0; /* prevent NAPI from being scheduled */
+       if (test_bit(wil_status_napi_en, &wil->status)) {
+               napi_synchronize(&wil->napi_rx);
+       }
+
+       if (wil->scan_request) {
+               wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
+                            wil->scan_request);
+               cfg80211_scan_done(wil->scan_request, true);
+               wil->scan_request = NULL;
+       }
+
        wil6210_disable_irq(wil);
-       wil->status = 0;
 
        wmi_event_flush(wil);
 
@@ -248,6 +405,8 @@ int wil_reset(struct wil6210_priv *wil)
        /* TODO: put MAC in reset */
        wil_target_reset(wil);
 
+       wil_rx_fini(wil);
+
        /* init after reset */
        wil->pending_connect_cid = -1;
        reinit_completion(&wil->wmi_ready);
@@ -261,6 +420,11 @@ int wil_reset(struct wil6210_priv *wil)
        return rc;
 }
 
+void wil_fw_error_recovery(struct wil6210_priv *wil)
+{
+       wil_dbg_misc(wil, "starting fw error recovery\n");
+       schedule_work(&wil->fw_error_worker);
+}
 
 void wil_link_on(struct wil6210_priv *wil)
 {
@@ -288,6 +452,8 @@ static int __wil_up(struct wil6210_priv *wil)
        struct wireless_dev *wdev = wil->wdev;
        int rc;
 
+       WARN_ON(!mutex_is_locked(&wil->mutex));
+
        rc = wil_reset(wil);
        if (rc)
                return rc;
@@ -329,6 +495,7 @@ static int __wil_up(struct wil6210_priv *wil)
 
        napi_enable(&wil->napi_rx);
        napi_enable(&wil->napi_tx);
+       set_bit(wil_status_napi_en, &wil->status);
 
        return 0;
 }
@@ -346,6 +513,9 @@ int wil_up(struct wil6210_priv *wil)
 
 static int __wil_down(struct wil6210_priv *wil)
 {
+       WARN_ON(!mutex_is_locked(&wil->mutex));
+
+       clear_bit(wil_status_napi_en, &wil->status);
        napi_disable(&wil->napi_rx);
        napi_disable(&wil->napi_tx);
 
@@ -370,3 +540,19 @@ int wil_down(struct wil6210_priv *wil)
 
        return rc;
 }
+
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
+{
+       int i;
+       int rc = -ENOENT;
+
+       for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+               if ((wil->sta[i].status != wil_sta_unused) &&
+                   ether_addr_equal(wil->sta[i].addr, mac)) {
+                       rc = i;
+                       break;
+               }
+       }
+
+       return rc;
+}
index 717178f09aa8e0a2f9eb5196eb1c8654d2295cb2..fdcaeb820e75857469ab4d0a6d854cdb4d6aa373 100644 (file)
@@ -127,8 +127,9 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
 
        ndev->netdev_ops = &wil_netdev_ops;
        ndev->ieee80211_ptr = wdev;
-       ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
-       ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+                           NETIF_F_SG | NETIF_F_GRO;
+       ndev->features |= ndev->hw_features;
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
 
index eeceab39cda22aee81e2fcbb86fc0c9520766a5f..f1e1bb338d681e71c96b130363bf04387c3ffa85 100644 (file)
@@ -41,39 +41,41 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        switch (use_msi) {
        case 3:
        case 1:
+               wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+               break;
        case 0:
+               wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
                break;
        default:
-               wil_err(wil, "Invalid use_msi=%d, default to 1\n",
-                       use_msi);
+               wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
                use_msi = 1;
        }
-       wil->n_msi = use_msi;
-       if (wil->n_msi) {
-               wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
-               rc = pci_enable_msi_block(pdev, wil->n_msi);
-               if (rc && (wil->n_msi == 3)) {
-                       wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
-                       wil->n_msi = 1;
-                       rc = pci_enable_msi_block(pdev, wil->n_msi);
-               }
-               if (rc) {
-                       wil_err(wil, "pci_enable_msi failed, use INTx\n");
-                       wil->n_msi = 0;
-               }
-       } else {
-               wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+
+       if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
+               wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+               use_msi = 1;
+       }
+
+       if (use_msi == 1 && pci_enable_msi(pdev)) {
+               wil_err(wil, "pci_enable_msi failed, use INTx\n");
+               use_msi = 0;
        }
 
+       wil->n_msi = use_msi;
+
        rc = wil6210_init_irq(wil, pdev->irq);
        if (rc)
                goto stop_master;
 
        /* need reset here to obtain MAC */
+       mutex_lock(&wil->mutex);
        rc = wil_reset(wil);
+       mutex_unlock(&wil->mutex);
        if (rc)
                goto release_irq;
 
+       wil_info(wil, "HW version: 0x%08x\n", wil->hw_version);
+
        return 0;
 
  release_irq:
@@ -151,6 +153,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        pci_set_drvdata(pdev, wil);
        wil->pdev = pdev;
 
+       wil6210_clear_irq(wil);
        /* FW should raise IRQ when ready */
        rc = wil_if_pcie_enable(wil);
        if (rc) {
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
new file mode 100644 (file)
index 0000000..d04629f
--- /dev/null
@@ -0,0 +1,177 @@
+#include "wil6210.h"
+#include "txrx.h"
+
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK   0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+       return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+       return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+       return (sq1 - sq2) & SEQ_MASK;
+}
+
+static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
+{
+       return seq_sub(seq, r->ssn) % r->buf_size;
+}
+
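A quick worked check of the 12-bit sequence arithmetic above (editor's illustration, not part of the patch): ordering stays well defined across the 0xfff -> 0x000 wrap as long as two sequence numbers are less than half the 0x1000 window apart.

static void example_seq_math(void)
{
        WARN_ON(seq_sub(0x002, 0xffe) != 0x004); /* 0x002 is 4 ahead of 0xffe */
        WARN_ON(!seq_less(0xffe, 0x002));        /* 0xffe sorts before 0x002  */
        WARN_ON(seq_less(0x002, 0xffe));
        WARN_ON(seq_inc(0xfff) != 0x000);        /* increment wraps to zero   */
}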
+static void wil_release_reorder_frame(struct wil6210_priv *wil,
+                                     struct wil_tid_ampdu_rx *r,
+                                     int index)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct sk_buff *skb = r->reorder_buf[index];
+
+       if (!skb)
+               goto no_frame;
+
+       /* release the frame from the reorder ring buffer */
+       r->stored_mpdu_num--;
+       r->reorder_buf[index] = NULL;
+       wil_netif_rx_any(skb, ndev);
+
+no_frame:
+       r->head_seq_num = seq_inc(r->head_seq_num);
+}
+
+static void wil_release_reorder_frames(struct wil6210_priv *wil,
+                                      struct wil_tid_ampdu_rx *r,
+                                      u16 hseq)
+{
+       int index;
+
+       while (seq_less(r->head_seq_num, hseq)) {
+               index = reorder_index(r, r->head_seq_num);
+               wil_release_reorder_frame(wil, r, index);
+       }
+}
+
+static void wil_reorder_release(struct wil6210_priv *wil,
+                               struct wil_tid_ampdu_rx *r)
+{
+       int index = reorder_index(r, r->head_seq_num);
+
+       while (r->reorder_buf[index]) {
+               wil_release_reorder_frame(wil, r, index);
+               index = reorder_index(r, r->head_seq_num);
+       }
+}
+
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+       struct net_device *ndev = wil_to_ndev(wil);
+       struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+       int tid = wil_rxdesc_tid(d);
+       int cid = wil_rxdesc_cid(d);
+       int mid = wil_rxdesc_mid(d);
+       u16 seq = wil_rxdesc_seq(d);
+       struct wil_sta_info *sta = &wil->sta[cid];
+       struct wil_tid_ampdu_rx *r = sta->tid_rx[tid];
+       u16 hseq;
+       int index;
+
+       wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x\n",
+                    mid, cid, tid, seq);
+
+       if (!r) {
+               wil_netif_rx_any(skb, ndev);
+               return;
+       }
+
+       hseq = r->head_seq_num;
+
+       spin_lock(&r->reorder_lock);
+
+       /* frame with out of date sequence number */
+       if (seq_less(seq, r->head_seq_num)) {
+               dev_kfree_skb(skb);
+               goto out;
+       }
+
+       /*
+        * If the frame's sequence number exceeds our buffering window
+        * size, release some previous frames to make room for this one.
+        */
+       if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
+               hseq = seq_inc(seq_sub(seq, r->buf_size));
+               /* release stored frames up to new head to stack */
+               wil_release_reorder_frames(wil, r, hseq);
+       }
+
+       /* Now the new frame is always in the range of the reordering buffer */
+
+       index = reorder_index(r, seq);
+
+       /* check if we already stored this frame */
+       if (r->reorder_buf[index]) {
+               dev_kfree_skb(skb);
+               goto out;
+       }
+
+       /*
+        * If the current MPDU is in the right order and nothing else
+        * is stored we can process it directly, no need to buffer it.
+        * If it is first but there's something stored, we may be able
+        * to release frames after this one.
+        */
+       if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
+               r->head_seq_num = seq_inc(r->head_seq_num);
+               wil_netif_rx_any(skb, ndev);
+               goto out;
+       }
+
+       /* put the frame in the reordering buffer */
+       r->reorder_buf[index] = skb;
+       r->reorder_time[index] = jiffies;
+       r->stored_mpdu_num++;
+       wil_reorder_release(wil, r);
+
+out:
+       spin_unlock(&r->reorder_lock);
+}
+
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+                                               int size, u16 ssn)
+{
+       struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);
+       if (!r)
+               return NULL;
+
+       r->reorder_buf =
+               kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
+       r->reorder_time =
+               kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
+       if (!r->reorder_buf || !r->reorder_time) {
+               kfree(r->reorder_buf);
+               kfree(r->reorder_time);
+               kfree(r);
+               return NULL;
+       }
+
+       spin_lock_init(&r->reorder_lock);
+       r->ssn = ssn;
+       r->head_seq_num = ssn;
+       r->buf_size = size;
+       r->stored_mpdu_num = 0;
+       return r;
+}
+
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+                          struct wil_tid_ampdu_rx *r)
+{
+       if (!r)
+               return;
+       wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
+       kfree(r->reorder_buf);
+       kfree(r->reorder_time);
+       kfree(r);
+}
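
The reorder logic above keys everything off 12-bit 802.11 sequence numbers compared modulo 4096: seq_less() treats a forward distance greater than half the space as "older", and reorder_index() maps a sequence number onto a slot of the buf_size-entry ring. A minimal standalone sketch (plain C, separate from the patch) that reproduces the arithmetic and can be compiled on its own:

/* seq_demo.c - reproduces the modulo-4096 sequence arithmetic used above */
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

/* "a is before b" when the forward distance from b to a exceeds half the space */
static int seq_less(unsigned a, unsigned b)
{
        return ((a - b) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static unsigned seq_sub(unsigned a, unsigned b)
{
        return (a - b) & SEQ_MASK;
}

int main(void)
{
        printf("seq_less(0xffe, 0x002) = %d\n", seq_less(0xffe, 0x002)); /* 1: before, across the wrap */
        printf("seq_less(0x002, 0xffe) = %d\n", seq_less(0x002, 0xffe)); /* 0 */
        printf("seq_sub(0x002, 0xffe)  = %u\n", seq_sub(0x002, 0xffe));  /* 4 frames apart */
        /* slot in a 64-entry reorder ring for seq 0x010 when ssn is 0xff0 */
        printf("index = %u\n", seq_sub(0x010, 0xff0) % 64);              /* 32 */
        return 0;
}
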
index 0b0975d88b431644f172514442eea9dd272a1d38..c8c547457eb4f73dd5751df4324c5b41f18971e2 100644 (file)
@@ -104,6 +104,23 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
        return 0;
 }
 
+static void wil_txdesc_unmap(struct device *dev, struct vring_tx_desc *d,
+                            struct wil_ctx *ctx)
+{
+       dma_addr_t pa = wil_desc_addr(&d->dma.addr);
+       u16 dmalen = le16_to_cpu(d->dma.length);
+       switch (ctx->mapped_as) {
+       case wil_mapped_as_single:
+               dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+               break;
+       case wil_mapped_as_page:
+               dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+               break;
+       default:
+               break;
+       }
+}
+
 static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
                           int tx)
 {
@@ -122,15 +139,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
 
                        ctx = &vring->ctx[vring->swtail];
                        *d = *_d;
-                       pa = wil_desc_addr(&d->dma.addr);
-                       dmalen = le16_to_cpu(d->dma.length);
-                       if (vring->ctx[vring->swtail].mapped_as_page) {
-                               dma_unmap_page(dev, pa, dmalen,
-                                              DMA_TO_DEVICE);
-                       } else {
-                               dma_unmap_single(dev, pa, dmalen,
-                                                DMA_TO_DEVICE);
-                       }
+                       wil_txdesc_unmap(dev, d, ctx);
                        if (ctx->skb)
                                dev_kfree_skb_any(ctx->skb);
                        vring->swtail = wil_vring_next_tail(vring);
@@ -344,6 +353,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        u16 dmalen;
        u8 ftype;
        u8 ds_bits;
+       int cid;
+       struct wil_net_stats *stats;
+
 
        BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));
 
@@ -383,8 +395,10 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
        wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
                          skb->data, skb_headlen(skb), false);
 
-
-       wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);
+       cid = wil_rxdesc_cid(d);
+       stats = &wil->sta[cid].stats;
+       stats->last_mcs_rx = wil_rxdesc_mcs(d);
+       wil->stats.last_mcs_rx = stats->last_mcs_rx;
 
        /* use radiotap header only if required */
        if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
@@ -472,21 +486,28 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
  * Pass Rx packet to the netif. Update statistics.
  * Called in softirq context (NAPI poll).
  */
-static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
 {
-       int rc;
+       gro_result_t rc;
+       struct wil6210_priv *wil = ndev_to_wil(ndev);
        unsigned int len = skb->len;
+       struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+       int cid = wil_rxdesc_cid(d);
+       struct wil_net_stats *stats = &wil->sta[cid].stats;
 
        skb_orphan(skb);
 
-       rc = netif_receive_skb(skb);
+       rc = napi_gro_receive(&wil->napi_rx, skb);
 
-       if (likely(rc == NET_RX_SUCCESS)) {
+       if (unlikely(rc == GRO_DROP)) {
+               ndev->stats.rx_dropped++;
+               stats->rx_dropped++;
+               wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+       } else {
                ndev->stats.rx_packets++;
+               stats->rx_packets++;
                ndev->stats.rx_bytes += len;
-
-       } else {
-               ndev->stats.rx_dropped++;
+               stats->rx_bytes += len;
        }
 }
 
@@ -515,12 +536,18 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        skb->pkt_type = PACKET_OTHERHOST;
                        skb->protocol = htons(ETH_P_802_2);
-
+                       wil_netif_rx_any(skb, ndev);
                } else {
+                       struct ethhdr *eth = (void *)skb->data;
+
                        skb->protocol = eth_type_trans(skb, ndev);
+
+                       if (is_unicast_ether_addr(eth->h_dest))
+                               wil_rx_reorder(wil, skb);
+                       else
+                               wil_netif_rx_any(skb, ndev);
                }
 
-               wil_netif_rx_any(skb, ndev);
        }
        wil_rx_refill(wil, v->size);
 }
@@ -530,6 +557,11 @@ int wil_rx_init(struct wil6210_priv *wil)
        struct vring *vring = &wil->vring_rx;
        int rc;
 
+       if (vring->va) {
+               wil_err(wil, "Rx ring already allocated\n");
+               return -EINVAL;
+       }
+
        vring->size = WIL6210_RX_RING_SIZE;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
@@ -570,7 +602,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                                .ring_size = cpu_to_le16(size),
                        },
                        .ringid = id,
-                       .cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
+                       .cidxtid = mk_cidxtid(cid, tid),
                        .encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
                        .mac_ctrl = 0,
                        .to_resolution = 0,
@@ -586,6 +618,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                struct wmi_vring_cfg_done_event cmd;
        } __packed reply;
        struct vring *vring = &wil->vring_tx[id];
+       struct vring_tx_data *txdata = &wil->vring_tx_data[id];
 
        if (vring->va) {
                wil_err(wil, "Tx ring [%d] already allocated\n", id);
@@ -593,11 +626,15 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
                goto out;
        }
 
+       memset(txdata, 0, sizeof(*txdata));
        vring->size = size;
        rc = wil_vring_alloc(wil, vring);
        if (rc)
                goto out;
 
+       wil->vring2cid_tid[id][0] = cid;
+       wil->vring2cid_tid[id][1] = tid;
+
        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
        rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
@@ -613,6 +650,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
        }
        vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
 
+       txdata->enabled = 1;
+
        return 0;
  out_free:
        wil_vring_free(wil, vring, 1);
@@ -625,23 +664,116 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
 {
        struct vring *vring = &wil->vring_tx[id];
 
+       WARN_ON(!mutex_is_locked(&wil->mutex));
+
        if (!vring->va)
                return;
 
+       /* make sure NAPI won't touch this vring */
+       wil->vring_tx_data[id].enabled = 0;
+       if (test_bit(wil_status_napi_en, &wil->status))
+               napi_synchronize(&wil->napi_tx);
+
        wil_vring_free(wil, vring, 1);
 }
 
 static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
                                       struct sk_buff *skb)
 {
-       struct vring *v = &wil->vring_tx[0];
+       int i;
+       struct ethhdr *eth = (void *)skb->data;
+       int cid = wil_find_cid(wil, eth->h_dest);
+
+       if (cid < 0)
+               return NULL;
 
-       if (v->va)
-               return v;
+       if (!wil->sta[cid].data_port_open &&
+           (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+               return NULL;
+
+       /* TODO: fix for multiple TID */
+       for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+               if (wil->vring2cid_tid[i][0] == cid) {
+                       struct vring *v = &wil->vring_tx[i];
+                       wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
+                                    __func__, eth->h_dest, i);
+                       if (v->va) {
+                               return v;
+                       } else {
+                               wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+                               return NULL;
+                       }
+               }
+       }
 
        return NULL;
 }
 
+static void wil_set_da_for_vring(struct wil6210_priv *wil,
+                                struct sk_buff *skb, int vring_index)
+{
+       struct ethhdr *eth = (void *)skb->data;
+       int cid = wil->vring2cid_tid[vring_index][0];
+       memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN);
+}
+
+static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
+                       struct sk_buff *skb);
+/*
+ * Find 1-st vring and return it; set dest address for this vring in skb;
+ * duplicate skb and send copies to the other active vrings
+ */
+static struct vring *wil_tx_bcast(struct wil6210_priv *wil,
+                                      struct sk_buff *skb)
+{
+       struct vring *v, *v2;
+       struct sk_buff *skb2;
+       int i;
+       u8 cid;
+
+       /* find 1-st vring eligible for data */
+       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
+               v = &wil->vring_tx[i];
+               if (!v->va)
+                       continue;
+
+               cid = wil->vring2cid_tid[i][0];
+               if (!wil->sta[cid].data_port_open)
+                       continue;
+
+               goto found;
+       }
+
+       wil_err(wil, "Tx while no vrings active?\n");
+
+       return NULL;
+
+found:
+       wil_dbg_txrx(wil, "BCAST -> ring %d\n", i);
+       wil_set_da_for_vring(wil, skb, i);
+
+       /* find other active vrings and duplicate skb for each */
+       for (i++; i < WIL6210_MAX_TX_RINGS; i++) {
+               v2 = &wil->vring_tx[i];
+               if (!v2->va)
+                       continue;
+               cid = wil->vring2cid_tid[i][0];
+               if (!wil->sta[cid].data_port_open)
+                       continue;
+
+               skb2 = skb_copy(skb, GFP_ATOMIC);
+               if (skb2) {
+                       wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
+                       wil_set_da_for_vring(wil, skb2, i);
+                       wil_tx_vring(wil, v2, skb2);
+               } else {
+                       wil_err(wil, "skb_copy failed\n");
+               }
+       }
+
+       return v;
+}
+
 static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
                           int vring_index)
 {
@@ -667,6 +799,13 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
        return 0;
 }
 
+static inline
+void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
+{
+       d->mac.d[2] |= ((nr_frags + 1) <<
+                      MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+}
+
 static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
                                struct vring_tx_desc *d,
                                struct sk_buff *skb)
@@ -731,8 +870,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
 
-       if (avail < vring->size/8)
-               netif_tx_stop_all_queues(wil_to_ndev(wil));
        if (avail < 1 + nr_frags) {
                wil_err(wil, "Tx ring full. No space for %d fragments\n",
                        1 + nr_frags);
@@ -740,9 +877,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        }
        _d = &(vring->va[i].tx);
 
-       /* FIXME FW can accept only unicast frames for the peer */
-       memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);
-
        pa = dma_map_single(dev, skb->data,
                        skb_headlen(skb), DMA_TO_DEVICE);
 
@@ -753,6 +887,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 
        if (unlikely(dma_mapping_error(dev, pa)))
                return -EINVAL;
+       vring->ctx[i].mapped_as = wil_mapped_as_single;
        /* 1-st segment */
        wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
        /* Process TCP/UDP checksum offloading */
@@ -762,8 +897,8 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                goto dma_error;
        }
 
-       d->mac.d[2] |= ((nr_frags + 1) <<
-                      MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+       vring->ctx[i].nr_frags = nr_frags;
+       wil_tx_desc_set_nr_frags(d, nr_frags);
        if (nr_frags)
                *_d = *d;
 
@@ -778,8 +913,13 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                                DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, pa)))
                        goto dma_error;
+               vring->ctx[i].mapped_as = wil_mapped_as_page;
                wil_tx_desc_map(d, pa, len, vring_index);
-               vring->ctx[i].mapped_as_page = 1;
+               /* no need to check return code -
+                * if it succeeded for 1-st descriptor,
+                * it will succeed here too
+                */
+               wil_tx_desc_offload_cksum_set(wil, d, skb);
                *_d = *d;
        }
        /* for the last seg only */
@@ -808,7 +948,6 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        /* unmap what we have mapped */
        nr_frags = f + 1; /* frags mapped + one for skb head */
        for (f = 0; f < nr_frags; f++) {
-               u16 dmalen;
                struct wil_ctx *ctx;
 
                i = (swhead + f) % vring->size;
@@ -816,12 +955,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                _d = &(vring->va[i].tx);
                *d = *_d;
                _d->dma.status = TX_DMA_STATUS_DU;
-               pa = wil_desc_addr(&d->dma.addr);
-               dmalen = le16_to_cpu(d->dma.length);
-               if (ctx->mapped_as_page)
-                       dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
-               else
-                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+               wil_txdesc_unmap(dev, d, ctx);
 
                if (ctx->skb)
                        dev_kfree_skb_any(ctx->skb);
@@ -836,12 +970,17 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
+       struct ethhdr *eth = (void *)skb->data;
        struct vring *vring;
+       static bool pr_once_fw;
        int rc;
 
        wil_dbg_txrx(wil, "%s()\n", __func__);
        if (!test_bit(wil_status_fwready, &wil->status)) {
-               wil_err(wil, "FW not ready\n");
+               if (!pr_once_fw) {
+                       wil_err(wil, "FW not ready\n");
+                       pr_once_fw = true;
+               }
                goto drop;
        }
        if (!test_bit(wil_status_fwconnected, &wil->status)) {
@@ -852,16 +991,25 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                wil_err(wil, "Xmit in monitor mode not supported\n");
                goto drop;
        }
+       pr_once_fw = false;
 
        /* find vring */
-       vring = wil_find_tx_vring(wil, skb);
+       if (is_unicast_ether_addr(eth->h_dest)) {
+               vring = wil_find_tx_vring(wil, skb);
+       } else {
+               vring = wil_tx_bcast(wil, skb);
+       }
        if (!vring) {
-               wil_err(wil, "No Tx VRING available\n");
+               wil_err(wil, "No Tx VRING found for %pM\n", eth->h_dest);
                goto drop;
        }
        /* set up vring entry */
        rc = wil_tx_vring(wil, vring, skb);
 
+       /* do we still have enough room in the vring? */
+       if (wil_vring_avail_tx(vring) < vring->size/8)
+               netif_tx_stop_all_queues(wil_to_ndev(wil));
+
        switch (rc) {
        case 0:
                /* statistics will be updated on the tx_complete */
@@ -891,64 +1039,82 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
        struct net_device *ndev = wil_to_ndev(wil);
        struct device *dev = wil_to_dev(wil);
        struct vring *vring = &wil->vring_tx[ringid];
+       struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
        int done = 0;
+       int cid = wil->vring2cid_tid[ringid][0];
+       struct wil_net_stats *stats = &wil->sta[cid].stats;
+       volatile struct vring_tx_desc *_d;
 
        if (!vring->va) {
                wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
                return 0;
        }
 
+       if (!txdata->enabled) {
+               wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid);
+               return 0;
+       }
+
        wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
 
        while (!wil_vring_is_empty(vring)) {
-               volatile struct vring_tx_desc *_d =
-                                             &vring->va[vring->swtail].tx;
-               struct vring_tx_desc dd, *d = &dd;
-               dma_addr_t pa;
-               u16 dmalen;
+               int new_swtail;
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
-               struct sk_buff *skb = ctx->skb;
-
-               *d = *_d;
+               /*
+                * For the fragmented skb, HW will set the DU bit only for
+                * the last fragment; look for it.
+                */
+               int lf = (vring->swtail + ctx->nr_frags) % vring->size;
+               /* TODO: check we are not past head */
 
-               if (!(d->dma.status & TX_DMA_STATUS_DU))
+               _d = &vring->va[lf].tx;
+               if (!(_d->dma.status & TX_DMA_STATUS_DU))
                        break;
 
-               dmalen = le16_to_cpu(d->dma.length);
-               trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
-                                     d->dma.error);
-               wil_dbg_txrx(wil,
-                            "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
-                            vring->swtail, dmalen, d->dma.status,
-                            d->dma.error);
-               wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
-                                 (const void *)d, sizeof(*d), false);
-
-               pa = wil_desc_addr(&d->dma.addr);
-               if (ctx->mapped_as_page)
-                       dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
-               else
-                       dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
-
-               if (skb) {
-                       if (d->dma.error == 0) {
-                               ndev->stats.tx_packets++;
-                               ndev->stats.tx_bytes += skb->len;
-                       } else {
-                               ndev->stats.tx_errors++;
-                       }
+               new_swtail = (lf + 1) % vring->size;
+               while (vring->swtail != new_swtail) {
+                       struct vring_tx_desc dd, *d = &dd;
+                       u16 dmalen;
+                       struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+                       struct sk_buff *skb = ctx->skb;
+                       _d = &vring->va[vring->swtail].tx;
+
+                       *d = *_d;
+
+                       dmalen = le16_to_cpu(d->dma.length);
+                       trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
+                                             d->dma.error);
+                       wil_dbg_txrx(wil,
+                                    "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
+                                    vring->swtail, dmalen, d->dma.status,
+                                    d->dma.error);
+                       wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
+                                         (const void *)d, sizeof(*d), false);
+
+                       wil_txdesc_unmap(dev, d, ctx);
+
+                       if (skb) {
+                               if (d->dma.error == 0) {
+                                       ndev->stats.tx_packets++;
+                                       stats->tx_packets++;
+                                       ndev->stats.tx_bytes += skb->len;
+                                       stats->tx_bytes += skb->len;
+                               } else {
+                                       ndev->stats.tx_errors++;
+                                       stats->tx_errors++;
+                               }
 
-                       dev_kfree_skb_any(skb);
+                               dev_kfree_skb_any(skb);
+                       }
+                       memset(ctx, 0, sizeof(*ctx));
+                       /* There is no need to touch HW descriptor:
+                        * - status bit TX_DMA_STATUS_DU is set by design,
+                        *   so hardware will not try to process this desc.,
+                        * - rest of descriptor will be initialized on Tx.
+                        */
+                       vring->swtail = wil_vring_next_tail(vring);
+                       done++;
                }
-               memset(ctx, 0, sizeof(*ctx));
-               /*
-                * There is no need to touch HW descriptor:
-                * - ststus bit TX_DMA_STATUS_DU is set by design,
-                *   so hardware will not try to process this desc.,
-                * - rest of descriptor will be initialized on Tx.
-                */
-               vring->swtail = wil_vring_next_tail(vring);
-               done++;
        }
        if (wil_vring_avail_tx(vring) > vring->size/4)
                netif_tx_wake_all_queues(wil_to_ndev(wil));
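
One behavioral detail of the Tx changes above: wil_start_xmit() now stops the net queues only after queueing, once fewer than size/8 descriptors remain, while wil_tx_complete() wakes them again only once more than size/4 are free. The gap between the two thresholds is deliberate hysteresis so the queue state does not flap on every frame. A standalone sketch of that policy (illustrative only; the thresholds are the ones used in the code above):

#include <stdbool.h>
#include <stdio.h>

struct ring {
        int size;       /* total descriptors */
        int used;       /* descriptors currently in flight */
        bool stopped;   /* stand-in for the netif tx queue state */
};

static int ring_avail(const struct ring *r)
{
        return r->size - r->used;
}

/* after queueing a frame: stop when below 1/8 of the ring */
static void after_xmit(struct ring *r)
{
        if (ring_avail(r) < r->size / 8)
                r->stopped = true;      /* netif_tx_stop_all_queues() */
}

/* after reclaiming completions: wake only above 1/4 of the ring */
static void after_complete(struct ring *r)
{
        if (ring_avail(r) > r->size / 4)
                r->stopped = false;     /* netif_tx_wake_all_queues() */
}

int main(void)
{
        struct ring r = { .size = 128, .used = 115, .stopped = false };

        after_xmit(&r);                 /* avail 13 < 16 -> stop */
        printf("stopped=%d\n", r.stopped);
        r.used = 90;
        after_complete(&r);             /* avail 38 > 32 -> wake */
        printf("stopped=%d\n", r.stopped);
        return 0;
}
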
index b3828279204c97d4057c7fc8253b5c54d43deed5..bc5706a4f007b11eadd4b53faece30b45e49266f 100644 (file)
@@ -436,4 +436,11 @@ static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb)
        return (void *)skb->cb;
 }
 
+void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
+void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb);
+struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
+                                               int size, u16 ssn);
+void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
+                          struct wil_tid_ampdu_rx *r);
+
 #endif /* WIL6210_TXRX_H */
index 1f91eaf95bbebd0dfd70d316943558c5882ed54d..2a2dec75f02606c9ea71ecd95d846287520157d5 100644 (file)
@@ -74,23 +74,21 @@ struct RGF_ICR {
 } __packed;
 
 /* registers - FW addresses */
-#define RGF_USER_USER_SCRATCH_PAD      (0x8802bc)
-#define RGF_USER_USER_ICR              (0x880b4c) /* struct RGF_ICR */
-       #define BIT_USER_USER_ICR_SW_INT_2      BIT(18)
-#define RGF_USER_CLKS_CTL_SW_RST_MASK_0        (0x880b14)
-#define RGF_USER_MAC_CPU_0             (0x8801fc)
+#define RGF_USER_HW_MACHINE_STATE      (0x8801dc)
+       #define HW_MACHINE_BOOT_DONE    (0x3fffffd)
 #define RGF_USER_USER_CPU_0            (0x8801e0)
+#define RGF_USER_MAC_CPU_0             (0x8801fc)
+#define RGF_USER_USER_SCRATCH_PAD      (0x8802bc)
+#define RGF_USER_FW_REV_ID             (0x880a8c) /* chip revision */
+#define RGF_USER_CLKS_CTL_0            (0x880abc)
+       #define BIT_USER_CLKS_RST_PWGD  BIT(11) /* reset on "power good" */
 #define RGF_USER_CLKS_CTL_SW_RST_VEC_0 (0x880b04)
 #define RGF_USER_CLKS_CTL_SW_RST_VEC_1 (0x880b08)
 #define RGF_USER_CLKS_CTL_SW_RST_VEC_2 (0x880b0c)
 #define RGF_USER_CLKS_CTL_SW_RST_VEC_3 (0x880b10)
-
-#define RGF_DMA_PSEUDO_CAUSE           (0x881c68)
-#define RGF_DMA_PSEUDO_CAUSE_MASK_SW   (0x881c6c)
-#define RGF_DMA_PSEUDO_CAUSE_MASK_FW   (0x881c70)
-       #define BIT_DMA_PSEUDO_CAUSE_RX         BIT(0)
-       #define BIT_DMA_PSEUDO_CAUSE_TX         BIT(1)
-       #define BIT_DMA_PSEUDO_CAUSE_MISC       BIT(2)
+#define RGF_USER_CLKS_CTL_SW_RST_MASK_0        (0x880b14)
+#define RGF_USER_USER_ICR              (0x880b4c) /* struct RGF_ICR */
+       #define BIT_USER_USER_ICR_SW_INT_2      BIT(18)
 
 #define RGF_DMA_EP_TX_ICR              (0x881bb4) /* struct RGF_ICR */
        #define BIT_DMA_EP_TX_ICR_TX_DONE       BIT(0)
@@ -105,13 +103,22 @@ struct RGF_ICR {
 /* Interrupt moderation control */
 #define RGF_DMA_ITR_CNT_TRSH           (0x881c5c)
 #define RGF_DMA_ITR_CNT_DATA           (0x881c60)
-#define RGF_DMA_ITR_CNT_CRL            (0x881C64)
+#define RGF_DMA_ITR_CNT_CRL            (0x881c64)
        #define BIT_DMA_ITR_CNT_CRL_EN          BIT(0)
        #define BIT_DMA_ITR_CNT_CRL_EXT_TICK    BIT(1)
        #define BIT_DMA_ITR_CNT_CRL_FOREVER     BIT(2)
        #define BIT_DMA_ITR_CNT_CRL_CLR         BIT(3)
        #define BIT_DMA_ITR_CNT_CRL_REACH_TRSH  BIT(4)
 
+#define RGF_DMA_PSEUDO_CAUSE           (0x881c68)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_SW   (0x881c6c)
+#define RGF_DMA_PSEUDO_CAUSE_MASK_FW   (0x881c70)
+       #define BIT_DMA_PSEUDO_CAUSE_RX         BIT(0)
+       #define BIT_DMA_PSEUDO_CAUSE_TX         BIT(1)
+       #define BIT_DMA_PSEUDO_CAUSE_MISC       BIT(2)
+
+#define RGF_PCIE_LOS_COUNTER_CTL       (0x882dc4)
+
 /* popular locations */
 #define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
 #define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
@@ -125,6 +132,31 @@ struct RGF_ICR {
 
 /* Hardware definitions end */
 
+/**
+ * mk_cidxtid - construct @cidxtid field
+ * @cid: CID value
+ * @tid: TID value
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline u8 mk_cidxtid(u8 cid, u8 tid)
+{
+       return ((tid & 0xf) << 4) | (cid & 0xf);
+}
+
+/**
+ * parse_cidxtid - parse @cidxtid field
+ * @cid: store CID value here
+ * @tid: store TID value here
+ *
+ * @cidxtid field encoded as bits 0..3 - CID; 4..7 - TID
+ */
+static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
+{
+       *cid = cidxtid & 0xf;
+       *tid = (cidxtid >> 4) & 0xf;
+}
+
 struct wil6210_mbox_ring {
        u32 base;
        u16 entry_size; /* max. size of mbox entry, incl. all headers */
@@ -184,12 +216,19 @@ struct pending_wmi_event {
        } __packed event;
 };
 
+enum { /* for wil_ctx.mapped_as */
+       wil_mapped_as_none = 0,
+       wil_mapped_as_single = 1,
+       wil_mapped_as_page = 2,
+};
+
 /**
  * struct wil_ctx - software context for Vring descriptor
  */
 struct wil_ctx {
        struct sk_buff *skb;
-       u8 mapped_as_page:1;
+       u8 nr_frags;
+       u8 mapped_as;
 };
 
 union vring_desc;
@@ -204,6 +243,14 @@ struct vring {
        struct wil_ctx *ctx; /* ctx[size] - software context */
 };
 
+/**
+ * Additional data for Tx Vring
+ */
+struct vring_tx_data {
+       int enabled;
+
+};
+
 enum { /* for wil6210_priv.status */
        wil_status_fwready = 0,
        wil_status_fwconnecting,
@@ -211,10 +258,51 @@ enum { /* for wil6210_priv.status */
        wil_status_dontscan,
        wil_status_reset_done,
        wil_status_irqen, /* FIXME: interrupts enabled - for debug */
+       wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
 };
 
 struct pci_dev;
 
+/**
+ * struct wil_tid_ampdu_rx - TID aggregation information (Rx).
+ *
+ * @reorder_buf: buffer to reorder incoming aggregated MPDUs
+ * @reorder_time: jiffies when skb was added
+ * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
+ * @reorder_timer: releases expired frames from the reorder buffer.
+ * @last_rx: jiffies of last rx activity
+ * @head_seq_num: head sequence number in reordering buffer.
+ * @stored_mpdu_num: number of MPDUs in reordering buffer
+ * @ssn: Starting Sequence Number expected to be aggregated.
+ * @buf_size: buffer size for incoming A-MPDUs
+ * @timeout: reset timer value (in TUs).
+ * @dialog_token: dialog token for aggregation session
+ * @rcu_head: RCU head used for freeing this struct
+ * @reorder_lock: serializes access to reorder buffer, see below.
+ *
+ * This structure's lifetime is managed by RCU; assignments to
+ * the array holding it must hold the aggregation mutex.
+ *
+ * The @reorder_lock is used to protect the members of this
+ * struct, except for @timeout, @buf_size and @dialog_token,
+ * which are constant across the lifetime of the struct (the
+ * dialog token being used only for debugging).
+ */
+struct wil_tid_ampdu_rx {
+       spinlock_t reorder_lock; /* see above */
+       struct sk_buff **reorder_buf;
+       unsigned long *reorder_time;
+       struct timer_list session_timer;
+       struct timer_list reorder_timer;
+       unsigned long last_rx;
+       u16 head_seq_num;
+       u16 stored_mpdu_num;
+       u16 ssn;
+       u16 buf_size;
+       u16 timeout;
+       u8 dialog_token;
+};
+
 struct wil6210_stats {
        u64 tsf;
        u32 snr;
@@ -226,6 +314,43 @@ struct wil6210_stats {
        u16 peer_tx_sector;
 };
 
+enum wil_sta_status {
+       wil_sta_unused = 0,
+       wil_sta_conn_pending = 1,
+       wil_sta_connected = 2,
+};
+
+#define WIL_STA_TID_NUM (16)
+
+struct wil_net_stats {
+       unsigned long   rx_packets;
+       unsigned long   tx_packets;
+       unsigned long   rx_bytes;
+       unsigned long   tx_bytes;
+       unsigned long   tx_errors;
+       unsigned long   rx_dropped;
+       u16 last_mcs_rx;
+};
+
+/**
+ * struct wil_sta_info - data for peer
+ *
+ * Peer identified by its CID (connection ID)
+ * NIC performs beam forming for each peer;
+ * if beam forming is not done, frame exchange is not
+ * possible.
+ */
+struct wil_sta_info {
+       u8 addr[ETH_ALEN];
+       enum wil_sta_status status;
+       struct wil_net_stats stats;
+       bool data_port_open; /* can send any data, not only EAPOL */
+       /* Rx BACK */
+       struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
+       unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+       unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+};
+
 struct wil6210_priv {
        struct pci_dev *pdev;
        int n_msi;
@@ -233,6 +358,7 @@ struct wil6210_priv {
        void __iomem *csr;
        ulong status;
        u32 fw_version;
+       u32 hw_version;
        u8 n_mids; /* number of additional MIDs as reported by FW */
        /* profile */
        u32 monitor_flags;
@@ -253,6 +379,7 @@ struct wil6210_priv {
        struct workqueue_struct *wmi_wq_conn; /* for connect worker */
        struct work_struct connect_worker;
        struct work_struct disconnect_worker;
+       struct work_struct fw_error_worker;     /* for FW error recovery */
        struct timer_list connect_timer;
        int pending_connect_cid;
        struct list_head pending_wmi_ev;
@@ -267,7 +394,9 @@ struct wil6210_priv {
        /* DMA related */
        struct vring vring_rx;
        struct vring vring_tx[WIL6210_MAX_TX_RINGS];
-       u8 dst_addr[WIL6210_MAX_TX_RINGS][ETH_ALEN];
+       struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS];
+       u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */
+       struct wil_sta_info sta[WIL6210_MAX_CID];
        /* scan */
        struct cfg80211_scan_request *scan_request;
 
@@ -329,11 +458,13 @@ void wil_if_remove(struct wil6210_priv *wil);
 int wil_priv_init(struct wil6210_priv *wil);
 void wil_priv_deinit(struct wil6210_priv *wil);
 int wil_reset(struct wil6210_priv *wil);
+void wil_fw_error_recovery(struct wil6210_priv *wil);
 void wil_link_on(struct wil6210_priv *wil);
 void wil_link_off(struct wil6210_priv *wil);
 int wil_up(struct wil6210_priv *wil);
 int wil_down(struct wil6210_priv *wil);
 void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r);
+int wil_find_cid(struct wil6210_priv *wil, const u8 *mac);
 
 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr);
 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr);
@@ -357,8 +488,11 @@ int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
 int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
+int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason);
 
+void wil6210_clear_irq(struct wil6210_priv *wil);
 int wil6210_init_irq(struct wil6210_priv *wil, int irq);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
 void wil6210_disable_irq(struct wil6210_priv *wil);
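
The new mk_cidxtid()/parse_cidxtid() helpers above define the one-byte encoding used by WMI commands such as wmi_vring_cfg: the CID occupies bits 0..3 and the TID bits 4..7. A quick standalone round-trip check (same logic, compiled outside the kernel):

#include <stdio.h>

typedef unsigned char u8;

static u8 mk_cidxtid(u8 cid, u8 tid)
{
        return ((tid & 0xf) << 4) | (cid & 0xf);
}

static void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid)
{
        *cid = cidxtid & 0xf;
        *tid = (cidxtid >> 4) & 0xf;
}

int main(void)
{
        u8 cid, tid;
        u8 packed = mk_cidxtid(3, 5);   /* expect 0x53 */

        parse_cidxtid(packed, &cid, &tid);
        printf("packed=0x%02x cid=%d tid=%d\n", packed, cid, tid);
        return 0;
}
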
index 063963ee422a497ff3ec7dd1dce815f8411690b7..2ba56eef0c457d4397c27fa84a1291b011d3884f 100644 (file)
@@ -307,14 +307,14 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        u32 freq = ieee80211_channel_to_frequency(ch_no,
                        IEEE80211_BAND_60GHZ);
        struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
-       /* TODO convert LE to CPU */
-       s32 signal = 0; /* TODO */
+       s32 signal = data->info.sqi;
        __le16 fc = rx_mgmt_frame->frame_control;
        u32 d_len = le32_to_cpu(data->info.len);
        u16 d_status = le16_to_cpu(data->info.status);
 
-       wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
-                   data->info.channel, data->info.mcs, data->info.snr);
+       wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+                   data->info.channel, data->info.mcs, data->info.snr,
+                   data->info.sqi);
        wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
                    le16_to_cpu(fc));
        wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
@@ -384,6 +384,11 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
                        evt->assoc_req_len, evt->assoc_resp_len);
                return;
        }
+       if (evt->cid >= WIL6210_MAX_CID) {
+               wil_err(wil, "Connect CID invalid : %d\n", evt->cid);
+               return;
+       }
+
        ch = evt->channel + 1;
        wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
                    evt->bssid, ch, evt->cid);
@@ -439,7 +444,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
        /* FIXME FW can transmit only ucast frames to peer */
        /* FIXME real ring_id instead of hard coded 0 */
-       memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
+       memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN);
+       wil->sta[evt->cid].status = wil_sta_conn_pending;
 
        wil->pending_connect_cid = evt->cid;
        queue_work(wil->wmi_wq_conn, &wil->connect_worker);
@@ -456,7 +462,9 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
 
        wil->sinfo_gen++;
 
+       mutex_lock(&wil->mutex);
        wil6210_disconnect(wil, evt->bssid);
+       mutex_unlock(&wil->mutex);
 }
 
 static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
@@ -476,11 +484,11 @@ static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
        wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
        wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
        wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
-                   "BF status 0x%08x SNR 0x%08x\n"
+                   "BF status 0x%08x SNR 0x%08x SQI %d%%\n"
                    "Tx Tpt %d goodput %d Rx goodput %d\n"
                    "Sectors(rx:tx) my %d:%d peer %d:%d\n",
                    wil->stats.bf_mcs, wil->stats.tsf, evt->status,
-                   wil->stats.snr, le32_to_cpu(evt->tx_tpt),
+                   wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt),
                    le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
                    wil->stats.my_rx_sector, wil->stats.my_tx_sector,
                    wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
@@ -499,10 +507,16 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
        int sz = eapol_len + ETH_HLEN;
        struct sk_buff *skb;
        struct ethhdr *eth;
+       int cid;
+       struct wil_net_stats *stats = NULL;
 
        wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
                    evt->src_mac);
 
+       cid = wil_find_cid(wil, evt->src_mac);
+       if (cid >= 0)
+               stats = &wil->sta[cid].stats;
+
        if (eapol_len > 196) { /* TODO: revisit size limit */
                wil_err(wil, "EAPOL too large\n");
                return;
@@ -513,6 +527,7 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
                wil_err(wil, "Failed to allocate skb\n");
                return;
        }
+
        eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
        memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
        memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
@@ -521,9 +536,15 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
        skb->protocol = eth_type_trans(skb, ndev);
        if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
                ndev->stats.rx_packets++;
-               ndev->stats.rx_bytes += skb->len;
+               ndev->stats.rx_bytes += sz;
+               if (stats) {
+                       stats->rx_packets++;
+                       stats->rx_bytes += sz;
+               }
        } else {
                ndev->stats.rx_dropped++;
+               if (stats)
+                       stats->rx_dropped++;
        }
 }
 
@@ -531,9 +552,16 @@ static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
 {
        struct net_device *ndev = wil_to_ndev(wil);
        struct wmi_data_port_open_event *evt = d;
+       u8 cid = evt->cid;
 
-       wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
+       wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
 
+       if (cid >= ARRAY_SIZE(wil->sta)) {
+               wil_err(wil, "Link UP for invalid CID %d\n", cid);
+               return;
+       }
+
+       wil->sta[cid].data_port_open = true;
        netif_carrier_on(ndev);
 }
 
@@ -541,10 +569,17 @@ static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
 {
        struct net_device *ndev = wil_to_ndev(wil);
        struct wmi_wbe_link_down_event *evt = d;
+       u8 cid = evt->cid;
 
        wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
-                   evt->cid, le32_to_cpu(evt->reason));
+                   cid, le32_to_cpu(evt->reason));
+
+       if (cid >= ARRAY_SIZE(wil->sta)) {
+               wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+               return;
+       }
 
+       wil->sta[cid].data_port_open = false;
        netif_carrier_off(ndev);
 }
 
@@ -552,10 +587,42 @@ static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
                              int len)
 {
        struct wmi_vring_ba_status_event *evt = d;
+       struct wil_sta_info *sta;
+       uint i, cid;
+
+       /* TODO: use Rx BA status, not Tx one */
 
        wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
-                   evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
-                   __le16_to_cpu(evt->ba_timeout));
+                   evt->ringid,
+                   evt->status == WMI_BA_AGREED ? "OK" : "N/A",
+                   evt->agg_wsize, __le16_to_cpu(evt->ba_timeout));
+
+       if (evt->ringid >= WIL6210_MAX_TX_RINGS) {
+               wil_err(wil, "invalid ring id %d\n", evt->ringid);
+               return;
+       }
+
+       cid = wil->vring2cid_tid[evt->ringid][0];
+       if (cid >= WIL6210_MAX_CID) {
+               wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid);
+               return;
+       }
+
+       sta = &wil->sta[cid];
+       if (sta->status == wil_sta_unused) {
+               wil_err(wil, "CID %d unused\n", cid);
+               return;
+       }
+
+       wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr);
+       for (i = 0; i < WIL_STA_TID_NUM; i++) {
+               struct wil_tid_ampdu_rx *r = sta->tid_rx[i];
+               sta->tid_rx[i] = NULL;
+               wil_tid_ampdu_rx_free(wil, r);
+               if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize)
+                       sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil,
+                                               evt->agg_wsize, 0);
+       }
 }
 
 static const struct {
@@ -893,6 +960,38 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
        return rc;
 }
 
+/**
+ * wmi_rxon - turn radio on/off
+ * @on:                turn on if true, off otherwise
+ *
+ * Only switch radio. Channel should be set separately.
+ * No timeout for rxon - the radio stays on until some other call
+ * turns it off.
+ */
+int wmi_rxon(struct wil6210_priv *wil, bool on)
+{
+       int rc;
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_listen_started_event evt;
+       } __packed reply;
+
+       wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+
+       if (on) {
+               rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+                             WMI_LISTEN_STARTED_EVENTID,
+                             &reply, sizeof(reply), 100);
+               if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS))
+                       rc = -EINVAL;
+       } else {
+               rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+                             WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20);
+       }
+
+       return rc;
+}
+
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
 {
        struct wireless_dev *wdev = wil->wdev;
@@ -906,6 +1005,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
                },
                .mid = 0, /* TODO - what is it? */
                .decap_trans_type = WMI_DECAP_TYPE_802_3,
+               .reorder_type = WMI_RX_SW_REORDER,
        };
        struct {
                struct wil6210_mbox_hdr_wmi wmi;
@@ -973,6 +1073,18 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
        return 0;
 }
 
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
+{
+       struct wmi_disconnect_sta_cmd cmd = {
+               .disconnect_reason = cpu_to_le16(reason),
+       };
+       memcpy(cmd.dst_mac, mac, ETH_ALEN);
+
+       wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+
+       return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+}
+
 void wmi_event_flush(struct wil6210_priv *wil)
 {
        struct pending_wmi_event *evt, *t;
index bf93ea859f2d7f1fc05257a86a0b5b1525dd62c3..1fe41af81a597a4a90c75aac5557567725b09fc5 100644 (file)
@@ -67,7 +67,7 @@
 #include <linux/moduleparam.h>
 #include <linux/firmware.h>
 #include <linux/jiffies.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
 #include "atmel.h"
 
 #define DRIVER_MAJOR 0
@@ -2273,7 +2273,7 @@ static int atmel_set_freq(struct net_device *dev,
 
                /* Hack to fall through... */
                fwrq->e = 0;
-               fwrq->m = ieee80211_freq_to_dsss_chan(f);
+               fwrq->m = ieee80211_frequency_to_channel(f);
        }
        /* Setting by channel number */
        if ((fwrq->m > 1000) || (fwrq->e > 0))
@@ -2434,8 +2434,8 @@ static int atmel_get_range(struct net_device *dev,
                        range->freq[k].i = i; /* List index */
 
                        /* Values in MHz -> * 10^5 * 10 */
-                       range->freq[k].m = (ieee80211_dsss_chan_to_freq(i) *
-                                           100000);
+                       range->freq[k].m = 100000 *
+                        ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
                        range->freq[k++].e = 1;
                }
                range->num_frequency = k;
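
The atmel change above (and the b43 changes further below) replace driver-local channel/frequency conversion with cfg80211's ieee80211_channel_to_frequency()/ieee80211_frequency_to_channel(). For the 2.4 GHz band the mapping is 2407 + 5 * channel MHz, with channel 14 special-cased at 2484 MHz; a standalone sketch of just that band (not the cfg80211 implementation itself):

#include <stdio.h>

/* 2.4 GHz DSSS channels: 1..13 at 2407 + 5 * ch MHz, channel 14 at 2484 MHz */
static int chan_to_freq_2ghz(int ch)
{
        return (ch == 14) ? 2484 : 2407 + 5 * ch;
}

static int freq_to_chan_2ghz(int mhz)
{
        return (mhz == 2484) ? 14 : (mhz - 2407) / 5;
}

int main(void)
{
        printf("ch 1  -> %d MHz\n", chan_to_freq_2ghz(1));      /* 2412 */
        printf("ch 14 -> %d MHz\n", chan_to_freq_2ghz(14));     /* 2484 */
        printf("2437 MHz -> ch %d\n", freq_to_chan_2ghz(2437)); /* 6 */
        return 0;
}
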
index 51ff0b198d0a94c6486bec11136a6b08ea9d29c2..088d544ec63f940b2a7b2234f1eac9b7458a19ce 100644 (file)
@@ -92,7 +92,7 @@ config B43_SDIO
 # if we can do DMA.
 config B43_BCMA_PIO
        bool
-       depends on B43_BCMA
+       depends on B43 && B43_BCMA
        select BCMA_BLOCKIO
        default y
 
index 822aad8842f4defbe8b24af07b4f0559f53223b5..50517b801cb42d40137d2c0739440220ce8e2151 100644 (file)
@@ -86,7 +86,7 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev,
 
 static inline bool b43_debug(struct b43_wldev *dev, enum b43_dyndbg feature)
 {
-       return 0;
+       return false;
 }
 
 static inline void b43_debugfs_init(void)
index c75237eb55a16b7e17de051ff7f51f1ca62c4fe1..69fc3d65531a7eeb88c4901f804ab197dbbb939e 100644 (file)
@@ -1549,7 +1549,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
 
        bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
-       len = min((size_t) dev->wl->current_beacon->len,
+       len = min_t(size_t, dev->wl->current_beacon->len,
                  0x200 - sizeof(struct b43_plcp_hdr6));
        rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
 
index abac25ee958dad54f140c550ef62ba05bf2bc3ae..f476fc337d64c8098ff7397bff7fedd2a9bcc4b0 100644 (file)
@@ -58,41 +58,6 @@ enum b43_verbosity {
 #endif
 };
 
-
-/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
-static inline u8 b43_freq_to_channel_5ghz(int freq)
-{
-       return ((freq - 5000) / 5);
-}
-static inline u8 b43_freq_to_channel_2ghz(int freq)
-{
-       u8 channel;
-
-       if (freq == 2484)
-               channel = 14;
-       else
-               channel = (freq - 2407) / 5;
-
-       return channel;
-}
-
-/* Lightweight function to convert a channel number to a frequency (in Mhz). */
-static inline int b43_channel_to_freq_5ghz(u8 channel)
-{
-       return (5000 + (5 * channel));
-}
-static inline int b43_channel_to_freq_2ghz(u8 channel)
-{
-       int freq;
-
-       if (channel == 14)
-               freq = 2484;
-       else
-               freq = 2407 + (5 * channel);
-
-       return freq;
-}
-
 static inline int b43_is_cck_rate(int rate)
 {
        return (rate == B43_CCK_RATE_1MB ||
index f01676ac481b25071e9f7aae9ad790dbd5836ff0..dbaa51890198945b7552ed506bc3c8bfe4ba2359 100644 (file)
@@ -133,9 +133,9 @@ void b43_phy_exit(struct b43_wldev *dev)
 bool b43_has_hardware_pctl(struct b43_wldev *dev)
 {
        if (!dev->phy.hardware_power_control)
-               return 0;
+               return false;
        if (!dev->phy.ops->supports_hwpctl)
-               return 0;
+               return false;
        return dev->phy.ops->supports_hwpctl(dev);
 }
 
index a73ff8c9deb55ff320f5e3ba46ac6b0fc8dabef1..a4ff5e2a42b95da1fedc6c64f1ef3ac8042afb5b 100644 (file)
@@ -637,7 +637,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
 
                ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
                if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
-                       return 0;
+                       return false;
                b43_piorx_write32(q, B43_PIO8_RXCTL,
                                  B43_PIO8_RXCTL_FRAMERDY);
                for (i = 0; i < 10; i++) {
@@ -651,7 +651,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
 
                ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
                if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
-                       return 0;
+                       return false;
                b43_piorx_write16(q, B43_PIO_RXCTL,
                                  B43_PIO_RXCTL_FRAMERDY);
                for (i = 0; i < 10; i++) {
@@ -662,7 +662,7 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
                }
        }
        b43dbg(q->dev->wl, "PIO RX timed out\n");
-       return 1;
+       return true;
 data_ready:
 
        /* Get the preamble (RX header) */
@@ -759,7 +759,7 @@ data_ready:
 
        b43_rx(q->dev, skb, rxhdr);
 
-       return 1;
+       return true;
 
 rx_error:
        if (err_msg)
@@ -769,7 +769,7 @@ rx_error:
        else
                b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
 
-       return 1;
+       return true;
 }
 
 void b43_pio_rx(struct b43_pio_rxqueue *q)
index 8e8431d4eb0c0d9c4ee86463e5823a91cca952dc..3190493bd07f4ce272ba90fbe61aa0a32f9d0535 100644 (file)
@@ -40,7 +40,7 @@ static int get_integer(const char *buf, size_t count)
 
        if (count == 0)
                goto out;
-       count = min(count, (size_t) 10);
+       count = min_t(size_t, count, 10);
        memcpy(tmp, buf, count);
        ret = simple_strtol(tmp, NULL, 10);
       out:
index 50e5ddb12fb3f6bf97217819a862c5d508ec0bd4..31adb8cf0291fb0bbebf1a6896c8f4ff4d7288b9 100644 (file)
@@ -337,7 +337,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
                        /* iv16 */
                        memcpy(txhdr->iv + 10, ((u8 *) wlhdr) + wlhdr_len, 3);
                } else {
-                       iv_len = min((size_t) info->control.hw_key->iv_len,
+                       iv_len = min_t(size_t, info->control.hw_key->iv_len,
                                     ARRAY_SIZE(txhdr->iv));
                        memcpy(txhdr->iv, ((u8 *) wlhdr) + wlhdr_len, iv_len);
                }
@@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
                B43_WARN_ON(1);
                /* FIXME: We don't really know which value the "chanid" contains.
                 *        So the following assignment might be wrong. */
-               status.freq = b43_channel_to_freq_5ghz(chanid);
+               status.freq =
+                       ieee80211_channel_to_frequency(chanid, status.band);
                break;
        case B43_PHYTYPE_G:
                status.band = IEEE80211_BAND_2GHZ;
@@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
        case B43_PHYTYPE_HT:
                /* chanid is the SHM channel cookie. Which is the plain
                 * channel number in b43. */
-               if (chanstat & B43_RX_CHAN_5GHZ) {
+               if (chanstat & B43_RX_CHAN_5GHZ)
                        status.band = IEEE80211_BAND_5GHZ;
-                       status.freq = b43_channel_to_freq_5ghz(chanid);
-               } else {
+               else
                        status.band = IEEE80211_BAND_2GHZ;
-                       status.freq = b43_channel_to_freq_2ghz(chanid);
-               }
+               status.freq =
+                       ieee80211_channel_to_frequency(chanid, status.band);
                break;
        default:
                B43_WARN_ON(1);
index 349c77605231a660dbf398b3a93a13978715de3d..1aec2146a2bfb5ecfe3fe6d8452236799058d9c8 100644 (file)
@@ -978,7 +978,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon);
 
        bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
-       len = min((size_t)dev->wl->current_beacon->len,
+       len = min_t(size_t, dev->wl->current_beacon->len,
                  0x200 - sizeof(struct b43legacy_plcp_hdr6));
        rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value;
 
@@ -1155,7 +1155,7 @@ static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
        b43legacy_write_probe_resp_plcp(dev, 0x350, size,
                                        &b43legacy_b_ratetable[3]);
 
-       size = min((size_t)size,
+       size = min_t(size_t, size,
                   0x200 - sizeof(struct b43legacy_plcp_hdr6));
        b43legacy_write_template_common(dev, probe_resp_data,
                                        size, ram_offset,
index 57f8b089767ca0f7d9700e16fee71bbe76998855..2a1da15c913b0704eb5aa04d179e6eef70e0cfa6 100644 (file)
@@ -42,7 +42,7 @@ static int get_integer(const char *buf, size_t count)
 
        if (count == 0)
                goto out;
-       count = min(count, (size_t)10);
+       count = min_t(size_t, count, 10);
        memcpy(tmp, buf, count);
        ret = simple_strtol(tmp, NULL, 10);
 out:
index 86588c9ff0f2b6cddee0b3a7da5455fd05b29a9b..34bf3f0b729f79c901ba5351bd47043591118563 100644 (file)
@@ -254,7 +254,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
                                   B43legacy_TX4_MAC_KEYALG_SHIFT) &
                                   B43legacy_TX4_MAC_KEYALG;
                        wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
-                       iv_len = min((size_t)info->control.hw_key->iv_len,
+                       iv_len = min_t(size_t, info->control.hw_key->iv_len,
                                     ARRAY_SIZE(txhdr->iv));
                        memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
                } else {
index 57cddee03252b02b2112e59d868ec9285d87b210..1d2ceac3a221bd779320daf540bc49ecc9b219c5 100644 (file)
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
 obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
 brcmfmac-objs += \
                wl_cfg80211.o \
+               chip.o \
                fwil.o \
                fweh.o \
                fwsignal.o \
@@ -36,8 +37,7 @@ brcmfmac-objs += \
                btcoex.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
                dhd_sdio.o \
-               bcmsdh.o \
-               sdio_chip.o
+               bcmsdh.o
 brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
                usb.o
 brcmfmac-$(CONFIG_BRCMDBG) += \
index fa35b23bbaa736de6fcad84efc1a6cd93f0669b8..a16e644e7c08826fed5c6d75a6413b8ff5959bdd 100644 (file)
@@ -43,7 +43,6 @@
 #include "dhd_bus.h"
 #include "dhd_dbg.h"
 #include "sdio_host.h"
-#include "sdio_chip.h"
 
 #define SDIOH_API_ACCESS_RETRY_LIMIT   2
 
 /* Maximum milliseconds to wait for F2 to come up */
 #define SDIO_WAIT_F2RDY        3000
 
+#define BRCMF_DEFAULT_TXGLOM_SIZE      32  /* max tx frames in glom chain */
+#define BRCMF_DEFAULT_RXGLOM_SIZE      32  /* max rx frames in glom chain */
+
+static int brcmf_sdiod_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
+module_param_named(txglomsz, brcmf_sdiod_txglomsz, int, 0);
+MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
 
 static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
 {
@@ -264,26 +269,17 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
                break;
        }
 
-       if (ret) {
-               /*
-                * SleepCSR register access can fail when
-                * waking up the device so reduce this noise
-                * in the logs.
-                */
-               if (addr != SBSDIO_FUNC1_SLEEPCSR)
-                       brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
-                                 write ? "write" : "read", fn, addr, ret);
-               else
-                       brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
-                                 write ? "write" : "read", fn, addr, ret);
-       }
+       if (ret)
+               brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+                         write ? "write" : "read", fn, addr, ret);
+
        return ret;
 }
 
 static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                                   u8 regsz, void *data, bool write)
 {
-       u8 func_num;
+       u8 func;
        s32 retry = 0;
        int ret;
 
@@ -297,9 +293,9 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
         * The rest: function 1 silicon backplane core registers
         */
        if ((addr & ~REG_F0_REG_MASK) == 0)
-               func_num = SDIO_FUNC_0;
+               func = SDIO_FUNC_0;
        else
-               func_num = SDIO_FUNC_1;
+               func = SDIO_FUNC_1;
 
        do {
                if (!write)
@@ -307,16 +303,26 @@ static int brcmf_sdiod_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                /* for retry wait for 1 ms till bus get settled down */
                if (retry)
                        usleep_range(1000, 2000);
-               ret = brcmf_sdiod_request_data(sdiodev, func_num, addr, regsz,
+               ret = brcmf_sdiod_request_data(sdiodev, func, addr, regsz,
                                               data, write);
        } while (ret != 0 && ret != -ENOMEDIUM &&
                 retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
 
        if (ret == -ENOMEDIUM)
                brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_NOMEDIUM);
-       else if (ret != 0)
-               brcmf_err("failed with %d\n", ret);
-
+       else if (ret != 0) {
+               /*
+                * SleepCSR register access can fail when
+                * waking up the device so reduce this noise
+                * in the logs.
+                */
+               if (addr != SBSDIO_FUNC1_SLEEPCSR)
+                       brcmf_err("failed to %s data F%d@0x%05x, err: %d\n",
+                                 write ? "write" : "read", func, addr, ret);
+               else
+                       brcmf_dbg(SDIO, "failed to %s data F%d@0x%05x, err: %d\n",
+                                 write ? "write" : "read", func, addr, ret);
+       }
        return ret;
 }
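
The hunks above move the SleepCSR-aware error reporting out of the single-attempt accessor and into this retrying helper, which bounds the number of attempts and bails out as soon as the card disappears. For readers who want the control flow in isolation, a stand-alone sketch of the same retry shape follows; do_access() is a placeholder, not driver code:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define ACCESS_RETRY_LIMIT 2	/* mirrors SDIOH_API_ACCESS_RETRY_LIMIT */

/* Placeholder single-attempt accessor; returns 0 on success, -errno on failure. */
static int do_access(int attempt)
{
	return attempt < 2 ? -EIO : 0;	/* pretend the third attempt succeeds */
}

/* Retry transient errors with a short pause between attempts, but give up
 * immediately on -ENOMEDIUM (card gone), exactly as the hunk above does.
 */
static int access_with_retry(void)
{
	int retry = 0;
	int ret;

	do {
		if (retry)
			usleep(1000);	/* the driver uses usleep_range(1000, 2000) */
		ret = do_access(retry);
	} while (ret != 0 && ret != -ENOMEDIUM &&
		 retry++ < ACCESS_RETRY_LIMIT);

	return ret;
}

int main(void)
{
	printf("result: %d\n", access_with_retry());	/* prints 0 */
	return 0;
}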
 
@@ -488,7 +494,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
        struct mmc_request mmc_req;
        struct mmc_command mmc_cmd;
        struct mmc_data mmc_dat;
-       struct sg_table st;
        struct scatterlist *sgl;
        int ret = 0;
 
@@ -533,16 +538,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
        pkt_offset = 0;
        pkt_next = target_list->next;
 
-       if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
-               ret = -ENOMEM;
-               goto exit;
-       }
-
        memset(&mmc_req, 0, sizeof(struct mmc_request));
        memset(&mmc_cmd, 0, sizeof(struct mmc_command));
        memset(&mmc_dat, 0, sizeof(struct mmc_data));
 
-       mmc_dat.sg = st.sgl;
+       mmc_dat.sg = sdiodev->sgtable.sgl;
        mmc_dat.blksz = func_blk_sz;
        mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
        mmc_cmd.opcode = SD_IO_RW_EXTENDED;
@@ -558,7 +558,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
        while (seg_sz) {
                req_sz = 0;
                sg_cnt = 0;
-               sgl = st.sgl;
+               sgl = sdiodev->sgtable.sgl;
                /* prep sg table */
                while (pkt_next != (struct sk_buff *)target_list) {
                        pkt_data = pkt_next->data + pkt_offset;
@@ -640,7 +640,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
        }
 
 exit:
-       sg_free_table(&st);
+       sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
        while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
                brcmu_pkt_buf_free_skb(pkt_next);
 
@@ -827,7 +827,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                }
                if (!write)
                        memcpy(data, pkt->data, dsize);
-               skb_trim(pkt, dsize);
+               skb_trim(pkt, 0);
 
                /* Adjust for next transfer (if any) */
                size -= dsize;
@@ -864,6 +864,29 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
        return 0;
 }
 
+static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+{
+       uint nents;
+       int err;
+
+       if (!sdiodev->sg_support)
+               return;
+
+       nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, brcmf_sdiod_txglomsz);
+       nents += (nents >> 4) + 1;
+
+       WARN_ON(nents > sdiodev->max_segment_count);
+
+       brcmf_dbg(TRACE, "nents=%d\n", nents);
+       err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
+       if (err < 0) {
+               brcmf_err("allocation failed: disable scatter-gather");
+               sdiodev->sg_support = false;
+       }
+
+       sdiodev->txglomsz = brcmf_sdiod_txglomsz;
+}
+
 static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
        if (sdiodev->bus) {
@@ -881,6 +904,7 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
        sdio_disable_func(sdiodev->func[1]);
        sdio_release_host(sdiodev->func[1]);
 
+       sg_free_table(&sdiodev->sgtable);
        sdiodev->sbwad = 0;
 
        return 0;
@@ -936,6 +960,11 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                                           SG_MAX_SINGLE_ALLOC);
        sdiodev->max_segment_size = host->max_seg_size;
 
+       /* allocate scatter-gather table. sg support
+        * will be disabled upon allocation failure.
+        */
+       brcmf_sdiod_sgtable_alloc(sdiodev);
+
        /* try to attach to the target device */
        sdiodev->bus = brcmf_sdio_probe(sdiodev);
        if (!sdiodev->bus) {
@@ -960,6 +989,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43362)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
                     SDIO_DEVICE_ID_BROADCOM_4335_4339)},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4354)},
        { /* end: all zeroes */ },
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -1073,9 +1103,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        int ret = 0;
 
-       brcmf_dbg(SDIO, "\n");
-
-       atomic_set(&sdiodev->suspend, true);
+       brcmf_dbg(SDIO, "Enter\n");
 
        sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
        if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
@@ -1083,9 +1111,12 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
                return -EINVAL;
        }
 
+       atomic_set(&sdiodev->suspend, true);
+
        ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
        if (ret) {
                brcmf_err("Failed to set pm_flags\n");
+               atomic_set(&sdiodev->suspend, false);
                return ret;
        }
 
@@ -1099,6 +1130,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
 
+       brcmf_dbg(SDIO, "Enter\n");
        brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
        atomic_set(&sdiodev->suspend, false);
        return 0;
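
The suspend/resume hunks above reorder the bookkeeping so the suspend flag is only set once the host is known to keep power, and is cleared again if arming MMC_PM_KEEP_POWER fails. A minimal sketch of that validate-then-commit-then-rollback shape, with placeholder helpers standing in for the MMC calls:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool suspended;
};

/* Placeholders for sdio_get_host_pm_caps()/sdio_set_host_pm_flags(). */
static bool host_keeps_power(void)	{ return true; }
static int arm_keep_power(void)		{ return 0; }

static int sketch_suspend(struct dev_state *dev)
{
	if (!host_keeps_power())
		return -EINVAL;		/* bail out before touching state */

	dev->suspended = true;		/* commit only after validation */

	if (arm_keep_power() != 0) {
		dev->suspended = false;	/* roll back on failure */
		return -EIO;
	}
	return 0;
}

int main(void)
{
	struct dev_state dev = { .suspended = false };

	printf("%d %d\n", sketch_suspend(&dev), dev.suspended);	/* 0 1 */
	return 0;
}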
@@ -1115,14 +1147,15 @@ static struct sdio_driver brcmf_sdmmc_driver = {
        .remove = brcmf_ops_sdio_remove,
        .name = BRCMFMAC_SDIO_PDATA_NAME,
        .id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
        .drv = {
+               .owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
                .pm = &brcmf_sdio_pm_ops,
-       },
 #endif /* CONFIG_PM_SLEEP */
+       },
 };
 
-static int brcmf_sdio_pd_probe(struct platform_device *pdev)
+static int __init brcmf_sdio_pd_probe(struct platform_device *pdev)
 {
        brcmf_dbg(SDIO, "Enter\n");
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644 (file)
index 0000000..df130ef
--- /dev/null
@@ -0,0 +1,1034 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+
+#include <defs.h>
+#include <soc.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <chipcommon.h>
+#include "dhd_dbg.h"
+#include "chip.h"
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB                0
+#define SOCI_AI                1
+
+/* PL-368 DMP definitions */
+#define DMP_DESC_TYPE_MSK      0x0000000F
+#define  DMP_DESC_EMPTY                0x00000000
+#define  DMP_DESC_VALID                0x00000001
+#define  DMP_DESC_COMPONENT    0x00000001
+#define  DMP_DESC_MASTER_PORT  0x00000003
+#define  DMP_DESC_ADDRESS      0x00000005
+#define  DMP_DESC_ADDRSIZE_GT32        0x00000008
+#define  DMP_DESC_EOT          0x0000000F
+
+#define DMP_COMP_DESIGNER      0xFFF00000
+#define DMP_COMP_DESIGNER_S    20
+#define DMP_COMP_PARTNUM       0x000FFF00
+#define DMP_COMP_PARTNUM_S     8
+#define DMP_COMP_CLASS         0x000000F0
+#define DMP_COMP_CLASS_S       4
+#define DMP_COMP_REVISION      0xFF000000
+#define DMP_COMP_REVISION_S    24
+#define DMP_COMP_NUM_SWRAP     0x00F80000
+#define DMP_COMP_NUM_SWRAP_S   19
+#define DMP_COMP_NUM_MWRAP     0x0007C000
+#define DMP_COMP_NUM_MWRAP_S   14
+#define DMP_COMP_NUM_SPORT     0x00003E00
+#define DMP_COMP_NUM_SPORT_S   9
+#define DMP_COMP_NUM_MPORT     0x000001F0
+#define DMP_COMP_NUM_MPORT_S   4
+
+#define DMP_MASTER_PORT_UID    0x0000FF00
+#define DMP_MASTER_PORT_UID_S  8
+#define DMP_MASTER_PORT_NUM    0x000000F0
+#define DMP_MASTER_PORT_NUM_S  4
+
+#define DMP_SLAVE_ADDR_BASE    0xFFFFF000
+#define DMP_SLAVE_ADDR_BASE_S  12
+#define DMP_SLAVE_PORT_NUM     0x00000F00
+#define DMP_SLAVE_PORT_NUM_S   8
+#define DMP_SLAVE_TYPE         0x000000C0
+#define DMP_SLAVE_TYPE_S       6
+#define  DMP_SLAVE_TYPE_SLAVE  0
+#define  DMP_SLAVE_TYPE_BRIDGE 1
+#define  DMP_SLAVE_TYPE_SWRAP  2
+#define  DMP_SLAVE_TYPE_MWRAP  3
+#define DMP_SLAVE_SIZE_TYPE    0x00000030
+#define DMP_SLAVE_SIZE_TYPE_S  4
+#define  DMP_SLAVE_SIZE_4K     0
+#define  DMP_SLAVE_SIZE_8K     1
+#define  DMP_SLAVE_SIZE_16K    2
+#define  DMP_SLAVE_SIZE_DESC   3
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK           0xff000000
+#define CIB_REV_SHIFT          24
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT      0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN      0x0004
+#define D11_BCMA_IOCTL_PHYRESET                0x0008
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE          0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE       0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE          0x18002000
+#define BCM4329_RAMSIZE                        0x48000
+
+/* bcm43143 */
+/* SDIO device core */
+#define BCM43143_CORE_BUS_BASE         0x18002000
+/* internal memory core */
+#define BCM43143_CORE_SOCRAM_BASE      0x18004000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM43143_CORE_ARM_BASE         0x18003000
+#define BCM43143_RAMSIZE               0x70000
+
+#define CORE_SB(base, field) \
+               (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+#define        SBCOREREV(sbidh) \
+       ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+         ((sbidh) & SSB_IDHIGH_RCLO))
+
+struct sbconfig {
+       u32 PAD[2];
+       u32 sbipsflag;  /* initiator port ocp slave flag */
+       u32 PAD[3];
+       u32 sbtpsflag;  /* target port ocp slave flag */
+       u32 PAD[11];
+       u32 sbtmerrloga;        /* (sonics >= 2.3) */
+       u32 PAD;
+       u32 sbtmerrlog; /* (sonics >= 2.3) */
+       u32 PAD[3];
+       u32 sbadmatch3; /* address match3 */
+       u32 PAD;
+       u32 sbadmatch2; /* address match2 */
+       u32 PAD;
+       u32 sbadmatch1; /* address match1 */
+       u32 PAD[7];
+       u32 sbimstate;  /* initiator agent state */
+       u32 sbintvec;   /* interrupt mask */
+       u32 sbtmstatelow;       /* target state */
+       u32 sbtmstatehigh;      /* target state */
+       u32 sbbwa0;             /* bandwidth allocation table0 */
+       u32 PAD;
+       u32 sbimconfiglow;      /* initiator configuration */
+       u32 sbimconfighigh;     /* initiator configuration */
+       u32 sbadmatch0; /* address match0 */
+       u32 PAD;
+       u32 sbtmconfiglow;      /* target configuration */
+       u32 sbtmconfighigh;     /* target configuration */
+       u32 sbbconfig;  /* broadcast configuration */
+       u32 PAD;
+       u32 sbbstate;   /* broadcast state */
+       u32 PAD[3];
+       u32 sbactcnfg;  /* activate configuration */
+       u32 PAD[3];
+       u32 sbflagst;   /* current sbflags */
+       u32 PAD[3];
+       u32 sbidlow;            /* identification */
+       u32 sbidhigh;   /* identification */
+};
+
+struct brcmf_core_priv {
+       struct brcmf_core pub;
+       u32 wrapbase;
+       struct list_head list;
+       struct brcmf_chip_priv *chip;
+};
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT      0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN      0x0004
+#define D11_BCMA_IOCTL_PHYRESET                0x0008
+
+struct brcmf_chip_priv {
+       struct brcmf_chip pub;
+       const struct brcmf_buscore_ops *ops;
+       void *ctx;
+       /* assured first core is chipcommon, second core is buscore */
+       struct list_head cores;
+       u16 num_cores;
+
+       bool (*iscoreup)(struct brcmf_core_priv *core);
+       void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
+                           u32 reset);
+       void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
+                         u32 postreset);
+};
+
+static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
+                                 struct brcmf_core *core)
+{
+       u32 regdata;
+
+       regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
+       core->rev = SBCOREREV(regdata);
+}
+
+static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
+{
+       struct brcmf_chip_priv *ci;
+       u32 regdata;
+       u32 address;
+
+       ci = core->chip;
+       address = CORE_SB(core->pub.base, sbtmstatelow);
+       regdata = ci->ops->read32(ci->ctx, address);
+       regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+                   SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+       return SSB_TMSLOW_CLOCK == regdata;
+}
+
+static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
+{
+       struct brcmf_chip_priv *ci;
+       u32 regdata;
+       bool ret;
+
+       ci = core->chip;
+       regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+       ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+       regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+       ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+       return ret;
+}
+
+static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
+                                     u32 prereset, u32 reset)
+{
+       struct brcmf_chip_priv *ci;
+       u32 val, base;
+
+       ci = core->chip;
+       base = core->pub.base;
+       val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+       if (val & SSB_TMSLOW_RESET)
+               return;
+
+       val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+       if ((val & SSB_TMSLOW_CLOCK) != 0) {
+               /*
+                * set target reject and spin until busy is clear
+                * (preserve core-specific bits)
+                */
+               val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+               ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+                                        val | SSB_TMSLOW_REJECT);
+
+               val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+               udelay(1);
+               SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
+                         & SSB_TMSHIGH_BUSY), 100000);
+
+               val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+               if (val & SSB_TMSHIGH_BUSY)
+                       brcmf_err("core state still busy\n");
+
+               val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+               if (val & SSB_IDLOW_INITIATOR) {
+                       val = ci->ops->read32(ci->ctx,
+                                             CORE_SB(base, sbimstate));
+                       val |= SSB_IMSTATE_REJECT;
+                       ci->ops->write32(ci->ctx,
+                                        CORE_SB(base, sbimstate), val);
+                       val = ci->ops->read32(ci->ctx,
+                                             CORE_SB(base, sbimstate));
+                       udelay(1);
+                       SPINWAIT((ci->ops->read32(ci->ctx,
+                                                 CORE_SB(base, sbimstate)) &
+                                 SSB_IMSTATE_BUSY), 100000);
+               }
+
+               /* set reset and reject while enabling the clocks */
+               val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+                     SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+               ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
+               val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+               udelay(10);
+
+               /* clear the initiator reject bit */
+               val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+               if (val & SSB_IDLOW_INITIATOR) {
+                       val = ci->ops->read32(ci->ctx,
+                                             CORE_SB(base, sbimstate));
+                       val &= ~SSB_IMSTATE_REJECT;
+                       ci->ops->write32(ci->ctx,
+                                        CORE_SB(base, sbimstate), val);
+               }
+       }
+
+       /* leave reset and reject asserted */
+       ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+                        (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+       udelay(1);
+}
+
+static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
+                                     u32 prereset, u32 reset)
+{
+       struct brcmf_chip_priv *ci;
+       u32 regdata;
+
+       ci = core->chip;
+
+       /* if core is already in reset, just return */
+       regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+       if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+               return;
+
+       /* configure reset */
+       ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+                        prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+       ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+       /* put in reset */
+       ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
+                        BCMA_RESET_CTL_RESET);
+       usleep_range(10, 20);
+
+       /* wait till reset is 1 */
+       SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
+                BCMA_RESET_CTL_RESET, 300);
+
+       /* in-reset configure */
+       ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+                        reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+       ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
+                                   u32 reset, u32 postreset)
+{
+       struct brcmf_chip_priv *ci;
+       u32 regdata;
+       u32 base;
+
+       ci = core->chip;
+       base = core->pub.base;
+       /*
+        * Must do the disable sequence first to work for
+        * arbitrary current core state.
+        */
+       brcmf_chip_sb_coredisable(core, 0, 0);
+
+       /*
+        * Now do the initialization sequence.
+        * set reset while enabling the clock and
+        * forcing them on throughout the core
+        */
+       ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+                        SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+                        SSB_TMSLOW_RESET);
+       regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+       udelay(1);
+
+       /* clear any serror */
+       regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+       if (regdata & SSB_TMSHIGH_SERR)
+               ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
+
+       regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
+       if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
+               regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
+               ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
+       }
+
+       /* clear reset and allow it to propagate throughout the core */
+       ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+                        SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+       regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+       udelay(1);
+
+       /* leave clock enabled */
+       ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+                        SSB_TMSLOW_CLOCK);
+       regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+       udelay(1);
+}
+
+static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
+                                   u32 reset, u32 postreset)
+{
+       struct brcmf_chip_priv *ci;
+       int count;
+
+       ci = core->chip;
+
+       /* must disable first to work for arbitrary current core state */
+       brcmf_chip_ai_coredisable(core, prereset, reset);
+
+       count = 0;
+       while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
+              BCMA_RESET_CTL_RESET) {
+               ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
+               count++;
+               if (count > 50)
+                       break;
+               usleep_range(40, 60);
+       }
+
+       ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+                        postreset | BCMA_IOCTL_CLK);
+       ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static char *brcmf_chip_name(uint chipid, char *buf, uint len)
+{
+       const char *fmt;
+
+       fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+       snprintf(buf, len, fmt, chipid);
+       return buf;
+}
+
+static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
+                                             u16 coreid, u32 base,
+                                             u32 wrapbase)
+{
+       struct brcmf_core_priv *core;
+
+       core = kzalloc(sizeof(*core), GFP_KERNEL);
+       if (!core)
+               return ERR_PTR(-ENOMEM);
+
+       core->pub.id = coreid;
+       core->pub.base = base;
+       core->chip = ci;
+       core->wrapbase = wrapbase;
+
+       list_add_tail(&core->list, &ci->cores);
+       return &core->pub;
+}
+
+#ifdef DEBUG
+/* safety check for chipinfo */
+static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+       struct brcmf_core_priv *core;
+       bool need_socram = false;
+       bool has_socram = false;
+       int idx = 1;
+
+       list_for_each_entry(core, &ci->cores, list) {
+               brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
+                         idx++, core->pub.id, core->pub.rev, core->pub.base,
+                         core->wrapbase);
+
+               switch (core->pub.id) {
+               case BCMA_CORE_ARM_CM3:
+                       need_socram = true;
+                       break;
+               case BCMA_CORE_INTERNAL_MEM:
+                       has_socram = true;
+                       break;
+               case BCMA_CORE_ARM_CR4:
+                       if (ci->pub.rambase == 0) {
+                               brcmf_err("RAM base not provided with ARM CR4 core\n");
+                               return -ENOMEM;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /* check RAM core presence for ARM CM3 core */
+       if (need_socram && !has_socram) {
+               brcmf_err("RAM core not provided with ARM CM3 core\n");
+               return -ENODEV;
+       }
+       return 0;
+}
+#else  /* DEBUG */
+static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+       return 0;
+}
+#endif
+
+static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+       switch (ci->pub.chip) {
+       case BCM4329_CHIP_ID:
+               ci->pub.ramsize = BCM4329_RAMSIZE;
+               break;
+       case BCM43143_CHIP_ID:
+               ci->pub.ramsize = BCM43143_RAMSIZE;
+               break;
+       case BCM43241_CHIP_ID:
+               ci->pub.ramsize = 0x90000;
+               break;
+       case BCM4330_CHIP_ID:
+               ci->pub.ramsize = 0x48000;
+               break;
+       case BCM4334_CHIP_ID:
+               ci->pub.ramsize = 0x80000;
+               break;
+       case BCM4335_CHIP_ID:
+               ci->pub.ramsize = 0xc0000;
+               ci->pub.rambase = 0x180000;
+               break;
+       case BCM43362_CHIP_ID:
+               ci->pub.ramsize = 0x3c000;
+               break;
+       case BCM4339_CHIP_ID:
+       case BCM4354_CHIP_ID:
+               ci->pub.ramsize = 0xc0000;
+               ci->pub.rambase = 0x180000;
+               break;
+       default:
+               brcmf_err("unknown chip: %s\n", ci->pub.name);
+               break;
+       }
+}
+
+static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
+                                  u8 *type)
+{
+       u32 val;
+
+       /* read next descriptor */
+       val = ci->ops->read32(ci->ctx, *eromaddr);
+       *eromaddr += 4;
+
+       if (!type)
+               return val;
+
+       /* determine descriptor type */
+       *type = (val & DMP_DESC_TYPE_MSK);
+       if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
+               *type = DMP_DESC_ADDRESS;
+
+       return val;
+}
+
+static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
+                                     u32 *regbase, u32 *wrapbase)
+{
+       u8 desc;
+       u32 val;
+       u8 mpnum = 0;
+       u8 stype, sztype, wraptype;
+
+       *regbase = 0;
+       *wrapbase = 0;
+
+       val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+       if (desc == DMP_DESC_MASTER_PORT) {
+               mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
+               wraptype = DMP_SLAVE_TYPE_MWRAP;
+       } else if (desc == DMP_DESC_ADDRESS) {
+               /* revert erom address */
+               *eromaddr -= 4;
+               wraptype = DMP_SLAVE_TYPE_SWRAP;
+       } else {
+               *eromaddr -= 4;
+               return -EILSEQ;
+       }
+
+       do {
+               /* locate address descriptor */
+               do {
+                       val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+                       /* unexpected table end */
+                       if (desc == DMP_DESC_EOT) {
+                               *eromaddr -= 4;
+                               return -EFAULT;
+                       }
+               } while (desc != DMP_DESC_ADDRESS);
+
+               /* skip upper 32-bit address descriptor */
+               if (val & DMP_DESC_ADDRSIZE_GT32)
+                       brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+
+               sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
+
+               /* next size descriptor can be skipped */
+               if (sztype == DMP_SLAVE_SIZE_DESC) {
+                       val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+                       /* skip upper size descriptor if present */
+                       if (val & DMP_DESC_ADDRSIZE_GT32)
+                               brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+               }
+
+               /* only look for 4K register regions */
+               if (sztype != DMP_SLAVE_SIZE_4K)
+                       continue;
+
+               stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
+
+               /* only regular slave and wrapper */
+               if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
+                       *regbase = val & DMP_SLAVE_ADDR_BASE;
+               if (*wrapbase == 0 && stype == wraptype)
+                       *wrapbase = val & DMP_SLAVE_ADDR_BASE;
+       } while (*regbase == 0 || *wrapbase == 0);
+
+       return 0;
+}
+
+static
+int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
+{
+       struct brcmf_core *core;
+       u32 eromaddr;
+       u8 desc_type = 0;
+       u32 val;
+       u16 id;
+       u8 nmp, nsp, nmw, nsw, rev;
+       u32 base, wrap;
+       int err;
+
+       eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
+
+       while (desc_type != DMP_DESC_EOT) {
+               val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+               if (!(val & DMP_DESC_VALID))
+                       continue;
+
+               if (desc_type == DMP_DESC_EMPTY)
+                       continue;
+
+               /* need a component descriptor */
+               if (desc_type != DMP_DESC_COMPONENT)
+                       continue;
+
+               id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
+
+               /* next descriptor must be component as well */
+               val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+               if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
+                       return -EFAULT;
+
+               /* only look at cores with master port(s) */
+               nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+               nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
+               nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
+               nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
+               rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
+
+               /* need core with ports */
+               if (nmw + nsw == 0)
+                       continue;
+
+               /* try to obtain register address info */
+               err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
+               if (err)
+                       continue;
+
+               /* finally a core to be added */
+               core = brcmf_chip_add_core(ci, id, base, wrap);
+               if (IS_ERR(core))
+                       return PTR_ERR(core);
+
+               core->rev = rev;
+       }
+
+       return 0;
+}
+
+static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
+{
+       struct brcmf_core *core;
+       u32 regdata;
+       u32 socitype;
+
+       /* Get CC core rev
+        * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
+        * For different chiptypes or old sdio hosts w/o chipcommon,
+        * other ways of recognition should be added here.
+        */
+       regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
+       ci->pub.chip = regdata & CID_ID_MASK;
+       ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+       socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+       brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
+       brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
+                 socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
+                 ci->pub.chiprev);
+
+       if (socitype == SOCI_SB) {
+               if (ci->pub.chip != BCM4329_CHIP_ID) {
+                       brcmf_err("SB chip is not supported\n");
+                       return -ENODEV;
+               }
+               ci->iscoreup = brcmf_chip_sb_iscoreup;
+               ci->coredisable = brcmf_chip_sb_coredisable;
+               ci->resetcore = brcmf_chip_sb_resetcore;
+
+               core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
+                                          SI_ENUM_BASE, 0);
+               brcmf_chip_sb_corerev(ci, core);
+               core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
+                                          BCM4329_CORE_BUS_BASE, 0);
+               brcmf_chip_sb_corerev(ci, core);
+               core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
+                                          BCM4329_CORE_SOCRAM_BASE, 0);
+               brcmf_chip_sb_corerev(ci, core);
+               core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
+                                          BCM4329_CORE_ARM_BASE, 0);
+               brcmf_chip_sb_corerev(ci, core);
+
+               core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
+               brcmf_chip_sb_corerev(ci, core);
+       } else if (socitype == SOCI_AI) {
+               ci->iscoreup = brcmf_chip_ai_iscoreup;
+               ci->coredisable = brcmf_chip_ai_coredisable;
+               ci->resetcore = brcmf_chip_ai_resetcore;
+
+               brcmf_chip_dmp_erom_scan(ci);
+       } else {
+               brcmf_err("chip backplane type %u is not supported\n",
+                         socitype);
+               return -ENODEV;
+       }
+
+       brcmf_chip_get_raminfo(ci);
+
+       return brcmf_chip_cores_check(ci);
+}
+
+static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
+{
+       struct brcmf_core *core;
+       struct brcmf_core_priv *cr4;
+       u32 val;
+
+
+       core = brcmf_chip_get_core(&chip->pub, id);
+       if (!core)
+               return;
+
+       switch (id) {
+       case BCMA_CORE_ARM_CM3:
+               brcmf_chip_coredisable(core, 0, 0);
+               break;
+       case BCMA_CORE_ARM_CR4:
+               cr4 = container_of(core, struct brcmf_core_priv, pub);
+
+               /* clear all IOCTL bits except HALT bit */
+               val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
+               val &= ARMCR4_BCMA_IOCTL_CPUHALT;
+               brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
+                                    ARMCR4_BCMA_IOCTL_CPUHALT);
+               break;
+       default:
+               brcmf_err("unknown id: %u\n", id);
+               break;
+       }
+}
+
+static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
+{
+       struct brcmf_chip *pub;
+       struct brcmf_core_priv *cc;
+       u32 base;
+       u32 val;
+       int ret = 0;
+
+       pub = &chip->pub;
+       cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+       base = cc->pub.base;
+
+       /* get chipcommon capabilities */
+       pub->cc_caps = chip->ops->read32(chip->ctx,
+                                        CORE_CC_REG(base, capabilities));
+
+       /* get pmu caps & rev */
+       if (pub->cc_caps & CC_CAP_PMU) {
+               val = chip->ops->read32(chip->ctx,
+                                       CORE_CC_REG(base, pmucapabilities));
+               pub->pmurev = val & PCAP_REV_MASK;
+               pub->pmucaps = val;
+       }
+
+       brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
+                 cc->pub.rev, pub->pmurev, pub->pmucaps);
+
+       /* execute bus core specific setup */
+       if (chip->ops->setup)
+               ret = chip->ops->setup(chip->ctx, pub);
+
+       /*
+        * Make sure any on-chip ARM is off, in case strapping is wrong or
+        * downloaded code was already running.
+        */
+       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+       return ret;
+}
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+                                    const struct brcmf_buscore_ops *ops)
+{
+       struct brcmf_chip_priv *chip;
+       int err = 0;
+
+       if (WARN_ON(!ops->read32))
+               err = -EINVAL;
+       if (WARN_ON(!ops->write32))
+               err = -EINVAL;
+       if (WARN_ON(!ops->prepare))
+               err = -EINVAL;
+       if (WARN_ON(!ops->exit_dl))
+               err = -EINVAL;
+       if (err < 0)
+               return ERR_PTR(-EINVAL);
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&chip->cores);
+       chip->num_cores = 0;
+       chip->ops = ops;
+       chip->ctx = ctx;
+
+       err = ops->prepare(ctx);
+       if (err < 0)
+               goto fail;
+
+       err = brcmf_chip_recognition(chip);
+       if (err < 0)
+               goto fail;
+
+       err = brcmf_chip_setup(chip);
+       if (err < 0)
+               goto fail;
+
+       return &chip->pub;
+
+fail:
+       brcmf_chip_detach(&chip->pub);
+       return ERR_PTR(err);
+}
+
+void brcmf_chip_detach(struct brcmf_chip *pub)
+{
+       struct brcmf_chip_priv *chip;
+       struct brcmf_core_priv *core;
+       struct brcmf_core_priv *tmp;
+
+       chip = container_of(pub, struct brcmf_chip_priv, pub);
+       list_for_each_entry_safe(core, tmp, &chip->cores, list) {
+               list_del(&core->list);
+               kfree(core);
+       }
+       kfree(chip);
+}
+
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
+{
+       struct brcmf_chip_priv *chip;
+       struct brcmf_core_priv *core;
+
+       chip = container_of(pub, struct brcmf_chip_priv, pub);
+       list_for_each_entry(core, &chip->cores, list)
+               if (core->pub.id == coreid)
+                       return &core->pub;
+
+       return NULL;
+}
+
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
+{
+       struct brcmf_chip_priv *chip;
+       struct brcmf_core_priv *cc;
+
+       chip = container_of(pub, struct brcmf_chip_priv, pub);
+       cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+       if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
+               return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
+       return &cc->pub;
+}
+
+bool brcmf_chip_iscoreup(struct brcmf_core *pub)
+{
+       struct brcmf_core_priv *core;
+
+       core = container_of(pub, struct brcmf_core_priv, pub);
+       return core->chip->iscoreup(core);
+}
+
+void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
+{
+       struct brcmf_core_priv *core;
+
+       core = container_of(pub, struct brcmf_core_priv, pub);
+       core->chip->coredisable(core, prereset, reset);
+}
+
+void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
+                         u32 postreset)
+{
+       struct brcmf_core_priv *core;
+
+       core = container_of(pub, struct brcmf_core_priv, pub);
+       core->chip->resetcore(core, prereset, reset, postreset);
+}
+
+static void
+brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+{
+       struct brcmf_core *core;
+
+       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+       core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+       brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+                                  D11_BCMA_IOCTL_PHYCLOCKEN,
+                            D11_BCMA_IOCTL_PHYCLOCKEN,
+                            D11_BCMA_IOCTL_PHYCLOCKEN);
+       core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+       brcmf_chip_resetcore(core, 0, 0, 0);
+}
+
+static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+{
+       struct brcmf_core *core;
+
+       core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+       if (!brcmf_chip_iscoreup(core)) {
+               brcmf_err("SOCRAM core is down after reset?\n");
+               return false;
+       }
+
+       chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+
+       core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
+       brcmf_chip_resetcore(core, 0, 0, 0);
+
+       return true;
+}
+
+static inline void
+brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+{
+       struct brcmf_core *core;
+
+       brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+
+       core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+       brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+                                  D11_BCMA_IOCTL_PHYCLOCKEN,
+                            D11_BCMA_IOCTL_PHYCLOCKEN,
+                            D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+       struct brcmf_core *core;
+
+       chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+
+       /* restore ARM */
+       core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
+       brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+       return true;
+}
+
+void brcmf_chip_enter_download(struct brcmf_chip *pub)
+{
+       struct brcmf_chip_priv *chip;
+       struct brcmf_core *arm;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       chip = container_of(pub, struct brcmf_chip_priv, pub);
+       arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+       if (arm) {
+               brcmf_chip_cr4_enterdl(chip);
+               return;
+       }
+
+       brcmf_chip_cm3_enterdl(chip);
+}
+
+bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+{
+       struct brcmf_chip_priv *chip;
+       struct brcmf_core *arm;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       chip = container_of(pub, struct brcmf_chip_priv, pub);
+       arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+       if (arm)
+               return brcmf_chip_cr4_exitdl(chip, rstvec);
+
+       return brcmf_chip_cm3_exitdl(chip);
+}
+
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+{
+       u32 base, addr, reg, pmu_cc3_mask = ~0;
+       struct brcmf_chip_priv *chip;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       /* old chips with PMU version less than 17 don't support save restore */
+       if (pub->pmurev < 17)
+               return false;
+
+       base = brcmf_chip_get_chipcommon(pub)->base;
+       chip = container_of(pub, struct brcmf_chip_priv, pub);
+
+       switch (pub->chip) {
+       case BCM4354_CHIP_ID:
+               /* explicitly check SR engine enable bit */
+               pmu_cc3_mask = BIT(2);
+               /* fall-through */
+       case BCM43241_CHIP_ID:
+       case BCM4335_CHIP_ID:
+       case BCM4339_CHIP_ID:
+               /* read PMU chipcontrol register 3 */
+               addr = CORE_CC_REG(base, chipcontrol_addr);
+               chip->ops->write32(chip->ctx, addr, 3);
+               addr = CORE_CC_REG(base, chipcontrol_data);
+               reg = chip->ops->read32(chip->ctx, addr);
+               return (reg & pmu_cc3_mask) != 0;
+       default:
+               addr = CORE_CC_REG(base, pmucapabilities_ext);
+               reg = chip->ops->read32(chip->ctx, addr);
+               if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+                       return false;
+
+               addr = CORE_CC_REG(base, retention_ctl);
+               reg = chip->ops->read32(chip->ctx, addr);
+               return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+                              PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+       }
+}
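
One easy-to-misread detail in chip.c above: brcmf_chip_name() prints IDs in the range 0x4000-0xa000 in hex and everything else in decimal, so a BCM4329 (ID 0x4329) is rendered as "4329" while a BCM43143 (ID 43143, i.e. 0xa887) is rendered as "43143". A userspace restatement with those two example IDs:

#include <stdio.h>

/* Restatement of brcmf_chip_name(): IDs in [0x4000, 0xa000] are printed as
 * hex (they read as model numbers that way), the rest as plain decimal.
 */
static char *chip_name(unsigned int chipid, char *buf, unsigned int len)
{
	const char *fmt;

	fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
	snprintf(buf, len, fmt, chipid);
	return buf;
}

int main(void)
{
	char buf[8];

	printf("%s\n", chip_name(0x4329, buf, sizeof(buf)));	/* "4329" */
	printf("%s\n", chip_name(43143, buf, sizeof(buf)));	/* "43143" */
	return 0;
}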
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644 (file)
index 0000000..c32908d
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMF_CHIP_H
+#define BRCMF_CHIP_H
+
+#include <linux/types.h>
+
+#define CORE_CC_REG(base, field) \
+               (base + offsetof(struct chipcregs, field))
+
+/**
+ * struct brcmf_chip - chip level information.
+ *
+ * @chip: chip identifier.
+ * @chiprev: chip revision.
+ * @cc_caps: chipcommon core capabilities.
+ * @pmucaps: PMU capabilities.
+ * @pmurev: PMU revision.
+ * @rambase: RAM base address (only applicable for ARM CR4 chips).
+ * @ramsize: amount of RAM on chip.
+ * @name: string representation of the chip identifier.
+ */
+struct brcmf_chip {
+       u32 chip;
+       u32 chiprev;
+       u32 cc_caps;
+       u32 pmucaps;
+       u32 pmurev;
+       u32 rambase;
+       u32 ramsize;
+       char name[8];
+};
+
+/**
+ * struct brcmf_core - core related information.
+ *
+ * @id: core identifier.
+ * @rev: core revision.
+ * @base: base address of core register space.
+ */
+struct brcmf_core {
+       u16 id;
+       u16 rev;
+       u32 base;
+};
+
+/**
+ * struct brcmf_buscore_ops - buscore specific callbacks.
+ *
+ * @read32: read 32-bit value over bus.
+ * @write32: write 32-bit value over bus.
+ * @prepare: prepare bus for core configuration.
+ * @setup: bus-specific core setup.
+ * @exit_dl: exit download state.
+ *     The callback should use the provided @rstvec when non-zero.
+ */
+struct brcmf_buscore_ops {
+       u32 (*read32)(void *ctx, u32 addr);
+       void (*write32)(void *ctx, u32 addr, u32 value);
+       int (*prepare)(void *ctx);
+       int (*setup)(void *ctx, struct brcmf_chip *chip);
+       void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+};
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+                                    const struct brcmf_buscore_ops *ops);
+void brcmf_chip_detach(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+bool brcmf_chip_iscoreup(struct brcmf_core *core);
+void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
+void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+                         u32 postreset);
+void brcmf_chip_enter_download(struct brcmf_chip *ci);
+bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
+
+#endif /* BRCMF_CHIP_H */
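
As a usage illustration of the interface declared above: a bus backend supplies a brcmf_buscore_ops table and hands its private context to brcmf_chip_attach(), which runs prepare, recognition and setup before returning the chip handle. A kernel-style sketch; the my_* names and the empty accessor bodies are placeholders, not part of this patch:

#include <linux/err.h>
#include "chip.h"

static u32 my_read32(void *ctx, u32 addr)
{
	/* issue a 32-bit backplane read over the bus */
	return 0;
}

static void my_write32(void *ctx, u32 addr, u32 value)
{
	/* issue a 32-bit backplane write over the bus */
}

static int my_prepare(void *ctx)
{
	/* make core register space reachable before recognition starts */
	return 0;
}

static int my_setup(void *ctx, struct brcmf_chip *chip)
{
	/* optional bus-specific tweaks once the chip has been recognised */
	return 0;
}

static void my_exit_dl(void *ctx, struct brcmf_chip *chip, u32 rstvec)
{
	/* commit @rstvec (when non-zero) so the ARM starts the new image */
}

static const struct brcmf_buscore_ops my_buscore_ops = {
	.read32 = my_read32,
	.write32 = my_write32,
	.prepare = my_prepare,
	.setup = my_setup,
	.exit_dl = my_exit_dl,
};

static int my_attach(void *bus_ctx, struct brcmf_chip **chipout)
{
	struct brcmf_chip *chip = brcmf_chip_attach(bus_ctx, &my_buscore_ops);

	if (IS_ERR(chip))
		return PTR_ERR(chip);
	*chipout = chip;
	return 0;
}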
index d4d966beb840b832ecc079a9317d6a3b8baa22d8..7d28cd3850925a7af0a4b9f34c2888afa83e0d03 100644 (file)
@@ -1040,12 +1040,12 @@ void brcmf_detach(struct device *dev)
 
        brcmf_cfg80211_detach(drvr->config);
 
+       brcmf_fws_deinit(drvr);
+
        brcmf_bus_detach(drvr);
 
        brcmf_proto_detach(drvr);
 
-       brcmf_fws_deinit(drvr);
-
        brcmf_debugfs_detach(drvr);
        bus_if->drvr = NULL;
        kfree(drvr);
index ddaa9efd053df3a61e404ca96c60cd82dad3f0f2..13c89a0c4ba7ec63f8d7a33688caafa3d407b9f7 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/sched.h>
 #include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
 #include <linux/semaphore.h>
@@ -40,7 +41,7 @@
 #include <brcm_hw_ids.h>
 #include <soc.h>
 #include "sdio_host.h"
-#include "sdio_chip.h"
+#include "chip.h"
 #include "nvram.h"
 
 #define DCMD_RESP_TIMEOUT  2000        /* In milliseconds */
@@ -112,8 +113,6 @@ struct rte_console {
 #define BRCMF_TXBOUND  20      /* Default for max tx frames in
                                 one scheduling */
 
-#define BRCMF_DEFAULT_TXGLOM_SIZE      32  /* max tx frames in glom chain */
-
 #define BRCMF_TXMINMAX 1       /* Max tx frames if rx still pending */
 
 #define MEMBLOCK       2048    /* Block size used for downloading
@@ -156,6 +155,34 @@ struct rte_console {
 /* manfid tuple length, include tuple, link bytes */
 #define SBSDIO_CIS_MANFID_TUPLE_LEN    6
 
+#define CORE_BUS_REG(base, field) \
+               (base + offsetof(struct sdpcmd_regs, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP               0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT                        0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP               0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ           0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ            0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF     0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL               0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL                        0x80
+#define SBSDIO_CSR_MASK                        0x1F
+#define SBSDIO_AVBITS          (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval)   ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval)    (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+       (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
 /* intstatus */
 #define I_SMB_SW0      (1 << 0)        /* To SB Mail S/W interrupt 0 */
 #define I_SMB_SW1      (1 << 1)        /* To SB Mail S/W interrupt 1 */
@@ -276,7 +303,6 @@ struct rte_console {
 /* Flags for SDH calls */
 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
 
-#define BRCMF_IDLE_IMMEDIATE   (-1)    /* Enter idle immediately */
 #define BRCMF_IDLE_ACTIVE      0       /* Do not request any SD clock change
                                         * when idle
                                         */
@@ -433,10 +459,11 @@ struct brcmf_sdio {
        bool alp_only;          /* Don't use HT clock (ALP only) */
 
        u8 *ctrl_frame_buf;
-       u32 ctrl_frame_len;
+       u16 ctrl_frame_len;
        bool ctrl_frame_stat;
 
-       spinlock_t txqlock;
+       spinlock_t txq_lock;            /* protect bus->txq */
+       struct semaphore tx_seq_lock;   /* protect bus->tx_seq */
        wait_queue_head_t ctrl_wait;
        wait_queue_head_t dcmd_resp_wait;
 
@@ -483,16 +510,58 @@ static const uint max_roundup = 512;
 
 #define ALIGNMENT  4
 
-static int brcmf_sdio_txglomsz = BRCMF_DEFAULT_TXGLOM_SIZE;
-module_param_named(txglomsz, brcmf_sdio_txglomsz, int, 0);
-MODULE_PARM_DESC(txglomsz, "maximum tx packet chain size [SDIO]");
-
 enum brcmf_sdio_frmtype {
        BRCMF_SDIO_FT_NORMAL,
        BRCMF_SDIO_FT_SUPER,
        BRCMF_SDIO_FT_SUB,
 };
 
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+       u8 strength;    /* Pad Drive Strength in mA */
+       u8 sel;         /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+       {32, 0x6},
+       {26, 0x7},
+       {22, 0x4},
+       {16, 0x5},
+       {12, 0x2},
+       {8, 0x3},
+       {4, 0x0},
+       {0, 0x1}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+       {6, 0x7},
+       {5, 0x6},
+       {4, 0x5},
+       {3, 0x4},
+       {2, 0x2},
+       {1, 0x1},
+       {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+       {3, 0x3},
+       {2, 0x2},
+       {1, 0x1},
+       {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+       {16, 0x7},
+       {12, 0x5},
+       {8,  0x3},
+       {4,  0x1}
+};
+
 #define BCM43143_FIRMWARE_NAME         "brcm/brcmfmac43143-sdio.bin"
 #define BCM43143_NVRAM_NAME            "brcm/brcmfmac43143-sdio.txt"
 #define BCM43241B0_FIRMWARE_NAME       "brcm/brcmfmac43241b0-sdio.bin"
@@ -511,6 +580,8 @@ enum brcmf_sdio_frmtype {
 #define BCM43362_NVRAM_NAME            "brcm/brcmfmac43362-sdio.txt"
 #define BCM4339_FIRMWARE_NAME          "brcm/brcmfmac4339-sdio.bin"
 #define BCM4339_NVRAM_NAME             "brcm/brcmfmac4339-sdio.txt"
+#define BCM4354_FIRMWARE_NAME          "brcm/brcmfmac4354-sdio.bin"
+#define BCM4354_NVRAM_NAME             "brcm/brcmfmac4354-sdio.txt"
 
 MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
@@ -530,6 +601,8 @@ MODULE_FIRMWARE(BCM43362_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43362_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4339_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4354_NVRAM_NAME);
 
 struct brcmf_firmware_names {
        u32 chipid;
@@ -555,7 +628,8 @@ static const struct brcmf_firmware_names brcmf_fwname_data[] = {
        { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
        { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) },
        { BCM43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) },
-       { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }
+       { BCM4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) },
+       { BCM4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) }
 };
 
 
@@ -618,27 +692,24 @@ static bool data_ok(struct brcmf_sdio *bus)
  * Reads a register in the SDIO hardware block. This block occupies a series of
  * addresses on the 32-bit backplane bus.
  */
-static int
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
 {
-       u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+       struct brcmf_core *core;
        int ret;
 
-       *regvar = brcmf_sdiod_regrl(bus->sdiodev,
-                                   bus->ci->c_inf[idx].base + offset, &ret);
+       core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+       *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
 
        return ret;
 }
 
-static int
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
 {
-       u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+       struct brcmf_core *core;
        int ret;
 
-       brcmf_sdiod_regwl(bus->sdiodev,
-                         bus->ci->c_inf[idx].base + reg_offset,
-                         regval, &ret);
+       core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+       brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
 
        return ret;
 }
@@ -650,16 +721,12 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
        int err = 0;
        int try_cnt = 0;
 
-       brcmf_dbg(TRACE, "Enter\n");
+       brcmf_dbg(TRACE, "Enter: on=%d\n", on);
 
        wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
        /* 1st KSO write goes to AOS wake up core if device is asleep  */
        brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
                          wr_val, &err);
-       if (err) {
-               brcmf_err("SDIO_AOS KSO write error: %d\n", err);
-               return err;
-       }
 
        if (on) {
                /* device WAKEUP through KSO:
@@ -689,18 +756,22 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
                                           &err);
                if (((rd_val & bmask) == cmp_val) && !err)
                        break;
-               brcmf_dbg(SDIO, "KSO wr/rd retry:%d (max: %d) ERR:%x\n",
-                         try_cnt, MAX_KSO_ATTEMPTS, err);
+
                udelay(KSO_WAIT_US);
                brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
                                  wr_val, &err);
        } while (try_cnt++ < MAX_KSO_ATTEMPTS);
 
+       if (try_cnt > 2)
+               brcmf_dbg(SDIO, "try_cnt=%d rd_val=0x%x err=%d\n", try_cnt,
+                         rd_val, err);
+
+       if (try_cnt > MAX_KSO_ATTEMPTS)
+               brcmf_err("max tries: rd_val=0x%x err=%d\n", rd_val, err);
+
        return err;
 }
 
-#define PKT_AVAILABLE()                (intstatus & I_HMB_FRAME_IND)
-
 #define HOSTINTMASK            (I_HMB_SW_MASK | I_CHIPACTIVE)
 
 /* Turn backplane clock on or off */
@@ -799,7 +870,6 @@ static int brcmf_sdio_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
                }
 #endif                         /* defined (DEBUG) */
 
-               bus->activity = true;
        } else {
                clkreq = 0;
 
@@ -899,8 +969,9 @@ static int
 brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
 {
        int err = 0;
-       brcmf_dbg(TRACE, "Enter\n");
-       brcmf_dbg(SDIO, "request %s currently %s\n",
+       u8 clkcsr;
+
+       brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
                  (sleep ? "SLEEP" : "WAKE"),
                  (bus->sleeping ? "SLEEP" : "WAKE"));
 
@@ -917,8 +988,20 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
                            atomic_read(&bus->ipend) > 0 ||
                            (!atomic_read(&bus->fcstate) &&
                            brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
-                           data_ok(bus)))
-                                return -EBUSY;
+                           data_ok(bus))) {
+                                err = -EBUSY;
+                                goto done;
+                       }
+
+                       clkcsr = brcmf_sdiod_regrb(bus->sdiodev,
+                                                  SBSDIO_FUNC1_CHIPCLKCSR,
+                                                  &err);
+                       if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
+                               brcmf_dbg(SDIO, "no clock, set ALP\n");
+                               brcmf_sdiod_regwb(bus->sdiodev,
+                                                 SBSDIO_FUNC1_CHIPCLKCSR,
+                                                 SBSDIO_ALP_AVAIL_REQ, &err);
+                       }
                        err = brcmf_sdio_kso_control(bus, false);
                        /* disable watchdog */
                        if (!err)
@@ -935,7 +1018,7 @@ brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
                } else {
                        brcmf_err("error while changing bus sleep state %d\n",
                                  err);
-                       return err;
+                       goto done;
                }
        }
 
@@ -947,11 +1030,92 @@ end:
        } else {
                brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok);
        }
-
+done:
+       brcmf_dbg(SDIO, "Exit: err=%d\n", err);
        return err;
 
 }
 
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+       return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
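
For context, a minimal sketch of what this check filters out, assuming the usual download-time token layout (NVRAM word count in the low 16 bits, its one's complement in the high 16 bits); the concrete addresses below are illustrative, not taken from the patch:

/*
 * Illustration only:
 *   addr = 0xFEDC0123: ((~addr >> 16) & 0xffff) == 0x0123 == (addr & 0xffff)
 *                      -> still the NVRAM token, rejected
 *   addr = 0x0023FF2C: ((~addr >> 16) & 0xffff) == 0xFFDC != 0xFF2C
 *                      -> plausible sdpcm_shared address, accepted
 */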
+
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+                                struct sdpcm_shared *sh)
+{
+       u32 addr;
+       int rv;
+       u32 shaddr = 0;
+       struct sdpcm_shared_le sh_le;
+       __le32 addr_le;
+
+       shaddr = bus->ci->rambase + bus->ramsize - 4;
+
+       /*
+        * Read last word in socram to determine
+        * address of sdpcm_shared structure
+        */
+       sdio_claim_host(bus->sdiodev->func[1]);
+       brcmf_sdio_bus_sleep(bus, false, false);
+       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+       sdio_release_host(bus->sdiodev->func[1]);
+       if (rv < 0)
+               return rv;
+
+       addr = le32_to_cpu(addr_le);
+
+       brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+
+       /*
+        * Check if addr is valid.
+        * NVRAM length at the end of memory should have been overwritten.
+        */
+       if (!brcmf_sdio_valid_shared_address(addr)) {
+               brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr);
+               return -EINVAL;
+       }
+
+       /* Read hndrte_shared structure */
+       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+                              sizeof(struct sdpcm_shared_le));
+       if (rv < 0)
+               return rv;
+
+       /* Endianness */
+       sh->flags = le32_to_cpu(sh_le.flags);
+       sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+       sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+       sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+       sh->assert_line = le32_to_cpu(sh_le.assert_line);
+       sh->console_addr = le32_to_cpu(sh_le.console_addr);
+       sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+       if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+               brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+                         SDPCM_SHARED_VERSION,
+                         sh->flags & SDPCM_SHARED_VERSION_MASK);
+               return -EPROTO;
+       }
+
+       return 0;
+}
+
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+       struct sdpcm_shared sh;
+
+       if (brcmf_sdio_readshared(bus, &sh) == 0)
+               bus->console_addr = sh.console_addr;
+}
+#else
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
 static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
 {
        u32 intstatus = 0;
@@ -995,6 +1159,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
                else
                        brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
                                  bus->sdpcm_ver);
+
+               /*
+                * Retrieve console state address now that firmware should have
+                * updated it.
+                */
+               brcmf_sdio_get_console_addr(bus);
        }
 
        /*
@@ -1083,6 +1253,28 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
        bus->cur_read.len = 0;
 }
 
+static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
+{
+       struct brcmf_sdio_dev *sdiodev = bus->sdiodev;
+       u8 i, hi, lo;
+
+       /* On failure, abort the command and terminate the frame */
+       brcmf_err("sdio error, abort command and terminate frame\n");
+       bus->sdcnt.tx_sderrs++;
+
+       brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
+       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
+       bus->sdcnt.f1regdata++;
+
+       for (i = 0; i < 3; i++) {
+               hi = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL);
+               lo = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL);
+               bus->sdcnt.f1regdata += 2;
+               if ((hi == 0) && (lo == 0))
+                       break;
+       }
+}
+
 /* return total length of buffer chain */
 static uint brcmf_sdio_glom_len(struct brcmf_sdio *bus)
 {
@@ -1955,7 +2147,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
                memcpy(pkt_pad->data,
                       pkt->data + pkt->len - tail_chop,
                       tail_chop);
-               *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+               *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
                skb_trim(pkt, pkt->len - tail_chop);
                skb_trim(pkt_pad, tail_pad + tail_chop);
                __skb_queue_after(pktq, pkt, pkt_pad);
@@ -2003,7 +2195,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
                 * already properly aligned and does not
                 * need an sdpcm header.
                 */
-               if (*(u32 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
+               if (*(u16 *)(pkt_next->cb) & ALIGN_SKB_FLAG)
                        continue;
 
                /* align packet data pointer */
@@ -2037,10 +2229,10 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
                if (BRCMF_BYTES_ON() &&
                    ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
                     (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
-                       brcmf_dbg_hex_dump(true, pkt_next, hd_info.len,
+                       brcmf_dbg_hex_dump(true, pkt_next->data, hd_info.len,
                                           "Tx Frame:\n");
                else if (BRCMF_HDRS_ON())
-                       brcmf_dbg_hex_dump(true, pkt_next,
+                       brcmf_dbg_hex_dump(true, pkt_next->data,
                                           head_pad + bus->tx_hdrlen,
                                           "Tx Header:\n");
        }
@@ -2067,11 +2259,11 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
        u8 *hdr;
        u32 dat_offset;
        u16 tail_pad;
-       u32 dummy_flags, chop_len;
+       u16 dummy_flags, chop_len;
        struct sk_buff *pkt_next, *tmp, *pkt_prev;
 
        skb_queue_walk_safe(pktq, pkt_next, tmp) {
-               dummy_flags = *(u32 *)(pkt_next->cb);
+               dummy_flags = *(u16 *)(pkt_next->cb);
                if (dummy_flags & ALIGN_SKB_FLAG) {
                        chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
                        if (chop_len) {
@@ -2100,7 +2292,6 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
                            uint chan)
 {
        int ret;
-       int i;
        struct sk_buff *pkt_next, *tmp;
 
        brcmf_dbg(TRACE, "Enter\n");
@@ -2113,28 +2304,9 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
        ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
        bus->sdcnt.f2txdata++;
 
-       if (ret < 0) {
-               /* On failure, abort the command and terminate the frame */
-               brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
-                         ret);
-               bus->sdcnt.tx_sderrs++;
+       if (ret < 0)
+               brcmf_sdio_txfail(bus);
 
-               brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-               brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-                                 SFC_WF_TERM, NULL);
-               bus->sdcnt.f1regdata++;
-
-               for (i = 0; i < 3; i++) {
-                       u8 hi, lo;
-                       hi = brcmf_sdiod_regrb(bus->sdiodev,
-                                              SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-                       lo = brcmf_sdiod_regrb(bus->sdiodev,
-                                              SBSDIO_FUNC1_WFRAMEBCLO, NULL);
-                       bus->sdcnt.f1regdata += 2;
-                       if ((hi == 0) && (lo == 0))
-                               break;
-               }
-       }
        sdio_release_host(bus->sdiodev->func[1]);
 
 done:
@@ -2164,13 +2336,15 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        /* Send frames until the limit or some other event */
        for (cnt = 0; (cnt < maxframes) && data_ok(bus);) {
                pkt_num = 1;
-               __skb_queue_head_init(&pktq);
+               if (down_interruptible(&bus->tx_seq_lock))
+                       return cnt;
                if (bus->txglom)
                        pkt_num = min_t(u8, bus->tx_max - bus->tx_seq,
-                                       brcmf_sdio_txglomsz);
+                                       bus->sdiodev->txglomsz);
                pkt_num = min_t(u32, pkt_num,
                                brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol));
-               spin_lock_bh(&bus->txqlock);
+               __skb_queue_head_init(&pktq);
+               spin_lock_bh(&bus->txq_lock);
                for (i = 0; i < pkt_num; i++) {
                        pkt = brcmu_pktq_mdeq(&bus->txq, tx_prec_map,
                                              &prec_out);
@@ -2178,15 +2352,19 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
                                break;
                        __skb_queue_tail(&pktq, pkt);
                }
-               spin_unlock_bh(&bus->txqlock);
-               if (i == 0)
+               spin_unlock_bh(&bus->txq_lock);
+               if (i == 0) {
+                       up(&bus->tx_seq_lock);
                        break;
+               }
 
                ret = brcmf_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL);
+               up(&bus->tx_seq_lock);
+
                cnt += i;
 
                /* In poll mode, need to check for other events */
-               if (!bus->intr && cnt) {
+               if (!bus->intr) {
                        /* Check device status, signal pending interrupt */
                        sdio_claim_host(bus->sdiodev->func[1]);
                        ret = r_sdreg32(bus, &intstatus,
@@ -2211,6 +2389,68 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
        return cnt;
 }
 
+static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
+{
+       u8 doff;
+       u16 pad;
+       uint retries = 0;
+       struct brcmf_sdio_hdrinfo hd_info = {0};
+       int ret;
+
+       brcmf_dbg(TRACE, "Enter\n");
+
+       /* Back the pointer to make room for bus header */
+       frame -= bus->tx_hdrlen;
+       len += bus->tx_hdrlen;
+
+       /* Add alignment padding (optional for ctl frames) */
+       doff = ((unsigned long)frame % bus->head_align);
+       if (doff) {
+               frame -= doff;
+               len += doff;
+               memset(frame + bus->tx_hdrlen, 0, doff);
+       }
+
+       /* Round send length to next SDIO block */
+       pad = 0;
+       if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
+               pad = bus->blocksize - (len % bus->blocksize);
+               if ((pad > bus->roundup) || (pad >= bus->blocksize))
+                       pad = 0;
+       } else if (len % bus->head_align) {
+               pad = bus->head_align - (len % bus->head_align);
+       }
+       len += pad;
+
+       hd_info.len = len - pad;
+       hd_info.channel = SDPCM_CONTROL_CHANNEL;
+       hd_info.dat_offset = doff + bus->tx_hdrlen;
+       hd_info.seq_num = bus->tx_seq;
+       hd_info.lastfrm = true;
+       hd_info.tail_pad = pad;
+       brcmf_sdio_hdpack(bus, frame, &hd_info);
+
+       if (bus->txglom)
+               brcmf_sdio_update_hwhdr(frame, len);
+
+       brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
+                          frame, len, "Tx Frame:\n");
+       brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
+                          BRCMF_HDRS_ON(),
+                          frame, min_t(u16, len, 16), "TxHdr:\n");
+
+       do {
+               ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
+
+               if (ret < 0)
+                       brcmf_sdio_txfail(bus);
+               else
+                       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+       } while (ret < 0 && retries++ < TXRETRIES);
+
+       return ret;
+}
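
The round-up above sends a whole number of SDIO blocks for control frames larger than one block and only head_align padding for smaller ones; hd_info.len carries the unpadded length while tail_pad records what was appended. A sketch of the rule with assumed bus parameters (512-byte blocks, roundup equal to the block size, 4-byte head_align); the helper name is illustrative:

/*
 * Illustration only:
 *   len = 600 -> pad = 512 - (600 % 512) = 424, 1024 bytes (two blocks) sent
 *   len = 201 -> below one block, pad = 4 - (201 % 4) = 3, 204 bytes sent
 */
static u16 example_ctrl_pad(u16 len, u16 blocksize, u16 roundup, u16 head_align)
{
	u16 pad = 0;

	if (roundup && blocksize && len > blocksize) {
		pad = blocksize - (len % blocksize);
		if (pad > roundup || pad >= blocksize)
			pad = 0;
	} else if (len % head_align) {
		pad = head_align - (len % head_align);
	}
	return pad;
}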
+
 static void brcmf_sdio_bus_stop(struct device *dev)
 {
        u32 local_hostintmask;
@@ -2292,21 +2532,29 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
        }
 }
 
+static void atomic_orr(int val, atomic_t *v)
+{
+       int old_val;
+
+       old_val = atomic_read(v);
+       while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
+               old_val = atomic_read(v);
+}
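
atomic_orr() is a cmpxchg retry loop that ORs bits into an atomic_t without losing a concurrent update; it replaces the per-bit for_each_set_bit()/set_bit() loops removed below. A minimal usage sketch (the helper name and call-site comments are assumptions for illustration; the flag names are the ones used in this file):

/* Illustration only: both bits survive even if the two calls race. */
static void example_accumulate_status(atomic_t *pending)
{
	atomic_orr(I_HMB_FRAME_IND, pending);	/* e.g. frame-indication bit   */
	atomic_orr(I_HMB_FC_STATE, pending);	/* e.g. flow-control state bit */
	/* atomic_read(pending) now has both bits set */
}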
+
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
-       u8 idx;
+       struct brcmf_core *buscore;
        u32 addr;
        unsigned long val;
-       int n, ret;
+       int ret;
 
-       idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
-       addr = bus->ci->c_inf[idx].base +
-              offsetof(struct sdpcmd_regs, intstatus);
+       buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+       addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
 
        val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
        bus->sdcnt.f1regdata++;
        if (ret != 0)
-               val = 0;
+               return ret;
 
        val &= bus->hostintmask;
        atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE));
@@ -2315,13 +2563,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
        if (val) {
                brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
                bus->sdcnt.f1regdata++;
-       }
-
-       if (ret) {
-               atomic_set(&bus->intstatus, 0);
-       } else if (val) {
-               for_each_set_bit(n, &val, 32)
-                       set_bit(n, (unsigned long *)&bus->intstatus.counter);
+               atomic_orr(val, &bus->intstatus);
        }
 
        return ret;
@@ -2331,10 +2573,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 {
        u32 newstatus = 0;
        unsigned long intstatus;
-       uint rxlimit = bus->rxbound;    /* Rx frames to read before resched */
        uint txlimit = bus->txbound;    /* Tx frames to send before resched */
-       uint framecnt = 0;      /* Temporary counter of tx/rx frames */
-       int err = 0, n;
+       uint framecnt;                  /* Temporary counter of tx/rx frames */
+       int err = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -2431,70 +2672,38 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
                intstatus &= ~I_HMB_FRAME_IND;
 
        /* On frame indication, read available frames */
-       if (PKT_AVAILABLE() && bus->clkstate == CLK_AVAIL) {
-               framecnt = brcmf_sdio_readframes(bus, rxlimit);
+       if ((intstatus & I_HMB_FRAME_IND) && (bus->clkstate == CLK_AVAIL)) {
+               brcmf_sdio_readframes(bus, bus->rxbound);
                if (!bus->rxpending)
                        intstatus &= ~I_HMB_FRAME_IND;
-               rxlimit -= min(framecnt, rxlimit);
        }
 
        /* Keep still-pending events for next scheduling */
-       if (intstatus) {
-               for_each_set_bit(n, &intstatus, 32)
-                       set_bit(n, (unsigned long *)&bus->intstatus.counter);
-       }
+       if (intstatus)
+               atomic_orr(intstatus, &bus->intstatus);
 
        brcmf_sdio_clrintr(bus);
 
-       if (data_ok(bus) && bus->ctrl_frame_stat &&
-               (bus->clkstate == CLK_AVAIL)) {
-               int i;
-
-               sdio_claim_host(bus->sdiodev->func[1]);
-               err = brcmf_sdiod_send_buf(bus->sdiodev, bus->ctrl_frame_buf,
-                                          (u32)bus->ctrl_frame_len);
-
-               if (err < 0) {
-                       /* On failure, abort the command and
-                               terminate the frame */
-                       brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
-                                 err);
-                       bus->sdcnt.tx_sderrs++;
-
-                       brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-
-                       brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-                                         SFC_WF_TERM, &err);
-                       bus->sdcnt.f1regdata++;
-
-                       for (i = 0; i < 3; i++) {
-                               u8 hi, lo;
-                               hi = brcmf_sdiod_regrb(bus->sdiodev,
-                                                      SBSDIO_FUNC1_WFRAMEBCHI,
-                                                      &err);
-                               lo = brcmf_sdiod_regrb(bus->sdiodev,
-                                                      SBSDIO_FUNC1_WFRAMEBCLO,
-                                                      &err);
-                               bus->sdcnt.f1regdata += 2;
-                               if ((hi == 0) && (lo == 0))
-                                       break;
-                       }
+       if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
+           (down_interruptible(&bus->tx_seq_lock) == 0)) {
+               if (data_ok(bus)) {
+                       sdio_claim_host(bus->sdiodev->func[1]);
+                       err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
+                                                     bus->ctrl_frame_len);
+                       sdio_release_host(bus->sdiodev->func[1]);
 
-               } else {
-                       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
+                       bus->ctrl_frame_stat = false;
+                       brcmf_sdio_wait_event_wakeup(bus);
                }
-               sdio_release_host(bus->sdiodev->func[1]);
-               bus->ctrl_frame_stat = false;
-               brcmf_sdio_wait_event_wakeup(bus);
+               up(&bus->tx_seq_lock);
        }
        /* Send queued frames (limit 1 if rx may still be pending) */
-       else if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
-                brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit
-                && data_ok(bus)) {
+       if ((bus->clkstate == CLK_AVAIL) && !atomic_read(&bus->fcstate) &&
+           brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit &&
+           data_ok(bus)) {
                framecnt = bus->rxpending ? min(txlimit, bus->txminmax) :
                                            txlimit;
-               framecnt = brcmf_sdio_sendfromq(bus, framecnt);
-               txlimit -= framecnt;
+               brcmf_sdio_sendfromq(bus, framecnt);
        }
 
        if (!brcmf_bus_ready(bus->sdiodev->bus_if) || (err != 0)) {
@@ -2504,19 +2713,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
                   atomic_read(&bus->ipend) > 0 ||
                   (!atomic_read(&bus->fcstate) &&
                    brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) &&
-                   data_ok(bus)) || PKT_AVAILABLE()) {
+                   data_ok(bus))) {
                atomic_inc(&bus->dpc_tskcnt);
        }
-
-       /* If we're done for now, turn off clock request. */
-       if ((bus->clkstate != CLK_PENDING)
-           && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
-               bus->activity = false;
-               brcmf_dbg(SDIO, "idle state\n");
-               sdio_claim_host(bus->sdiodev->func[1]);
-               brcmf_sdio_bus_sleep(bus, true, false);
-               sdio_release_host(bus->sdiodev->func[1]);
-       }
 }
 
 static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
@@ -2531,15 +2730,12 @@ static struct pktq *brcmf_sdio_bus_gettxq(struct device *dev)
 static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
 {
        int ret = -EBADE;
-       uint datalen, prec;
+       uint prec;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
-       ulong flags;
 
-       brcmf_dbg(TRACE, "Enter\n");
-
-       datalen = pkt->len;
+       brcmf_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len);
 
        /* Add space for the header */
        skb_push(pkt, bus->tx_hdrlen);
@@ -2553,7 +2749,9 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
        bus->sdcnt.fcqueued++;
 
        /* Priority based enq */
-       spin_lock_irqsave(&bus->txqlock, flags);
+       spin_lock_bh(&bus->txq_lock);
+       /* reset bus_flags in packet cb */
+       *(u16 *)(pkt->cb) = 0;
        if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
                skb_pull(pkt, bus->tx_hdrlen);
                brcmf_err("out of bus->txq !!!\n");
@@ -2566,7 +2764,7 @@ static int brcmf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
                bus->txoff = true;
                brcmf_txflowblock(bus->sdiodev->dev, true);
        }
-       spin_unlock_irqrestore(&bus->txqlock, flags);
+       spin_unlock_bh(&bus->txq_lock);
 
 #ifdef DEBUG
        if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2661,110 +2859,27 @@ break2:
 }
 #endif                         /* DEBUG */
 
-static int brcmf_sdio_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
-{
-       int i;
-       int ret;
-
-       bus->ctrl_frame_stat = false;
-       ret = brcmf_sdiod_send_buf(bus->sdiodev, frame, len);
-
-       if (ret < 0) {
-               /* On failure, abort the command and terminate the frame */
-               brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
-                         ret);
-               bus->sdcnt.tx_sderrs++;
-
-               brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
-
-               brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
-                                 SFC_WF_TERM, NULL);
-               bus->sdcnt.f1regdata++;
-
-               for (i = 0; i < 3; i++) {
-                       u8 hi, lo;
-                       hi = brcmf_sdiod_regrb(bus->sdiodev,
-                                              SBSDIO_FUNC1_WFRAMEBCHI, NULL);
-                       lo = brcmf_sdiod_regrb(bus->sdiodev,
-                                              SBSDIO_FUNC1_WFRAMEBCLO, NULL);
-                       bus->sdcnt.f1regdata += 2;
-                       if (hi == 0 && lo == 0)
-                               break;
-               }
-               return ret;
-       }
-
-       bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
-
-       return ret;
-}
-
 static int
 brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 {
-       u8 *frame;
-       u16 len, pad;
-       uint retries = 0;
-       u8 doff = 0;
-       int ret = -1;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
        struct brcmf_sdio *bus = sdiodev->bus;
-       struct brcmf_sdio_hdrinfo hd_info = {0};
+       int ret = -1;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       /* Back the pointer to make a room for bus header */
-       frame = msg - bus->tx_hdrlen;
-       len = (msglen += bus->tx_hdrlen);
-
-       /* Add alignment padding (optional for ctl frames) */
-       doff = ((unsigned long)frame % bus->head_align);
-       if (doff) {
-               frame -= doff;
-               len += doff;
-               msglen += doff;
-               memset(frame, 0, doff + bus->tx_hdrlen);
-       }
-       /* precondition: doff < bus->head_align */
-       doff += bus->tx_hdrlen;
-
-       /* Round send length to next SDIO block */
-       pad = 0;
-       if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
-               pad = bus->blocksize - (len % bus->blocksize);
-               if ((pad > bus->roundup) || (pad >= bus->blocksize))
-                       pad = 0;
-       } else if (len % bus->head_align) {
-               pad = bus->head_align - (len % bus->head_align);
-       }
-       len += pad;
-
-       /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
-
-       /* Make sure backplane clock is on */
-       sdio_claim_host(bus->sdiodev->func[1]);
-       brcmf_sdio_bus_sleep(bus, false, false);
-       sdio_release_host(bus->sdiodev->func[1]);
-
-       hd_info.len = (u16)msglen;
-       hd_info.channel = SDPCM_CONTROL_CHANNEL;
-       hd_info.dat_offset = doff;
-       hd_info.seq_num = bus->tx_seq;
-       hd_info.lastfrm = true;
-       hd_info.tail_pad = pad;
-       brcmf_sdio_hdpack(bus, frame, &hd_info);
-
-       if (bus->txglom)
-               brcmf_sdio_update_hwhdr(frame, len);
+       if (down_interruptible(&bus->tx_seq_lock))
+               return -EINTR;
 
        if (!data_ok(bus)) {
                brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
                          bus->tx_max, bus->tx_seq);
-               bus->ctrl_frame_stat = true;
+               up(&bus->tx_seq_lock);
                /* Send from dpc */
-               bus->ctrl_frame_buf = frame;
-               bus->ctrl_frame_len = len;
+               bus->ctrl_frame_buf = msg;
+               bus->ctrl_frame_len = msglen;
+               bus->ctrl_frame_stat = true;
 
                wait_event_interruptible_timeout(bus->ctrl_wait,
                                                 !bus->ctrl_frame_stat,
@@ -2775,31 +2890,18 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
                        ret = 0;
                } else {
                        brcmf_dbg(SDIO, "ctrl_frame_stat == true\n");
+                       bus->ctrl_frame_stat = false;
+                       if (down_interruptible(&bus->tx_seq_lock))
+                               return -EINTR;
                        ret = -1;
                }
        }
-
        if (ret == -1) {
-               brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
-                                  frame, len, "Tx Frame:\n");
-               brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() && BRCMF_CTL_ON()) &&
-                                  BRCMF_HDRS_ON(),
-                                  frame, min_t(u16, len, 16), "TxHdr:\n");
-
-               do {
-                       sdio_claim_host(bus->sdiodev->func[1]);
-                       ret = brcmf_sdio_tx_frame(bus, frame, len);
-                       sdio_release_host(bus->sdiodev->func[1]);
-               } while (ret < 0 && retries++ < TXRETRIES);
-       }
-
-       if ((bus->idletime == BRCMF_IDLE_IMMEDIATE) &&
-           atomic_read(&bus->dpc_tskcnt) == 0) {
-               bus->activity = false;
                sdio_claim_host(bus->sdiodev->func[1]);
-               brcmf_dbg(INFO, "idle\n");
-               brcmf_sdio_clkctl(bus, CLK_NONE, true);
+               brcmf_sdio_bus_sleep(bus, false, false);
+               ret = brcmf_sdio_tx_ctrlframe(bus, msg, msglen);
                sdio_release_host(bus->sdiodev->func[1]);
+               up(&bus->tx_seq_lock);
        }
 
        if (ret)
@@ -2811,72 +2913,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
 }
 
 #ifdef DEBUG
-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
-{
-       return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
-}
-
-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
-                                struct sdpcm_shared *sh)
-{
-       u32 addr;
-       int rv;
-       u32 shaddr = 0;
-       struct sdpcm_shared_le sh_le;
-       __le32 addr_le;
-
-       shaddr = bus->ci->rambase + bus->ramsize - 4;
-
-       /*
-        * Read last word in socram to determine
-        * address of sdpcm_shared structure
-        */
-       sdio_claim_host(bus->sdiodev->func[1]);
-       brcmf_sdio_bus_sleep(bus, false, false);
-       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
-       sdio_release_host(bus->sdiodev->func[1]);
-       if (rv < 0)
-               return rv;
-
-       addr = le32_to_cpu(addr_le);
-
-       brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
-
-       /*
-        * Check if addr is valid.
-        * NVRAM length at the end of memory should have been overwritten.
-        */
-       if (!brcmf_sdio_valid_shared_address(addr)) {
-                       brcmf_err("invalid sdpcm_shared address 0x%08X\n",
-                                 addr);
-                       return -EINVAL;
-       }
-
-       /* Read hndrte_shared structure */
-       rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
-                              sizeof(struct sdpcm_shared_le));
-       if (rv < 0)
-               return rv;
-
-       /* Endianness */
-       sh->flags = le32_to_cpu(sh_le.flags);
-       sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
-       sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
-       sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
-       sh->assert_line = le32_to_cpu(sh_le.assert_line);
-       sh->console_addr = le32_to_cpu(sh_le.console_addr);
-       sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
-
-       if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
-               brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
-                         SDPCM_SHARED_VERSION,
-                         sh->flags & SDPCM_SHARED_VERSION_MASK);
-               return -EPROTO;
-       }
-
-       return 0;
-}
-
 static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
                                   struct sdpcm_shared *sh, char __user *data,
                                   size_t count)
@@ -3106,6 +3142,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
        debugfs_create_file("forensics", S_IRUGO, dentry, bus,
                            &brcmf_sdio_forensic_ops);
        brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+       debugfs_create_u32("console_interval", 0644, dentry,
+                          &bus->console_interval);
 }
 #else
 static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3224,32 +3262,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
                                         const struct firmware *fw)
 {
        int err;
-       int offset;
-       int address;
-       int len;
 
        brcmf_dbg(TRACE, "Enter\n");
 
-       err = 0;
-       offset = 0;
-       address = bus->ci->rambase;
-       while (offset < fw->size) {
-               len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
-                     fw->size - offset;
-               err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
-                                       (u8 *)&fw->data[offset], len);
-               if (err) {
-                       brcmf_err("error %d on writing %d membytes at 0x%08x\n",
-                                 err, len, address);
-                       return err;
-               }
-               offset += len;
-               address += len;
-       }
-       if (!err)
-               if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
-                                            (u8 *)fw->data, fw->size))
-                       err = -EIO;
+       err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
+                               (u8 *)fw->data, fw->size);
+       if (err)
+               brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+                         err, (int)fw->size, bus->ci->rambase);
+       else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+                                         (u8 *)fw->data, fw->size))
+               err = -EIO;
 
        return err;
 }
@@ -3292,7 +3315,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
        brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
 
        /* Keep arm in reset */
-       brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+       brcmf_chip_enter_download(bus->ci);
 
        fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
        if (fw == NULL) {
@@ -3324,7 +3347,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
        }
 
        /* Take arm out of reset */
-       if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
+       if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
                brcmf_err("error getting out of ARM core reset\n");
                goto err;
        }
@@ -3339,40 +3362,6 @@ err:
        return bcmerror;
 }
 
-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
-{
-       u32 addr, reg, pmu_cc3_mask = ~0;
-       int err;
-
-       brcmf_dbg(TRACE, "Enter\n");
-
-       /* old chips with PMU version less than 17 don't support save restore */
-       if (bus->ci->pmurev < 17)
-               return false;
-
-       switch (bus->ci->chip) {
-       case BCM43241_CHIP_ID:
-       case BCM4335_CHIP_ID:
-       case BCM4339_CHIP_ID:
-               /* read PMU chipcontrol register 3 */
-               addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
-               brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
-               addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
-               reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
-               return (reg & pmu_cc3_mask) != 0;
-       default:
-               addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
-               reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
-               if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
-                       return false;
-
-               addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
-               reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
-               return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
-                              PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
-       }
-}
-
 static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
 {
        int err = 0;
@@ -3424,7 +3413,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
        brcmf_dbg(TRACE, "Enter\n");
 
        /* KSO bit added in SDIO core rev 12 */
-       if (bus->ci->c_inf[1].rev < 12)
+       if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
                return 0;
 
        val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@@ -3455,15 +3444,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
        struct brcmf_sdio *bus = sdiodev->bus;
        uint pad_size;
        u32 value;
-       u8 idx;
        int err;
 
        /* the commands below use the terms tx and rx from
         * a device perspective, ie. bus:txglom affects the
         * bus transfers from device to host.
         */
-       idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
-       if (bus->ci->c_inf[idx].rev < 12) {
+       if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
                /* for sdio core rev < 12, disable txgloming */
                value = 0;
                err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3570,7 +3557,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
                ret = -ENODEV;
        }
 
-       if (brcmf_sdio_sr_capable(bus)) {
+       if (brcmf_chip_sr_capable(bus->ci)) {
                brcmf_sdio_sr_init(bus);
        } else {
                /* Restore previous clock setting */
@@ -3714,11 +3701,175 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
                                              datawork);
 
        while (atomic_read(&bus->dpc_tskcnt)) {
+               atomic_set(&bus->dpc_tskcnt, 0);
                brcmf_sdio_dpc(bus);
-               atomic_dec(&bus->dpc_tskcnt);
        }
 }
 
+static void
+brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+                            struct brcmf_chip *ci, u32 drivestrength)
+{
+       const struct sdiod_drive_str *str_tab = NULL;
+       u32 str_mask;
+       u32 str_shift;
+       u32 base;
+       u32 i;
+       u32 drivestrength_sel = 0;
+       u32 cc_data_temp;
+       u32 addr;
+
+       if (!(ci->cc_caps & CC_CAP_PMU))
+               return;
+
+       switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+       case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+               str_tab = sdiod_drvstr_tab1_1v8;
+               str_mask = 0x00003800;
+               str_shift = 11;
+               break;
+       case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+               str_tab = sdiod_drvstr_tab6_1v8;
+               str_mask = 0x00001800;
+               str_shift = 11;
+               break;
+       case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+               /* note: 43143 does not support tristate */
+               i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+               if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+                       str_tab = sdiod_drvstr_tab2_3v3;
+                       str_mask = 0x00000007;
+                       str_shift = 0;
+               } else
+                       brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+                                 ci->name, drivestrength);
+               break;
+       case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+               str_tab = sdiod_drive_strength_tab5_1v8;
+               str_mask = 0x00003800;
+               str_shift = 11;
+               break;
+       default:
+               brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+                         ci->name, ci->chiprev, ci->pmurev);
+               break;
+       }
+
+       if (str_tab != NULL) {
+               for (i = 0; str_tab[i].strength != 0; i++) {
+                       if (drivestrength >= str_tab[i].strength) {
+                               drivestrength_sel = str_tab[i].sel;
+                               break;
+                       }
+               }
+               base = brcmf_chip_get_chipcommon(ci)->base;
+               addr = CORE_CC_REG(base, chipcontrol_addr);
+               brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+               cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+               cc_data_temp &= ~str_mask;
+               drivestrength_sel <<= str_shift;
+               cc_data_temp |= drivestrength_sel;
+               brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+
+               brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+                         str_tab[i].strength, drivestrength, cc_data_temp);
+       }
+}
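
The table walk above selects the first (strongest) entry whose strength does not exceed the requested value and masks its sel code into the PMU chipcontrol register. A worked example, assuming the PMU rev 17 1.8 V table referenced for the BCM4334 case:

/*
 * Illustration only: requested drivestrength = 6 mA with
 * sdiod_drvstr_tab6_1v8 = {3, 0x3}, {2, 0x2}, {1, 0x1}, {0, 0x0}
 * -> the loop stops at {3, 0x3}; 0x3 << str_shift (11) = 0x1800 is
 *    OR'ed into chipcontrol after clearing str_mask (0x00001800).
 */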
+
+static int brcmf_sdio_buscoreprep(void *ctx)
+{
+       struct brcmf_sdio_dev *sdiodev = ctx;
+       int err = 0;
+       u8 clkval, clkset;
+
+       /* Try forcing SDIO core to do ALPAvail request only */
+       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+       if (err) {
+               brcmf_err("error writing for HT off\n");
+               return err;
+       }
+
+       /* If register supported, wait for ALPAvail and then force ALP */
+       /* This may take up to 15 milliseconds */
+       clkval = brcmf_sdiod_regrb(sdiodev,
+                                  SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+       if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+               brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+                         clkset, clkval);
+               return -EACCES;
+       }
+
+       SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+                                             SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+                       !SBSDIO_ALPAV(clkval)),
+                       PMU_MAX_TRANSITION_DLY);
+       if (!SBSDIO_ALPAV(clkval)) {
+               brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+                         clkval);
+               return -EBUSY;
+       }
+
+       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+       udelay(65);
+
+       /* Also, disable the extra SDIO pull-ups */
+       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+       return 0;
+}
+
+static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
+                                     u32 rstvec)
+{
+       struct brcmf_sdio_dev *sdiodev = ctx;
+       struct brcmf_core *core;
+       u32 reg_addr;
+
+       /* clear all interrupts */
+       core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
+       reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
+       brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+
+       if (rstvec)
+               /* Write reset vector to address 0 */
+               brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+                                 sizeof(rstvec));
+}
+
+static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
+{
+       struct brcmf_sdio_dev *sdiodev = ctx;
+       u32 val, rev;
+
+       val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+       if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+           addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+               rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
+               if (rev >= 2) {
+                       val &= ~CID_ID_MASK;
+                       val |= BCM4339_CHIP_ID;
+               }
+       }
+       return val;
+}
+
+static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
+{
+       struct brcmf_sdio_dev *sdiodev = ctx;
+
+       brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+}
+
+static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
+       .prepare = brcmf_sdio_buscoreprep,
+       .exit_dl = brcmf_sdio_buscore_exitdl,
+       .read32 = brcmf_sdio_buscore_read32,
+       .write32 = brcmf_sdio_buscore_write32,
+};
+
 static bool
 brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
 {
@@ -3734,7 +3885,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                 brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
 
        /*
-        * Force PLL off until brcmf_sdio_chip_attach()
+        * Force PLL off until brcmf_chip_attach()
         * programs PLL control regs
         */
 
@@ -3755,8 +3906,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
         */
        brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
 
-       if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
-               brcmf_err("brcmf_sdio_chip_attach failed!\n");
+       bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+       if (IS_ERR(bus->ci)) {
+               brcmf_err("brcmf_chip_attach failed!\n");
+               bus->ci = NULL;
                goto fail;
        }
 
@@ -3769,7 +3922,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                drivestrength = bus->sdiodev->pdata->drive_strength;
        else
                drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
-       brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+       brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
 
        /* Get info on the SOCRAM cores... */
        bus->ramsize = bus->ci->ramsize;
@@ -3792,24 +3945,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                goto fail;
 
        /* set PMUControl so a backplane reset does PMU state reload */
-       reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
+       reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
                               pmucontrol);
-       reg_val = brcmf_sdiod_regrl(bus->sdiodev,
-                                   reg_addr,
-                                   &err);
+       reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
        if (err)
                goto fail;
 
        reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
 
-       brcmf_sdiod_regwl(bus->sdiodev,
-                         reg_addr,
-                         reg_val,
-                         &err);
+       brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
        if (err)
                goto fail;
 
-
        sdio_release_host(bus->sdiodev->func[1]);
 
        brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@@ -3849,6 +3996,7 @@ brcmf_sdio_watchdog_thread(void *data)
                        brcmf_sdio_bus_watchdog(bus);
                        /* Count the tick for reference */
                        bus->sdcnt.tickcnt++;
+                       reinit_completion(&bus->watchdog_wait);
                } else
                        break;
        }
@@ -3925,7 +4073,8 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
        }
 
        spin_lock_init(&bus->rxctl_lock);
-       spin_lock_init(&bus->txqlock);
+       spin_lock_init(&bus->txq_lock);
+       sema_init(&bus->tx_seq_lock, 1);
        init_waitqueue_head(&bus->ctrl_wait);
        init_waitqueue_head(&bus->dcmd_resp_wait);
 
@@ -4024,14 +4173,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
                /* De-register interrupt handler */
                brcmf_sdiod_intr_unregister(bus->sdiodev);
 
-               cancel_work_sync(&bus->datawork);
-               if (bus->brcmf_wq)
-                       destroy_workqueue(bus->brcmf_wq);
-
                if (bus->sdiodev->bus_if->drvr) {
                        brcmf_detach(bus->sdiodev->dev);
                }
 
+               cancel_work_sync(&bus->datawork);
+               if (bus->brcmf_wq)
+                       destroy_workqueue(bus->brcmf_wq);
+
                if (bus->ci) {
                        if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
                                sdio_claim_host(bus->sdiodev->func[1]);
@@ -4042,12 +4191,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
                                 * all necessary cores.
                                 */
                                msleep(20);
-                               brcmf_sdio_chip_enter_download(bus->sdiodev,
-                                                              bus->ci);
+                               brcmf_chip_enter_download(bus->ci);
                                brcmf_sdio_clkctl(bus, CLK_NONE, false);
                                sdio_release_host(bus->sdiodev->func[1]);
                        }
-                       brcmf_sdio_chip_detach(&bus->ci);
+                       brcmf_chip_detach(bus->ci);
                }
 
                kfree(bus->rxbuf);
index 22adbe311d206df979a592419d7d247280474ec6..59a5af5bf994d88b3e4f969c8e0633de3bd57d87 100644 (file)
@@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
 }
 
 static u32
-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+brcmf_create_iovar(char *name, const char *data, u32 datalen,
+                  char *buf, u32 buflen)
 {
        u32 len;
 
@@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
 
 
 s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
                         u32 len)
 {
        struct brcmf_pub *drvr = ifp->drvr;
index 77eae86e55c23318e439fa23118f3d10e4b33788..a30be683f4a15eff78c0fa2a5133ed176b4962a0 100644 (file)
@@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
 s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
 s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
 
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
                             u32 len);
 s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
                             u32 len);
index af17a5bc8b83bdab2ee9d5a5ff2ec17490cb7603..614e4888504fae4f517389bfb49c4d08a5519924 100644 (file)
 
 #define BRCMF_MAXRATES_IN_SET          16      /* max # of rates in rateset */
 
+/* OBSS Coex Auto/On/Off */
+#define BRCMF_OBSS_COEX_AUTO           (-1)
+#define BRCMF_OBSS_COEX_OFF            0
+#define BRCMF_OBSS_COEX_ON             1
+
 enum brcmf_fil_p2p_if_types {
        BRCMF_FIL_P2P_IF_CLIENT,
        BRCMF_FIL_P2P_IF_GO,
@@ -87,6 +92,11 @@ struct brcmf_fil_bss_enable_le {
        __le32 enable;
 };
 
+struct brcmf_fil_bwcap_le {
+       __le32 band;
+       __le32 bw_cap;
+};
+
 /**
  * struct tdls_iovar - common structure for tdls iovars.
  *
index fc4f98b275d7db6a8519b7c05dcddeb7aa2e2bf1..f3445ac627e48d84ef0391f59b5904beb9bf80f9 100644 (file)
@@ -797,7 +797,8 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg,
                        /* SOCIAL CHANNELS 1, 6, 11 */
                        search_state = WL_P2P_DISC_ST_SEARCH;
                        brcmf_dbg(INFO, "P2P SEARCH PHASE START\n");
-               } else if (dev != NULL && vif->mode == WL_MODE_AP) {
+               } else if (dev != NULL &&
+                          vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
                        /* If you are already a GO, then do SEARCH only */
                        brcmf_dbg(INFO, "Already a GO. Do SEARCH Only\n");
                        search_state = WL_P2P_DISC_ST_SEARCH;
@@ -2256,7 +2257,6 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
        struct brcmf_cfg80211_vif *vif;
        enum brcmf_fil_p2p_if_types iftype;
-       enum wl_mode mode;
        int err;
 
        if (brcmf_cfg80211_vif_event_armed(cfg))
@@ -2267,11 +2267,9 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
        switch (type) {
        case NL80211_IFTYPE_P2P_CLIENT:
                iftype = BRCMF_FIL_P2P_IF_CLIENT;
-               mode = WL_MODE_BSS;
                break;
        case NL80211_IFTYPE_P2P_GO:
                iftype = BRCMF_FIL_P2P_IF_GO;
-               mode = WL_MODE_AP;
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                return brcmf_p2p_create_p2pdev(&cfg->p2p, wiphy,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
deleted file mode 100644 (file)
index 82bf3c5..0000000
+++ /dev/null
@@ -1,972 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/* ***** SDIO interface chip backplane handle functions ***** */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/ssb/ssb_regs.h>
-#include <linux/bcma/bcma.h>
-
-#include <chipcommon.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include <soc.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
-#include "sdio_chip.h"
-
-/* chip core base & ramsize */
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE          0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE       0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE          0x18002000
-#define BCM4329_RAMSIZE                        0x48000
-
-/* bcm43143 */
-/* SDIO device core */
-#define BCM43143_CORE_BUS_BASE         0x18002000
-/* internal memory core */
-#define BCM43143_CORE_SOCRAM_BASE      0x18004000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM43143_CORE_ARM_BASE         0x18003000
-#define BCM43143_RAMSIZE               0x70000
-
-/* All D11 cores, ID 0x812 */
-#define BCM43xx_CORE_D11_BASE          0x18001000
-
-#define        SBCOREREV(sbidh) \
-       ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
-         ((sbidh) & SSB_IDHIGH_RCLO))
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_SB                0
-#define SOCI_AI                1
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK           0xff000000
-#define CIB_REV_SHIFT          24
-
-/* ARM CR4 core specific control flag bits */
-#define ARMCR4_BCMA_IOCTL_CPUHALT      0x0020
-
-/* D11 core specific control flag bits */
-#define D11_BCMA_IOCTL_PHYCLOCKEN      0x0004
-#define D11_BCMA_IOCTL_PHYRESET                0x0008
-
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
-       u8 strength;    /* Pad Drive Strength in mA */
-       u8 sel;         /* Chip-specific select value */
-};
-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
-       {32, 0x6},
-       {26, 0x7},
-       {22, 0x4},
-       {16, 0x5},
-       {12, 0x2},
-       {8, 0x3},
-       {4, 0x0},
-       {0, 0x1}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
-        {6, 0x7},
-        {5, 0x6},
-        {4, 0x5},
-        {3, 0x4},
-        {2, 0x2},
-        {1, 0x1},
-        {0, 0x0}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
-       {3, 0x3},
-       {2, 0x2},
-       {1, 0x1},
-       {0, 0x0} };
-
-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
-       {16, 0x7},
-       {12, 0x5},
-       {8,  0x3},
-       {4,  0x1}
-};
-
-u8
-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
-{
-       u8 idx;
-
-       for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
-               if (coreid == ci->c_inf[idx].id)
-                       return idx;
-
-       return BRCMF_MAX_CORENUM;
-}
-
-static u32
-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
-                     struct brcmf_chip *ci, u16 coreid)
-{
-       u32 regdata;
-       u8 idx;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbidhigh),
-                                   NULL);
-       return SBCOREREV(regdata);
-}
-
-static u32
-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
-                     struct brcmf_chip *ci, u16 coreid)
-{
-       u8 idx;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
-       return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-static bool
-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
-                      struct brcmf_chip *ci, u16 coreid)
-{
-       u32 regdata;
-       u8 idx;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-       if (idx == BRCMF_MAX_CORENUM)
-               return false;
-
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                                   NULL);
-       regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
-                   SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
-       return (SSB_TMSLOW_CLOCK == regdata);
-}
-
-static bool
-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
-                      struct brcmf_chip *ci, u16 coreid)
-{
-       u32 regdata;
-       u8 idx;
-       bool ret;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-       if (idx == BRCMF_MAX_CORENUM)
-               return false;
-
-       regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
-                                   NULL);
-       ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
-
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
-                                   NULL);
-       ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
-
-       return ret;
-}
-
-static void
-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
-                         struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
-                         u32 in_resetbits)
-{
-       u32 regdata, base;
-       u8 idx;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-       base = ci->c_inf[idx].base;
-
-       regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
-       if (regdata & SSB_TMSLOW_RESET)
-               return;
-
-       regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
-       if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
-               /*
-                * set target reject and spin until busy is clear
-                * (preserve core-specific bits)
-                */
-               regdata = brcmf_sdiod_regrl(sdiodev,
-                                           CORE_SB(base, sbtmstatelow), NULL);
-               brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
-                                 regdata | SSB_TMSLOW_REJECT, NULL);
-
-               regdata = brcmf_sdiod_regrl(sdiodev,
-                                           CORE_SB(base, sbtmstatelow), NULL);
-               udelay(1);
-               SPINWAIT((brcmf_sdiod_regrl(sdiodev,
-                                           CORE_SB(base, sbtmstatehigh),
-                                           NULL) &
-                         SSB_TMSHIGH_BUSY), 100000);
-
-               regdata = brcmf_sdiod_regrl(sdiodev,
-                                           CORE_SB(base, sbtmstatehigh),
-                                           NULL);
-               if (regdata & SSB_TMSHIGH_BUSY)
-                       brcmf_err("core state still busy\n");
-
-               regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
-                                           NULL);
-               if (regdata & SSB_IDLOW_INITIATOR) {
-                       regdata = brcmf_sdiod_regrl(sdiodev,
-                                                   CORE_SB(base, sbimstate),
-                                                   NULL);
-                       regdata |= SSB_IMSTATE_REJECT;
-                       brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
-                                         regdata, NULL);
-                       regdata = brcmf_sdiod_regrl(sdiodev,
-                                                   CORE_SB(base, sbimstate),
-                                                   NULL);
-                       udelay(1);
-                       SPINWAIT((brcmf_sdiod_regrl(sdiodev,
-                                                   CORE_SB(base, sbimstate),
-                                                   NULL) &
-                                 SSB_IMSTATE_BUSY), 100000);
-               }
-
-               /* set reset and reject while enabling the clocks */
-               regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
-                         SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
-               brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
-                                 regdata, NULL);
-               regdata = brcmf_sdiod_regrl(sdiodev,
-                                           CORE_SB(base, sbtmstatelow), NULL);
-               udelay(10);
-
-               /* clear the initiator reject bit */
-               regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
-                                           NULL);
-               if (regdata & SSB_IDLOW_INITIATOR) {
-                       regdata = brcmf_sdiod_regrl(sdiodev,
-                                                   CORE_SB(base, sbimstate),
-                                                   NULL);
-                       regdata &= ~SSB_IMSTATE_REJECT;
-                       brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
-                                         regdata, NULL);
-               }
-       }
-
-       /* leave reset and reject asserted */
-       brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
-                         (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
-       udelay(1);
-}
-
-static void
-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
-                         struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
-                         u32 in_resetbits)
-{
-       u8 idx;
-       u32 regdata;
-       u32 wrapbase;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-       if (idx == BRCMF_MAX_CORENUM)
-               return;
-
-       wrapbase = ci->c_inf[idx].wrapbase;
-
-       /* if core is already in reset, just return */
-       regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
-       if ((regdata & BCMA_RESET_CTL_RESET) != 0)
-               return;
-
-       /* configure reset */
-       brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
-                         BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
-       regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-
-       /* put in reset */
-       brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
-                         BCMA_RESET_CTL_RESET, NULL);
-       usleep_range(10, 20);
-
-       /* wait till reset is 1 */
-       SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
-                BCMA_RESET_CTL_RESET, 300);
-
-       /* post reset configure */
-       brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
-                         BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
-       regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-static void
-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
-                       struct brcmf_chip *ci, u16 coreid,  u32 pre_resetbits,
-                       u32 in_resetbits, u32 post_resetbits)
-{
-       u32 regdata;
-       u8 idx;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-       if (idx == BRCMF_MAX_CORENUM)
-               return;
-
-       /*
-        * Must do the disable sequence first to work for
-        * arbitrary current core state.
-        */
-       brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
-                                 in_resetbits);
-
-       /*
-        * Now do the initialization sequence.
-        * set reset while enabling the clock and
-        * forcing them on throughout the core
-        */
-       brcmf_sdiod_regwl(sdiodev,
-                         CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                         SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
-                         NULL);
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                                   NULL);
-       udelay(1);
-
-       /* clear any serror */
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
-                                   NULL);
-       if (regdata & SSB_TMSHIGH_SERR)
-               brcmf_sdiod_regwl(sdiodev,
-                                 CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
-                                 0, NULL);
-
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbimstate),
-                                   NULL);
-       if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
-               brcmf_sdiod_regwl(sdiodev,
-                                 CORE_SB(ci->c_inf[idx].base, sbimstate),
-                                 regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
-                                 NULL);
-
-       /* clear reset and allow it to propagate throughout the core */
-       brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                         SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                                   NULL);
-       udelay(1);
-
-       /* leave clock enabled */
-       brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                         SSB_TMSLOW_CLOCK, NULL);
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
-                                   NULL);
-       udelay(1);
-}
-
-static void
-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
-                       struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
-                       u32 in_resetbits, u32 post_resetbits)
-{
-       u8 idx;
-       u32 regdata;
-       u32 wrapbase;
-
-       idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-       if (idx == BRCMF_MAX_CORENUM)
-               return;
-
-       wrapbase = ci->c_inf[idx].wrapbase;
-
-       /* must disable first to work for arbitrary current core state */
-       brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
-                                 in_resetbits);
-
-       while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
-              BCMA_RESET_CTL_RESET) {
-               brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
-               usleep_range(40, 60);
-       }
-
-       brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
-                         BCMA_IOCTL_CLK, NULL);
-       regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-#ifdef DEBUG
-/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
-       u8 core_idx;
-
-       /* check RAM core presence for ARM CM3 core */
-       core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
-       if (BRCMF_MAX_CORENUM != core_idx) {
-               core_idx = brcmf_sdio_chip_getinfidx(ci,
-                                                    BCMA_CORE_INTERNAL_MEM);
-               if (BRCMF_MAX_CORENUM == core_idx) {
-                       brcmf_err("RAM core not provided with ARM CM3 core\n");
-                       return -ENODEV;
-               }
-       }
-
-       /* check RAM base for ARM CR4 core */
-       core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
-       if (BRCMF_MAX_CORENUM != core_idx) {
-               if (ci->rambase == 0) {
-                       brcmf_err("RAM base not provided with ARM CR4 core\n");
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-}
-#else  /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
-       return 0;
-}
-#endif
-
-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
-                                      struct brcmf_chip *ci)
-{
-       u32 regdata;
-       u32 socitype;
-
-       /* Get CC core rev
-        * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
-        * For different chiptypes or old sdio hosts w/o chipcommon,
-        * other ways of recognition should be added here.
-        */
-       regdata = brcmf_sdiod_regrl(sdiodev,
-                                   CORE_CC_REG(SI_ENUM_BASE, chipid),
-                                   NULL);
-       ci->chip = regdata & CID_ID_MASK;
-       ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-       if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
-           ci->chiprev >= 2)
-               ci->chip = BCM4339_CHIP_ID;
-       socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
-
-       brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
-                 socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
-
-       if (socitype == SOCI_SB) {
-               if (ci->chip != BCM4329_CHIP_ID) {
-                       brcmf_err("SB chip is not supported\n");
-                       return -ENODEV;
-               }
-               ci->iscoreup = brcmf_sdio_sb_iscoreup;
-               ci->corerev = brcmf_sdio_sb_corerev;
-               ci->coredisable = brcmf_sdio_sb_coredisable;
-               ci->resetcore = brcmf_sdio_sb_resetcore;
-
-               ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
-               ci->c_inf[0].base = SI_ENUM_BASE;
-               ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-               ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
-               ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-               ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
-               ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-               ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
-               ci->c_inf[4].id = BCMA_CORE_80211;
-               ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
-               ci->ramsize = BCM4329_RAMSIZE;
-       } else if (socitype == SOCI_AI) {
-               ci->iscoreup = brcmf_sdio_ai_iscoreup;
-               ci->corerev = brcmf_sdio_ai_corerev;
-               ci->coredisable = brcmf_sdio_ai_coredisable;
-               ci->resetcore = brcmf_sdio_ai_resetcore;
-
-               ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
-               ci->c_inf[0].base = SI_ENUM_BASE;
-
-               /* Address of cores for new chips should be added here */
-               switch (ci->chip) {
-               case BCM43143_CHIP_ID:
-                       ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
-                       ci->c_inf[0].cib = 0x2b000000;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
-                       ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
-                       ci->c_inf[1].cib = 0x18000000;
-                       ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-                       ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
-                       ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
-                       ci->c_inf[2].cib = 0x14000000;
-                       ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-                       ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
-                       ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
-                       ci->c_inf[3].cib = 0x07000000;
-                       ci->c_inf[4].id = BCMA_CORE_80211;
-                       ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
-                       ci->ramsize = BCM43143_RAMSIZE;
-                       break;
-               case BCM43241_CHIP_ID:
-                       ci->c_inf[0].wrapbase = 0x18100000;
-                       ci->c_inf[0].cib = 0x2a084411;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = 0x18002000;
-                       ci->c_inf[1].wrapbase = 0x18102000;
-                       ci->c_inf[1].cib = 0x0e004211;
-                       ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-                       ci->c_inf[2].base = 0x18004000;
-                       ci->c_inf[2].wrapbase = 0x18104000;
-                       ci->c_inf[2].cib = 0x14080401;
-                       ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-                       ci->c_inf[3].base = 0x18003000;
-                       ci->c_inf[3].wrapbase = 0x18103000;
-                       ci->c_inf[3].cib = 0x07004211;
-                       ci->c_inf[4].id = BCMA_CORE_80211;
-                       ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
-                       ci->ramsize = 0x90000;
-                       break;
-               case BCM4330_CHIP_ID:
-                       ci->c_inf[0].wrapbase = 0x18100000;
-                       ci->c_inf[0].cib = 0x27004211;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = 0x18002000;
-                       ci->c_inf[1].wrapbase = 0x18102000;
-                       ci->c_inf[1].cib = 0x07004211;
-                       ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-                       ci->c_inf[2].base = 0x18004000;
-                       ci->c_inf[2].wrapbase = 0x18104000;
-                       ci->c_inf[2].cib = 0x0d080401;
-                       ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-                       ci->c_inf[3].base = 0x18003000;
-                       ci->c_inf[3].wrapbase = 0x18103000;
-                       ci->c_inf[3].cib = 0x03004211;
-                       ci->c_inf[4].id = BCMA_CORE_80211;
-                       ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
-                       ci->ramsize = 0x48000;
-                       break;
-               case BCM4334_CHIP_ID:
-                       ci->c_inf[0].wrapbase = 0x18100000;
-                       ci->c_inf[0].cib = 0x29004211;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = 0x18002000;
-                       ci->c_inf[1].wrapbase = 0x18102000;
-                       ci->c_inf[1].cib = 0x0d004211;
-                       ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-                       ci->c_inf[2].base = 0x18004000;
-                       ci->c_inf[2].wrapbase = 0x18104000;
-                       ci->c_inf[2].cib = 0x13080401;
-                       ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-                       ci->c_inf[3].base = 0x18003000;
-                       ci->c_inf[3].wrapbase = 0x18103000;
-                       ci->c_inf[3].cib = 0x07004211;
-                       ci->c_inf[4].id = BCMA_CORE_80211;
-                       ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
-                       ci->ramsize = 0x80000;
-                       break;
-               case BCM4335_CHIP_ID:
-                       ci->c_inf[0].wrapbase = 0x18100000;
-                       ci->c_inf[0].cib = 0x2b084411;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = 0x18005000;
-                       ci->c_inf[1].wrapbase = 0x18105000;
-                       ci->c_inf[1].cib = 0x0f004211;
-                       ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
-                       ci->c_inf[2].base = 0x18002000;
-                       ci->c_inf[2].wrapbase = 0x18102000;
-                       ci->c_inf[2].cib = 0x01084411;
-                       ci->c_inf[3].id = BCMA_CORE_80211;
-                       ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
-                       ci->ramsize = 0xc0000;
-                       ci->rambase = 0x180000;
-                       break;
-               case BCM43362_CHIP_ID:
-                       ci->c_inf[0].wrapbase = 0x18100000;
-                       ci->c_inf[0].cib = 0x27004211;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = 0x18002000;
-                       ci->c_inf[1].wrapbase = 0x18102000;
-                       ci->c_inf[1].cib = 0x0a004211;
-                       ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
-                       ci->c_inf[2].base = 0x18004000;
-                       ci->c_inf[2].wrapbase = 0x18104000;
-                       ci->c_inf[2].cib = 0x08080401;
-                       ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
-                       ci->c_inf[3].base = 0x18003000;
-                       ci->c_inf[3].wrapbase = 0x18103000;
-                       ci->c_inf[3].cib = 0x03004211;
-                       ci->c_inf[4].id = BCMA_CORE_80211;
-                       ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
-                       ci->ramsize = 0x3C000;
-                       break;
-               case BCM4339_CHIP_ID:
-                       ci->c_inf[0].wrapbase = 0x18100000;
-                       ci->c_inf[0].cib = 0x2e084411;
-                       ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
-                       ci->c_inf[1].base = 0x18005000;
-                       ci->c_inf[1].wrapbase = 0x18105000;
-                       ci->c_inf[1].cib = 0x15004211;
-                       ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
-                       ci->c_inf[2].base = 0x18002000;
-                       ci->c_inf[2].wrapbase = 0x18102000;
-                       ci->c_inf[2].cib = 0x04084411;
-                       ci->c_inf[3].id = BCMA_CORE_80211;
-                       ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
-                       ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
-                       ci->ramsize = 0xc0000;
-                       ci->rambase = 0x180000;
-                       break;
-               default:
-                       brcmf_err("AXI chip is not supported\n");
-                       return -ENODEV;
-               }
-       } else {
-               brcmf_err("chip backplane type %u is not supported\n",
-                         socitype);
-               return -ENODEV;
-       }
-
-       return brcmf_sdio_chip_cichk(ci);
-}
-
-static int
-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
-{
-       int err = 0;
-       u8 clkval, clkset;
-
-       /* Try forcing SDIO core to do ALPAvail request only */
-       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
-       if (err) {
-               brcmf_err("error writing for HT off\n");
-               return err;
-       }
-
-       /* If register supported, wait for ALPAvail and then force ALP */
-       /* This may take up to 15 milliseconds */
-       clkval = brcmf_sdiod_regrb(sdiodev,
-                                  SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-
-       if ((clkval & ~SBSDIO_AVBITS) != clkset) {
-               brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
-                         clkset, clkval);
-               return -EACCES;
-       }
-
-       SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
-                                             SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
-                       !SBSDIO_ALPAV(clkval)),
-                       PMU_MAX_TRANSITION_DLY);
-       if (!SBSDIO_ALPAV(clkval)) {
-               brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
-                         clkval);
-               return -EBUSY;
-       }
-
-       clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
-       udelay(65);
-
-       /* Also, disable the extra SDIO pull-ups */
-       brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
-       return 0;
-}
-
-static void
-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
-                            struct brcmf_chip *ci)
-{
-       u32 base = ci->c_inf[0].base;
-
-       /* get chipcommon rev */
-       ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
-
-       /* get chipcommon capabilities */
-       ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
-                                             CORE_CC_REG(base, capabilities),
-                                             NULL);
-
-       /* get pmu caps & rev */
-       if (ci->c_inf[0].caps & CC_CAP_PMU) {
-               ci->pmucaps =
-                       brcmf_sdiod_regrl(sdiodev,
-                                         CORE_CC_REG(base, pmucapabilities),
-                                         NULL);
-               ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
-       }
-
-       ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
-
-       brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
-                 ci->c_inf[0].rev, ci->pmurev,
-                 ci->c_inf[1].rev, ci->c_inf[1].id);
-
-       /*
-        * Make sure any on-chip ARM is off (in case strapping is wrong or
-        * downloaded code was already running).
-        */
-       ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
-}
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-                          struct brcmf_chip **ci_ptr)
-{
-       int ret;
-       struct brcmf_chip *ci;
-
-       brcmf_dbg(TRACE, "Enter\n");
-
-       ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
-       if (!ci)
-               return -ENOMEM;
-
-       ret = brcmf_sdio_chip_buscoreprep(sdiodev);
-       if (ret != 0)
-               goto err;
-
-       ret = brcmf_sdio_chip_recognition(sdiodev, ci);
-       if (ret != 0)
-               goto err;
-
-       brcmf_sdio_chip_buscoresetup(sdiodev, ci);
-
-       brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
-                         0, NULL);
-       brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
-                         0, NULL);
-
-       *ci_ptr = ci;
-       return 0;
-
-err:
-       kfree(ci);
-       return ret;
-}
-
-void
-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
-{
-       brcmf_dbg(TRACE, "Enter\n");
-
-       kfree(*ci_ptr);
-       *ci_ptr = NULL;
-}
-
-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
-{
-       const char *fmt;
-
-       fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
-       snprintf(buf, len, fmt, chipid);
-       return buf;
-}
-
-void
-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-                                 struct brcmf_chip *ci, u32 drivestrength)
-{
-       const struct sdiod_drive_str *str_tab = NULL;
-       u32 str_mask;
-       u32 str_shift;
-       char chn[8];
-       u32 base = ci->c_inf[0].base;
-       u32 i;
-       u32 drivestrength_sel = 0;
-       u32 cc_data_temp;
-       u32 addr;
-
-       if (!(ci->c_inf[0].caps & CC_CAP_PMU))
-               return;
-
-       switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
-       case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
-               str_tab = sdiod_drvstr_tab1_1v8;
-               str_mask = 0x00003800;
-               str_shift = 11;
-               break;
-       case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
-               str_tab = sdiod_drvstr_tab6_1v8;
-               str_mask = 0x00001800;
-               str_shift = 11;
-               break;
-       case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
-               /* note: 43143 does not support tristate */
-               i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
-               if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
-                       str_tab = sdiod_drvstr_tab2_3v3;
-                       str_mask = 0x00000007;
-                       str_shift = 0;
-               } else
-                       brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
-                                 brcmf_sdio_chip_name(ci->chip, chn, 8),
-                                 drivestrength);
-               break;
-       case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
-               str_tab = sdiod_drive_strength_tab5_1v8;
-               str_mask = 0x00003800;
-               str_shift = 11;
-               break;
-       default:
-               brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
-                         brcmf_sdio_chip_name(ci->chip, chn, 8),
-                         ci->chiprev, ci->pmurev);
-               break;
-       }
-
-       if (str_tab != NULL) {
-               for (i = 0; str_tab[i].strength != 0; i++) {
-                       if (drivestrength >= str_tab[i].strength) {
-                               drivestrength_sel = str_tab[i].sel;
-                               break;
-                       }
-               }
-               addr = CORE_CC_REG(base, chipcontrol_addr);
-               brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
-               cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
-               cc_data_temp &= ~str_mask;
-               drivestrength_sel <<= str_shift;
-               cc_data_temp |= drivestrength_sel;
-               brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
-
-               brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
-                         str_tab[i].strength, drivestrength, cc_data_temp);
-       }
-}
-
-static void
-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
-                           struct brcmf_chip *ci)
-{
-       ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
-       ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
-                     D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
-                     D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
-       ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
-}
-
-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
-                                      struct brcmf_chip *ci)
-{
-       u8 core_idx;
-       u32 reg_addr;
-
-       if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
-               brcmf_err("SOCRAM core is down after reset?\n");
-               return false;
-       }
-
-       /* clear all interrupts */
-       core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
-       reg_addr = ci->c_inf[core_idx].base;
-       reg_addr += offsetof(struct sdpcmd_regs, intstatus);
-       brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
-       ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
-
-       return true;
-}
-
-static inline void
-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
-                           struct brcmf_chip *ci)
-{
-       u8 idx;
-       u32 regdata;
-       u32 wrapbase;
-       idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
-
-       if (idx == BRCMF_MAX_CORENUM)
-               return;
-
-       wrapbase = ci->c_inf[idx].wrapbase;
-       regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-       regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
-       ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
-                     ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
-       ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
-                     D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
-                     D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
-}
-
-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
-                                      struct brcmf_chip *ci, u32 rstvec)
-{
-       u8 core_idx;
-       u32 reg_addr;
-
-       /* clear all interrupts */
-       core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
-       reg_addr = ci->c_inf[core_idx].base;
-       reg_addr += offsetof(struct sdpcmd_regs, intstatus);
-       brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
-       /* Write reset vector to address 0 */
-       brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
-                         sizeof(rstvec));
-
-       /* restore ARM */
-       ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
-                     0, 0);
-
-       return true;
-}
-
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-                                   struct brcmf_chip *ci)
-{
-       u8 arm_core_idx;
-
-       arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
-       if (BRCMF_MAX_CORENUM != arm_core_idx) {
-               brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
-               return;
-       }
-
-       brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
-}
-
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-                                  struct brcmf_chip *ci, u32 rstvec)
-{
-       u8 arm_core_idx;
-
-       arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
-       if (BRCMF_MAX_CORENUM != arm_core_idx)
-               return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
-
-       return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
deleted file mode 100644 (file)
index fb06143..0000000
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMFMAC_SDIO_CHIP_H_
-#define _BRCMFMAC_SDIO_CHIP_H_
-
-/*
- * Core reg address translation.
- * These macros return a 32-bit byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
-               (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
-               (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
-               (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
-/* SDIO function 1 register CHIPCLKCSR */
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP               0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT                        0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP               0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ           0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ            0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF     0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL               0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL                        0x80
-#define SBSDIO_AVBITS          (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval)   ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval)    (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) \
-       (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
-#define BRCMF_MAX_CORENUM      6
-
-struct brcmf_core {
-       u16 id;
-       u16 rev;
-       u32 base;
-       u32 wrapbase;
-       u32 caps;
-       u32 cib;
-};
-
-struct brcmf_chip {
-       u32 chip;
-       u32 chiprev;
-       /* core info */
-       /* always put chipcommon core at 0, bus core at 1 */
-       struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
-       u32 pmurev;
-       u32 pmucaps;
-       u32 ramsize;
-       u32 rambase;
-       u32 rst_vec;    /* reset vector for ARM CR4 core */
-
-       bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
-                        u16 coreid);
-       u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
-                        u16 coreid);
-       void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
-                       struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
-                       u32 in_resetbits);
-       void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
-                       struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
-                       u32 in_resetbits, u32 post_resetbits);
-};
-
-struct sbconfig {
-       u32 PAD[2];
-       u32 sbipsflag;  /* initiator port ocp slave flag */
-       u32 PAD[3];
-       u32 sbtpsflag;  /* target port ocp slave flag */
-       u32 PAD[11];
-       u32 sbtmerrloga;        /* (sonics >= 2.3) */
-       u32 PAD;
-       u32 sbtmerrlog; /* (sonics >= 2.3) */
-       u32 PAD[3];
-       u32 sbadmatch3; /* address match3 */
-       u32 PAD;
-       u32 sbadmatch2; /* address match2 */
-       u32 PAD;
-       u32 sbadmatch1; /* address match1 */
-       u32 PAD[7];
-       u32 sbimstate;  /* initiator agent state */
-       u32 sbintvec;   /* interrupt mask */
-       u32 sbtmstatelow;       /* target state */
-       u32 sbtmstatehigh;      /* target state */
-       u32 sbbwa0;             /* bandwidth allocation table0 */
-       u32 PAD;
-       u32 sbimconfiglow;      /* initiator configuration */
-       u32 sbimconfighigh;     /* initiator configuration */
-       u32 sbadmatch0; /* address match0 */
-       u32 PAD;
-       u32 sbtmconfiglow;      /* target configuration */
-       u32 sbtmconfighigh;     /* target configuration */
-       u32 sbbconfig;  /* broadcast configuration */
-       u32 PAD;
-       u32 sbbstate;   /* broadcast state */
-       u32 PAD[3];
-       u32 sbactcnfg;  /* activate configuration */
-       u32 PAD[3];
-       u32 sbflagst;   /* current sbflags */
-       u32 PAD[3];
-       u32 sbidlow;            /* identification */
-       u32 sbidhigh;   /* identification */
-};
-
-/* sdio core registers */
-struct sdpcmd_regs {
-       u32 corecontrol;                /* 0x00, rev8 */
-       u32 corestatus;                 /* rev8 */
-       u32 PAD[1];
-       u32 biststatus;                 /* rev8 */
-
-       /* PCMCIA access */
-       u16 pcmciamesportaladdr;        /* 0x010, rev8 */
-       u16 PAD[1];
-       u16 pcmciamesportalmask;        /* rev8 */
-       u16 PAD[1];
-       u16 pcmciawrframebc;            /* rev8 */
-       u16 PAD[1];
-       u16 pcmciaunderflowtimer;       /* rev8 */
-       u16 PAD[1];
-
-       /* interrupt */
-       u32 intstatus;                  /* 0x020, rev8 */
-       u32 hostintmask;                /* rev8 */
-       u32 intmask;                    /* rev8 */
-       u32 sbintstatus;                /* rev8 */
-       u32 sbintmask;                  /* rev8 */
-       u32 funcintmask;                /* rev4 */
-       u32 PAD[2];
-       u32 tosbmailbox;                /* 0x040, rev8 */
-       u32 tohostmailbox;              /* rev8 */
-       u32 tosbmailboxdata;            /* rev8 */
-       u32 tohostmailboxdata;          /* rev8 */
-
-       /* synchronized access to registers in SDIO clock domain */
-       u32 sdioaccess;                 /* 0x050, rev8 */
-       u32 PAD[3];
-
-       /* PCMCIA frame control */
-       u8 pcmciaframectrl;             /* 0x060, rev8 */
-       u8 PAD[3];
-       u8 pcmciawatermark;             /* rev8 */
-       u8 PAD[155];
-
-       /* interrupt batching control */
-       u32 intrcvlazy;                 /* 0x100, rev8 */
-       u32 PAD[3];
-
-       /* counters */
-       u32 cmd52rd;                    /* 0x110, rev8 */
-       u32 cmd52wr;                    /* rev8 */
-       u32 cmd53rd;                    /* rev8 */
-       u32 cmd53wr;                    /* rev8 */
-       u32 abort;                      /* rev8 */
-       u32 datacrcerror;               /* rev8 */
-       u32 rdoutofsync;                /* rev8 */
-       u32 wroutofsync;                /* rev8 */
-       u32 writebusy;                  /* rev8 */
-       u32 readwait;                   /* rev8 */
-       u32 readterm;                   /* rev8 */
-       u32 writeterm;                  /* rev8 */
-       u32 PAD[40];
-       u32 clockctlstatus;             /* rev8 */
-       u32 PAD[7];
-
-       u32 PAD[128];                   /* DMA engines */
-
-       /* SDIO/PCMCIA CIS region */
-       char cis[512];                  /* 0x400-0x5ff, rev6 */
-
-       /* PCMCIA function control registers */
-       char pcmciafcr[256];            /* 0x600-6ff, rev6 */
-       u16 PAD[55];
-
-       /* PCMCIA backplane access */
-       u16 backplanecsr;               /* 0x76E, rev6 */
-       u16 backplaneaddr0;             /* rev6 */
-       u16 backplaneaddr1;             /* rev6 */
-       u16 backplaneaddr2;             /* rev6 */
-       u16 backplaneaddr3;             /* rev6 */
-       u16 backplanedata0;             /* rev6 */
-       u16 backplanedata1;             /* rev6 */
-       u16 backplanedata2;             /* rev6 */
-       u16 backplanedata3;             /* rev6 */
-       u16 PAD[31];
-
-       /* sprom "size" & "blank" info */
-       u16 spromstatus;                /* 0x7BE, rev2 */
-       u32 PAD[464];
-
-       u16 PAD[0x80];
-};
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-                          struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-                                      struct brcmf_chip *ci,
-                                      u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-                                   struct brcmf_chip *ci);
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-                                  struct brcmf_chip *ci, u32 rstvec);
-
-#endif         /* _BRCMFMAC_SDIO_CHIP_H_ */
index 092e9c8249926ac80a6831f75ec72c818ff86c43..3deab7959a0d9a5c3b22ad757caf4615b283b596 100644 (file)
@@ -180,6 +180,97 @@ struct brcmf_sdio_dev {
        uint max_request_size;
        ushort max_segment_count;
        uint max_segment_size;
+       uint txglomsz;
+       struct sg_table sgtable;
+};
+
+/* sdio core registers */
+struct sdpcmd_regs {
+       u32 corecontrol;                /* 0x00, rev8 */
+       u32 corestatus;                 /* rev8 */
+       u32 PAD[1];
+       u32 biststatus;                 /* rev8 */
+
+       /* PCMCIA access */
+       u16 pcmciamesportaladdr;        /* 0x010, rev8 */
+       u16 PAD[1];
+       u16 pcmciamesportalmask;        /* rev8 */
+       u16 PAD[1];
+       u16 pcmciawrframebc;            /* rev8 */
+       u16 PAD[1];
+       u16 pcmciaunderflowtimer;       /* rev8 */
+       u16 PAD[1];
+
+       /* interrupt */
+       u32 intstatus;                  /* 0x020, rev8 */
+       u32 hostintmask;                /* rev8 */
+       u32 intmask;                    /* rev8 */
+       u32 sbintstatus;                /* rev8 */
+       u32 sbintmask;                  /* rev8 */
+       u32 funcintmask;                /* rev4 */
+       u32 PAD[2];
+       u32 tosbmailbox;                /* 0x040, rev8 */
+       u32 tohostmailbox;              /* rev8 */
+       u32 tosbmailboxdata;            /* rev8 */
+       u32 tohostmailboxdata;          /* rev8 */
+
+       /* synchronized access to registers in SDIO clock domain */
+       u32 sdioaccess;                 /* 0x050, rev8 */
+       u32 PAD[3];
+
+       /* PCMCIA frame control */
+       u8 pcmciaframectrl;             /* 0x060, rev8 */
+       u8 PAD[3];
+       u8 pcmciawatermark;             /* rev8 */
+       u8 PAD[155];
+
+       /* interrupt batching control */
+       u32 intrcvlazy;                 /* 0x100, rev8 */
+       u32 PAD[3];
+
+       /* counters */
+       u32 cmd52rd;                    /* 0x110, rev8 */
+       u32 cmd52wr;                    /* rev8 */
+       u32 cmd53rd;                    /* rev8 */
+       u32 cmd53wr;                    /* rev8 */
+       u32 abort;                      /* rev8 */
+       u32 datacrcerror;               /* rev8 */
+       u32 rdoutofsync;                /* rev8 */
+       u32 wroutofsync;                /* rev8 */
+       u32 writebusy;                  /* rev8 */
+       u32 readwait;                   /* rev8 */
+       u32 readterm;                   /* rev8 */
+       u32 writeterm;                  /* rev8 */
+       u32 PAD[40];
+       u32 clockctlstatus;             /* rev8 */
+       u32 PAD[7];
+
+       u32 PAD[128];                   /* DMA engines */
+
+       /* SDIO/PCMCIA CIS region */
+       char cis[512];                  /* 0x400-0x5ff, rev6 */
+
+       /* PCMCIA function control registers */
+       char pcmciafcr[256];            /* 0x600-6ff, rev6 */
+       u16 PAD[55];
+
+       /* PCMCIA backplane access */
+       u16 backplanecsr;               /* 0x76E, rev6 */
+       u16 backplaneaddr0;             /* rev6 */
+       u16 backplaneaddr1;             /* rev6 */
+       u16 backplaneaddr2;             /* rev6 */
+       u16 backplaneaddr3;             /* rev6 */
+       u16 backplanedata0;             /* rev6 */
+       u16 backplanedata1;             /* rev6 */
+       u16 backplanedata2;             /* rev6 */
+       u16 backplanedata3;             /* rev6 */
+       u16 PAD[31];
+
+       /* sprom "size" & "blank" info */
+       u16 spromstatus;                /* 0x7BE, rev2 */
+       u32 PAD[464];
+
+       u16 PAD[0x80];
 };
 
 /* Register/deregister interrupt handler. */
index d7718a5fa2f0323c46458300f417b7dbd6e46b8e..afb3d15e38ff0379a99c5e2c534be23c57b94e38 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/kernel.h>
 #include <linux/etherdevice.h>
+#include <linux/module.h>
 #include <net/cfg80211.h>
 #include <net/netlink.h>
 
@@ -190,6 +191,7 @@ static struct ieee80211_supported_band __wl_band_2ghz = {
        .n_channels = ARRAY_SIZE(__wl_2ghz_channels),
        .bitrates = wl_g_rates,
        .n_bitrates = wl_g_rates_size,
+       .ht_cap = {IEEE80211_HT_CAP_SUP_WIDTH_20_40, true},
 };
 
 static struct ieee80211_supported_band __wl_band_5ghz_a = {
@@ -251,6 +253,10 @@ struct parsed_vndr_ies {
        struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
 };
 
+static int brcmf_roamoff;
+module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
+MODULE_PARM_DESC(roamoff, "do not use internal roaming engine");
+
 /* Quarter dBm units to mW
  * Table starts at QDBM_OFFSET, so the first entry is mW for qdBm=153
  * Table is offset so the last entry is largest mW value that fits in
@@ -351,13 +357,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
  * triples, returning a pointer to the substring whose first element
  * matches tag
  */
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key)
 {
-       struct brcmf_tlv *elt;
-       int totlen;
-
-       elt = (struct brcmf_tlv *)buf;
-       totlen = buflen;
+       const struct brcmf_tlv *elt = buf;
+       int totlen = buflen;
 
        /* find tagged parameter */
        while (totlen >= TLV_HDR_LEN) {
@@ -378,8 +382,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
  * not update the tlvs buffer pointer/length.
  */
 static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
-                u8 *oui, u32 oui_len, u8 type)
+brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
+                const u8 *oui, u32 oui_len, u8 type)
 {
        /* If the contents match the OUI and the type */
        if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@@ -401,12 +405,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
 }
 
 static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+brcmf_find_wpaie(const u8 *parse, u32 len)
 {
-       struct brcmf_tlv *ie;
+       const struct brcmf_tlv *ie;
 
        while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
-               if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+               if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
                                     WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
                        return (struct brcmf_vs_tlv *)ie;
        }
@@ -414,9 +418,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
 }
 
 static struct brcmf_vs_tlv *
-brcmf_find_wpsie(u8 *parse, u32 len)
+brcmf_find_wpsie(const u8 *parse, u32 len)
 {
-       struct brcmf_tlv *ie;
+       const struct brcmf_tlv *ie;
 
        while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
                if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@@ -491,6 +495,19 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
        return err;
 }
 
+static bool brcmf_is_apmode(struct brcmf_cfg80211_vif *vif)
+{
+       enum nl80211_iftype iftype;
+
+       iftype = vif->wdev.iftype;
+       return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO;
+}
+
+static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
+{
+       return vif->wdev.iftype == NL80211_IFTYPE_ADHOC;
+}
+
 static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy,
                                                     const char *name,
                                                     enum nl80211_iftype type,
@@ -651,7 +668,6 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                          type);
                return -EOPNOTSUPP;
        case NL80211_IFTYPE_ADHOC:
-               vif->mode = WL_MODE_IBSS;
                infra = 0;
                break;
        case NL80211_IFTYPE_STATION:
@@ -667,12 +683,10 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                         */
                        return 0;
                }
-               vif->mode = WL_MODE_BSS;
                infra = 1;
                break;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               vif->mode = WL_MODE_AP;
                ap = 1;
                break;
        default:
@@ -696,7 +710,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
                        err = -EAGAIN;
                        goto done;
                }
-               brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
+               brcmf_dbg(INFO, "IF Type = %s\n", brcmf_is_ibssmode(vif) ?
                          "Adhoc" : "Infra");
        }
        ndev->ieee80211_ptr->iftype = type;
@@ -1340,13 +1354,14 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
 }
 
 static s32
-brcmf_set_set_cipher(struct net_device *ndev,
-                    struct cfg80211_connect_params *sme)
+brcmf_set_wsec_mode(struct net_device *ndev,
+                    struct cfg80211_connect_params *sme, bool mfp)
 {
        struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
        struct brcmf_cfg80211_security *sec;
        s32 pval = 0;
        s32 gval = 0;
+       s32 wsec;
        s32 err = 0;
 
        if (sme->crypto.n_ciphers_pairwise) {
@@ -1398,7 +1413,12 @@ brcmf_set_set_cipher(struct net_device *ndev,
        if (brcmf_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval &&
            sme->privacy)
                pval = AES_ENABLED;
-       err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", pval | gval);
+
+       if (mfp)
+               wsec = pval | gval | MFP_CAPABLE;
+       else
+               wsec = pval | gval;
+       err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec);
        if (err) {
                brcmf_err("error (%d)\n", err);
                return err;
@@ -1562,13 +1582,12 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        struct ieee80211_channel *chan = sme->channel;
        struct brcmf_join_params join_params;
        size_t join_params_size;
-       struct brcmf_tlv *rsn_ie;
-       struct brcmf_vs_tlv *wpa_ie;
-       void *ie;
+       const struct brcmf_tlv *rsn_ie;
+       const struct brcmf_vs_tlv *wpa_ie;
+       const void *ie;
        u32 ie_len;
        struct brcmf_ext_join_params_le *ext_join_params;
        u16 chanspec;
-
        s32 err = 0;
 
        brcmf_dbg(TRACE, "Enter\n");
@@ -1591,7 +1610,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                        ie_len = wpa_ie->len + TLV_HDR_LEN;
                } else {
                        /* find the RSN_IE */
-                       rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+                       rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
+                                                 sme->ie_len,
                                                  WLAN_EID_RSN);
                        if (rsn_ie) {
                                ie = rsn_ie;
@@ -1636,7 +1656,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
                goto done;
        }
 
-       err = brcmf_set_set_cipher(ndev, sme);
+       err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED);
        if (err) {
                brcmf_err("wl_set_set_cipher failed (%d)\n", err);
                goto done;
@@ -1678,22 +1698,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
        ext_join_params->ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
        memcpy(&ext_join_params->ssid_le.SSID, sme->ssid,
               profile->ssid.SSID_len);
-       /*increase dwell time to receive probe response or detect Beacon
-        * from target AP at a noisy air only during connect command
-        */
-       ext_join_params->scan_le.active_time =
-               cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
-       ext_join_params->scan_le.passive_time =
-               cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+
        /* Set up join scan parameters */
        ext_join_params->scan_le.scan_type = -1;
-       /* to sync with presence period of VSDB GO.
-        * Send probe request more frequently. Probe request will be stopped
-        * when it gets probe response from target AP/GO.
-        */
-       ext_join_params->scan_le.nprobes =
-               cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
-                           BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
        ext_join_params->scan_le.home_time = cpu_to_le32(-1);
 
        if (sme->bssid)
@@ -1706,6 +1713,25 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
 
                ext_join_params->assoc_le.chanspec_list[0] =
                        cpu_to_le16(chanspec);
+               /* Increase dwell time to receive probe response or detect
+                * beacon from target AP at a noisy air only during connect
+                * command.
+                */
+               ext_join_params->scan_le.active_time =
+                       cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS);
+               ext_join_params->scan_le.passive_time =
+                       cpu_to_le32(BRCMF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS);
+               /* To sync with presence period of VSDB GO send probe request
+                * more frequently. Probe request will be stopped when it gets
+                * probe response from target AP/GO.
+                */
+               ext_join_params->scan_le.nprobes =
+                       cpu_to_le32(BRCMF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS /
+                                   BRCMF_SCAN_JOIN_PROBE_INTERVAL_MS);
+       } else {
+               ext_join_params->scan_le.active_time = cpu_to_le32(-1);
+               ext_join_params->scan_le.passive_time = cpu_to_le32(-1);
+               ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
        }
 
        err  = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
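
The hunk above drops the unconditional dwell-time and probe settings; the next hunk re-adds them only when a target channel is known, and lets the firmware use its defaults (-1) otherwise. A rough user-space sketch of that split, with placeholder timings rather than the driver's BRCMF_SCAN_JOIN_* values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder timings in ms; the driver uses its BRCMF_SCAN_JOIN_* constants. */
#define ACTIVE_DWELL_MS    320
#define PASSIVE_DWELL_MS   400
#define PROBE_INTERVAL_MS   20

struct join_scan {
	int32_t active_time;
	int32_t passive_time;
	int32_t nprobes;
};

/* With a known target channel, use explicit dwell times and probe more
 * frequently; without one, -1 tells the firmware to use its defaults. */
static void fill_join_scan(struct join_scan *s, int have_channel)
{
	if (have_channel) {
		s->active_time = ACTIVE_DWELL_MS;
		s->passive_time = PASSIVE_DWELL_MS;
		s->nprobes = ACTIVE_DWELL_MS / PROBE_INTERVAL_MS;
	} else {
		s->active_time = -1;
		s->passive_time = -1;
		s->nprobes = -1;
	}
}

int main(void)
{
	struct join_scan s;

	fill_join_scan(&s, 1);
	printf("nprobes=%d\n", (int)s.nprobes);
	return 0;
}
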
@@ -1913,7 +1939,7 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
                brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
                memcpy(key.data, params->key, key.len);
 
-               if ((ifp->vif->mode != WL_MODE_AP) &&
+               if (!brcmf_is_apmode(ifp->vif) &&
                    (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
                        brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
                        memcpy(keybuf, &key.data[24], sizeof(keybuf));
@@ -1981,7 +2007,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
        if (!check_vif_up(ifp->vif))
                return -EIO;
 
-       if (mac_addr) {
+       if (mac_addr &&
+               (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+               (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
                brcmf_dbg(TRACE, "Exit");
                return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
        }
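
With this change a caller-supplied MAC address no longer forces the per-station key path for WEP ciphers. A small sketch of the decision, using the standard IEEE cipher suite selectors; the helper itself is hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Standard cipher suite selectors (OUI 00-0F-AC). */
#define WLAN_CIPHER_SUITE_WEP40  0x000FAC01
#define WLAN_CIPHER_SUITE_CCMP   0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05

/* Take the pairwise (per-station) key path only when a peer MAC is given
 * and the cipher is not WEP; WEP keys always use the group/default path. */
static bool use_per_station_key(const uint8_t *mac, uint32_t cipher)
{
	return mac &&
	       cipher != WLAN_CIPHER_SUITE_WEP40 &&
	       cipher != WLAN_CIPHER_SUITE_WEP104;
}

int main(void)
{
	uint8_t peer[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("%d %d\n", use_per_station_key(peer, WLAN_CIPHER_SUITE_CCMP),
	       use_per_station_key(peer, WLAN_CIPHER_SUITE_WEP40));
	return 0;
}
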
@@ -2010,7 +2038,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
                brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
                break;
        case WLAN_CIPHER_SUITE_TKIP:
-               if (ifp->vif->mode != WL_MODE_AP) {
+               if (!brcmf_is_apmode(ifp->vif)) {
                        brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
                        memcpy(keybuf, &key.data[24], sizeof(keybuf));
                        memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
@@ -2164,12 +2192,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
        s32 err = 0;
        u8 *bssid = profile->bssid;
        struct brcmf_sta_info_le sta_info_le;
+       u32 beacon_period;
+       u32 dtim_period;
 
        brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
        if (!check_vif_up(ifp->vif))
                return -EIO;
 
-       if (ifp->vif->mode == WL_MODE_AP) {
+       if (brcmf_is_apmode(ifp->vif)) {
                memcpy(&sta_info_le, mac, ETH_ALEN);
                err = brcmf_fil_iovar_data_get(ifp, "sta_info",
                                               &sta_info_le,
@@ -2186,7 +2216,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                }
                brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
                          sinfo->inactive_time, sinfo->connected_time);
-       } else if (ifp->vif->mode == WL_MODE_BSS) {
+       } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) {
                if (memcmp(mac, bssid, ETH_ALEN)) {
                        brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
                                  mac, bssid);
@@ -2218,6 +2248,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
                                sinfo->signal = rssi;
                                brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
                        }
+                       err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
+                                                   &beacon_period);
+                       if (err) {
+                               brcmf_err("Could not get beacon period (%d)\n",
+                                         err);
+                               goto done;
+                       } else {
+                               sinfo->bss_param.beacon_interval =
+                                       beacon_period;
+                               brcmf_dbg(CONN, "Beacon period %d\n",
+                                         beacon_period);
+                       }
+                       err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
+                                                   &dtim_period);
+                       if (err) {
+                               brcmf_err("Could not get DTIM period (%d)\n",
+                                         err);
+                               goto done;
+                       } else {
+                               sinfo->bss_param.dtim_period = dtim_period;
+                               brcmf_dbg(CONN, "DTIM period %d\n",
+                                         dtim_period);
+                       }
+                       sinfo->filled |= STATION_INFO_BSS_PARAM;
                }
        } else
                err = -EPERM;
@@ -2444,18 +2498,13 @@ CleanUp:
        return err;
 }
 
-static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
-{
-       return vif->mode == WL_MODE_IBSS;
-}
-
 static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
                                 struct brcmf_if *ifp)
 {
        struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
        struct brcmf_bss_info_le *bi;
        struct brcmf_ssid *ssid;
-       struct brcmf_tlv *tim;
+       const struct brcmf_tlv *tim;
        u16 beacon_interval;
        u8 dtim_period;
        size_t ie_len;
@@ -3220,8 +3269,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
 }
 
 static s32
-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
-                    bool is_rsn_ie)
+brcmf_configure_wpaie(struct net_device *ndev,
+                     const struct brcmf_vs_tlv *wpa_ie,
+                     bool is_rsn_ie)
 {
        struct brcmf_if *ifp = netdev_priv(ndev);
        u32 auth = 0; /* d11 open authentication */
@@ -3707,11 +3757,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
        s32 ie_offset;
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_if *ifp = netdev_priv(ndev);
-       struct brcmf_tlv *ssid_ie;
+       const struct brcmf_tlv *ssid_ie;
        struct brcmf_ssid_le ssid_le;
        s32 err = -EPERM;
-       struct brcmf_tlv *rsn_ie;
-       struct brcmf_vs_tlv *wpa_ie;
+       const struct brcmf_tlv *rsn_ie;
+       const struct brcmf_vs_tlv *wpa_ie;
        struct brcmf_join_params join_params;
        enum nl80211_iftype dev_role;
        struct brcmf_fil_bss_enable_le bss_enable;
@@ -4220,32 +4270,6 @@ static struct cfg80211_ops wl_cfg80211_ops = {
        CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
 };
 
-static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
-{
-       switch (type) {
-       case NL80211_IFTYPE_AP_VLAN:
-       case NL80211_IFTYPE_WDS:
-       case NL80211_IFTYPE_MONITOR:
-       case NL80211_IFTYPE_MESH_POINT:
-               return -ENOTSUPP;
-       case NL80211_IFTYPE_ADHOC:
-               return WL_MODE_IBSS;
-       case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_P2P_CLIENT:
-               return WL_MODE_BSS;
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_P2P_GO:
-               return WL_MODE_AP;
-       case NL80211_IFTYPE_P2P_DEVICE:
-               return WL_MODE_P2P;
-       case NL80211_IFTYPE_UNSPECIFIED:
-       default:
-               break;
-       }
-
-       return -EINVAL;
-}
-
 static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
 {
        /* scheduled scan settings */
@@ -4370,7 +4394,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
        vif->wdev.wiphy = cfg->wiphy;
        vif->wdev.iftype = type;
 
-       vif->mode = brcmf_nl80211_iftype_to_mode(type);
        vif->pm_block = pm_block;
        vif->roam_off = -1;
 
@@ -4416,7 +4439,9 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
        u32 event = e->event_code;
        u16 flags = e->flags;
 
-       if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
+       if ((event == BRCMF_E_DEAUTH) || (event == BRCMF_E_DEAUTH_IND) ||
+           (event == BRCMF_E_DISASSOC_IND) ||
+           ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
                brcmf_dbg(CONN, "Processing link down\n");
                return true;
        }
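
Link-down detection now also covers deauthentication and disassociation indications. A standalone sketch of the predicate, with made-up event codes standing in for the driver's BRCMF_E_* values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up event codes and flag bit; the real BRCMF_E_* values differ. */
enum fw_event { EV_LINK = 1, EV_DEAUTH, EV_DEAUTH_IND, EV_DISASSOC_IND };
#define EVENT_MSG_LINK 0x01

/* Deauth/disassoc indications now count as link-down, in addition to a
 * LINK event whose "link up" flag is clear. */
static bool is_linkdown(enum fw_event event, uint16_t flags)
{
	return event == EV_DEAUTH || event == EV_DEAUTH_IND ||
	       event == EV_DISASSOC_IND ||
	       (event == EV_LINK && !(flags & EVENT_MSG_LINK));
}

int main(void)
{
	printf("%d %d\n", is_linkdown(EV_DEAUTH_IND, 0),
	       is_linkdown(EV_LINK, EVENT_MSG_LINK));
	return 0;
}
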
@@ -4658,16 +4683,19 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
        struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
        struct net_device *ndev = ifp->ndev;
        struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+       struct ieee80211_channel *chan;
        s32 err = 0;
+       u16 reason;
 
-       if (ifp->vif->mode == WL_MODE_AP) {
+       if (brcmf_is_apmode(ifp->vif)) {
                err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
        } else if (brcmf_is_linkup(e)) {
                brcmf_dbg(CONN, "Linkup\n");
                if (brcmf_is_ibssmode(ifp->vif)) {
+                       chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
                        memcpy(profile->bssid, e->addr, ETH_ALEN);
                        wl_inform_ibss(cfg, ndev, e->addr);
-                       cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
+                       cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
                        clear_bit(BRCMF_VIF_STATUS_CONNECTING,
                                  &ifp->vif->sme_state);
                        set_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -4679,9 +4707,15 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
                if (!brcmf_is_ibssmode(ifp->vif)) {
                        brcmf_bss_connect_done(cfg, ndev, e, false);
                        if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
-                                              &ifp->vif->sme_state))
-                               cfg80211_disconnected(ndev, 0, NULL, 0,
+                                              &ifp->vif->sme_state)) {
+                               reason = 0;
+                               if (((e->event_code == BRCMF_E_DEAUTH_IND) ||
+                                    (e->event_code == BRCMF_E_DISASSOC_IND)) &&
+                                   (e->reason != WLAN_REASON_UNSPECIFIED))
+                                       reason = e->reason;
+                               cfg80211_disconnected(ndev, reason, NULL, 0,
                                                      GFP_KERNEL);
+                       }
                }
                brcmf_link_down(ifp->vif);
                brcmf_init_prof(ndev_to_prof(ndev));
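
The reason code reported via cfg80211_disconnected() is taken from the firmware event only for deauth/disassoc indications that carry something more specific than WLAN_REASON_UNSPECIFIED (1). A compact sketch with placeholder event codes:

#include <stdint.h>
#include <stdio.h>

#define WLAN_REASON_UNSPECIFIED 1	/* IEEE 802.11 reason code */
/* Made-up event codes; the real BRCMF_E_* values differ. */
enum fw_event { EV_DEAUTH_IND = 3, EV_DISASSOC_IND = 4 };

/* Propagate the firmware's reason code to user space only for deauth or
 * disassoc indications that carry something more specific. */
static uint16_t disconnect_reason(int event, uint16_t fw_reason)
{
	if ((event == EV_DEAUTH_IND || event == EV_DISASSOC_IND) &&
	    fw_reason != WLAN_REASON_UNSPECIFIED)
		return fw_reason;
	return 0;
}

int main(void)
{
	printf("%u\n", (unsigned int)disconnect_reason(EV_DISASSOC_IND, 8));
	return 0;
}
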
@@ -4875,11 +4909,8 @@ static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
 
        cfg->scan_request = NULL;
        cfg->pwr_save = true;
-       cfg->roam_on = true;    /* roam on & off switch.
-                                we enable roam per default */
-       cfg->active_scan = true;        /* we do active scan for
-                                specific scan per default */
-       cfg->dongle_up = false; /* dongle is not up yet */
+       cfg->active_scan = true;        /* we do active scan per default */
+       cfg->dongle_up = false;         /* dongle is not up yet */
        err = brcmf_init_priv_mem(cfg);
        if (err)
                return err;
@@ -4904,6 +4935,30 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
        mutex_init(&event->vif_event_lock);
 }
 
+static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
+{
+       struct brcmf_fil_bwcap_le band_bwcap;
+       u32 val;
+       int err;
+
+       /* verify support for bw_cap command */
+       val = WLC_BAND_5G;
+       err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &val);
+
+       if (!err) {
+               /* only set 2G bandwidth using bw_cap command */
+               band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
+               band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+               err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
+                                              sizeof(band_bwcap));
+       } else {
+               brcmf_dbg(INFO, "fallback to mimo_bw_cap\n");
+               val = WLC_N_BW_40ALL;
+               err = brcmf_fil_iovar_int_set(ifp, "mimo_bw_cap", val);
+       }
+       return err;
+}
+
 struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                                                  struct device *busdev)
 {
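
brcmf_enable_bw40_2g() above follows a probe-then-fallback pattern: try the newer per-band "bw_cap" iovar, and only if the firmware rejects it fall back to the legacy "mimo_bw_cap" knob. A user-space sketch of that flow with the iovar plumbing stubbed out; the accessor names and values are placeholders:

#include <stdio.h>

/* Stand-ins for the firmware iovar accessors (the driver goes through
 * brcmf_fil_iovar_*()); 0 means success, negative means unsupported. */
static int iovar_get(const char *name, unsigned int *val)
{
	(void)name; (void)val;
	return -23;	/* pretend this firmware lacks "bw_cap" */
}

static int iovar_set(const char *name, unsigned int val)
{
	(void)name; (void)val;
	return 0;
}

/* Probe the newer per-band "bw_cap" interface first; only when the
 * firmware rejects it, fall back to the legacy "mimo_bw_cap" knob. */
static int enable_bw40_2g(void)
{
	unsigned int val = 0;

	if (!iovar_get("bw_cap", &val))
		return iovar_set("bw_cap", 0x2 /* 40 MHz bit, 2.4 GHz band */);

	printf("fallback to mimo_bw_cap\n");
	return iovar_set("mimo_bw_cap", 1);
}

int main(void)
{
	return enable_bw40_2g() ? 1 : 0;
}
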
@@ -4961,6 +5016,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                goto cfg80211_p2p_attach_out;
        }
 
+       /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
+        * setup 40MHz in 2GHz band and enable OBSS scanning.
+        */
+       if (wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap &
+           IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
+               err = brcmf_enable_bw40_2g(ifp);
+               if (!err)
+                       err = brcmf_fil_iovar_int_set(ifp, "obss_coex",
+                                                     BRCMF_OBSS_COEX_AUTO);
+       }
+
        err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
        if (err) {
                brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
@@ -4999,7 +5065,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
 }
 
 static s32
-brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
+brcmf_dongle_roam(struct brcmf_if *ifp, u32 bcn_timeout)
 {
        s32 err = 0;
        __le32 roamtrigger[2];
@@ -5009,7 +5075,7 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
         * Setup timeout if Beacons are lost and roam is
         * off to report link down
         */
-       if (roamvar) {
+       if (brcmf_roamoff) {
                err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
                if (err) {
                        brcmf_err("bcn_timeout error (%d)\n", err);
@@ -5021,8 +5087,9 @@ brcmf_dongle_roam(struct brcmf_if *ifp, u32 roamvar, u32 bcn_timeout)
         * Enable/Disable built-in roaming to allow supplicant
         * to take care of roaming
         */
-       brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
-       err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
+       brcmf_dbg(INFO, "Internal Roaming = %s\n",
+                 brcmf_roamoff ? "Off" : "On");
+       err = brcmf_fil_iovar_int_set(ifp, "roam_off", !!(brcmf_roamoff));
        if (err) {
                brcmf_err("roam_off error (%d)\n", err);
                goto dongle_rom_out;
@@ -5164,9 +5231,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
                                ieee80211_channel_to_frequency(ch.chnum, band);
                        band_chan_arr[index].hw_value = ch.chnum;
 
-                       brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
-                                 ch.chnum, band_chan_arr[index].center_freq,
-                                 ch.bw, ch.sb);
                        if (ch.bw == BRCMU_CHAN_BW_40) {
                                /* assuming the order is HT20, HT40 Upper,
                                 * HT40 lower from chanspecs
@@ -5267,6 +5331,8 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
        u32 band_list[3];
        u32 nmode;
        u32 bw_cap[2] = { 0, 0 };
+       u32 rxchain;
+       u32 nchain;
        s8 phy;
        s32 err;
        u32 nband;
@@ -5303,6 +5369,16 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
        brcmf_dbg(INFO, "nmode=%d, bw_cap=(%d, %d)\n", nmode,
                  bw_cap[IEEE80211_BAND_2GHZ], bw_cap[IEEE80211_BAND_5GHZ]);
 
+       err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
+       if (err) {
+               brcmf_err("rxchain error (%d)\n", err);
+               nchain = 1;
+       } else {
+               for (nchain = 0; rxchain; nchain++)
+                       rxchain = rxchain & (rxchain - 1);
+       }
+       brcmf_dbg(INFO, "nchain=%d\n", nchain);
+
        err = brcmf_construct_reginfo(cfg, bw_cap);
        if (err) {
                brcmf_err("brcmf_construct_reginfo failed (%d)\n", err);
@@ -5331,10 +5407,7 @@ static s32 brcmf_update_wiphybands(struct brcmf_cfg80211_info *cfg)
                band->ht_cap.ht_supported = true;
                band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
                band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
-               /* An HT shall support all EQM rates for one spatial
-                * stream
-                */
-               band->ht_cap.mcs.rx_mask[0] = 0xff;
+               memset(band->ht_cap.mcs.rx_mask, 0xff, nchain);
                band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
                bands[band->band] = band;
        }
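
Two ideas from the hunks above, in standalone form: the RX chain count is derived by clearing the lowest set bit of the rxchain mask per iteration (Kernighan's method), and one 0xff byte per chain is then written into the HT MCS rx_mask so that many spatial streams are advertised. The 10-byte mask size matches ieee80211_mcs_info; the harness itself is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Count set bits by clearing the lowest one per iteration (Kernighan). */
static unsigned int count_chains(uint32_t rxchain)
{
	unsigned int nchain;

	for (nchain = 0; rxchain; nchain++)
		rxchain &= rxchain - 1;
	return nchain;
}

int main(void)
{
	uint8_t rx_mask[10] = { 0 };		/* like ieee80211_mcs_info.rx_mask */
	unsigned int nchain = count_chains(0x5);	/* chains 0 and 2 active */

	if (nchain == 0)
		nchain = 1;	/* roughly mirrors the driver's single-chain fallback */

	/* One 0xff byte per supported spatial stream (MCS 0-7, 8-15, ...). */
	memset(rx_mask, 0xff, nchain);
	printf("nchain=%u rx_mask={%02x %02x %02x ...}\n", nchain,
	       (unsigned int)rx_mask[0], (unsigned int)rx_mask[1],
	       (unsigned int)rx_mask[2]);
	return 0;
}
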
@@ -5381,7 +5454,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
        brcmf_dbg(INFO, "power save set to %s\n",
                  (power_mode ? "enabled" : "disabled"));
 
-       err = brcmf_dongle_roam(ifp, (cfg->roam_on ? 0 : 1), WL_BEACON_TIMEOUT);
+       err = brcmf_dongle_roam(ifp, WL_BEACON_TIMEOUT);
        if (err)
                goto default_conf_out;
        err = brcmf_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype,
index 2dc6a074e8ede14c4d3709a2d899c390e1bd421c..283c525a44f759d7995d882df93fdb700360abed 100644 (file)
@@ -89,21 +89,6 @@ enum brcmf_scan_status {
        BRCMF_SCAN_STATUS_SUPPRESS,
 };
 
-/**
- * enum wl_mode - driver mode of virtual interface.
- *
- * @WL_MODE_BSS: connects to BSS.
- * @WL_MODE_IBSS: operate as ad-hoc.
- * @WL_MODE_AP: operate as access-point.
- * @WL_MODE_P2P: provide P2P discovery.
- */
-enum wl_mode {
-       WL_MODE_BSS,
-       WL_MODE_IBSS,
-       WL_MODE_AP,
-       WL_MODE_P2P
-};
-
 /* dongle configuration */
 struct brcmf_cfg80211_conf {
        u32 frag_threshold;
@@ -193,7 +178,6 @@ struct vif_saved_ie {
  * @ifp: lower layer interface pointer
  * @wdev: wireless device.
  * @profile: profile information.
- * @mode: operating mode.
  * @roam_off: roaming state.
  * @sme_state: SME state using enum brcmf_vif_status bits.
  * @pm_block: power-management blocked.
@@ -204,7 +188,6 @@ struct brcmf_cfg80211_vif {
        struct brcmf_if *ifp;
        struct wireless_dev wdev;
        struct brcmf_cfg80211_profile profile;
-       s32 mode;
        s32 roam_off;
        unsigned long sme_state;
        bool pm_block;
@@ -402,7 +385,6 @@ struct brcmf_cfg80211_info {
        bool ibss_starter;
        bool pwr_save;
        bool dongle_up;
-       bool roam_on;
        bool scan_tried;
        u8 *dcmd_buf;
        u8 *extra_buf;
@@ -491,7 +473,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
 s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
                          const u8 *vndr_ie_buf, u32 vndr_ie_len);
 s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key);
 u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
                        struct ieee80211_channel *ch);
 u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
index 925034b80e9cf68f14c8af4fca29b8aafdeda433..8c5fa4e581392d73d28ba29b790151be8f37f569 100644 (file)
@@ -426,6 +426,12 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        bool blocked;
        int err;
 
+       if (!wl->ucode.bcm43xx_bomminor) {
+               err = brcms_request_fw(wl, wl->wlc->hw->d11core);
+               if (err)
+                       return -ENOENT;
+       }
+
        ieee80211_wake_queues(hw);
        spin_lock_bh(&wl->lock);
        blocked = brcms_rfkill_set_hw_state(wl);
@@ -433,14 +439,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
        if (!blocked)
                wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
 
-       if (!wl->ucode.bcm43xx_bomminor) {
-               err = brcms_request_fw(wl, wl->wlc->hw->d11core);
-               if (err) {
-                       brcms_remove(wl->wlc->hw->d11core);
-                       return -ENOENT;
-               }
-       }
-
        spin_lock_bh(&wl->lock);
        /* avoid acknowledging frames before a non-monitor device is added */
        wl->mute_tx = true;
@@ -1094,12 +1092,6 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
  * Attach to the WL device identified by vendor and device parameters.
  * regs is a host accessible memory address pointing to WL device registers.
  *
- * brcms_attach is not defined as static because in the case where no bus
- * is defined, wl_attach will never be called, and thus, gcc will issue
- * a warning that this function is defined but not used if we declare
- * it as static.
- *
- *
  * is called in brcms_bcma_probe() context, therefore no locking required.
  */
 static struct brcms_info *brcms_attach(struct bcma_device *pdev)
index 6fa5d4863782ea2575089583270186c015b810fa..d816270db3be56cd7af0ec130fc6bf74b9aa0b69 100644 (file)
@@ -43,5 +43,6 @@
 #define BCM4335_CHIP_ID                0x4335
 #define BCM43362_CHIP_ID       43362
 #define BCM4339_CHIP_ID                0x4339
+#define BCM4354_CHIP_ID                0x4354
 
 #endif                         /* _BRCM_HW_IDS_H_ */
index 7ca2aa1035b2101d1d8129c1ca480724863dd507..74419d4bd123772f94e222326c61ee4e93f0f9d3 100644 (file)
@@ -217,6 +217,9 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec)
 #define WSEC_SWFLAG            0x0008
 /* to go into transition mode without setting wep */
 #define SES_OW_ENABLED         0x0040
+/* MFP */
+#define MFP_CAPABLE            0x0200
+#define MFP_REQUIRED           0x0400
 
 /* WPA authentication mode bitvec */
 #define WPA_AUTH_DISABLED      0x0000  /* Legacy (i.e., non-WPA) */
index 5a9ffd3a6a6caa0d1c98a27cc238de4e8e5047cb..e23d67e0bfe66806f1498e81fbcb82c4b3675a40 100644 (file)
@@ -202,8 +202,8 @@ static int cw1200_load_firmware_cw1200(struct cw1200_common *priv)
                }
 
                /* calculate the block size */
-               tx_size = block_size = min((size_t)(firmware->size - put),
-                       (size_t)DOWNLOAD_BLOCK_SIZE);
+               tx_size = block_size = min_t(size_t, firmware->size - put,
+                                       DOWNLOAD_BLOCK_SIZE);
 
                memcpy(buf, &firmware->data[put], block_size);
                if (block_size < DOWNLOAD_BLOCK_SIZE) {
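
The min() call with explicit size_t casts becomes min_t(size_t, ...), which forces both operands into one type at the macro level. A simplified user-space stand-in (unlike the kernel helper, it evaluates its arguments twice) showing the idea:

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the kernel's min_t(): pick a single type for the
 * comparison instead of casting at every call site. */
#define MIN_T(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	size_t remaining = 1500;	/* e.g. firmware->size - put */
	int block_limit = 1024;		/* e.g. DOWNLOAD_BLOCK_SIZE */
	size_t tx_size = MIN_T(size_t, remaining, block_limit);

	printf("tx_size=%zu\n", tx_size);
	return 0;
}
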
index 9f825f2620da749ca2d8090c55171eb021bcf77a..b6ec51923b203afb589e49738a13d87a6c37a836 100644 (file)
@@ -677,6 +677,8 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
        PCMCIA_DEVICE_PROD_ID12(
                "ZoomAir 11Mbps High", "Rate wireless Networking",
                0x273fe3db, 0x32a1eaee),
+       PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card",
+               0xa37434e9, 0x9762e8f1),
        PCMCIA_DEVICE_PROD_ID123(
                "Pretec", "CompactWLAN Card 802.11b", "2.5",
                0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
index 3aba49259ef1886d09157559977cb7f72ac1d944..dfc6dfc56d52fa8029beb5e25826c61908b02286 100644 (file)
@@ -7065,7 +7065,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
        if (wrqu->data.length > IW_ESSID_MAX_SIZE)
                return -E2BIG;
 
-       wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+       wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
        memset(priv->nick, 0, sizeof(priv->nick));
        memcpy(priv->nick, extra, wrqu->data.length);
 
index 139326065bd923aad89a867774168480f7a83d0c..c5aa404069f3b14d4c9bba36987faeed088cba58 100644 (file)
@@ -9169,7 +9169,7 @@ static int ipw_wx_set_nick(struct net_device *dev,
        if (wrqu->data.length > IW_ESSID_MAX_SIZE)
                return -E2BIG;
        mutex_lock(&priv->mutex);
-       wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
+       wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
        memset(priv->nick, 0, sizeof(priv->nick));
        memcpy(priv->nick, extra, wrqu->data.length);
        IPW_DEBUG_TRACE("<<\n");
index 0487461ae4da22053607bb49178e296787e18dc3..dc1d20cf64ee9b04b83b7d28495b696a44694317 100644 (file)
@@ -1248,14 +1248,7 @@ il3945_rx_handle(struct il_priv *il)
                len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
                len += sizeof(u32);     /* account for status word */
 
-               /* Reclaim a command buffer only if this packet is a response
-                *   to a (driver-originated) command.
-                * If the packet (e.g. Rx frame) originated from uCode,
-                *   there is no command buffer to reclaim.
-                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-                *   but apparently a few don't get set; catch them here. */
-               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-                   pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+               reclaim = il_need_reclaim(il, pkt);
 
                /* Based on type of command response or notification,
                 *   handle those that need handling via function in
@@ -1495,12 +1488,14 @@ il3945_irq_tasklet(struct il_priv *il)
        if (inta & CSR_INT_BIT_WAKEUP) {
                D_ISR("Wakeup interrupt\n");
                il_rx_queue_update_write_ptr(il, &il->rxq);
+
+               spin_lock_irqsave(&il->lock, flags);
                il_txq_update_write_ptr(il, &il->txq[0]);
                il_txq_update_write_ptr(il, &il->txq[1]);
                il_txq_update_write_ptr(il, &il->txq[2]);
                il_txq_update_write_ptr(il, &il->txq[3]);
                il_txq_update_write_ptr(il, &il->txq[4]);
-               il_txq_update_write_ptr(il, &il->txq[5]);
+               spin_unlock_irqrestore(&il->lock, flags);
 
                il->isr_stats.wakeup++;
                handled |= CSR_INT_BIT_WAKEUP;
index 9a45f6f626f69633c938bd386242cdacff6496e7..76b0729ade17ee2d18cc42df934f286762c2c155 100644 (file)
@@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
 {
 }
 
-static struct rate_control_ops rs_ops = {
-       .module = NULL,
+static const struct rate_control_ops rs_ops = {
        .name = RS_NAME,
        .tx_status = il3945_rs_tx_status,
        .get_rate = il3945_rs_get_rate,
index 43f488a8cda21790a646dcd4efa4ffd04d445c96..888ad5c74639e351a3727c8b934a68f7969849e5 100644 (file)
@@ -92,7 +92,6 @@ il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
  * EEPROM
  */
 struct il_mod_params il4965_mod_params = {
-       .amsdu_size_8K = 1,
        .restart_fw = 1,
        /* the rest are 0 by default */
 };
@@ -4274,17 +4273,7 @@ il4965_rx_handle(struct il_priv *il)
                len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
                len += sizeof(u32);     /* account for status word */
 
-               /* Reclaim a command buffer only if this packet is a response
-                *   to a (driver-originated) command.
-                * If the packet (e.g. Rx frame) originated from uCode,
-                *   there is no command buffer to reclaim.
-                * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
-                *   but apparently a few don't get set; catch them here. */
-               reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
-                   (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
-                   (pkt->hdr.cmd != N_RX_MPDU) &&
-                   (pkt->hdr.cmd != N_COMPRESSED_BA) &&
-                   (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+               reclaim = il_need_reclaim(il, pkt);
 
                /* Based on type of command response or notification,
                 *   handle those that need handling via function in
@@ -6876,6 +6865,6 @@ module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
 MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
 module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
                   S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
 module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
 MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
index 4d5e33259ca894d66fe2edc997b8d3ed6580467b..eaaeea19d8c5bcc99d887ee7b3fd9c592b897045 100644 (file)
@@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
 {
 }
 
-static struct rate_control_ops rs_4965_ops = {
-       .module = NULL,
+static const struct rate_control_ops rs_4965_ops = {
        .name = IL4965_RS_NAME,
        .tx_status = il4965_rs_tx_status,
        .get_rate = il4965_rs_get_rate,
index 048421511988afb12892bc7f9951d07f90d5d85c..dd744135c9566fcc8bf9877c37ba19762bccb27c 100644 (file)
@@ -2270,7 +2270,8 @@ struct il_spectrum_notification {
  */
 #define IL_POWER_VEC_SIZE 5
 
-#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK        cpu_to_le16(BIT(0))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK                cpu_to_le16(BIT(0))
+#define IL_POWER_SLEEP_OVER_DTIM_MSK           cpu_to_le16(BIT(2))
 #define IL_POWER_PCI_PM_MSK                    cpu_to_le16(BIT(3))
 
 struct il3945_powertable_cmd {
index 02e8233ccf29865e988eed0be9036c7dfc2179c8..4f42174d999412102e273744fc39ff692b9a9234 100644 (file)
@@ -1078,29 +1078,82 @@ EXPORT_SYMBOL(il_get_channel_info);
  * Setting power level allows the card to go to sleep when not busy.
  *
  * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
+ * we get from mac80211.
  */
 
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct il_power_vec_entry {
-       struct il_powertable_cmd cmd;
-       u8 no_dtim;             /* number of skip dtim */
-};
+#define SLP_VEC(X0, X1, X2, X3, X4) { \
+               cpu_to_le32(X0), \
+               cpu_to_le32(X1), \
+               cpu_to_le32(X2), \
+               cpu_to_le32(X3), \
+               cpu_to_le32(X4)  \
+}
 
 static void
-il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
 {
+       const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+               SLP_VEC(2, 2, 4, 6, 0xFF),
+               SLP_VEC(2, 4, 7, 10, 10),
+               SLP_VEC(4, 7, 10, 10, 0xFF)
+       };
+       int i, dtim_period, no_dtim;
+       u32 max_sleep;
+       bool skip;
+
        memset(cmd, 0, sizeof(*cmd));
 
        if (il->power_data.pci_pm)
                cmd->flags |= IL_POWER_PCI_PM_MSK;
 
-       D_POWER("Sleep command for CAM\n");
+       /* if no Power Save, we are done */
+       if (il->power_data.ps_disabled)
+               return;
+
+       cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
+       cmd->keep_alive_seconds = 0;
+       cmd->debug_flags = 0;
+       cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
+       cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
+       cmd->keep_alive_beacons = 0;
+
+       dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
+
+       if (dtim_period <= 2) {
+               memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
+               no_dtim = 2;
+       } else if (dtim_period <= 10) {
+               memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
+               no_dtim = 2;
+       } else {
+               memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
+               no_dtim = 0;
+       }
+
+       if (dtim_period == 0) {
+               dtim_period = 1;
+               skip = false;
+       } else {
+               skip = !!no_dtim;
+       }
+
+       if (skip) {
+               __le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];
+
+               max_sleep = le32_to_cpu(tmp);
+               if (max_sleep == 0xFF)
+                       max_sleep = dtim_period * (skip + 1);
+               else if (max_sleep >  dtim_period)
+                       max_sleep = (max_sleep / dtim_period) * dtim_period;
+               cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
+       } else {
+               max_sleep = dtim_period;
+               cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
+       }
+
+       for (i = 0; i < IL_POWER_VEC_SIZE; i++)
+               if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
+                       cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
 }
 
 static int
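
The core of il_build_powertable_cmd() above is clamping the chosen sleep-interval vector so no entry exceeds the sleep allowed for the current DTIM period; when DTIM skipping is permitted, the 0xFF sentinel in the last slot stands for two DTIM periods, otherwise the cap is rounded down to a DTIM multiple. A standalone sketch of just that clamping step (flag handling omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VEC_SIZE 5	/* matches IL_POWER_VEC_SIZE */

static void clamp_sleep_vec(uint32_t vec[VEC_SIZE], uint32_t dtim_period, bool skip)
{
	uint32_t max_sleep;
	int i;

	if (skip) {
		max_sleep = vec[VEC_SIZE - 1];
		if (max_sleep == 0xFF)
			max_sleep = dtim_period * 2;
		else if (max_sleep > dtim_period)
			max_sleep = (max_sleep / dtim_period) * dtim_period;
	} else {
		max_sleep = dtim_period;
	}

	for (i = 0; i < VEC_SIZE; i++)
		if (vec[i] > max_sleep)
			vec[i] = max_sleep;
}

int main(void)
{
	uint32_t vec[VEC_SIZE] = { 2, 2, 4, 6, 0xFF };	/* shallowest table */
	int i;

	clamp_sleep_vec(vec, 2, true);	/* DTIM period 2, skipping allowed */
	for (i = 0; i < VEC_SIZE; i++)
		printf("%u ", (unsigned int)vec[i]);
	printf("\n");
	return 0;
}
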
@@ -1173,7 +1226,8 @@ il_power_update_mode(struct il_priv *il, bool force)
 {
        struct il_powertable_cmd cmd;
 
-       il_power_sleep_cam_cmd(il, &cmd);
+       il_build_powertable_cmd(il, &cmd);
+
        return il_power_set_mode(il, &cmd, force);
 }
 EXPORT_SYMBOL(il_power_update_mode);
@@ -5081,6 +5135,7 @@ set_ch_out:
        }
 
        if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+               il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
                ret = il_power_update_mode(il, false);
                if (ret)
                        D_MAC80211("Error setting sleep level\n");
index ad123d66ab6c5c13e2bd120df7b3c2b412d1400a..dfb13c70efe83ea8415f93ef2dad9c97a0cb6891 100644 (file)
@@ -1123,6 +1123,7 @@ struct il_power_mgr {
        struct il_powertable_cmd sleep_cmd_next;
        int debug_sleep_level_override;
        bool pci_pm;
+       bool ps_disabled;
 };
 
 struct il_priv {
@@ -1597,7 +1598,7 @@ struct il_mod_params {
        int disable_hw_scan;    /* def: 0 = use h/w scan */
        int num_of_queues;      /* def: HW dependent */
        int disable_11n;        /* def: 0 = 11n capabilities enabled */
-       int amsdu_size_8K;      /* def: 1 = enable 8K amsdu size */
+       int amsdu_size_8K;      /* def: 0 = disable 8K amsdu size */
        int antenna;            /* def: 0 = both antennas (use diversity) */
        int restart_fw;         /* def: 1 = restart firmware */
 };
@@ -1978,6 +1979,20 @@ void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
 u32 il_read_targ_mem(struct il_priv *il, u32 addr);
 void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
 
+static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
+{
+       /* Reclaim a command buffer only if this packet is a response
+        * to a (driver-originated) command. If the packet (e.g. Rx frame)
+        * originated from uCode, there is no command buffer to reclaim.
+        * Ucode should set SEQ_RX_FRAME bit if ucode-originated, but
+        * apparently a few don't get set; catch them here.
+        */
+       return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+              pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
+              pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
+              pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
+}
+
 static inline void
 _il_write8(struct il_priv *il, u32 ofs, u8 val)
 {
index 3eb2102ce2366e47fce1b149cc68fb14926e1e22..74b3b4de7bb7de57ef42fabfe36866eef3b05287 100644 (file)
@@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
 comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
        depends on IWLWIFI && IWLDVM=n && IWLMVM=n
 
+config IWLWIFI_BCAST_FILTERING
+       bool "Enable broadcast filtering"
+       depends on IWLMVM
+       help
+         Say Y here to enable default bcast filtering configuration.
+
+         Enabling broadcast filtering will drop any incoming wireless
+         broadcast frames, except some very specific predefined
+         patterns (e.g. incoming arp requests).
+
+         If unsure, don't enable this option, as some programs might
+         expect incoming broadcasts for their normal operations.
+
 menu "Debugging Options"
        depends on IWLWIFI
 
@@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
          Enable use of experimental ucode for testing and debugging.
 
 config IWLWIFI_DEVICE_TRACING
+
        bool "iwlwifi device access tracing"
        depends on IWLWIFI
        depends on EVENT_TRACING
index 1fa64429bcc28a9f5647549c7dbea3b7f19caeb4..3d32f4120174d9fe6d3bb0a337cd5fe97fed6b83 100644 (file)
@@ -8,7 +8,7 @@ iwlwifi-objs            += iwl-eeprom-read.o iwl-eeprom-parse.o
 iwlwifi-objs           += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
 
 iwlwifi-objs += $(iwlwifi-m)
 
index 562772d851021e390575868ece4f3fbf04068ede..c160dad03037bd01399a8747096a3653ee67cd3a 100644 (file)
@@ -109,7 +109,7 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
 
 struct iwl_ucode_capabilities;
 
-extern struct ieee80211_ops iwlagn_hw_ops;
+extern const struct ieee80211_ops iwlagn_hw_ops;
 
 static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
 {
@@ -480,7 +480,7 @@ do {                                                                        \
 } while (0)
 #endif                         /* CONFIG_IWLWIFI_DEBUG */
 
-extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
 
 static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
 {
index 7b140e487deb8da42a19d669e4428830516470c6..758c54eeb206718f8077aa2686304ea1a1e4c9bb 100644 (file)
@@ -317,7 +317,7 @@ static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
        .nrg_th_cca = 62,
 };
 
-static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
+static const struct iwl_sensitivity_ranges iwl5150_sensitivity = {
        .min_nrg_cck = 95,
        .auto_corr_min_ofdm = 90,
        .auto_corr_min_ofdm_mrc = 170,
index 73086c1629ca13be4e359badffd927a3fee43847..dd55c9cf7ba80376ef3ae507b434e79d6dd1cca4 100644 (file)
@@ -1582,7 +1582,7 @@ static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
-struct ieee80211_ops iwlagn_hw_ops = {
+const struct ieee80211_ops iwlagn_hw_ops = {
        .tx = iwlagn_mac_tx,
        .start = iwlagn_mac_start,
        .stop = iwlagn_mac_stop,
index ba1b1ea54252c8d1ce9018263c42ef88fca9d50f..6a6df71af1d7ba6e4b4dfec16a2042c6cc357de4 100644 (file)
@@ -252,13 +252,17 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
        struct iwl_priv *priv =
                container_of(work, struct iwl_priv, bt_runtime_config);
 
+       mutex_lock(&priv->mutex);
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-               return;
+               goto out;
 
        /* dont send host command if rf-kill is on */
        if (!iwl_is_ready_rf(priv))
-               return;
+               goto out;
+
        iwlagn_send_advance_bt_config(priv);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void iwl_bg_bt_full_concurrency(struct work_struct *work)
@@ -2035,7 +2039,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
        ieee80211_free_txskb(priv->hw, skb);
 }
 
-static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
 
@@ -2045,6 +2049,8 @@ static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
                clear_bit(STATUS_RF_KILL_HW, &priv->status);
 
        wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+
+       return false;
 }
 
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
index 0977d93b529d3ce11fde93ac1e1c4ff626415a13..aa773a2da4ab877f6b5876a28024a6c2e23ffa9b 100644 (file)
@@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
  * (2.4 GHz) band.
  */
 
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
        7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
 };
 
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
        {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
        {0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
        {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
        {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
        {0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
        {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
        {0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
        {0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
        {0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
 };
 
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
        {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
        {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
        {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
        {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
        {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
        {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
 };
 
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 152, 0, 211, 239, 255, 279,  290,  294,  297}, /* Norm */
        {0, 0, 0, 0, 160, 0, 219, 245, 261, 284,  294,  297,  300}, /* SGI */
        {0, 0, 0, 0, 254, 0, 443, 584, 695, 868,  984, 1030, 1070}, /* AGG */
@@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
                                      struct iwl_scale_tbl_info *tbl)
 {
        /* Used to choose among HT tables */
-       s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+       const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
 
        /* Check for invalid LQ type */
        if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
            &(lq_sta->lq_info[lq_sta->active_tbl]);
        s32 active_sr = active_tbl->win[index].success_ratio;
        s32 active_tpt = active_tbl->expected_tpt[index];
-
        /* expected "search" throughput */
-       s32 *tpt_tbl = tbl->expected_tpt;
+       const u16 *tpt_tbl = tbl->expected_tpt;
 
        s32 new_rate, high, low, start_hi;
        u16 high_low;
@@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
                              struct ieee80211_sta *sta, void *priv_sta)
 {
 }
-static struct rate_control_ops rs_ops = {
-       .module = NULL,
+
+static const struct rate_control_ops rs_ops = {
        .name = RS_NAME,
        .tx_status = rs_tx_status,
        .get_rate = rs_get_rate,
index bdd5644a400bc780a2c7fed99dca5b482a1cdcd9..f6bd25cad2036b9d4e0ecc69471e4f4a468cd8c1 100644 (file)
@@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
        u8 is_dup;      /* 1 = duplicated data streams */
        u8 action;      /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
        u8 max_search;  /* maximun number of tables we can search */
-       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
+       const u16 *expected_tpt;        /* throughput metrics; expected_tpt_G, etc. */
        u32 current_rate;  /* rate_n_flags, uCode API format */
        struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
 };
index 7a1bc1c547e17ea3e35c47b1070c174c9268c9d6..cd8377346aff0c2936a6f0ee773ae8fd33042025 100644 (file)
@@ -39,7 +39,7 @@
 
 #define IWL_CMD_ENTRY(x) [x] = #x
 
-const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
        IWL_CMD_ENTRY(REPLY_ALIVE),
        IWL_CMD_ENTRY(REPLY_ERROR),
        IWL_CMD_ENTRY(REPLY_ECHO),
index 2a59da2ff87aaf278b65983e1735cbc1eeb49d2d..003a546571d4772ba568d388acec485c1443f67f 100644 (file)
@@ -71,8 +71,8 @@
 #define IWL3160_UCODE_API_MAX  8
 
 /* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK   7
-#define IWL3160_UCODE_API_OK   7
+#define IWL7260_UCODE_API_OK   8
+#define IWL3160_UCODE_API_OK   8
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  7
@@ -95,6 +95,8 @@
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
+#define NVM_HW_SECTION_NUM_FAMILY_7000         0
+
 static const struct iwl_base_params iwl7000_base_params = {
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
        .num_of_queues = IWLAGN_NUM_QUEUES,
@@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
        .max_inst_size = IWL60_RTC_INST_SIZE,                   \
        .max_data_size = IWL60_RTC_DATA_SIZE,                   \
        .base_params = &iwl7000_base_params,                    \
-       .led_mode = IWL_LED_RF_STATE
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
 
 
 const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -131,6 +134,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
        .nvm_ver = IWL7260_NVM_VERSION,
        .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
        .host_interrupt_operation_mode = true,
+       .lp_xtal_workaround = true,
 };
 
 const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -142,6 +146,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
        .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
        .high_temp = true,
        .host_interrupt_operation_mode = true,
+       .lp_xtal_workaround = true,
 };
 
 const struct iwl_cfg iwl7260_2n_cfg = {
@@ -152,6 +157,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
        .nvm_ver = IWL7260_NVM_VERSION,
        .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
        .host_interrupt_operation_mode = true,
+       .lp_xtal_workaround = true,
 };
 
 const struct iwl_cfg iwl7260_n_cfg = {
@@ -162,6 +168,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
        .nvm_ver = IWL7260_NVM_VERSION,
        .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
        .host_interrupt_operation_mode = true,
+       .lp_xtal_workaround = true,
 };
 
 const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -194,6 +201,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
        .host_interrupt_operation_mode = true,
 };
 
+static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
+       {.pwr = 1600, .backoff = 0},
+       {.pwr = 1300, .backoff = 467},
+       {.pwr = 900,  .backoff = 1900},
+       {.pwr = 800, .backoff = 2630},
+       {.pwr = 700, .backoff = 3720},
+       {.pwr = 600, .backoff = 5550},
+       {.pwr = 500, .backoff = 9350},
+       {0},
+};
+
 const struct iwl_cfg iwl7265_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 7265",
        .fw_name_pre = IWL7265_FW_PRE,
@@ -201,6 +219,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL7265_NVM_VERSION,
        .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+       .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
 };
 
 const struct iwl_cfg iwl7265_2n_cfg = {
@@ -210,6 +229,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL7265_NVM_VERSION,
        .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+       .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
 };
 
 const struct iwl_cfg iwl7265_n_cfg = {
@@ -219,6 +239,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL7265_NVM_VERSION,
        .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+       .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
 };
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
new file mode 100644 (file)
index 0000000..f5bd82b
--- /dev/null
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL8000_UCODE_API_MAX  8
+
+/* Oldest version we won't warn about */
+#define IWL8000_UCODE_API_OK   8
+
+/* Lowest firmware API version supported */
+#define IWL8000_UCODE_API_MIN  8
+
+/* NVM versions */
+#define IWL8000_NVM_VERSION            0x0a1d
+#define IWL8000_TX_POWER_VERSION       0xffff /* meaningless */
+
+#define IWL8000_FW_PRE "iwlwifi-8000-"
+#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_8000         10
+
+static const struct iwl_base_params iwl8000_base_params = {
+       .eeprom_size = OTP_LOW_IMAGE_SIZE,
+       .num_of_queues = IWLAGN_NUM_QUEUES,
+       .pll_cfg_val = 0,
+       .shadow_ram_support = true,
+       .led_compensation = 57,
+       .wd_timeout = IWL_LONG_WD_TIMEOUT,
+       .max_event_log_size = 512,
+       .shadow_reg_enable = true,
+       .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl8000_ht_params = {
+       .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_8000                                                \
+       .ucode_api_max = IWL8000_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL8000_UCODE_API_OK,                   \
+       .ucode_api_min = IWL8000_UCODE_API_MIN,                 \
+       .device_family = IWL_DEVICE_FAMILY_8000,                \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
+       .base_params = &iwl8000_base_params,                    \
+       .led_mode = IWL_LED_RF_STATE,                           \
+       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
+
+const struct iwl_cfg iwl8260_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 8260",
+       .fw_name_pre = IWL8000_FW_PRE,
+       IWL_DEVICE_8000,
+       .ht_params = &iwl8000_ht_params,
+       .nvm_ver = IWL8000_NVM_VERSION,
+       .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl8260_n_cfg = {
+       .name = "Intel(R) Dual Band Wireless-AC 8260",
+       .fw_name_pre = IWL8000_FW_PRE,
+       IWL_DEVICE_8000,
+       .ht_params = &iwl8000_ht_params,
+       .nvm_ver = IWL8000_NVM_VERSION,
+       .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
index 1ced525157dc20b6c5ed0546a4ee762cfbcd6784..3f17dc3f2c8a9fdde83bddb254efb8cc8d33502f 100644 (file)
@@ -84,6 +84,7 @@ enum iwl_device_family {
        IWL_DEVICE_FAMILY_6050,
        IWL_DEVICE_FAMILY_6150,
        IWL_DEVICE_FAMILY_7000,
+       IWL_DEVICE_FAMILY_8000,
 };
 
 /*
@@ -192,6 +193,15 @@ struct iwl_eeprom_params {
        bool enhanced_txpower;
 };
 
+/* Tx-backoff power threshold
+ * @pwr: The power limit in mw
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_pwr_tx_backoff {
+       u32 pwr;
+       u32 backoff;
+};
+
 /**
  * struct iwl_cfg
  * @name: Offical name of the device
@@ -217,6 +227,9 @@ struct iwl_eeprom_params {
  * @high_temp: Is this NIC is designated to be in high temperature.
  * @host_interrupt_operation_mode: device needs host interrupt operation
  *     mode set
+ * @d0i3: device uses d0i3 instead of d3
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @pwr_tx_backoffs: translation table between power limits and backoffs
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -247,6 +260,10 @@ struct iwl_cfg {
        const bool internal_wimax_coex;
        const bool host_interrupt_operation_mode;
        bool high_temp;
+       bool d0i3;
+       u8   nvm_hw_section_num;
+       bool lp_xtal_workaround;
+       const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
 };
 
 /*
@@ -307,6 +324,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
 extern const struct iwl_cfg iwl7265_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2n_cfg;
 extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl8260_n_cfg;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index 9d325516c42d80111eb2336c2ffe1a4b1f2c3d99..fe129c94ae3ea52e068d5cdc7f7e15732c033d4e 100644 (file)
 /* Analog phase-lock-loop configuration  */
 #define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)
 
+/*
+ * CSR HW resources monitor registers
+ */
+#define CSR_MONITOR_CFG_REG            (CSR_BASE+0x214)
+#define CSR_MONITOR_STATUS_REG         (CSR_BASE+0x228)
+#define CSR_MONITOR_XTAL_RESOURCES     (0x00000010)
+
 /*
  * CSR Hardware Revision Workaround Register.  Indicates hardware rev;
  * "step" determines CCK backoff for txpower calculation.  Used for 4965 only.
 #define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY     (0x00400000) /* PCI_OWN_SEM */
 #define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
 #define CSR_HW_IF_CONFIG_REG_PREPARE             (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE        (0x40000000) /* PERSISTENCE */
 
 #define CSR_INT_PERIODIC_DIS                   (0x00) /* disable periodic int*/
 #define CSR_INT_PERIODIC_ENA                   (0xFF) /* 255*32 usec ~ 8 msec*/
  *         001 -- MAC power-down
  *         010 -- PHY (radio) power-down
  *         011 -- Error
+ *    10:  XTAL ON request
  *   9-6:  SYS_CONFIG
  *         Indicates current system configuration, reflecting pins on chip
  *         as forced high/low by device circuit board.
 #define CSR_GP_CNTRL_REG_FLAG_INIT_DONE              (0x00000004)
 #define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ         (0x00000008)
 #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON               (0x00000400)
 
 #define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN           (0x00000001)
 
 #define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
 
-/* SECURE boot registers */
-#define CSR_SECURE_BOOT_CONFIG_ADDR    (0x100)
-enum secure_boot_config_reg {
-       CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP  = 0x00000001,
-       CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ        = 0x00000002,
-};
-
-#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR       (0x100)
-#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR       (0x100)
-enum secure_boot_status_reg {
-       CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS          = 0x00000003,
-       CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED       = 0x00000002,
-       CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS         = 0x00000004,
-       CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL            = 0x00000008,
-       CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL       = 0x00000010,
-};
-
-#define CSR_UCODE_LOAD_STATUS_ADDR     (0x100)
-enum secure_load_status_reg {
-       CSR_CPU_STATUS_LOADING_STARTED                  = 0x00000001,
-       CSR_CPU_STATUS_LOADING_COMPLETED                = 0x00000002,
-       CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED            = 0x000000F8,
-       CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK         = 0x0000FF00,
-};
-
-#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
-#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
-
-#define CSR_SECURE_TIME_OUT    (100)
+/*
+ * SHR target access (Shared block memory space)
+ *
+ * Shared internal registers can be accessed directly from the PCI bus through
+ * the SHR arbiter without the need for the MAC HW to be powered up. This is
+ * possible due to indirect read/write via the HEEP_CTRL_WRD_PCIEX_CTRL (0xEC)
+ * and HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
+ *
+ * Use the iwl_write32()/iwl_read32() family to access these registers. The MAC
+ * HW need not be powered up, so no "grab NIC access" is required.
+ */
 
-#define FH_TCSR_0_REG0 (0x1D00)
+/*
+ * Registers for accessing shared registers (e.g. SHR_APMG_GP1,
+ * SHR_APMG_XTAL_CFG). For example, to read from the SHR_APMG_GP1 register
+ * (0x1DC), first write to the control register:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
+ * then read the value from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
+ *
+ * To write a register, first write the value to the data register
+ * HEEP_CTRL_WRD_PCIEX_DATA[31:0], and then:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
+ */
+#define HEEP_CTRL_WRD_PCIEX_CTRL_REG   (CSR_BASE+0x0ec)
+#define HEEP_CTRL_WRD_PCIEX_DATA_REG   (CSR_BASE+0x0f4)
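/*
 * Illustrative sketch (not part of the patch): reading SHR_APMG_GP1
 * (offset 0x1DC) through the indirection described above, using the
 * existing iwl_write32()/iwl_read32() CSR accessors. The real driver may
 * wrap this access differently.
 */
static u32 example_shr_read_apmg_gp1(struct iwl_trans *trans)
{
	/* select the shared register offset and request read access (2) */
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    0x1DC | (2 << 28));

	/* the value is then returned through the data register */
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}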
 
 /*
  * HBUS (Host-side Bus)
index a75aac986a23ebd80872f9164ee45d1b1e96e4bd..c8cbdbe15924a61123d76828c0d1f6c7686dcfe4 100644 (file)
@@ -126,6 +126,7 @@ do {                                                                \
 /* 0x00000F00 - 0x00000100 */
 #define IWL_DL_POWER           0x00000100
 #define IWL_DL_TEMP            0x00000200
+#define IWL_DL_RPM             0x00000400
 #define IWL_DL_SCAN            0x00000800
 /* 0x0000F000 - 0x00001000 */
 #define IWL_DL_ASSOC           0x00001000
@@ -189,5 +190,6 @@ do {                                                                \
 #define IWL_DEBUG_RADIO(p, f, a...)    IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
 #define IWL_DEBUG_POWER(p, f, a...)    IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
 #define IWL_DEBUG_11H(p, f, a...)      IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_RPM(p, f, a...)      IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
 
 #endif
index 75103554cd635061628470255dd7a6eae6e8b82f..0a3e841b44a9ebe81cbd40b0313e8cafcc355f3a 100644 (file)
@@ -128,7 +128,7 @@ struct iwl_drv {
        const struct iwl_cfg *cfg;
 
        int fw_index;                   /* firmware we're trying to load */
-       char firmware_name[25];         /* name of firmware file to load */
+       char firmware_name[32];         /* name of firmware file to load */
 
        struct completion request_firmware_complete;
 
@@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
                return -ENOENT;
        }
 
-       sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+       snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
+                name_pre, tag);
 
        IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
                       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
@@ -403,6 +404,38 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
        return 0;
 }
 
+static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
+                                  struct iwl_ucode_capabilities *capa)
+{
+       const struct iwl_ucode_api *ucode_api = (void *)data;
+       u32 api_index = le32_to_cpu(ucode_api->api_index);
+
+       if (api_index >= IWL_API_ARRAY_SIZE) {
+               IWL_ERR(drv, "api_index larger than supported by driver\n");
+               return -EINVAL;
+       }
+
+       capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+
+       return 0;
+}
+
+static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
+                                     struct iwl_ucode_capabilities *capa)
+{
+       const struct iwl_ucode_capa *ucode_capa = (void *)data;
+       u32 api_index = le32_to_cpu(ucode_capa->api_index);
+
+       if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+               IWL_ERR(drv, "api_index larger than supported by driver\n");
+               return -EINVAL;
+       }
+
+       capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+
+       return 0;
+}
+
 static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
                                    const struct firmware *ucode_raw,
                                    struct iwl_firmware_pieces *pieces)
@@ -637,6 +670,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                         */
                        capa->flags = le32_to_cpup((__le32 *)tlv_data);
                        break;
+               case IWL_UCODE_TLV_API_CHANGES_SET:
+                       if (tlv_len != sizeof(struct iwl_ucode_api))
+                               goto invalid_tlv_len;
+                       if (iwl_set_ucode_api_flags(drv, tlv_data, capa))
+                               goto tlv_error;
+                       break;
+               case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
+                       if (tlv_len != sizeof(struct iwl_ucode_capa))
+                               goto invalid_tlv_len;
+                       if (iwl_set_ucode_capabilities(drv, tlv_data, capa))
+                               goto tlv_error;
+                       break;
                case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
@@ -727,6 +772,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+                       drv->fw.valid_tx_ant = (drv->fw.phy_config &
+                                               FW_PHY_CFG_TX_CHAIN) >>
+                                               FW_PHY_CFG_TX_CHAIN_POS;
+                       drv->fw.valid_rx_ant = (drv->fw.phy_config &
+                                               FW_PHY_CFG_RX_CHAIN) >>
+                                               FW_PHY_CFG_RX_CHAIN_POS;
                        break;
                 case IWL_UCODE_TLV_SECURE_SEC_RT:
                        iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
@@ -1300,8 +1351,7 @@ MODULE_PARM_DESC(antenna_coupling,
 
 module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
 MODULE_PARM_DESC(wd_disable,
-               "Disable stuck queue watchdog timer 0=system default, "
-               "1=disable, 2=enable (default: 0)");
+               "Disable stuck queue watchdog timer 0=system default, 1=disable (default: 1)");
 
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
index 592c01e11013c3ce74264f49951c29eff84610c4..3c72cb710b0cb8dc4b4d7d7be478488acc569029 100644 (file)
 #define DRV_COPYRIGHT  "Copyright(c) 2003- 2014 Intel Corporation"
 #define DRV_AUTHOR     "<ilw@linux.intel.com>"
 
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
+#define NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
+#define NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
+#define NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x)   (x & 0xF)
+#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x)   ((x >> 4) & 0xF)
+#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x)   ((x >> 8) & 0xF)
+#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x)   ((x >> 12) & 0xFFF)
+#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
+#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
 
 /**
  * DOC: Driver system flows - drv component
index e3c7deafabe6e71abd012751a59655aa977ef980..f0548b8a64b072a1a58b80e6d6d5cc7b9d00df59 100644 (file)
@@ -81,16 +81,17 @@ struct iwl_nvm_data {
        bool sku_cap_band_24GHz_enable;
        bool sku_cap_band_52GHz_enable;
        bool sku_cap_11n_enable;
+       bool sku_cap_11ac_enable;
        bool sku_cap_amt_enable;
        bool sku_cap_ipan_enable;
 
-       u8 radio_cfg_type;
+       u16 radio_cfg_type;
        u8 radio_cfg_step;
        u8 radio_cfg_dash;
        u8 radio_cfg_pnum;
        u8 valid_tx_ant, valid_rx_ant;
 
-       u16 nvm_version;
+       u32 nvm_version;
        s8 max_tx_pwr_half_dbm;
 
        struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
index 88e2d6eb569f4f23ba7696178ffe96135dfdb508..b45e576a4b57feb01982edb99191a65a3ae38bca 100644 (file)
@@ -126,6 +126,8 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
        IWL_UCODE_TLV_NUM_OF_CPU        = 27,
        IWL_UCODE_TLV_CSCHEME           = 28,
+       IWL_UCODE_TLV_API_CHANGES_SET   = 29,
+       IWL_UCODE_TLV_ENABLED_CAPABILITIES      = 30,
 };
 
 struct iwl_ucode_tlv {
@@ -158,4 +160,19 @@ struct iwl_tlv_ucode_header {
        u8 data[0];
 };
 
+/*
+ * ucode TLVs
+ *
+ * These TLVs allow the ucode binary file to advertise extended API flags and
+ * capabilities to the driver.
+ */
+struct iwl_ucode_api {
+       __le32 api_index;
+       __le32 api_flags;
+} __packed;
+
+struct iwl_ucode_capa {
+       __le32 api_index;
+       __le32 api_capa;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */
index 5f1493c44097c5fc0543e0c886a96b9180522841..d14f19339d6140607c99d1b6660b039f9ac4aa66 100644 (file)
  * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
  * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
  *     containing CAM (Continuous Active Mode) indication.
- * @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
- *     single bound interface).
+ * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_DCM: support power save on BSS station and
+ *     P2P client interfaces simultaneously if they are in different bindings.
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
  */
 enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_PAN                 = BIT(0),
@@ -116,9 +118,27 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_SCHED_SCAN          = BIT(17),
        IWL_UCODE_TLV_FLAGS_STA_KEY_CMD         = BIT(19),
        IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD       = BIT(20),
-       IWL_UCODE_TLV_FLAGS_P2P_PS              = BIT(21),
+       IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM      = BIT(22),
        IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT       = BIT(24),
        IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD        = BIT(26),
+       IWL_UCODE_TLV_FLAGS_BCAST_FILTERING     = BIT(29),
+       IWL_UCODE_TLV_FLAGS_GO_UAPSD            = BIT(30),
+};
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
+ */
+enum iwl_ucode_tlv_api {
+       IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
+};
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ */
+enum iwl_ucode_tlv_capa {
+       IWL_UCODE_TLV_CAPA_D0I3_SUPPORT         = BIT(0),
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -160,13 +180,16 @@ enum iwl_ucode_sec {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 6
-#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU  (IWL_UCODE_SECTION_MAX/2)
+#define IWL_UCODE_SECTION_MAX 12
+#define IWL_API_ARRAY_SIZE     1
+#define IWL_CAPABILITIES_ARRAY_SIZE    1
 
 struct iwl_ucode_capabilities {
        u32 max_probe_length;
        u32 standard_phy_calibration_size;
        u32 flags;
+       u32 api[IWL_API_ARRAY_SIZE];
+       u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
 };
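/*
 * Illustrative sketch (not part of the patch): once the API and capability
 * TLVs have been parsed into api[]/capa[], a feature bit can be tested as
 * below. Index 0 is assumed since both arrays currently hold a single
 * 32-bit word.
 */
static inline bool example_fw_has_capa(const struct iwl_ucode_capabilities *capa,
				       u32 flag)
{
	return capa->capa[0] & flag;	/* e.g. IWL_UCODE_TLV_CAPA_D0I3_SUPPORT */
}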
 
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
@@ -285,22 +308,12 @@ struct iwl_fw {
 
        struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
        u32 phy_config;
+       u8 valid_tx_ant;
+       u8 valid_rx_ant;
 
        bool mvm_fw;
 
        struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
 };
 
-static inline u8 iwl_fw_valid_tx_ant(const struct iwl_fw *fw)
-{
-       return (fw->phy_config & FW_PHY_CFG_TX_CHAIN) >>
-               FW_PHY_CFG_TX_CHAIN_POS;
-}
-
-static inline u8 iwl_fw_valid_rx_ant(const struct iwl_fw *fw)
-{
-       return (fw->phy_config & FW_PHY_CFG_RX_CHAIN) >>
-               FW_PHY_CFG_RX_CHAIN_POS;
-}
-
 #endif  /* __iwl_fw_h__ */
index f98175a0d35b7e508c0e71b7fa10c8c0fc7c22e1..44cc3cf45762d1465e47627b70eea990f257e3a3 100644 (file)
@@ -93,14 +93,14 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
 }
 IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
 
-static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
+u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
 {
        u32 val = iwl_trans_read_prph(trans, ofs);
        trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
        return val;
 }
 
-static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 {
        trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
        iwl_trans_write_prph(trans, ofs, val);
@@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 }
 IWL_EXPORT_SYMBOL(iwl_write_prph);
 
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+                     u32 bits, u32 mask, int timeout)
+{
+       int t = 0;
+
+       do {
+               if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
+                       return t;
+               udelay(IWL_POLL_INTERVAL);
+               t += IWL_POLL_INTERVAL;
+       } while (t < timeout);
+
+       return -ETIMEDOUT;
+}
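/*
 * Illustrative usage (not part of the patch): wait up to 5000 usec for bit 0
 * of a periphery register to assert. EXAMPLE_PRPH_REG is a placeholder
 * address, not a real register; iwl_poll_prph_bit() returns the elapsed time
 * in usec, or -ETIMEDOUT on failure.
 */
#define EXAMPLE_PRPH_REG	0x00a00000

static int example_wait_prph_ready(struct iwl_trans *trans)
{
	return iwl_poll_prph_bit(trans, EXAMPLE_PRPH_REG,
				 BIT(0), BIT(0), 5000);
}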
+
 void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 {
        unsigned long flags;
index c339c1bed08056fe217c54c0825404d9618b2768..665ddd9dbbc48ff5dec47f246b8efb0639fb5c86 100644 (file)
@@ -70,8 +70,12 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
 void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
 
 
+u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
 u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
+void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
 void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+                     u32 bits, u32 mask, int timeout);
 void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
                            u32 bits, u32 mask);
index b29075c3da8e2e65e8c621c89fda3b5f9b586e79..d994317db85b72cbfc11f602e2d414efc54233b1 100644 (file)
@@ -96,7 +96,7 @@ enum iwl_disable_11n {
  *     use IWL_[DIS,EN]ABLE_HT_* constants
  * @amsdu_size_8K: enable 8K amsdu size, default = 0
  * @restart_fw: restart firmware, default = 1
- * @wd_disable: enable stuck queue check, default = 0
+ * @wd_disable: disable stuck queue check, default = 1
  * @bt_coex_active: enable bt coex, default = true
  * @led_mode: system default, default = 0
  * @power_save: disable power save, default = false
index 725e954d8475284a0f2b6332d3627c638dd6d01a..6be30c69850619f81c2468febb3634fd5cf390fd 100644 (file)
@@ -71,7 +71,7 @@ enum wkp_nvm_offsets {
        /* NVM HW-Section offset (in words) definitions */
        HW_ADDR = 0x15,
 
-/* NVM SW-Section offset (in words) definitions */
+       /* NVM SW-Section offset (in words) definitions */
        NVM_SW_SECTION = 0x1C0,
        NVM_VERSION = 0,
        RADIO_CFG = 1,
@@ -79,11 +79,32 @@ enum wkp_nvm_offsets {
        N_HW_ADDRS = 3,
        NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
 
-/* NVM calibration section offset (in words) definitions */
+       /* NVM calibration section offset (in words) definitions */
        NVM_CALIB_SECTION = 0x2B8,
        XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
 };
 
+enum family_8000_nvm_offsets {
+       /* NVM HW-Section offset (in words) definitions */
+       HW_ADDR0_FAMILY_8000 = 0x12,
+       HW_ADDR1_FAMILY_8000 = 0x16,
+       MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
+
+       /* NVM SW-Section offset (in words) definitions */
+       NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
+       NVM_VERSION_FAMILY_8000 = 0,
+       RADIO_CFG_FAMILY_8000 = 2,
+       SKU_FAMILY_8000 = 4,
+       N_HW_ADDRS_FAMILY_8000 = 5,
+
+       /* NVM REGULATORY -Section offset (in words) definitions */
+       NVM_CHANNELS_FAMILY_8000 = 0,
+
+       /* NVM calibration section offset (in words) definitions */
+       NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
+       XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
+};
+
 /* SKU Capabilities (actual values from NVM definition) */
 enum nvm_sku_bits {
        NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
@@ -92,14 +113,6 @@ enum nvm_sku_bits {
        NVM_SKU_CAP_11AC_ENABLE = BIT(3),
 };
 
-/* radio config bits (actual values from NVM definition) */
-#define NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
-#define NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
-#define NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
-#define NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
-#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
-#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
 /*
  * These are the channel numbers in the order that they are stored in the NVM
  */
@@ -112,7 +125,17 @@ static const u8 iwl_nvm_channels[] = {
        149, 153, 157, 161, 165
 };
 
+static const u8 iwl_nvm_channels_family_8000[] = {
+       /* 2.4 GHz */
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+       /* 5 GHz */
+       36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+       96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+       149, 153, 157, 161, 165, 169, 173, 177, 181
+};
+
 #define IWL_NUM_CHANNELS       ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NUM_CHANNELS_FAMILY_8000   ARRAY_SIZE(iwl_nvm_channels_family_8000)
 #define NUM_2GHZ_CHANNELS      14
 #define FIRST_2GHZ_HT_MINUS    5
 #define LAST_2GHZ_HT_PLUS      9
@@ -179,8 +202,18 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        struct ieee80211_channel *channel;
        u16 ch_flags;
        bool is_5ghz;
+       int num_of_ch;
+       const u8 *nvm_chan;
+
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               num_of_ch = IWL_NUM_CHANNELS;
+               nvm_chan = &iwl_nvm_channels[0];
+       } else {
+               num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
+               nvm_chan = &iwl_nvm_channels_family_8000[0];
+       }
 
-       for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
+       for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
 
                if (ch_idx >= NUM_2GHZ_CHANNELS &&
@@ -190,7 +223,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                if (!(ch_flags & NVM_CHANNEL_VALID)) {
                        IWL_DEBUG_EEPROM(dev,
                                         "Ch. %d Flags %x [%sGHz] - No traffic\n",
-                                        iwl_nvm_channels[ch_idx],
+                                        nvm_chan[ch_idx],
                                         ch_flags,
                                         (ch_idx >= NUM_2GHZ_CHANNELS) ?
                                         "5.2" : "2.4");
@@ -200,7 +233,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                channel = &data->channels[n_channels];
                n_channels++;
 
-               channel->hw_value = iwl_nvm_channels[ch_idx];
+               channel->hw_value = nvm_chan[ch_idx];
                channel->band = (ch_idx < NUM_2GHZ_CHANNELS) ?
                                IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
                channel->center_freq =
@@ -211,11 +244,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
                channel->flags = IEEE80211_CHAN_NO_HT40;
                if (ch_idx < NUM_2GHZ_CHANNELS &&
                    (ch_flags & NVM_CHANNEL_40MHZ)) {
-                       if (iwl_nvm_channels[ch_idx] <= LAST_2GHZ_HT_PLUS)
+                       if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
-                       if (iwl_nvm_channels[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+                       if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
-               } else if (iwl_nvm_channels[ch_idx] <= LAST_5GHZ_HT &&
+               } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT &&
                           (ch_flags & NVM_CHANNEL_40MHZ)) {
                        if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
                                channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
@@ -266,9 +299,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 
 static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
                                  struct iwl_nvm_data *data,
-                                 struct ieee80211_sta_vht_cap *vht_cap)
+                                 struct ieee80211_sta_vht_cap *vht_cap,
+                                 u8 tx_chains, u8 rx_chains)
 {
-       int num_ants = num_of_ant(data->valid_rx_ant);
+       int num_rx_ants = num_of_ant(rx_chains);
+       int num_tx_ants = num_of_ant(tx_chains);
 
        vht_cap->vht_supported = true;
 
@@ -278,8 +313,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
                       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
                       7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
 
-       if (num_ants > 1)
+       if (num_tx_ants > 1)
                vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+       else
+               vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
 
        if (iwlwifi_mod_params.amsdu_size_8K)
                vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
@@ -294,10 +331,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
                            IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
                            IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
 
-       if (num_ants == 1 ||
-           cfg->rx_with_siso_diversity) {
-               vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
-                               IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+       if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
+               vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
                /* this works because NOT_SUPPORTED == 3 */
                vht_cap->vht_mcs.rx_mcs_map |=
                        cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
@@ -307,14 +342,23 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
 }
 
 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
-                           struct iwl_nvm_data *data, const __le16 *nvm_sw,
-                           bool enable_vht, u8 tx_chains, u8 rx_chains)
+                           struct iwl_nvm_data *data,
+                           const __le16 *ch_section, bool enable_vht,
+                           u8 tx_chains, u8 rx_chains)
 {
-       int n_channels = iwl_init_channel_map(dev, cfg, data,
-                       &nvm_sw[NVM_CHANNELS]);
+       int n_channels;
        int n_used = 0;
        struct ieee80211_supported_band *sband;
 
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               n_channels = iwl_init_channel_map(
+                               dev, cfg, data,
+                               &ch_section[NVM_CHANNELS]);
+       else
+               n_channels = iwl_init_channel_map(
+                               dev, cfg, data,
+                               &ch_section[NVM_CHANNELS_FAMILY_8000]);
+
        sband = &data->bands[IEEE80211_BAND_2GHZ];
        sband->band = IEEE80211_BAND_2GHZ;
        sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -333,80 +377,160 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
        iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
                             tx_chains, rx_chains);
        if (enable_vht)
-               iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
+               iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
+                                     tx_chains, rx_chains);
 
        if (n_channels != n_used)
                IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
                            n_used, n_channels);
 }
 
+static int iwl_get_sku(const struct iwl_cfg *cfg,
+                      const __le16 *nvm_sw)
+{
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               return le16_to_cpup(nvm_sw + SKU);
+       else
+               return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000));
+}
+
+static int iwl_get_nvm_version(const struct iwl_cfg *cfg,
+                              const __le16 *nvm_sw)
+{
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               return le16_to_cpup(nvm_sw + NVM_VERSION);
+       else
+               return le32_to_cpup((__le32 *)(nvm_sw +
+                                              NVM_VERSION_FAMILY_8000));
+}
+
+static int iwl_get_radio_cfg(const struct iwl_cfg *cfg,
+                            const __le16 *nvm_sw)
+{
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               return le16_to_cpup(nvm_sw + RADIO_CFG);
+       else
+               return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+}
+
+#define N_HW_ADDRS_MASK_FAMILY_8000    0xF
+static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg,
+                             const __le16 *nvm_sw)
+{
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               return le16_to_cpup(nvm_sw + N_HW_ADDRS);
+       else
+               return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000))
+                      & N_HW_ADDRS_MASK_FAMILY_8000;
+}
+
+static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
+                             struct iwl_nvm_data *data,
+                             u32 radio_cfg)
+{
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+               data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+               data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+               data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+               return;
+       }
+
+       /* set the radio configuration for family 8000 */
+       data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
+       data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
+       data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
+       data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
+}
+
+static void iwl_set_hw_address(const struct iwl_cfg *cfg,
+                              struct iwl_nvm_data *data,
+                              const __le16 *nvm_sec)
+{
+       u8 hw_addr[ETH_ALEN];
+
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               memcpy(hw_addr, nvm_sec + HW_ADDR, ETH_ALEN);
+       else
+               memcpy(hw_addr, nvm_sec + MAC_ADDRESS_OVERRIDE_FAMILY_8000,
+                      ETH_ALEN);
+
+       /* The byte order is little endian 16 bit, meaning 214365 */
+       data->hw_addr[0] = hw_addr[1];
+       data->hw_addr[1] = hw_addr[0];
+       data->hw_addr[2] = hw_addr[3];
+       data->hw_addr[3] = hw_addr[2];
+       data->hw_addr[4] = hw_addr[5];
+       data->hw_addr[5] = hw_addr[4];
+}
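/*
 * Illustrative note (not part of the patch): with NVM bytes
 * 00 11 22 33 44 55, the swap above yields the MAC address 11:00:33:22:55:44,
 * i.e. the "214365" ordering mentioned in the comment.
 */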
+
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
-                  const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains)
+                  const __le16 *nvm_calib, const __le16 *regulatory,
+                  const __le16 *mac_override, u8 tx_chains, u8 rx_chains)
 {
        struct iwl_nvm_data *data;
-       u8 hw_addr[ETH_ALEN];
-       u16 radio_cfg, sku;
-
-       data = kzalloc(sizeof(*data) +
-                      sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
-                      GFP_KERNEL);
+       u32 sku;
+       u32 radio_cfg;
+
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               data = kzalloc(sizeof(*data) +
+                              sizeof(struct ieee80211_channel) *
+                              IWL_NUM_CHANNELS,
+                              GFP_KERNEL);
+       else
+               data = kzalloc(sizeof(*data) +
+                              sizeof(struct ieee80211_channel) *
+                              IWL_NUM_CHANNELS_FAMILY_8000,
+                              GFP_KERNEL);
        if (!data)
                return NULL;
 
-       data->nvm_version = le16_to_cpup(nvm_sw + NVM_VERSION);
+       data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
 
-       radio_cfg = le16_to_cpup(nvm_sw + RADIO_CFG);
-       data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
-       data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
-       data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
-       data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
-       data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
-       data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+       radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw);
+       iwl_set_radio_cfg(cfg, data, radio_cfg);
 
-       sku = le16_to_cpup(nvm_sw + SKU);
+       sku = iwl_get_sku(cfg, nvm_sw);
        data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
        data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
        data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+       data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE;
        if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
                data->sku_cap_11n_enable = false;
 
-       /* check overrides (some devices have wrong NVM) */
-       if (cfg->valid_tx_ant)
-               data->valid_tx_ant = cfg->valid_tx_ant;
-       if (cfg->valid_rx_ant)
-               data->valid_rx_ant = cfg->valid_rx_ant;
+       data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
-       if (!data->valid_tx_ant || !data->valid_rx_ant) {
-               IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
-                           data->valid_tx_ant, data->valid_rx_ant);
-               kfree(data);
-               return NULL;
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               /* Checking for required sections */
+               if (!nvm_calib) {
+                       IWL_ERR_DEV(dev,
+                                   "Can't parse empty Calib NVM sections\n");
+                       kfree(data);
+                       return NULL;
+               }
+               /* in family 8000 Xtal calibration values moved to OTP */
+               data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+               data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
        }
 
-       data->n_hw_addrs = le16_to_cpup(nvm_sw + N_HW_ADDRS);
+       if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               iwl_set_hw_address(cfg, data, nvm_hw);
 
-       data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
-       data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+               iwl_init_sbands(dev, cfg, data, nvm_sw,
+                               sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
+                               rx_chains);
+       } else {
+               /* MAC address in family 8000 */
+               iwl_set_hw_address(cfg, data, mac_override);
 
-       /* The byte order is little endian 16 bit, meaning 214365 */
-       memcpy(hw_addr, nvm_hw + HW_ADDR, ETH_ALEN);
-       data->hw_addr[0] = hw_addr[1];
-       data->hw_addr[1] = hw_addr[0];
-       data->hw_addr[2] = hw_addr[3];
-       data->hw_addr[3] = hw_addr[2];
-       data->hw_addr[4] = hw_addr[5];
-       data->hw_addr[5] = hw_addr[4];
-
-       iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
-                       tx_chains, rx_chains);
+               iwl_init_sbands(dev, cfg, data, regulatory,
+                               sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains,
+                               rx_chains);
+       }
 
-       data->calib_version = 255;   /* TODO:
-                                       this value will prevent some checks from
-                                       failing, we need to check if this
-                                       field is still needed, and if it does,
-                                       where is it in the NVM*/
+       data->calib_version = 255;
 
        return data;
 }
index 0c4399aba8c6bfcbbee990f964296543563dfbe1..c9c45a39d212c2516433bdce81c74373460383ab 100644 (file)
@@ -75,6 +75,7 @@
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_hw, const __le16 *nvm_sw,
-                  const __le16 *nvm_calib, u8 tx_chains, u8 rx_chains);
+                  const __le16 *nvm_calib, const __le16 *regulatory,
+                  const __le16 *mac_override, u8 tx_chains, u8 rx_chains);
 
 #endif /* __iwl_nvm_parse_h__ */
index b5be51f3cd3d15c669e92e9de2ef95749cdb33aa..ea29504ac61704c39c24a117dec0a5d92aa58376 100644 (file)
@@ -119,7 +119,8 @@ struct iwl_cfg;
  * @queue_not_full: notifies that a HW queue is not full any more.
  *     Must be atomic and called with BH disabled.
  * @hw_rf_kill: notifies of a change in the HW RF kill switch. True means that
- *     the radio is killed. May sleep.
+ *     the radio is killed. Return %true if the device should be stopped by
+ *     the transport immediately after the call. May sleep.
  * @free_skb: allows the transport layer to free skbs that haven't been
  *     reclaimed by the op_mode. This can happen when the driver is freed and
  *     there are Tx packets pending in the transport layer.
@@ -131,6 +132,8 @@ struct iwl_cfg;
  * @nic_config: configure NIC, called before firmware is started.
  *     May sleep
  * @wimax_active: invoked when WiMax becomes active. May sleep
+ * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @exit_d0i3: configure the fw to exit d0i3. May sleep.
  */
 struct iwl_op_mode_ops {
        struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -142,12 +145,14 @@ struct iwl_op_mode_ops {
                  struct iwl_device_cmd *cmd);
        void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
        void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
-       void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
+       bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
        void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
        void (*nic_error)(struct iwl_op_mode *op_mode);
        void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
        void (*nic_config)(struct iwl_op_mode *op_mode);
        void (*wimax_active)(struct iwl_op_mode *op_mode);
+       int (*enter_d0i3)(struct iwl_op_mode *op_mode);
+       int (*exit_d0i3)(struct iwl_op_mode *op_mode);
 };
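/*
 * Illustrative sketch (not part of the patch): with the new bool return
 * value, an op_mode's hw_rf_kill handler can ask the transport to stop the
 * device right after the notification, e.g. whenever the RF kill switch is
 * asserted.
 */
static bool example_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	/* returning true tells the transport to stop the device */
	return state;
}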
 
 int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -155,7 +160,7 @@ void iwl_opmode_deregister(const char *name);
 
 /**
  * struct iwl_op_mode - operational mode
- * @ops - pointer to its own ops
+ * @ops: pointer to its own ops
  *
  * This holds an implementation of the mac80211 / fw API.
  */
@@ -191,11 +196,11 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
        op_mode->ops->queue_not_full(op_mode, queue);
 }
 
-static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
-                                         bool state)
+static inline bool __must_check
+iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
 {
        might_sleep();
-       op_mode->ops->hw_rf_kill(op_mode, state);
+       return op_mode->ops->hw_rf_kill(op_mode, state);
 }
 
 static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
@@ -226,4 +231,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
        op_mode->ops->wimax_active(op_mode);
 }
 
+static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+       might_sleep();
+
+       if (!op_mode->ops->enter_d0i3)
+               return 0;
+       return op_mode->ops->enter_d0i3(op_mode);
+}
+
+static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+       might_sleep();
+
+       if (!op_mode->ops->exit_d0i3)
+               return 0;
+       return op_mode->ops->exit_d0i3(op_mode);
+}
+
 #endif /* __iwl_op_mode_h__ */
index fa77d63a277a393e913e154a1323237801496cee..b761ac4822a35b1e6a8b59f952655d39f1f0cb82 100644 (file)
@@ -72,7 +72,7 @@
 #include "iwl-trans.h"
 
 #define CHANNEL_NUM_SIZE       4       /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_PAPD_CH_GROUPS 7
 #define IWL_NUM_TXP_CH_GROUPS  9
 
 struct iwl_phy_db_entry {
@@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
                if (!entry)
                        return -EINVAL;
 
-               if (WARN_ON_ONCE(!entry->size))
+               if (!entry->size)
                        continue;
 
                /* Send the requested PHY DB section */
index 100bd0d79681a81616109f433e7f6f815d0b575d..5f657c501406cc995f7f8c065f26d9983ba43ffe 100644 (file)
@@ -95,7 +95,8 @@
 #define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK        (0x000001E0) /* bit 8:5 */
 #define APMG_SVR_DIGITAL_VOLTAGE_1_32          (0x00000060)
 
-#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
+#define APMG_PCIDEV_STT_VAL_PERSIST_DIS        (0x00000200)
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
 
 #define APMG_RTC_INT_STT_RFKILL                (0x10000000)
 
 /* Device NMI register */
 #define DEVICE_SET_NMI_REG 0x00a01c30
 
+/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
+#define SHR_BASE       0x00a10000
+
+/* Shared GP1 register */
+#define SHR_APMG_GP1_REG               0x01dc
+#define SHR_APMG_GP1_REG_PRPH          (SHR_BASE + SHR_APMG_GP1_REG)
+#define SHR_APMG_GP1_WF_XTAL_LP_EN     0x00000004
+#define SHR_APMG_GP1_CHICKEN_BIT_SELECT        0x80000000
+
+/* Shared DL_CFG register */
+#define SHR_APMG_DL_CFG_REG                    0x01c4
+#define SHR_APMG_DL_CFG_REG_PRPH               (SHR_BASE + SHR_APMG_DL_CFG_REG)
+#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK  0x000000c0
+#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
+#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP      0x00000100
+
+/* Shared APMG_XTAL_CFG register */
+#define SHR_APMG_XTAL_CFG_REG          0x1c0
+#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ  0x80000000
+
+/*
+ * Device reset for family 8000
+ * Write to bit 24 in order to reset the CPU.
+ */
+#define RELEASE_CPU_RESET              (0x300C)
+#define RELEASE_CPU_RESET_BIT          BIT(24)
+
 /*****************************************************************************
  *                        7000/3000 series SHR DTS addresses                 *
  *****************************************************************************/
@@ -281,4 +309,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
 #define OSC_CLK                                (0xa04068)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
 
+/* SECURE boot registers */
+#define LMPM_SECURE_BOOT_CONFIG_ADDR   (0x100)
+enum secure_boot_config_reg {
+       LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+       LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ       = 0x00000002,
+};
+
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR      (0x1E30)
+#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR      (0x1E34)
+enum secure_boot_status_reg {
+       LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS         = 0x00000001,
+       LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED      = 0x00000002,
+       LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS        = 0x00000004,
+       LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL           = 0x00000008,
+       LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL      = 0x00000010,
+       LMPM_SECURE_BOOT_STATUS_SUCCESS                 = 0x00000003,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR     (0x1E70)
+enum secure_load_status_reg {
+       LMPM_CPU_UCODE_LOADING_STARTED                  = 0x00000001,
+       LMPM_CPU_HDRS_LOADING_COMPLETED                 = 0x00000003,
+       LMPM_CPU_UCODE_LOADING_COMPLETED                = 0x00000007,
+       LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED           = 0x000000F8,
+       LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK        = 0x0000FF00,
+};
+
+#define LMPM_SECURE_INSPECTOR_CODE_ADDR        (0x1E38)
+#define LMPM_SECURE_INSPECTOR_DATA_ADDR        (0x1E3C)
+#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR   (0x1E78)
+#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR   (0x1E7C)
+
+#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE   (0x400000)
+#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE   (0x402000)
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE         (0x420000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE         (0x420400)
+
+#define LMPM_SECURE_TIME_OUT   (100)
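/*
 * Illustrative sketch (not part of the patch): polling the CPU1 secure boot
 * status until verification completes, using the constants above and the
 * existing iwl_read_prph() accessor. The actual load sequence lives in the
 * transport code and may differ; error codes here are placeholders.
 */
static int example_wait_secure_boot(struct iwl_trans *trans)
{
	int t;

	for (t = 0; t < LMPM_SECURE_TIME_OUT; t++) {
		u32 status = iwl_read_prph(trans,
					   LMPM_SECURE_BOOT_CPU1_STATUS_ADDR);

		if (status & LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED)
			return (status &
				LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS) ?
			       0 : -EIO;
		udelay(10);
	}

	return -ETIMEDOUT;
}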
+
 #endif                         /* __iwl_prph_h__ */
index 1f065cf4a4baeb09fc4c4288be705dbe0772fe64..8cdb0dd618a6fdfcc8d57095e41974e6e22984ab 100644 (file)
@@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_ASYNC: Return right away and don't wait for the response
  * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
  *     response. The caller needs to call iwl_free_resp when done.
+ * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
+ *     command queue, but after other high priority commands. Valid only
+ *     with CMD_ASYNC.
+ * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
+ * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
+ * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
+ *     (i.e. mark it as non-idle).
  */
 enum CMD_MODE {
        CMD_SYNC                = 0,
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
        CMD_SEND_IN_RFKILL      = BIT(2),
+       CMD_HIGH_PRIO           = BIT(3),
+       CMD_SEND_IN_IDLE        = BIT(4),
+       CMD_MAKE_TRANS_IDLE     = BIT(5),
+       CMD_WAKE_UP_TRANS       = BIT(6),
 };
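/*
 * Illustrative sketch (not part of the patch): queueing a high-priority
 * command while the transport is idle (D0i3) using the new flags. The
 * command id 0x77 and the empty payload are placeholders.
 */
static int example_send_in_idle(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = 0x77,
		.flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE,
	};

	return iwl_trans_send_cmd(trans, &cmd);
}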
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -335,6 +346,9 @@ enum iwl_d3_status {
  * @STATUS_INT_ENABLED: interrupts are enabled
  * @STATUS_RFKILL: the HW RFkill switch is in KILL position
  * @STATUS_FW_ERROR: the fw is in error state
+ * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
+ *     are sent
+ * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
  */
 enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
@@ -343,6 +357,8 @@ enum iwl_trans_status {
        STATUS_INT_ENABLED,
        STATUS_RFKILL,
        STATUS_FW_ERROR,
+       STATUS_TRANS_GOING_IDLE,
+       STATUS_TRANS_IDLE,
 };
 
 /**
@@ -377,7 +393,7 @@ struct iwl_trans_config {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        unsigned int queue_watchdog_timeout;
-       const char **command_names;
+       const char *const *command_names;
 };
 
 struct iwl_trans;
@@ -443,6 +459,11 @@ struct iwl_trans;
  * @release_nic_access: let the NIC go to sleep. The "flags" parameter
  *     must be the same one that was sent before to the grab_nic_access.
  * @set_bits_mask - set SRAM register according to value and mask.
+ * @ref: grab a reference to the transport/FW layers, disallowing
+ *     certain low power states
+ * @unref: release a reference previously taken with @ref. Note that
+ *     initially the reference count is 1, making an initial @unref
+ *     necessary to allow low power states.
  */
 struct iwl_trans_ops {
 
@@ -489,6 +510,8 @@ struct iwl_trans_ops {
                                   unsigned long *flags);
        void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
                              u32 value);
+       void (*ref)(struct iwl_trans *trans);
+       void (*unref)(struct iwl_trans *trans);
 };
 
 /**
@@ -523,6 +546,7 @@ enum iwl_trans_state {
  *     starting the firmware, used for tracing
  * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
  *     start of the 802.11 header in the @rx_mpdu_cmd
+ * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
@@ -551,6 +575,8 @@ struct iwl_trans {
        struct lockdep_map sync_cmd_lockdep_map;
 #endif
 
+       u64 dflt_pwr_limit;
+
        /* pointer to trans specific struct */
        /* Ensure that this pointer is always aligned to the size of a pointer */
        char trans_specific[0] __aligned(sizeof(void *));
@@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
        return trans->ops->d3_resume(trans, status, test);
 }
 
+static inline void iwl_trans_ref(struct iwl_trans *trans)
+{
+       if (trans->ops->ref)
+               trans->ops->ref(trans);
+}
+
+static inline void iwl_trans_unref(struct iwl_trans *trans)
+{
+       if (trans->ops->unref)
+               trans->ops->unref(trans);
+}
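/*
 * Illustrative sketch (not part of the patch): holding a transport reference
 * across work that must keep the device out of low power states, then
 * releasing it when done.
 */
static void example_do_work_awake(struct iwl_trans *trans)
{
	iwl_trans_ref(trans);		/* disallow low power states */
	/* ... time-critical work with the device ... */
	iwl_trans_unref(trans);		/* allow low power states again */
}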
+
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
                                     struct iwl_host_cmd *cmd)
 {
index f98ec2b238989da818cb1b4af67f6c6a319c8de4..ccdd3b7c4cce38fb10caf66326e7c439f0c079ee 100644 (file)
@@ -2,8 +2,8 @@ obj-$(CONFIG_IWLMVM)   += iwlmvm.o
 iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
 iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
 iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o power_legacy.o bt-coex.o
-iwlmvm-y += led.o tt.o
+iwlmvm-y += power.o coex.o
+iwlmvm-y += led.o tt.o offloading.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
deleted file mode 100644 (file)
index 18a895a..0000000
+++ /dev/null
@@ -1,964 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "fw-api-bt-coex.h"
-#include "iwl-modparams.h"
-#include "mvm.h"
-#include "iwl-debug.h"
-
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)                 \
-       [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |    \
-                  ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
-                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
-                      BT_COEX_PRIO_TBL_PRIO_LOW, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
-                      BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
-                      BT_COEX_PRIO_TBL_DISABLED, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
-       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
-                      BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
-       0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD    (-62)
-#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD   (-65)
-#define BT_ANTENNA_COUPLING_THRESHOLD          (30)
-
-int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return 0;
-
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
-                                   sizeof(struct iwl_bt_coex_prio_tbl_cmd),
-                                   &iwl_bt_prio_tbl);
-}
-
-const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
-       [BT_KILL_MSK_DEFAULT] = 0xffff0000,
-       [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
-       [BT_KILL_MSK_REDUCED_TXPOW] = 0,
-};
-
-const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
-       [BT_KILL_MSK_DEFAULT] = 0xffff0000,
-       [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
-       [BT_KILL_MSK_REDUCED_TXPOW] = 0,
-};
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
-       cpu_to_le32(0xf0f0f0f0),
-       cpu_to_le32(0xc0c0c0c0),
-       cpu_to_le32(0xfcfcfcfc),
-       cpu_to_le32(0xff00ff00),
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x40000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x44000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0xf0005000),
-       },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
-       {
-               /* Tight */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaeaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xc0004000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Loose */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0x00000000),
-               cpu_to_le32(0xf0005000),
-               cpu_to_le32(0xf0005000),
-       },
-       {
-               /* Tx Tx disabled */
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xaaaaaaaa),
-               cpu_to_le32(0xcc00ff28),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xcc00aaaa),
-               cpu_to_le32(0x0000aaaa),
-               cpu_to_le32(0xC0004000),
-               cpu_to_le32(0xC0004000),
-               cpu_to_le32(0xF0005000),
-               cpu_to_le32(0xF0005000),
-       },
-};
-
-/* 20MHz / 40MHz below / 40MHz above */
-static const __le64 iwl_ci_mask[][3] = {
-       /* dummy entry for channel 0 */
-       {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
-       {
-               cpu_to_le64(0x0000001FFFULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x00007FFFFFULL),
-       },
-       {
-               cpu_to_le64(0x000000FFFFULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x0003FFFFFFULL),
-       },
-       {
-               cpu_to_le64(0x000003FFFCULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x000FFFFFFCULL),
-       },
-       {
-               cpu_to_le64(0x00001FFFE0ULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x007FFFFFE0ULL),
-       },
-       {
-               cpu_to_le64(0x00007FFF80ULL),
-               cpu_to_le64(0x00007FFFFFULL),
-               cpu_to_le64(0x01FFFFFF80ULL),
-       },
-       {
-               cpu_to_le64(0x0003FFFC00ULL),
-               cpu_to_le64(0x0003FFFFFFULL),
-               cpu_to_le64(0x0FFFFFFC00ULL),
-       },
-       {
-               cpu_to_le64(0x000FFFF000ULL),
-               cpu_to_le64(0x000FFFFFFCULL),
-               cpu_to_le64(0x3FFFFFF000ULL),
-       },
-       {
-               cpu_to_le64(0x007FFF8000ULL),
-               cpu_to_le64(0x007FFFFFE0ULL),
-               cpu_to_le64(0xFFFFFF8000ULL),
-       },
-       {
-               cpu_to_le64(0x01FFFE0000ULL),
-               cpu_to_le64(0x01FFFFFF80ULL),
-               cpu_to_le64(0xFFFFFE0000ULL),
-       },
-       {
-               cpu_to_le64(0x0FFFF00000ULL),
-               cpu_to_le64(0x0FFFFFFC00ULL),
-               cpu_to_le64(0x0ULL),
-       },
-       {
-               cpu_to_le64(0x3FFFC00000ULL),
-               cpu_to_le64(0x3FFFFFF000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFFE000000ULL),
-               cpu_to_le64(0xFFFFFF8000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFF8000000ULL),
-               cpu_to_le64(0xFFFFFE0000ULL),
-               cpu_to_le64(0x0)
-       },
-       {
-               cpu_to_le64(0xFFC0000000ULL),
-               cpu_to_le64(0x0ULL),
-               cpu_to_le64(0x0ULL)
-       },
-};
-
-static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
-       cpu_to_le32(0x22002200),
-       cpu_to_le32(0x33113311),
-};
-
-static enum iwl_bt_coex_lut_type
-iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
-{
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum iwl_bt_coex_lut_type ret;
-       u16 phy_ctx_id;
-
-       /*
-        * Checking that we hold mvm->mutex is a good idea, but the rate
-        * control can't acquire the mutex since it runs in Tx path.
-        * So this is racy in that case, but in the worst case, the AMPDU
-        * size limit will be wrong for a short time which is not a big
-        * issue.
-        */
-
-       rcu_read_lock();
-
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-       if (!chanctx_conf ||
-            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return BT_COEX_LOOSE_LUT;
-       }
-
-       ret = BT_COEX_TX_DIS_LUT;
-
-       if (mvm->cfg->bt_shared_single_ant) {
-               rcu_read_unlock();
-               return ret;
-       }
-
-       phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
-
-       if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
-               ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
-       else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
-               ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
-       /* else - default = TX TX disallowed */
-
-       rcu_read_unlock();
-
-       return ret;
-}
-
-int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_coex_cmd *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_SYNC,
-       };
-       int ret;
-       u32 flags;
-
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return 0;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-
-       bt_cmd->max_kill = 5;
-       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
-       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
-       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
-       bt_cmd->bt4_tx_rx_max_freq0 = 15;
-
-       flags = iwlwifi_mod_params.bt_coex_active ?
-                       BT_COEX_NW : BT_COEX_DISABLE;
-       flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
-       bt_cmd->flags = cpu_to_le32(flags);
-
-       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
-                                           BT_VALID_BT_PRIO_BOOST |
-                                           BT_VALID_MAX_KILL |
-                                           BT_VALID_3W_TMRS |
-                                           BT_VALID_KILL_ACK |
-                                           BT_VALID_KILL_CTS |
-                                           BT_VALID_REDUCED_TX_POWER |
-                                           BT_VALID_LUT |
-                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
-                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
-                                           BT_VALID_CORUN_LUT_20 |
-                                           BT_VALID_CORUN_LUT_40 |
-                                           BT_VALID_ANT_ISOLATION |
-                                           BT_VALID_ANT_ISOLATION_THRS |
-                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
-                                           BT_VALID_TXRX_MAX_FREQ_0 |
-                                           BT_VALID_SYNC_TO_SCO);
-
-       if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
-                      sizeof(iwl_single_shared_ant));
-       else
-               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
-                      sizeof(iwl_combined_lookup));
-
-       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
-              sizeof(iwl_bt_prio_boost));
-       memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
-              sizeof(iwl_bt_mprio_lut));
-       bt_cmd->kill_ack_msk =
-               cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
-       bt_cmd->kill_cts_msk =
-               cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
-
-       memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
-       memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
-                                          bool reduced_tx_power)
-{
-       enum iwl_bt_kill_msk bt_kill_msk;
-       struct iwl_bt_coex_cmd *bt_cmd;
-       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .data[0] = &bt_cmd,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-               .flags = CMD_SYNC,
-       };
-       int ret = 0;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (reduced_tx_power) {
-               /* Reduced Tx power takes precedence over the type of profile */
-               bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW;
-       } else {
-               /* Low latency BT profile is active: give higher prio to BT */
-               if (BT_MBOX_MSG(notif, 3, SCO_STATE)  ||
-                   BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
-                   BT_MBOX_MSG(notif, 3, SNIFF_STATE))
-                       bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
-               else
-                       bt_kill_msk = BT_KILL_MSK_DEFAULT;
-       }
-
-       IWL_DEBUG_COEX(mvm,
-                      "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n",
-                      bt_kill_msk,
-                      BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
-                      BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
-                      BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
-
-       /* Don't send HCMD if there is no update */
-       if (bt_kill_msk == mvm->bt_kill_msk)
-               return 0;
-
-       mvm->bt_kill_msk = bt_kill_msk;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
-
-       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
-       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
-       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
-                                            BT_VALID_KILL_ACK |
-                                            BT_VALID_KILL_CTS);
-
-       IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
-                      iwl_bt_ack_kill_msk[bt_kill_msk],
-                      iwl_bt_cts_kill_msk[bt_kill_msk]);
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
-                                      bool enable)
-{
-       struct iwl_bt_coex_cmd *bt_cmd;
-       /* Send ASYNC since this can be sent from an atomic context */
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_DUP, },
-               .flags = CMD_ASYNC,
-       };
-
-       struct ieee80211_sta *sta;
-       struct iwl_mvm_sta *mvmsta;
-       int ret;
-
-       if (sta_id == IWL_MVM_STATION_COUNT)
-               return 0;
-
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-
-       /* This can happen if the station has just been removed */
-       if (IS_ERR_OR_NULL(sta))
-               return 0;
-
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-       /* nothing to do */
-       if (mvmsta->bt_reduced_txpower == enable)
-               return 0;
-
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
-
-       bt_cmd->valid_bit_msk =
-               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
-       bt_cmd->bt_reduced_tx_power = sta_id;
-
-       if (enable)
-               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
-
-       IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
-                      enable ? "en" : "dis", sta_id);
-
-       mvmsta->bt_reduced_txpower = enable;
-
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
-}
-
-struct iwl_bt_iterator_data {
-       struct iwl_bt_coex_profile_notif *notif;
-       struct iwl_mvm *mvm;
-       u32 num_bss_ifaces;
-       bool reduced_tx_power;
-       struct ieee80211_chanctx_conf *primary;
-       struct ieee80211_chanctx_conf *secondary;
-};
-
-static inline
-void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
-                                      struct ieee80211_vif *vif,
-                                      bool enable, int rssi)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
-       mvmvif->bf_data.last_bt_coex_event = rssi;
-       mvmvif->bf_data.bt_coex_max_thold =
-               enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
-       mvmvif->bf_data.bt_coex_min_thold =
-               enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
-}
-
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
-                                     struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       enum ieee80211_smps_mode smps_mode;
-       int ave_rssi;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (vif->type != NL80211_IFTYPE_STATION &&
-           vif->type != NL80211_IFTYPE_AP)
-               return;
-
-       smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
-       /* If channel context is invalid or not on 2.4GHz .. */
-       if ((!chanctx_conf ||
-            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
-               /* ... and it is an associated STATION, relax constraints */
-               if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
-                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                           smps_mode);
-               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               return;
-       }
-
-       /* SoftAP / GO will always be primary */
-       if (vif->type == NL80211_IFTYPE_AP) {
-               if (!mvmvif->ap_ibss_active)
-                       return;
-
-               /* the Ack / Cts kill mask must be default if AP / GO */
-               data->reduced_tx_power = false;
-
-               if (chanctx_conf == data->primary)
-                       return;
-
-               /* downgrade the current primary no matter what its type is */
-               data->secondary = data->primary;
-               data->primary = chanctx_conf;
-               return;
-       }
-
-       data->num_bss_ifaces++;
-
-       /* we are now a STA / P2P Client, and take associated ones only */
-       if (!vif->bss_conf.assoc)
-               return;
-
-       /* STA / P2P Client, try to be primary if first vif */
-       if (!data->primary || data->primary == chanctx_conf)
-               data->primary = chanctx_conf;
-       else if (!data->secondary)
-               /* if secondary is not NULL, it might be a GO */
-               data->secondary = chanctx_conf;
-
-       if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
-               smps_mode = IEEE80211_SMPS_STATIC;
-       else if (le32_to_cpu(data->notif->bt_activity_grading) >=
-                BT_LOW_TRAFFIC)
-               smps_mode = IEEE80211_SMPS_DYNAMIC;
-
-       IWL_DEBUG_COEX(data->mvm,
-                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
-                      mvmvif->id,  data->notif->bt_status,
-                      data->notif->bt_activity_grading, smps_mode);
-
-       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
-
-       /* don't reduce the Tx power if in loose scheme */
-       if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
-           mvm->cfg->bt_shared_single_ant) {
-               data->reduced_tx_power = false;
-               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               return;
-       }
-
-       /* Tx power is reduced only if BT is on, so ... */
-       if (!data->notif->bt_status) {
-               /* ... cancel reduced Tx power ... */
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-               data->reduced_tx_power = false;
-
-               /* ... and there is no need to get reports on RSSI any more. */
-               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
-               return;
-       }
-
-       /* try to get the avg rssi from fw */
-       ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
-       /* if the RSSI isn't valid, assume it is very low */
-       if (!ave_rssi)
-               ave_rssi = -100;
-       if (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) {
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-
-               /*
-                * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the
-                * BSS / P2P clients have rssi above threshold.
-                * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before
-                * the iteration, if one interface's rssi isn't good enough,
-                * bt_kill_msk will be set to default values.
-                */
-       } else if (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) {
-               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
-                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
-
-               /*
-                * At least one interface doesn't have an RSSI above the
-                * threshold, so bt_kill_msk must be set to default values.
-                */
-               data->reduced_tx_power = false;
-       }
-
-       /* Begin to monitor the RSSI: it may influence the reduced Tx power */
-       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
-}
-
-static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
-{
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-               .notif = &mvm->last_bt_notif,
-               .reduced_tx_power = true,
-       };
-       struct iwl_bt_coex_ci_cmd cmd = {};
-       u8 ci_bw_idx;
-
-       rcu_read_lock();
-       ieee80211_iterate_active_interfaces_atomic(
-                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-                                       iwl_mvm_bt_notif_iterator, &data);
-
-       if (data.primary) {
-               struct ieee80211_chanctx_conf *chan = data.primary;
-               if (WARN_ON(!chan->def.chan)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-                       ci_bw_idx = 0;
-                       cmd.co_run_bw_primary = 0;
-               } else {
-                       cmd.co_run_bw_primary = 1;
-                       if (chan->def.center_freq1 >
-                           chan->def.chan->center_freq)
-                               ci_bw_idx = 2;
-                       else
-                               ci_bw_idx = 1;
-               }
-
-               cmd.bt_primary_ci =
-                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
-       }
-
-       if (data.secondary) {
-               struct ieee80211_chanctx_conf *chan = data.secondary;
-               if (WARN_ON(!data.secondary->def.chan)) {
-                       rcu_read_unlock();
-                       return;
-               }
-
-               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
-                       ci_bw_idx = 0;
-                       cmd.co_run_bw_secondary = 0;
-               } else {
-                       cmd.co_run_bw_secondary = 1;
-                       if (chan->def.center_freq1 >
-                           chan->def.chan->center_freq)
-                               ci_bw_idx = 2;
-                       else
-                               ci_bw_idx = 1;
-               }
-
-               cmd.bt_secondary_ci =
-                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
-               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
-       }
-
-       rcu_read_unlock();
-
-       /* Don't spam the fw with the same command over and over */
-       if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
-               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
-                                        sizeof(cmd), &cmd))
-                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
-               memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
-       }
-
-       /*
-        * If there are no BSS / P2P client interfaces, reduced Tx Power is
-        * irrelevant since it is based on the RSSI coming from the beacon.
-        * Use BT_KILL_MSK_DEFAULT in that case.
-        */
-       data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
-
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-/* upon association, the fw will send a BT Coex notification */
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *dev_cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
-
-
-       IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
-                      notif->bt_status ? "ON" : "OFF");
-       IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
-       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
-       IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
-                      le32_to_cpu(notif->primary_ch_lut));
-       IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
-                      le32_to_cpu(notif->secondary_ch_lut));
-       IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
-                      le32_to_cpu(notif->bt_activity_grading));
-       IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
-                      notif->bt_agg_traffic_load);
-
-       /* remember this notification for future use: rssi fluctuations */
-       memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
-
-       iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification; returning anything other
-        * than 0 doesn't make sense even if the HCMD failed.
-        */
-       return 0;
-}
-
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-                                  struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-
-       struct ieee80211_sta *sta;
-       struct iwl_mvm_sta *mvmsta;
-
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       /* If channel context is invalid or not on 2.4GHz - don't count it */
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return;
-       }
-       rcu_read_unlock();
-
-       if (vif->type != NL80211_IFTYPE_STATION ||
-           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-
-       /* This can happen if the station has just been removed */
-       if (IS_ERR_OR_NULL(sta))
-               return;
-
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-       data->num_bss_ifaces++;
-
-       /*
-        * If this interface doesn't support reduced Tx power (probably
-        * because of low RSSI), set bt_kill_msk to default values.
-        */
-       if (!mvmsta->bt_reduced_txpower)
-               data->reduced_tx_power = false;
-       /* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */
-}
-
-void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                          enum ieee80211_rssi_event rssi_event)
-{
-       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-               .reduced_tx_power = true,
-       };
-       int ret;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /*
-        * RSSI update while not associated; this can happen since the
-        * statistics are handled asynchronously
-        */
-       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       /* No BT - reports should be disabled */
-       if (!mvm->last_bt_notif.bt_status)
-               return;
-
-       IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
-                      rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
-
-       /*
-        * Check if rssi is good enough for reduced Tx power, but not in loose
-        * scheme.
-        */
-       if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
-           iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
-               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
-                                                 false);
-       else
-               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-
-       if (ret)
-               IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-       ieee80211_iterate_active_interfaces_atomic(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_bt_rssi_iterator, &data);
-
-       /*
-        * If there are no BSS / P2P client interfaces, reduced Tx Power is
-        * irrelevant since it is based on the RSSI coming from the beacon.
-        * Use BT_KILL_MSK_DEFAULT in that case.
-        */
-       data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
-
-       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
-               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
-#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT        (1200)
-
-u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
-                                  struct ieee80211_sta *sta)
-{
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       enum iwl_bt_coex_lut_type lut_type;
-
-       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
-           BT_HIGH_TRAFFIC)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
-       if (lut_type == BT_COEX_LOOSE_LUT)
-               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
-       /* tight coex, high bt traffic, reduce AGG time limit */
-       return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
-}
-
-bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
-                                    struct ieee80211_sta *sta)
-{
-       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
-           BT_HIGH_TRAFFIC)
-               return true;
-
-       /*
-        * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
-        * already killed.
-        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
-        * Tx.
-        */
-       return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
-}
-
-void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
-{
-       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
-               return;
-
-       iwl_mvm_bt_coex_notif_handle(mvm);
-}
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
new file mode 100644 (file)
index 0000000..685f7e8
--- /dev/null
@@ -0,0 +1,1322 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "fw-api-coex.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-debug.h"
+
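+/* Build one BT priority table entry from an event's priority and shared-antenna selection */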
+#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant)                 \
+       [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) |    \
+                  ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
+
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
+                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
+                      BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
+                      BT_COEX_PRIO_TBL_PRIO_LOW, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
+                      BT_COEX_PRIO_TBL_PRIO_LOW, 1),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
+                      BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
+                      BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
+                      BT_COEX_PRIO_TBL_DISABLED, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
+                      BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
+                      BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
+       EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
+                      BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
+       0, 0, 0, 0, 0, 0,
+};
+
+#undef EVENT_PRIO_ANT
+
+#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD    (-62)
+#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD   (-65)
+#define BT_ANTENNA_COUPLING_THRESHOLD          (30)
+
+int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+{
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+               return 0;
+
+       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
+                                   sizeof(struct iwl_bt_coex_prio_tbl_cmd),
+                                   &iwl_bt_prio_tbl);
+}
+
+const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+       [BT_KILL_MSK_DEFAULT] = 0xffff0000,
+       [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
+       [BT_KILL_MSK_REDUCED_TXPOW] = 0,
+};
+
+const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+       [BT_KILL_MSK_DEFAULT] = 0xffff0000,
+       [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
+       [BT_KILL_MSK_REDUCED_TXPOW] = 0,
+};
+
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+       cpu_to_le32(0xf0f0f0f0),
+       cpu_to_le32(0xc0c0c0c0),
+       cpu_to_le32(0xfcfcfcfc),
+       cpu_to_le32(0xff00ff00),
+};
+
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+};
+
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+       {
+               /* Tight */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaeaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               /* Loose */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               /* Tx Tx disabled */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xC0004000),
+               cpu_to_le32(0xC0004000),
+               cpu_to_le32(0xF0005000),
+               cpu_to_le32(0xF0005000),
+       },
+};
+
+/* 20MHz / 40MHz below / 40MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+       /* dummy entry for channel 0 */
+       {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+       {
+               cpu_to_le64(0x0000001FFFULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x00007FFFFFULL),
+       },
+       {
+               cpu_to_le64(0x000000FFFFULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x0003FFFFFFULL),
+       },
+       {
+               cpu_to_le64(0x000003FFFCULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x000FFFFFFCULL),
+       },
+       {
+               cpu_to_le64(0x00001FFFE0ULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x007FFFFFE0ULL),
+       },
+       {
+               cpu_to_le64(0x00007FFF80ULL),
+               cpu_to_le64(0x00007FFFFFULL),
+               cpu_to_le64(0x01FFFFFF80ULL),
+       },
+       {
+               cpu_to_le64(0x0003FFFC00ULL),
+               cpu_to_le64(0x0003FFFFFFULL),
+               cpu_to_le64(0x0FFFFFFC00ULL),
+       },
+       {
+               cpu_to_le64(0x000FFFF000ULL),
+               cpu_to_le64(0x000FFFFFFCULL),
+               cpu_to_le64(0x3FFFFFF000ULL),
+       },
+       {
+               cpu_to_le64(0x007FFF8000ULL),
+               cpu_to_le64(0x007FFFFFE0ULL),
+               cpu_to_le64(0xFFFFFF8000ULL),
+       },
+       {
+               cpu_to_le64(0x01FFFE0000ULL),
+               cpu_to_le64(0x01FFFFFF80ULL),
+               cpu_to_le64(0xFFFFFE0000ULL),
+       },
+       {
+               cpu_to_le64(0x0FFFF00000ULL),
+               cpu_to_le64(0x0FFFFFFC00ULL),
+               cpu_to_le64(0x0ULL),
+       },
+       {
+               cpu_to_le64(0x3FFFC00000ULL),
+               cpu_to_le64(0x3FFFFFF000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFFE000000ULL),
+               cpu_to_le64(0xFFFFFF8000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFF8000000ULL),
+               cpu_to_le64(0xFFFFFE0000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFC0000000ULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x0ULL)
+       },
+};
+
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+       cpu_to_le32(0x22002200),
+       cpu_to_le32(0x33113311),
+};
+
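+/* Co-running block LUT presets, selected by the antenna coupling range (see table below) */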
+struct corunning_block_luts {
+       u8 range;
+       __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
+};
+
+/*
+ * Ranges for the antenna coupling calibration / co-running block LUT:
+ *             LUT0: [ 0, 12[
+ *             LUT1: [12, 20[
+ *             LUT2: [20, 21[
+ *             LUT3: [21, 23[
+ *             LUT4: [23, 27[
+ *             LUT5: [27, 30[
+ *             LUT6: [30, 32[
+ *             LUT7: [32, 33[
+ *             LUT8: [33, - [
+ */
+static const struct corunning_block_luts antenna_coupling_ranges[] = {
+       {
+               .range = 0,
+               .lut20 = {
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 12,
+               .lut20 = {
+                       cpu_to_le32(0x00000001),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 20,
+               .lut20 = {
+                       cpu_to_le32(0x00000002),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 21,
+               .lut20 = {
+                       cpu_to_le32(0x00000003),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 23,
+               .lut20 = {
+                       cpu_to_le32(0x00000004),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 27,
+               .lut20 = {
+                       cpu_to_le32(0x00000005),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 30,
+               .lut20 = {
+                       cpu_to_le32(0x00000006),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 32,
+               .lut20 = {
+                       cpu_to_le32(0x00000007),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+       {
+               .range = 33,
+               .lut20 = {
+                       cpu_to_le32(0x00000008),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+                       cpu_to_le32(0x00000000),  cpu_to_le32(0x00000000),
+               },
+       },
+};
+
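+/* Pick the coex decision LUT for this vif based on band, shared antenna and the last BT CI / notification */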
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum iwl_bt_coex_lut_type ret;
+       u16 phy_ctx_id;
+
+       /*
+        * Checking that we hold mvm->mutex is a good idea, but the rate
+        * control can't acquire the mutex since it runs in Tx path.
+        * So this is racy in that case, but in the worst case, the AMPDU
+        * size limit will be wrong for a short time which is not a big
+        * issue.
+        */
+
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+       if (!chanctx_conf ||
+            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+               rcu_read_unlock();
+               return BT_COEX_LOOSE_LUT;
+       }
+
+       ret = BT_COEX_TX_DIS_LUT;
+
+       if (mvm->cfg->bt_shared_single_ant) {
+               rcu_read_unlock();
+               return ret;
+       }
+
+       phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+       if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+               ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+       else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+               ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+       /* else - default = TX TX disallowed */
+
+       rcu_read_unlock();
+
+       return ret;
+}
+
+int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
+{
+       struct iwl_bt_coex_cmd *bt_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_SYNC,
+       };
+       int ret;
+       u32 flags;
+
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+               return 0;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+
+       bt_cmd->max_kill = 5;
+       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+       bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+       bt_cmd->bt4_tx_rx_max_freq0 = 15;
+
+       flags = iwlwifi_mod_params.bt_coex_active ?
+                       BT_COEX_NW : BT_COEX_DISABLE;
+       bt_cmd->flags = cpu_to_le32(flags);
+
+       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
+                                           BT_VALID_BT_PRIO_BOOST |
+                                           BT_VALID_MAX_KILL |
+                                           BT_VALID_3W_TMRS |
+                                           BT_VALID_KILL_ACK |
+                                           BT_VALID_KILL_CTS |
+                                           BT_VALID_REDUCED_TX_POWER |
+                                           BT_VALID_LUT |
+                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+                                           BT_VALID_ANT_ISOLATION |
+                                           BT_VALID_ANT_ISOLATION_THRS |
+                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
+                                           BT_VALID_TXRX_MAX_FREQ_0 |
+                                           BT_VALID_SYNC_TO_SCO);
+
+       if (IWL_MVM_BT_COEX_SYNC2SCO)
+               bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+
+       if (IWL_MVM_BT_COEX_CORUNNING) {
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+                                                    BT_VALID_CORUN_LUT_40);
+               bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
+       }
+
+       if (IWL_MVM_BT_COEX_MPLUT) {
+               bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
+               bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+       }
+
+       if (mvm->cfg->bt_shared_single_ant)
+               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+                      sizeof(iwl_single_shared_ant));
+       else
+               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+                      sizeof(iwl_combined_lookup));
+
+       /* Take first Co-running block LUT to get started */
+       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
+              sizeof(bt_cmd->bt4_corun_lut20));
+       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
+              sizeof(bt_cmd->bt4_corun_lut40));
+
+       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+              sizeof(iwl_bt_prio_boost));
+       memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+              sizeof(iwl_bt_mprio_lut));
+       bt_cmd->kill_ack_msk =
+               cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
+       bt_cmd->kill_cts_msk =
+               cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
+
+       memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+       memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
+
+static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
+                                          bool reduced_tx_power)
+{
+       enum iwl_bt_kill_msk bt_kill_msk;
+       struct iwl_bt_coex_cmd *bt_cmd;
+       struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .data[0] = &bt_cmd,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_SYNC,
+       };
+       int ret = 0;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (reduced_tx_power) {
+               /* Reduced Tx power takes precedence over the profile type */
+               bt_kill_msk = BT_KILL_MSK_REDUCED_TXPOW;
+       } else {
+               /* Low latency BT profile is active: give higher prio to BT */
+               if (BT_MBOX_MSG(notif, 3, SCO_STATE)  ||
+                   BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
+                   BT_MBOX_MSG(notif, 3, SNIFF_STATE))
+                       bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
+               else
+                       bt_kill_msk = BT_KILL_MSK_DEFAULT;
+       }
+
+       IWL_DEBUG_COEX(mvm,
+                      "Update kill_msk: %d - SCO %sactive A2DP %sactive SNIFF %sactive\n",
+                      bt_kill_msk,
+                      BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
+                      BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
+                      BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
+
+       /* Don't send HCMD if there is no update */
+       if (bt_kill_msk == mvm->bt_kill_msk)
+               return 0;
+
+       mvm->bt_kill_msk = bt_kill_msk;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
+
+       bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+       bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+                                            BT_VALID_KILL_ACK |
+                                            BT_VALID_KILL_CTS);
+
+       IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+                      iwl_bt_ack_kill_msk[bt_kill_msk],
+                      iwl_bt_cts_kill_msk[bt_kill_msk]);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
+
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
+{
+       struct iwl_bt_coex_cmd *bt_cmd;
+       /* Send ASYNC since this can be sent from an atomic context */
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_ASYNC,
+       };
+       struct iwl_mvm_sta *mvmsta;
+       int ret;
+
+       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+       if (!mvmsta)
+               return 0;
+
+       /* nothing to do */
+       if (mvmsta->bt_reduced_txpower_dbg ||
+           mvmsta->bt_reduced_txpower == enable)
+               return 0;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
+       if (!bt_cmd)
+               return -ENOMEM;
+       cmd.data[0] = bt_cmd;
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
+
+       bt_cmd->valid_bit_msk =
+               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
+       bt_cmd->bt_reduced_tx_power = sta_id;
+
+       if (enable)
+               bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
+
+       IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
+                      enable ? "en" : "dis", sta_id);
+
+       mvmsta->bt_reduced_txpower = enable;
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       kfree(bt_cmd);
+       return ret;
+}
+
+struct iwl_bt_iterator_data {
+       struct iwl_bt_coex_profile_notif *notif;
+       struct iwl_mvm *mvm;
+       u32 num_bss_ifaces;
+       bool reduced_tx_power;
+       struct ieee80211_chanctx_conf *primary;
+       struct ieee80211_chanctx_conf *secondary;
+       bool primary_ll;
+};
+
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      bool enable, int rssi)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mvmvif->bf_data.last_bt_coex_event = rssi;
+       mvmvif->bf_data.bt_coex_max_thold =
+               enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+       mvmvif->bf_data.bt_coex_min_thold =
+               enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+}
+
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+                                     struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_bt_iterator_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum ieee80211_smps_mode smps_mode;
+       u32 bt_activity_grading;
+       int ave_rssi;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               /* default smps_mode for BSS / P2P client is AUTOMATIC */
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+               data->num_bss_ifaces++;
+
+               /*
+                * Count unassoc BSSes, relax SMPS constraints
+                * and disable reduced Tx Power
+                */
+               if (!vif->bss_conf.assoc) {
+                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                           smps_mode);
+                       if (iwl_mvm_bt_coex_reduced_txp(mvm,
+                                                       mvmvif->ap_sta_id,
+                                                       false))
+                               IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+                       return;
+               }
+               break;
+       case NL80211_IFTYPE_AP:
+               /* default smps_mode for AP / GO is OFF */
+               smps_mode = IEEE80211_SMPS_OFF;
+               if (!mvmvif->ap_ibss_active) {
+                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                           smps_mode);
+                       return;
+               }
+
+               /* the Ack / Cts kill mask must be default if AP / GO */
+               data->reduced_tx_power = false;
+               break;
+       default:
+               return;
+       }
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+       /* If channel context is invalid or not on 2.4GHz .. */
+       if ((!chanctx_conf ||
+            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+               /* ... relax constraints and disable rssi events */
+               iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                   smps_mode);
+               if (vif->type == NL80211_IFTYPE_STATION)
+                       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+               return;
+       }
+
+       bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+       if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+               smps_mode = IEEE80211_SMPS_STATIC;
+       else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+               smps_mode = vif->type == NL80211_IFTYPE_AP ?
+                               IEEE80211_SMPS_OFF :
+                               IEEE80211_SMPS_DYNAMIC;
+       IWL_DEBUG_COEX(data->mvm,
+                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
+                      mvmvif->id, data->notif->bt_status, bt_activity_grading,
+                      smps_mode);
+
+       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
+
+       /* low latency is always primary */
+       if (iwl_mvm_vif_low_latency(mvmvif)) {
+               data->primary_ll = true;
+
+               data->secondary = data->primary;
+               data->primary = chanctx_conf;
+       }
+
+       if (vif->type == NL80211_IFTYPE_AP) {
+               if (!mvmvif->ap_ibss_active)
+                       return;
+
+               if (chanctx_conf == data->primary)
+                       return;
+
+               if (!data->primary_ll) {
+                       /*
+                        * downgrade the current primary no matter what its
+                        * type is.
+                        */
+                       data->secondary = data->primary;
+                       data->primary = chanctx_conf;
+               } else {
+                       /* there is low latency vif - we will be secondary */
+                       data->secondary = chanctx_conf;
+               }
+               return;
+       }
+
+       /*
+        * STA / P2P client: try to be primary if this is the first vif. If we
+        * are in low-latency mode, we are already primary and don't do much.
+        */
+       if (!data->primary || data->primary == chanctx_conf)
+               data->primary = chanctx_conf;
+       else if (!data->secondary)
+               /* if secondary is not NULL, it might be a GO */
+               data->secondary = chanctx_conf;
+
+       /* don't reduce the Tx power if in loose scheme */
+       if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+           mvm->cfg->bt_shared_single_ant) {
+               data->reduced_tx_power = false;
+               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+               return;
+       }
+
+       /* Reduced Tx power is relevant only if BT is on, so ... */
+       if (!data->notif->bt_status) {
+               /* ... cancel reduced Tx power ... */
+               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
+                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+               data->reduced_tx_power = false;
+
+               /* ... and there is no need to get reports on RSSI any more. */
+               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+               return;
+       }
+
+       /* try to get the avg rssi from fw */
+       ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+
+       /* if the RSSI isn't valid, assume it is very low */
+       if (!ave_rssi)
+               ave_rssi = -100;
+       if (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) {
+               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
+                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+
+               /*
+                * bt_kill_msk can be BT_KILL_MSK_REDUCED_TXPOW only if all the
+                * BSS / P2P clients have rssi above threshold.
+                * We set the bt_kill_msk to BT_KILL_MSK_REDUCED_TXPOW before
+                * the iteration, if one interface's rssi isn't good enough,
+                * bt_kill_msk will be set to default values.
+                */
+       } else if (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) {
+               if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
+                       IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+
+               /*
+                * This interface doesn't have an RSSI above the threshold, so
+                * bt_kill_msk must be set to the default values.
+                */
+               data->reduced_tx_power = false;
+       }
+
+       /* Begin to monitor the RSSI: it may influence the reduced Tx power */
+       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+}
+
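The reduced Tx power decision in the iterator above is a plain hysteresis on the averaged beacon RSSI: enable it only when the RSSI rises above the enable threshold, disable it only when it falls below the disable threshold, and leave the current state alone in between. A minimal sketch of that rule, reusing only the two threshold macros already referenced above (the helper name is illustrative, not part of the driver):

static bool reduced_txp_should_be_on(int ave_rssi, bool currently_on)
{
        if (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD)
                return true;
        if (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD)
                return false;
        return currently_on;    /* inside the dead band: keep the current state */
}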
+static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
+{
+       struct iwl_bt_iterator_data data = {
+               .mvm = mvm,
+               .notif = &mvm->last_bt_notif,
+               .reduced_tx_power = true,
+       };
+       struct iwl_bt_coex_ci_cmd cmd = {};
+       u8 ci_bw_idx;
+
+       rcu_read_lock();
+       ieee80211_iterate_active_interfaces_atomic(
+                                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_bt_notif_iterator, &data);
+
+       if (data.primary) {
+               struct ieee80211_chanctx_conf *chan = data.primary;
+               if (WARN_ON(!chan->def.chan)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+                       ci_bw_idx = 0;
+                       cmd.co_run_bw_primary = 0;
+               } else {
+                       cmd.co_run_bw_primary = 1;
+                       if (chan->def.center_freq1 >
+                           chan->def.chan->center_freq)
+                               ci_bw_idx = 2;
+                       else
+                               ci_bw_idx = 1;
+               }
+
+               cmd.bt_primary_ci =
+                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+       }
+
+       if (data.secondary) {
+               struct ieee80211_chanctx_conf *chan = data.secondary;
+               if (WARN_ON(!data.secondary->def.chan)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+                       ci_bw_idx = 0;
+                       cmd.co_run_bw_secondary = 0;
+               } else {
+                       cmd.co_run_bw_secondary = 1;
+                       if (chan->def.center_freq1 >
+                           chan->def.chan->center_freq)
+                               ci_bw_idx = 2;
+                       else
+                               ci_bw_idx = 1;
+               }
+
+               cmd.bt_secondary_ci =
+                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+       }
+
+       rcu_read_unlock();
+
+       /* Don't spam the fw with the same command over and over */
+       if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+                                        sizeof(cmd), &cmd))
+                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+               memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+       }
+
+       /*
+        * If there are no BSS / P2P client interfaces, reduced Tx Power is
+        * irrelevant since it is based on the RSSI coming from the beacon.
+        * Use BT_KILL_MSK_DEFAULT in that case.
+        */
+       data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
+
+       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
+
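For reference, the ci_bw_idx value computed twice above (once for the primary and once for the secondary channel context) encodes the channel width and, for 40MHz, the position of the secondary channel. A minimal restatement of that mapping using the same cfg80211_chan_def fields; the helper itself is only illustrative:

static u8 bt_ci_bw_index(const struct cfg80211_chan_def *def)
{
        if (def->width < NL80211_CHAN_WIDTH_40)
                return 0;       /* 20 MHz or narrower */

        /*
         * 40 MHz: 2 when the secondary channel sits above the control
         * channel, 1 when it sits below.
         */
        return def->center_freq1 > def->chan->center_freq ? 2 : 1;
}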
+/* upon association, the fw will send a BT Coex notification */
+int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb,
+                            struct iwl_device_cmd *dev_cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+
+       IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+                      notif->bt_status ? "ON" : "OFF");
+       IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
+       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+       IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+                      le32_to_cpu(notif->primary_ch_lut));
+       IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+                      le32_to_cpu(notif->secondary_ch_lut));
+       IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+                      le32_to_cpu(notif->bt_activity_grading));
+       IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
+                      notif->bt_agg_traffic_load);
+
+       /* remember this notification for future use: rssi fluctuations */
+       memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
+
+       iwl_mvm_bt_coex_notif_handle(mvm);
+
+       /*
+        * This is an async handler for a notification, returning anything other
+        * than 0 doesn't make sense even if HCMD failed.
+        */
+       return 0;
+}
+
+static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
+                                  struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+       struct iwl_bt_iterator_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+
+       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
+
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       /* If channel context is invalid or not on 2.4GHz - don't count it */
+       if (!chanctx_conf ||
+           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       if (vif->type != NL80211_IFTYPE_STATION ||
+           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+               return;
+
+       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+
+       /* This can happen if the station has been removed right now */
+       if (IS_ERR_OR_NULL(sta))
+               return;
+
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+       data->num_bss_ifaces++;
+
+       /*
+        * If this interface doesn't support reduced Tx power (probably
+        * because of a low RSSI), set bt_kill_msk to the default values.
+        */
+       if (!mvmsta->bt_reduced_txpower)
+               data->reduced_tx_power = false;
+       /* else - possibly leave it to BT_KILL_MSK_REDUCED_TXPOW */
+}
+
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                          enum ieee80211_rssi_event rssi_event)
+{
+       struct iwl_mvm_vif *mvmvif = (void *)vif->drv_priv;
+       struct iwl_bt_iterator_data data = {
+               .mvm = mvm,
+               .reduced_tx_power = true,
+       };
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       /*
+        * RSSI update while not associated - this can happen since the
+        * statistics are handled asynchronously.
+        */
+       if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
+               return;
+
+       /* No BT - reports should be disabled */
+       if (!mvm->last_bt_notif.bt_status)
+               return;
+
+       IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
+                      rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
+
+       /*
+        * Check if rssi is good enough for reduced Tx power, but not in loose
+        * scheme.
+        */
+       if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+           iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
+               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+                                                 false);
+       else
+               ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
+
+       if (ret)
+               IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
+
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_bt_rssi_iterator, &data);
+
+       /*
+        * If there are no BSS / P2P client interfaces, reduced Tx Power is
+        * irrelevant since it is based on the RSSI coming from the beacon.
+        * Use BT_KILL_MSK_DEFAULT in that case.
+        */
+       data.reduced_tx_power = data.reduced_tx_power && data.num_bss_ifaces;
+
+       if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
+               IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT        (1200)
+
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+                               struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       enum iwl_bt_coex_lut_type lut_type;
+
+       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+           BT_HIGH_TRAFFIC)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+       if (lut_type == BT_COEX_LOOSE_LUT)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       /* tight coex, high bt traffic, reduce AGG time limit */
+       return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+                                    struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+           BT_HIGH_TRAFFIC)
+               return true;
+
+       /*
+        * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
+        * already killed.
+        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
+        * Tx.
+        */
+       return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
+}
+
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+                          struct ieee80211_tx_info *info, u8 ac)
+{
+       __le16 fc = hdr->frame_control;
+
+       if (info->band != IEEE80211_BAND_2GHZ)
+               return 0;
+
+       if (unlikely(mvm->bt_tx_prio))
+               return mvm->bt_tx_prio - 1;
+
+       /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
+       if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
+            is_multicast_ether_addr(hdr->addr1) ||
+            ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
+            ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
+               return 3;
+
+       switch (ac) {
+       case IEEE80211_AC_BE:
+               return 1;
+       case IEEE80211_AC_VO:
+               return 3;
+       case IEEE80211_AC_VI:
+               return 2;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
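The access-category branch of iwl_mvm_bt_coex_tx_prio() above reduces to a fixed mapping. The same mapping written as a lookup table, a sketch that assumes the mac80211 IEEE80211_AC_* enumeration:

static const u8 ac_to_bt_coex_prio[] = {
        [IEEE80211_AC_VO] = 3,  /* voice */
        [IEEE80211_AC_VI] = 2,  /* video */
        [IEEE80211_AC_BE] = 1,  /* best effort */
        [IEEE80211_AC_BK] = 0,  /* background (the default case above) */
};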
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+               return;
+
+       iwl_mvm_bt_coex_notif_handle(mvm);
+}
+
+int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb,
+                                 struct iwl_device_cmd *dev_cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       u32 ant_isolation = le32_to_cpup((void *)pkt->data);
+       u8 __maybe_unused lower_bound, upper_bound;
+       u8 lut;
+
+       struct iwl_bt_coex_cmd *bt_cmd;
+       struct iwl_host_cmd cmd = {
+               .id = BT_CONFIG,
+               .len = { sizeof(*bt_cmd), },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .flags = CMD_SYNC,
+       };
+
+       if (!IWL_MVM_BT_COEX_CORUNNING)
+               return 0;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (ant_isolation == mvm->last_ant_isol)
+               return 0;
+
+       for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
+               if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
+                       break;
+
+       lower_bound = antenna_coupling_ranges[lut].range;
+
+       if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
+               upper_bound = antenna_coupling_ranges[lut + 1].range;
+       else
+               upper_bound = antenna_coupling_ranges[lut].range;
+
+       IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
+                      ant_isolation, lower_bound, upper_bound, lut);
+
+       mvm->last_ant_isol = ant_isolation;
+
+       if (mvm->last_corun_lut == lut)
+               return 0;
+
+       mvm->last_corun_lut = lut;
+
+       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+       if (!bt_cmd)
+               return 0;
+       cmd.data[0] = bt_cmd;
+
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
+       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+                                            BT_VALID_CORUN_LUT_20 |
+                                            BT_VALID_CORUN_LUT_40);
+
+       /* For the moment, use the same LUT for 20MHz and 40MHz */
+       memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
+              sizeof(bt_cmd->bt4_corun_lut20));
+
+       memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
+              sizeof(bt_cmd->bt4_corun_lut40));
+
+       if (iwl_mvm_send_cmd(mvm, &cmd))
+               IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+
+       kfree(bt_cmd);
+
+       return 0;
+}
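The loop in iwl_mvm_rx_ant_coupling_notif() above selects the co-running LUT whose half-open range contains the reported isolation value. A self-contained sketch of that search with made-up range values (the real ones live in antenna_coupling_ranges earlier in the file and are not shown here):

#include <stdio.h>

static const unsigned int ranges[] = { 0, 10, 20, 30 };        /* hypothetical */

static unsigned int pick_corun_lut(unsigned int isolation)
{
        const unsigned int n = sizeof(ranges) / sizeof(ranges[0]);
        unsigned int lut;

        for (lut = 0; lut < n - 1; lut++)
                if (isolation < ranges[lut + 1])
                        break;
        return lut;     /* values beyond the last bound stay in the last LUT */
}

int main(void)
{
        /* isolation 25 falls in [20, 30), so this prints 2 */
        printf("%u\n", pick_corun_lut(25));
        return 0;
}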
index 036857698565809b302b6043032cf0793c45d171..51685693af2e47e7ddfb7491bf8f74f0a1d745ae 100644 (file)
@@ -78,5 +78,9 @@
 #define IWL_MVM_PS_SNOOZE_INTERVAL             25
 #define IWL_MVM_PS_SNOOZE_WINDOW               50
 #define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW                25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT       64
+#define IWL_MVM_BT_COEX_SYNC2SCO               1
+#define IWL_MVM_BT_COEX_CORUNNING              1
+#define IWL_MVM_BT_COEX_MPLUT                  1
 
 #endif /* __MVM_CONSTANTS_H */
index f36a7ee0267fd69d12ff557246e8332656826ae2..e56f5a0edf855331a1411e76406a143176b5e9d5 100644 (file)
@@ -376,139 +376,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
        return err;
 }
 
-static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
-                                     struct ieee80211_vif *vif)
-{
-       union {
-               struct iwl_proto_offload_cmd_v1 v1;
-               struct iwl_proto_offload_cmd_v2 v2;
-               struct iwl_proto_offload_cmd_v3_small v3s;
-               struct iwl_proto_offload_cmd_v3_large v3l;
-       } cmd = {};
-       struct iwl_host_cmd hcmd = {
-               .id = PROT_OFFLOAD_CONFIG_CMD,
-               .flags = CMD_SYNC,
-               .data[0] = &cmd,
-               .dataflags[0] = IWL_HCMD_DFL_DUP,
-       };
-       struct iwl_proto_offload_cmd_common *common;
-       u32 enabled = 0, size;
-       u32 capa_flags = mvm->fw->ucode_capa.flags;
-#if IS_ENABLED(CONFIG_IPV6)
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       int i;
-
-       if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
-           capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
-               struct iwl_ns_config *nsc;
-               struct iwl_targ_addr *addrs;
-               int n_nsc, n_addrs;
-               int c;
-
-               if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
-                       nsc = cmd.v3s.ns_config;
-                       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
-                       addrs = cmd.v3s.targ_addrs;
-                       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
-               } else {
-                       nsc = cmd.v3l.ns_config;
-                       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
-                       addrs = cmd.v3l.targ_addrs;
-                       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
-               }
-
-               if (mvmvif->num_target_ipv6_addrs)
-                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
-
-               /*
-                * For each address we have (and that will fit) fill a target
-                * address struct and combine for NS offload structs with the
-                * solicited node addresses.
-                */
-               for (i = 0, c = 0;
-                    i < mvmvif->num_target_ipv6_addrs &&
-                    i < n_addrs && c < n_nsc; i++) {
-                       struct in6_addr solicited_addr;
-                       int j;
-
-                       addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
-                                                 &solicited_addr);
-                       for (j = 0; j < c; j++)
-                               if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
-                                                 &solicited_addr) == 0)
-                                       break;
-                       if (j == c)
-                               c++;
-                       addrs[i].addr = mvmvif->target_ipv6_addrs[i];
-                       addrs[i].config_num = cpu_to_le32(j);
-                       nsc[j].dest_ipv6_addr = solicited_addr;
-                       memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
-               }
-
-               if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
-                       cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
-               else
-                       cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
-       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
-               if (mvmvif->num_target_ipv6_addrs) {
-                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
-                       memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
-               }
-
-               BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
-                            sizeof(mvmvif->target_ipv6_addrs[0]));
-
-               for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
-                                   IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
-                       memcpy(cmd.v2.target_ipv6_addr[i],
-                              &mvmvif->target_ipv6_addrs[i],
-                              sizeof(cmd.v2.target_ipv6_addr[i]));
-       } else {
-               if (mvmvif->num_target_ipv6_addrs) {
-                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
-                       memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
-               }
-
-               BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
-                            sizeof(mvmvif->target_ipv6_addrs[0]));
-
-               for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
-                                   IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
-                       memcpy(cmd.v1.target_ipv6_addr[i],
-                              &mvmvif->target_ipv6_addrs[i],
-                              sizeof(cmd.v1.target_ipv6_addr[i]));
-       }
-#endif
-
-       if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
-               common = &cmd.v3s.common;
-               size = sizeof(cmd.v3s);
-       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
-               common = &cmd.v3l.common;
-               size = sizeof(cmd.v3l);
-       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
-               common = &cmd.v2.common;
-               size = sizeof(cmd.v2);
-       } else {
-               common = &cmd.v1.common;
-               size = sizeof(cmd.v1);
-       }
-
-       if (vif->bss_conf.arp_addr_cnt) {
-               enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
-               common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
-               memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
-       }
-
-       if (!enabled)
-               return 0;
-
-       common->enabled = cpu_to_le32(enabled);
-
-       hcmd.len[0] = size;
-       return iwl_mvm_send_cmd(mvm, &hcmd);
-}
-
 enum iwl_mvm_tcp_packet_type {
        MVM_TCP_TX_SYN,
        MVM_TCP_RX_SYNACK,
@@ -846,8 +713,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        quota_cmd.quotas[0].id_and_color =
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
                                                mvmvif->phy_ctxt->color));
-       quota_cmd.quotas[0].quota = cpu_to_le32(100);
-       quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
+       quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+       quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
 
        for (i = 1; i < MAX_BINDINGS; i++)
                quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
@@ -927,6 +794,20 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                IWL_ERR(mvm, "failed to set non-QoS seqno\n");
 }
 
+static int
+iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
+                              const struct iwl_wowlan_config_cmd_v3 *cmd)
+{
+       /* start only with the v2 part of the command */
+       u16 cmd_len = sizeof(cmd->common);
+
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
+               cmd_len = sizeof(*cmd);
+
+       return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
+                                   cmd_len, cmd);
+}
+
 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                             struct cfg80211_wowlan *wowlan,
                             bool test)
@@ -939,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif;
        struct ieee80211_sta *ap_sta;
        struct iwl_mvm_sta *mvm_ap_sta;
-       struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+       struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
        struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
        struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
        struct iwl_d3_manager_config d3_cfg_cmd_data = {
@@ -961,9 +842,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                .tkip = &tkip_cmd,
                .use_tkip = false,
        };
-       int ret, i;
+       int ret;
        int len __maybe_unused;
-       u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
 
        if (!wowlan) {
                /*
@@ -980,8 +860,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       old_aux_sta_id = mvm->aux_sta.sta_id;
-
        /* see if there's only a single BSS vif and it's associated */
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1005,49 +883,41 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
 
-       /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
+       /* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
 
-       wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
+       wowlan_config_cmd.common.is_11n_connection =
+                                       ap_sta->ht_cap.ht_supported;
 
        /* Query the last used seqno and set it */
        ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
        if (ret < 0)
                goto out_noreset;
-       wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
+       wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
 
-       /*
-        * For QoS counters, we store the one to use next, so subtract 0x10
-        * since the uCode will add 0x10 *before* using the value while we
-        * increment after using the value (i.e. store the next value to use).
-        */
-       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-               u16 seq = mvm_ap_sta->tid_data[i].seq_number;
-               seq -= 0x10;
-               wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
-       }
+       iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
 
        if (wowlan->disconnect)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
                                    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
        if (wowlan->magic_pkt)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
        if (wowlan->gtk_rekey_failure)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
        if (wowlan->eap_identity_req)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
        if (wowlan->four_way_handshake)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
        if (wowlan->n_patterns)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
 
        if (wowlan->rfkill_release)
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
 
        if (wowlan->tcp) {
@@ -1055,7 +925,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                 * Set the "link change" (really "link lost") flag as well
                 * since that implies losing the TCP connection.
                 */
-               wowlan_config_cmd.wakeup_filter |=
+               wowlan_config_cmd.common.wakeup_filter |=
                        cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
                                    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
                                    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
@@ -1066,16 +936,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        iwl_trans_stop_device(mvm->trans);
 
-       /*
-        * The D3 firmware still hardcodes the AP station ID for the
-        * BSS we're associated with as 0. Store the real STA ID here
-        * and assign 0. When we leave this function, we'll restore
-        * the original value for the resume code.
-        */
-       old_ap_sta_id = mvm_ap_sta->sta_id;
-       mvm_ap_sta->sta_id = 0;
-       mvmvif->ap_sta_id = 0;
-
        /*
         * Set the HW restart bit -- this is mostly true as we're
         * going to load new firmware and reprogram that, though
@@ -1096,16 +956,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        mvm->ptk_ivlen = 0;
        mvm->ptk_icvlen = 0;
 
-       /*
-        * The D3 firmware still hardcodes the AP station ID for the
-        * BSS we're associated with as 0. As a result, we have to move
-        * the auxiliary station to ID 1 so the ID 0 remains free for
-        * the AP station for later.
-        * We set the sta_id to 1 here, and reset it to its previous
-        * value (that we stored above) later.
-        */
-       mvm->aux_sta.sta_id = 1;
-
        ret = iwl_mvm_load_d3_fw(mvm);
        if (ret)
                goto out;
@@ -1173,9 +1023,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                }
        }
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
-                                  CMD_SYNC, sizeof(wowlan_config_cmd),
-                                  &wowlan_config_cmd);
+       ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
        if (ret)
                goto out;
 
@@ -1183,7 +1031,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        if (ret)
                goto out;
 
-       ret = iwl_mvm_send_proto_offload(mvm, vif);
+       ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
        if (ret)
                goto out;
 
@@ -1191,11 +1039,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        if (ret)
                goto out;
 
-       ret = iwl_mvm_power_update_device_mode(mvm);
+       ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto out;
 
-       ret = iwl_mvm_power_update_mode(mvm, vif);
+       ret = iwl_mvm_power_update_mac(mvm, vif);
        if (ret)
                goto out;
 
@@ -1222,10 +1070,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        iwl_trans_d3_suspend(mvm->trans, test);
  out:
-       mvm->aux_sta.sta_id = old_aux_sta_id;
-       mvm_ap_sta->sta_id = old_ap_sta_id;
-       mvmvif->ap_sta_id = old_ap_sta_id;
-
        if (ret < 0)
                ieee80211_restart_hw(mvm->hw);
  out_noreset:
index 0e29cd83a06a83c158860a5523549edf7e3c44db..9b59e1d7ae71ea888973992cb88363ba2371fdad 100644 (file)
@@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
 
        mutex_lock(&mvm->mutex);
        iwl_dbgfs_update_pm(mvm, vif, param, val);
-       ret = iwl_mvm_power_update_mode(mvm, vif);
+       ret = iwl_mvm_power_update_mac(mvm, vif);
        mutex_unlock(&mvm->mutex);
 
        return ret ?: count;
@@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
        int bufsz = sizeof(buf);
        int pos;
 
-       pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+       pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
@@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
 
        ap_sta_id = mvmvif->ap_sta_id;
 
+       switch (ieee80211_vif_type_p2p(vif)) {
+       case NL80211_IFTYPE_ADHOC:
+               pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+               break;
+       case NL80211_IFTYPE_STATION:
+               pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+               break;
+       case NL80211_IFTYPE_AP:
+               pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+               break;
+       case NL80211_IFTYPE_P2P_CLIENT:
+               pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+               break;
+       case NL80211_IFTYPE_P2P_GO:
+               pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+               break;
+       case NL80211_IFTYPE_P2P_DEVICE:
+               pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+               break;
+       default:
+               break;
+       }
+
        pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
                         mvmvif->id, mvmvif->color);
        pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
                        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
 
                        pos += scnprintf(buf+pos, bufsz-pos,
-                                        "ap_sta_id %d - reduced Tx power %d\n",
+                                        "ap_sta_id %d - reduced Tx power %d force %d\n",
                                         ap_sta_id,
-                                        mvm_sta->bt_reduced_txpower);
+                                        mvm_sta->bt_reduced_txpower,
+                                        mvm_sta->bt_reduced_txpower_dbg);
                }
        }
 
@@ -269,6 +293,41 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
+                                          char *buf, size_t count,
+                                          loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       struct iwl_mvm_sta *mvmsta;
+       bool reduced_tx_power;
+       int ret;
+
+       if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+               return -ENOTCONN;
+
+       if (strtobool(buf, &reduced_tx_power) != 0)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+
+       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+       if (IS_ERR_OR_NULL(mvmsta)) {
+               mutex_unlock(&mvm->mutex);
+               return -ENOTCONN;
+       }
+
+       mvmsta->bt_reduced_txpower_dbg = false;
+       ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+                                         reduced_tx_power);
+       if (!ret)
+               mvmsta->bt_reduced_txpower_dbg = true;
+
+       mutex_unlock(&mvm->mutex);
+
+       return ret ? : count;
+}
+
 static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
                                enum iwl_dbgfs_bf_mask param, int value)
 {
@@ -403,9 +462,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
        mutex_lock(&mvm->mutex);
        iwl_dbgfs_update_bf(vif, param, value);
        if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
-               ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+               ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
        else
-               ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+               ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
        mutex_unlock(&mvm->mutex);
 
        return ret ?: count;
@@ -460,6 +519,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+                                          size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       u8 value;
+       int ret;
+
+       ret = kstrtou8(buf, 0, &value);
+       if (ret)
+               return ret;
+       if (value > 1)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+       iwl_mvm_update_low_latency(mvm, vif, value);
+       mutex_unlock(&mvm->mutex);
+
+       return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+                                         char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       char buf[3];
+
+       buf[0] = mvmvif->low_latency ? '1' : '0';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -473,6 +567,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
 MVM_DEBUGFS_READ_FILE_OPS(mac_params);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -496,15 +592,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
        }
 
-       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+       if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
+           iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
            ((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
             (vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
-             mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
+             mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)))
                MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
                                         S_IRUSR);
 
-       MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
-                                S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
+                                S_IRUSR | S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
index 369d4c90e669020a6fbb924122e2691247e54d7b..1b52deea60812e4f6ac42c94412b1ecf518f44f7 100644 (file)
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
+#include <linux/vmalloc.h>
+
 #include "mvm.h"
 #include "sta.h"
 #include "iwl-io.h"
 #include "iwl-prph.h"
 #include "debugfs.h"
+#include "fw-error-dump.h"
 
 static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
                                        size_t count, loff_t *ppos)
@@ -90,7 +93,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
 static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
                                         size_t count, loff_t *ppos)
 {
-       struct ieee80211_sta *sta;
+       struct iwl_mvm_sta *mvmsta;
        int sta_id, drain, ret;
 
        if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@@ -105,19 +108,63 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
 
        mutex_lock(&mvm->mutex);
 
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-       if (IS_ERR_OR_NULL(sta))
+       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+       if (!mvmsta)
                ret = -ENOENT;
        else
-               ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
-                       count;
+               ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
 
        mutex_unlock(&mvm->mutex);
 
        return ret;
 }
 
+static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
+{
+       struct iwl_mvm *mvm = inode->i_private;
+       int ret;
+
+       if (!mvm)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+       if (!mvm->fw_error_dump) {
+               ret = -ENODATA;
+               goto out;
+       }
+
+       file->private_data = mvm->fw_error_dump;
+       mvm->fw_error_dump = NULL;
+       kfree(mvm->fw_error_sram);
+       mvm->fw_error_sram = NULL;
+       mvm->fw_error_sram_len = 0;
+       ret = 0;
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
+                                           char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       struct iwl_fw_error_dump_file *dump_file = file->private_data;
+
+       return simple_read_from_buffer(user_buf, count, ppos,
+                                      dump_file,
+                                      le32_to_cpu(dump_file->file_len));
+}
+
+static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
+                                          struct file *file)
+{
+       vfree(file->private_data);
+
+       return 0;
+}
+
 static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
 {
@@ -251,7 +298,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
        }
 
        mutex_lock(&mvm->mutex);
-       ret = iwl_mvm_power_update_device_mode(mvm);
+       ret = iwl_mvm_power_update_device(mvm);
        mutex_unlock(&mvm->mutex);
 
        return ret ?: count;
@@ -351,6 +398,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
                         le32_to_cpu(notif->secondary_ch_lut));
        pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
                         le32_to_cpu(notif->bt_activity_grading));
+       pos += scnprintf(buf+pos, bufsz-pos,
+                        "antenna isolation = %d CORUN LUT index = %d\n",
+                        mvm->last_ant_isol, mvm->last_corun_lut);
 
        mutex_unlock(&mvm->mutex);
 
@@ -393,6 +443,22 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static ssize_t
+iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
+                          size_t count, loff_t *ppos)
+{
+       u32 bt_tx_prio;
+
+       if (sscanf(buf, "%u", &bt_tx_prio) != 1)
+               return -EINVAL;
+       if (bt_tx_prio > 4)
+               return -EINVAL;
+
+       mvm->bt_tx_prio = bt_tx_prio;
+
+       return count;
+}
+
 #define PRINT_STATS_LE32(_str, _val)                                   \
                         pos += scnprintf(buf + pos, bufsz - pos,       \
                                          fmt_table, _str,              \
@@ -532,6 +598,80 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
 }
 #undef PRINT_STAT_LE32
 
+static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
+                                         char __user *user_buf, size_t count,
+                                         loff_t *ppos,
+                                         struct iwl_mvm_frame_stats *stats)
+{
+       char *buff, *pos, *endpos;
+       int idx, i;
+       int ret;
+       static const size_t bufsz = 1024;
+
+       buff = kmalloc(bufsz, GFP_KERNEL);
+       if (!buff)
+               return -ENOMEM;
+
+       spin_lock_bh(&mvm->drv_stats_lock);
+
+       pos = buff;
+       endpos = pos + bufsz;
+
+       pos += scnprintf(pos, endpos - pos,
+                        "Legacy/HT/VHT\t:\t%d/%d/%d\n",
+                        stats->legacy_frames,
+                        stats->ht_frames,
+                        stats->vht_frames);
+       pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
+                        stats->bw_20_frames,
+                        stats->bw_40_frames,
+                        stats->bw_80_frames);
+       pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
+                        stats->ngi_frames,
+                        stats->sgi_frames);
+       pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
+                        stats->siso_frames,
+                        stats->mimo2_frames);
+       pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
+                        stats->fail_frames,
+                        stats->success_frames);
+       pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
+                        stats->agg_frames);
+       pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
+                        stats->ampdu_count);
+       pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+                        stats->ampdu_count > 0 ?
+                        (stats->agg_frames / stats->ampdu_count) : 0);
+
+       pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+
+       idx = stats->last_frame_idx - 1;
+       for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+               idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+               if (stats->last_rates[idx] == 0)
+                       continue;
+               pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
+                                (int)(ARRAY_SIZE(stats->last_rates) - i));
+               pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
+       }
+       spin_unlock_bh(&mvm->drv_stats_lock);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+       kfree(buff);
+
+       return ret;
+}
+
+static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
+                                          char __user *user_buf, size_t count,
+                                          loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+
+       return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
+                                         &mvm->drv_rx_stats);
+}
+
 static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
                                          size_t count, loff_t *ppos)
 {
@@ -592,7 +732,7 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
                return -EINVAL;
        if (scan_rx_ant > ANT_ABC)
                return -EINVAL;
-       if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+       if (scan_rx_ant & ~mvm->fw->valid_rx_ant)
                return -EINVAL;
 
        mvm->scan_rx_ant = scan_rx_ant;
@@ -600,6 +740,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
        return count;
 }
 
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+                                           char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bcast_filter_cmd cmd;
+       const struct iwl_fw_bcast_filter *filter;
+       char *buf;
+       int bufsz = 1024;
+       int i, j, pos = 0;
+       ssize_t ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+       if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+               ADD_TEXT("None\n");
+               mutex_unlock(&mvm->mutex);
+               goto out;
+       }
+       mutex_unlock(&mvm->mutex);
+
+       for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+               filter = &cmd.filters[i];
+
+               ADD_TEXT("Filter [%d]:\n", i);
+               ADD_TEXT("\tDiscard=%d\n", filter->discard);
+               ADD_TEXT("\tFrame Type: %s\n",
+                        filter->frame_type ? "IPv4" : "Generic");
+
+               for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+                       const struct iwl_fw_bcast_filter_attr *attr;
+
+                       attr = &filter->attrs[j];
+                       if (!attr->mask)
+                               break;
+
+                       ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+                                j, attr->offset,
+                                attr->offset_type ? "IP End" :
+                                                    "Payload Start",
+                                be32_to_cpu(attr->mask),
+                                be32_to_cpu(attr->val),
+                                le16_to_cpu(attr->reserved1));
+               }
+       }
+out:
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+                                            size_t count, loff_t *ppos)
+{
+       int pos, next_pos;
+       struct iwl_fw_bcast_filter filter = {};
+       struct iwl_bcast_filter_cmd cmd;
+       u32 filter_id, attr_id, mask, value;
+       int err = 0;
+
+       if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
+                  &filter.frame_type, &pos) != 3)
+               return -EINVAL;
+
+       if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+           filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+               return -EINVAL;
+
+       for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+            attr_id++) {
+               struct iwl_fw_bcast_filter_attr *attr =
+                               &filter.attrs[attr_id];
+
+               if (pos >= count)
+                       break;
+
+               if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+                          &attr->offset, &attr->offset_type,
+                          &mask, &value, &next_pos) != 4)
+                       return -EINVAL;
+
+               attr->mask = cpu_to_be32(mask);
+               attr->val = cpu_to_be32(value);
+               if (mask)
+                       filter.num_attrs++;
+
+               pos += next_pos;
+       }
+
+       mutex_lock(&mvm->mutex);
+       memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+              &filter, sizeof(filter));
+
+       /* send updated bcast filtering configuration */
+       if (mvm->dbgfs_bcast_filtering.override &&
+           iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+                                          sizeof(cmd), &cmd);
+       mutex_unlock(&mvm->mutex);
+
+       return err ?: count;
+}
+
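For reference, the sscanf() parsing above means each write supplies one filter as "<filter_id> <discard> <frame_type>" followed by up to MAX_BCAST_FILTER_ATTRS "<offset> <offset_type> <mask> <value>" tuples; with the override knob enabled, the handler immediately rebuilds and sends BCAST_FILTER_CMD. A hedged illustration of composing such a line (the concrete offsets and values are invented for the example):

static void example_build_filter_line(char *line, size_t len)
{
	/* filter 0: discard IPv4 broadcasts whose bytes at payload offsets
	 * 12-13 are 0x12 0x34; the result is written to the "filters"
	 * debugfs entry created under bcast_filtering/ below */
	snprintf(line, len, "%d %d %d  %d %d %#x %#x\n",
		 0,				/* filter_id */
		 1,				/* discard */
		 BCAST_FILTER_FRAME_TYPE_IPV4,	/* frame_type */
		 12,				/* attr offset */
		 BCAST_FILTER_OFFSET_PAYLOAD_START,
		 0xffff0000,			/* mask */
		 0x12340000);			/* value */
}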
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+                                                char __user *user_buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bcast_filter_cmd cmd;
+       char *buf;
+       int bufsz = 1024;
+       int i, pos = 0;
+       ssize_t ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+       if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+               ADD_TEXT("None\n");
+               mutex_unlock(&mvm->mutex);
+               goto out;
+       }
+       mutex_unlock(&mvm->mutex);
+
+       for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+               const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+               ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+                        i, mac->default_discard, mac->attached_filters);
+       }
+out:
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+                                                 char *buf, size_t count,
+                                                 loff_t *ppos)
+{
+       struct iwl_bcast_filter_cmd cmd;
+       struct iwl_fw_bcast_mac mac = {};
+       u32 mac_id, attached_filters;
+       int err = 0;
+
+       if (!mvm->bcast_filters)
+               return -ENOENT;
+
+       if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
+                  &attached_filters) != 3)
+               return -EINVAL;
+
+       if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+           mac.default_discard > 1 ||
+           attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+               return -EINVAL;
+
+       mac.attached_filters = cpu_to_le16(attached_filters);
+
+       mutex_lock(&mvm->mutex);
+       memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+              &mac, sizeof(mac));
+
+       /* send updated bcast filtering configuration */
+       if (mvm->dbgfs_bcast_filtering.override &&
+           iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+               err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+                                          sizeof(cmd), &cmd);
+       mutex_unlock(&mvm->mutex);
+
+       return err ?: count;
+}
+#endif
+
 #ifdef CONFIG_PM_SLEEP
 static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
                                       size_t count, loff_t *ppos)
@@ -658,15 +979,117 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
 }
 #endif
 
+#define PRINT_MVM_REF(ref) do {                                        \
+       if (test_bit(ref, mvm->ref_bitmap))                     \
+               pos += scnprintf(buf + pos, bufsz - pos,        \
+                                "\t(0x%lx) %s\n",              \
+                                BIT(ref), #ref);               \
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       int pos = 0;
+       char buf[256];
+       const size_t bufsz = sizeof(buf);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
+                        mvm->ref_bitmap[0]);
+
+       PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+       PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+       PRINT_MVM_REF(IWL_MVM_REF_ROC);
+       PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+       PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+       PRINT_MVM_REF(IWL_MVM_REF_USER);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+                                        size_t count, loff_t *ppos)
+{
+       unsigned long value;
+       int ret;
+       bool taken;
+
+       ret = kstrtoul(buf, 10, &value);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&mvm->mutex);
+
+       taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
+       if (value == 1 && !taken)
+               iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+       else if (value == 0 && taken)
+               iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+       else
+               ret = -EINVAL;
+
+       mutex_unlock(&mvm->mutex);
+
+       if (ret < 0)
+               return ret;
+       return count;
+}
+
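In short, writing "1" to d0i3_refs takes the IWL_MVM_REF_USER reference and "0" drops it again; repeating either request in the same state fails with -EINVAL. A small userspace illustration (the debugfs path is an assumption, not taken from the patch):

#include <fcntl.h>
#include <unistd.h>

static int set_user_d0i3_ref(const char *path, int take)
{
	/* path is e.g. <debugfs>/iwlwifi/<dev>/iwlmvm/d0i3_refs (assumed) */
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, take ? "1" : "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}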
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do {                  \
-               if (!debugfs_create_file(#name, mode, parent, mvm,      \
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do {     \
+               if (!debugfs_create_file(alias, mode, parent, mvm,      \
                                         &iwl_dbgfs_##name##_ops))      \
                        goto err;                                       \
        } while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+       MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t
+iwl_dbgfs_prph_reg_read(struct file *file,
+                       char __user *user_buf,
+                       size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       int pos = 0;
+       char buf[32];
+       const size_t bufsz = sizeof(buf);
+
+       if (!mvm->dbgfs_prph_reg_addr)
+               return -EINVAL;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
+               mvm->dbgfs_prph_reg_addr,
+               iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
+                        size_t count, loff_t *ppos)
+{
+       u8 args;
+       u32 value;
+
+       args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+       /* if we only want to set the reg address - nothing more to do */
+       if (args == 1)
+               goto out;
+
+       /* otherwise, make sure we have both address and value */
+       if (args != 2)
+               return -EINVAL;
+
+       iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+out:
+       return count;
+}
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
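Taken together with the read handler, prph_reg is a small peek/poke interface: writing "<addr>" only latches a periphery register address, writing "<addr> <value>" also writes that register, and a subsequent read prints "Reg 0x<addr>: (0x<value>)". A hedged sketch of the two accepted write formats (the register address below is an arbitrary example, not from the patch):

static void example_prph_reg_cmds(char *latch, char *poke, size_t len)
{
	/* 1) latch only the address; reading prph_reg afterwards dumps it */
	snprintf(latch, len, "0xa01c00\n");

	/* 2) write 0x1 to that periphery register in the same operation */
	snprintf(poke, len, "0xa01c00 0x1\n");
}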
 
 /* Device wide debugfs entries */
 MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@@ -677,9 +1100,23 @@ MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
 MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
 MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+
+static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
+        .open = iwl_dbgfs_fw_error_dump_open,
+        .read = iwl_dbgfs_fw_error_dump_read,
+        .release = iwl_dbgfs_fw_error_dump_release,
+};
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
 
 #ifdef CONFIG_PM_SLEEP
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@@ -687,24 +1124,52 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
 
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
 {
+       struct dentry *bcast_dir __maybe_unused;
        char buf[100];
 
+       spin_lock_init(&mvm->drv_stats_lock);
+
        mvm->debugfs_dir = dbgfs_dir;
 
        MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
                MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
                                     S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
                             S_IWUSR | S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
+       MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+               bcast_dir = debugfs_create_dir("bcast_filtering",
+                                              mvm->debugfs_dir);
+               if (!bcast_dir)
+                       goto err;
+
+               if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
+                               bcast_dir,
+                               &mvm->dbgfs_bcast_filtering.override))
+                       goto err;
+
+               MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+                                          bcast_dir, S_IWUSR | S_IRUSR);
+               MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+                                          bcast_dir, S_IWUSR | S_IRUSR);
+       }
+#endif
+
 #ifdef CONFIG_PM_SLEEP
        MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
deleted file mode 100644 (file)
index 1b4e54d..0000000
+++ /dev/null
@@ -1,361 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __fw_api_bt_coex_h__
-#define __fw_api_bt_coex_h__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#define BITS(nb) (BIT(nb) - 1)
-
-/**
- * enum iwl_bt_coex_flags - flags for BT_COEX command
- * @BT_CH_PRIMARY_EN:
- * @BT_CH_SECONDARY_EN:
- * @BT_NOTIF_COEX_OFF:
- * @BT_COEX_MODE_POS:
- * @BT_COEX_MODE_MSK:
- * @BT_COEX_DISABLE:
- * @BT_COEX_2W:
- * @BT_COEX_3W:
- * @BT_COEX_NW:
- * @BT_USE_DEFAULTS:
- * @BT_SYNC_2_BT_DISABLE:
- * @BT_COEX_CORUNNING_TBL_EN:
- *
- * The COEX_MODE must be set for each command. Even if it is not changed.
- */
-enum iwl_bt_coex_flags {
-       BT_CH_PRIMARY_EN                = BIT(0),
-       BT_CH_SECONDARY_EN              = BIT(1),
-       BT_NOTIF_COEX_OFF               = BIT(2),
-       BT_COEX_MODE_POS                = 3,
-       BT_COEX_MODE_MSK                = BITS(3) << BT_COEX_MODE_POS,
-       BT_COEX_DISABLE                 = 0x0 << BT_COEX_MODE_POS,
-       BT_COEX_2W                      = 0x1 << BT_COEX_MODE_POS,
-       BT_COEX_3W                      = 0x2 << BT_COEX_MODE_POS,
-       BT_COEX_NW                      = 0x3 << BT_COEX_MODE_POS,
-       BT_USE_DEFAULTS                 = BIT(6),
-       BT_SYNC_2_BT_DISABLE            = BIT(7),
-       BT_COEX_CORUNNING_TBL_EN        = BIT(8),
-       BT_COEX_MPLUT_TBL_EN            = BIT(9),
-       /* Bit 10 is reserved */
-       BT_COEX_WF_PRIO_BOOST_CHECK_EN  = BIT(11),
-};
-
-/*
- * indicates what has changed in the BT_COEX command.
- * BT_VALID_ENABLE must be set for each command. Commands without this bit will
- * discarded by the firmware
- */
-enum iwl_bt_coex_valid_bit_msk {
-       BT_VALID_ENABLE                 = BIT(0),
-       BT_VALID_BT_PRIO_BOOST          = BIT(1),
-       BT_VALID_MAX_KILL               = BIT(2),
-       BT_VALID_3W_TMRS                = BIT(3),
-       BT_VALID_KILL_ACK               = BIT(4),
-       BT_VALID_KILL_CTS               = BIT(5),
-       BT_VALID_REDUCED_TX_POWER       = BIT(6),
-       BT_VALID_LUT                    = BIT(7),
-       BT_VALID_WIFI_RX_SW_PRIO_BOOST  = BIT(8),
-       BT_VALID_WIFI_TX_SW_PRIO_BOOST  = BIT(9),
-       BT_VALID_MULTI_PRIO_LUT         = BIT(10),
-       BT_VALID_TRM_KICK_FILTER        = BIT(11),
-       BT_VALID_CORUN_LUT_20           = BIT(12),
-       BT_VALID_CORUN_LUT_40           = BIT(13),
-       BT_VALID_ANT_ISOLATION          = BIT(14),
-       BT_VALID_ANT_ISOLATION_THRS     = BIT(15),
-       BT_VALID_TXTX_DELTA_FREQ_THRS   = BIT(16),
-       BT_VALID_TXRX_MAX_FREQ_0        = BIT(17),
-       BT_VALID_SYNC_TO_SCO            = BIT(18),
-};
-
-/**
- * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames.
- * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
- * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
- *
- * This mechanism allows to have BT and WiFi run concurrently. Since WiFi
- * reduces its Tx power, it can work along with BT, hence reducing the amount
- * of WiFi frames being killed by BT.
- */
-enum iwl_bt_reduced_tx_power {
-       BT_REDUCED_TX_POWER_CTL         = BIT(0),
-       BT_REDUCED_TX_POWER_DATA        = BIT(1),
-};
-
-enum iwl_bt_coex_lut_type {
-       BT_COEX_TIGHT_LUT = 0,
-       BT_COEX_LOOSE_LUT,
-       BT_COEX_TX_DIS_LUT,
-
-       BT_COEX_MAX_LUT,
-};
-
-#define BT_COEX_LUT_SIZE (12)
-#define BT_COEX_CORUN_LUT_SIZE (32)
-#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
-#define BT_COEX_BOOST_SIZE (4)
-#define BT_REDUCED_TX_POWER_BIT BIT(7)
-
-/**
- * struct iwl_bt_coex_cmd - bt coex configuration command
- * @flags:&enum iwl_bt_coex_flags
- * @max_kill:
- * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @bt4_antenna_isolation:
- * @bt4_antenna_isolation_thr:
- * @bt4_tx_tx_delta_freq_thr:
- * @bt4_tx_rx_max_freq0:
- * @bt_prio_boost:
- * @wifi_tx_prio_boost: SW boost of wifi tx priority
- * @wifi_rx_prio_boost: SW boost of wifi rx priority
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @decision_lut:
- * @bt4_multiprio_lut:
- * @bt4_corun_lut20:
- * @bt4_corun_lut40:
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- *
- * The structure is used for the BT_COEX command.
- */
-struct iwl_bt_coex_cmd {
-       __le32 flags;
-       u8 max_kill;
-       u8 bt_reduced_tx_power;
-       u8 reserved[2];
-
-       u8 bt4_antenna_isolation;
-       u8 bt4_antenna_isolation_thr;
-       u8 bt4_tx_tx_delta_freq_thr;
-       u8 bt4_tx_rx_max_freq0;
-
-       __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
-       __le32 wifi_tx_prio_boost;
-       __le32 wifi_rx_prio_boost;
-       __le32 kill_ack_msk;
-       __le32 kill_cts_msk;
-
-       __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
-       __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
-       __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
-       __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
-
-       __le32 valid_bit_msk;
-} __packed; /* BT_COEX_CMD_API_S_VER_3 */
-
-/**
- * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
- * @bt_primary_ci:
- * @bt_secondary_ci:
- * @co_run_bw_primary:
- * @co_run_bw_secondary:
- * @primary_ch_phy_id:
- * @secondary_ch_phy_id:
- *
- * Used for BT_COEX_CI command
- */
-struct iwl_bt_coex_ci_cmd {
-       __le64 bt_primary_ci;
-       __le64 bt_secondary_ci;
-
-       u8 co_run_bw_primary;
-       u8 co_run_bw_secondary;
-       u8 primary_ch_phy_id;
-       u8 secondary_ch_phy_id;
-} __packed; /* BT_CI_MSG_API_S_VER_1 */
-
-#define BT_MBOX(n_dw, _msg, _pos, _nbits)      \
-       BT_MBOX##n_dw##_##_msg##_POS = (_pos),  \
-       BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
-
-enum iwl_bt_mxbox_dw0 {
-       BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
-       BT_MBOX(0, LE_PROF1, 3, 1),
-       BT_MBOX(0, LE_PROF2, 4, 1),
-       BT_MBOX(0, LE_PROF_OTHER, 5, 1),
-       BT_MBOX(0, CHL_SEQ_N, 8, 4),
-       BT_MBOX(0, INBAND_S, 13, 1),
-       BT_MBOX(0, LE_MIN_RSSI, 16, 4),
-       BT_MBOX(0, LE_SCAN, 20, 1),
-       BT_MBOX(0, LE_ADV, 21, 1),
-       BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
-       BT_MBOX(0, OPEN_CON_1, 28, 2),
-};
-
-enum iwl_bt_mxbox_dw1 {
-       BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
-       BT_MBOX(1, IP_SR, 4, 1),
-       BT_MBOX(1, LE_MSTR, 5, 1),
-       BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
-       BT_MBOX(1, MSG_TYPE, 16, 3),
-       BT_MBOX(1, SSN, 19, 2),
-};
-
-enum iwl_bt_mxbox_dw2 {
-       BT_MBOX(2, SNIFF_ACT, 0, 3),
-       BT_MBOX(2, PAG, 3, 1),
-       BT_MBOX(2, INQUIRY, 4, 1),
-       BT_MBOX(2, CONN, 5, 1),
-       BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
-       BT_MBOX(2, DISC, 13, 1),
-       BT_MBOX(2, SCO_TX_ACT, 16, 2),
-       BT_MBOX(2, SCO_RX_ACT, 18, 2),
-       BT_MBOX(2, ESCO_RE_TX, 20, 2),
-       BT_MBOX(2, SCO_DURATION, 24, 6),
-};
-
-enum iwl_bt_mxbox_dw3 {
-       BT_MBOX(3, SCO_STATE, 0, 1),
-       BT_MBOX(3, SNIFF_STATE, 1, 1),
-       BT_MBOX(3, A2DP_STATE, 2, 1),
-       BT_MBOX(3, ACL_STATE, 3, 1),
-       BT_MBOX(3, MSTR_STATE, 4, 1),
-       BT_MBOX(3, OBX_STATE, 5, 1),
-       BT_MBOX(3, OPEN_CON_2, 8, 2),
-       BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
-       BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
-       BT_MBOX(3, INBAND_P, 13, 1),
-       BT_MBOX(3, MSG_TYPE_2, 16, 3),
-       BT_MBOX(3, SSN_2, 19, 2),
-       BT_MBOX(3, UPDATE_REQUEST, 21, 1),
-};
-
-#define BT_MBOX_MSG(_notif, _num, _field)                                   \
-       ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
-       >> BT_MBOX##_num##_##_field##_POS)
-
-enum iwl_bt_activity_grading {
-       BT_OFF                  = 0,
-       BT_ON_NO_CONNECTION     = 1,
-       BT_LOW_TRAFFIC          = 2,
-       BT_HIGH_TRAFFIC         = 3,
-};
-
-/**
- * struct iwl_bt_coex_profile_notif - notification about BT coex
- * @mbox_msg: message from BT to WiFi
- * @msg_idx: the index of the message
- * @bt_status: 0 - off, 1 - on
- * @bt_open_conn: number of BT connections open
- * @bt_traffic_load: load of BT traffic
- * @bt_agg_traffic_load: aggregated load of BT traffic
- * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
- * @primary_ch_lut: LUT used for primary channel
- * @secondary_ch_lut: LUT used for secondary channel
- * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
- */
-struct iwl_bt_coex_profile_notif {
-       __le32 mbox_msg[4];
-       __le32 msg_idx;
-       u8 bt_status;
-       u8 bt_open_conn;
-       u8 bt_traffic_load;
-       u8 bt_agg_traffic_load;
-       u8 bt_ci_compliance;
-       u8 reserved[3];
-
-       __le32 primary_ch_lut;
-       __le32 secondary_ch_lut;
-       __le32 bt_activity_grading;
-} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
-
-enum iwl_bt_coex_prio_table_event {
-       BT_COEX_PRIO_TBL_EVT_INIT_CALIB1                = 0,
-       BT_COEX_PRIO_TBL_EVT_INIT_CALIB2                = 1,
-       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1        = 2,
-       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2        = 3,
-       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1       = 4,
-       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2       = 5,
-       BT_COEX_PRIO_TBL_EVT_DTIM                       = 6,
-       BT_COEX_PRIO_TBL_EVT_SCAN52                     = 7,
-       BT_COEX_PRIO_TBL_EVT_SCAN24                     = 8,
-       BT_COEX_PRIO_TBL_EVT_IDLE                       = 9,
-       BT_COEX_PRIO_TBL_EVT_MAX                        = 16,
-}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
-
-enum iwl_bt_coex_prio_table_prio {
-       BT_COEX_PRIO_TBL_DISABLED       = 0,
-       BT_COEX_PRIO_TBL_PRIO_LOW       = 1,
-       BT_COEX_PRIO_TBL_PRIO_HIGH      = 2,
-       BT_COEX_PRIO_TBL_PRIO_BYPASS    = 3,
-       BT_COEX_PRIO_TBL_PRIO_COEX_OFF  = 4,
-       BT_COEX_PRIO_TBL_PRIO_COEX_ON   = 5,
-       BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
-       BT_COEX_PRIO_TBL_MAX            = 8,
-}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
-
-#define BT_COEX_PRIO_TBL_SHRD_ANT_POS     (0)
-#define BT_COEX_PRIO_TBL_PRIO_POS         (1)
-#define BT_COEX_PRIO_TBL_RESERVED_POS     (4)
-
-/**
- * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
- * @prio_tbl:
- */
-struct iwl_bt_coex_prio_tbl_cmd {
-       u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
-} __packed;
-
-#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
new file mode 100644 (file)
index 0000000..21877e5
--- /dev/null
@@ -0,0 +1,352 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_bt_coex_h__
+#define __fw_api_bt_coex_h__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define BITS(nb) (BIT(nb) - 1)
+
+/**
+ * enum iwl_bt_coex_flags - flags for BT_COEX command
+ * @BT_COEX_MODE_POS:
+ * @BT_COEX_MODE_MSK:
+ * @BT_COEX_DISABLE:
+ * @BT_COEX_2W:
+ * @BT_COEX_3W:
+ * @BT_COEX_NW:
+ * @BT_COEX_SYNC2SCO:
+ * @BT_COEX_CORUNNING:
+ * @BT_COEX_MPLUT:
+ *
+ * The COEX_MODE must be set for each command, even if it is not changed.
+ */
+enum iwl_bt_coex_flags {
+       BT_COEX_MODE_POS                = 3,
+       BT_COEX_MODE_MSK                = BITS(3) << BT_COEX_MODE_POS,
+       BT_COEX_DISABLE                 = 0x0 << BT_COEX_MODE_POS,
+       BT_COEX_2W                      = 0x1 << BT_COEX_MODE_POS,
+       BT_COEX_3W                      = 0x2 << BT_COEX_MODE_POS,
+       BT_COEX_NW                      = 0x3 << BT_COEX_MODE_POS,
+       BT_COEX_SYNC2SCO                = BIT(7),
+       BT_COEX_CORUNNING               = BIT(8),
+       BT_COEX_MPLUT                   = BIT(9),
+};
+
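As a quick illustration of how these flags pack (the helper below is hypothetical, not part of the driver): BITS(3) builds a 3-bit mask, so the co-existence mode occupies bits 3-5 of the command's flags word while the remaining enumerators are ordinary single-bit flags.

static inline __le32 iwl_bt_coex_set_mode(__le32 flags, u32 mode)
{
	u32 v = le32_to_cpu(flags);

	v &= ~BT_COEX_MODE_MSK;	/* clear bits 3-5, i.e. BITS(3) << BT_COEX_MODE_POS */
	v |= mode;		/* mode is pre-shifted, e.g. BT_COEX_3W == 0x2 << 3 */
	return cpu_to_le32(v);
}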
+/*
+ * indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * be discarded by the firmware.
+ */
+enum iwl_bt_coex_valid_bit_msk {
+       BT_VALID_ENABLE                 = BIT(0),
+       BT_VALID_BT_PRIO_BOOST          = BIT(1),
+       BT_VALID_MAX_KILL               = BIT(2),
+       BT_VALID_3W_TMRS                = BIT(3),
+       BT_VALID_KILL_ACK               = BIT(4),
+       BT_VALID_KILL_CTS               = BIT(5),
+       BT_VALID_REDUCED_TX_POWER       = BIT(6),
+       BT_VALID_LUT                    = BIT(7),
+       BT_VALID_WIFI_RX_SW_PRIO_BOOST  = BIT(8),
+       BT_VALID_WIFI_TX_SW_PRIO_BOOST  = BIT(9),
+       BT_VALID_MULTI_PRIO_LUT         = BIT(10),
+       BT_VALID_TRM_KICK_FILTER        = BIT(11),
+       BT_VALID_CORUN_LUT_20           = BIT(12),
+       BT_VALID_CORUN_LUT_40           = BIT(13),
+       BT_VALID_ANT_ISOLATION          = BIT(14),
+       BT_VALID_ANT_ISOLATION_THRS     = BIT(15),
+       BT_VALID_TXTX_DELTA_FREQ_THRS   = BIT(16),
+       BT_VALID_TXRX_MAX_FREQ_0        = BIT(17),
+       BT_VALID_SYNC_TO_SCO            = BIT(18),
+};
+
+/**
+ * enum iwl_bt_reduced_tx_power - allows reducing Tx power for WiFi frames.
+ * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
+ * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
+ *
+ * This mechanism allows BT and WiFi to run concurrently. Since WiFi
+ * reduces its Tx power, it can work alongside BT, which reduces the number
+ * of WiFi frames being killed by BT.
+ */
+enum iwl_bt_reduced_tx_power {
+       BT_REDUCED_TX_POWER_CTL         = BIT(0),
+       BT_REDUCED_TX_POWER_DATA        = BIT(1),
+};
+
+enum iwl_bt_coex_lut_type {
+       BT_COEX_TIGHT_LUT = 0,
+       BT_COEX_LOOSE_LUT,
+       BT_COEX_TX_DIS_LUT,
+
+       BT_COEX_MAX_LUT,
+};
+
+#define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
+
+/**
+ * struct iwl_bt_coex_cmd - bt coex configuration command
+ * @flags: &enum iwl_bt_coex_flags
+ * @max_kill:
+ * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
+ * @bt4_antenna_isolation:
+ * @bt4_antenna_isolation_thr:
+ * @bt4_tx_tx_delta_freq_thr:
+ * @bt4_tx_rx_max_freq0:
+ * @bt_prio_boost:
+ * @wifi_tx_prio_boost: SW boost of wifi tx priority
+ * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @decision_lut:
+ * @bt4_multiprio_lut:
+ * @bt4_corun_lut20:
+ * @bt4_corun_lut40:
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwl_bt_coex_cmd {
+       __le32 flags;
+       u8 max_kill;
+       u8 bt_reduced_tx_power;
+       u8 reserved[2];
+
+       u8 bt4_antenna_isolation;
+       u8 bt4_antenna_isolation_thr;
+       u8 bt4_tx_tx_delta_freq_thr;
+       u8 bt4_tx_rx_max_freq0;
+
+       __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
+       __le32 wifi_tx_prio_boost;
+       __le32 wifi_rx_prio_boost;
+       __le32 kill_ack_msk;
+       __le32 kill_cts_msk;
+
+       __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+       __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+       __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+       __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+
+       __le32 valid_bit_msk;
+} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+       __le64 bt_primary_ci;
+       __le64 bt_secondary_ci;
+
+       u8 co_run_bw_primary;
+       u8 co_run_bw_secondary;
+       u8 primary_ch_phy_id;
+       u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
+#define BT_MBOX(n_dw, _msg, _pos, _nbits)      \
+       BT_MBOX##n_dw##_##_msg##_POS = (_pos),  \
+       BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
+
+enum iwl_bt_mxbox_dw0 {
+       BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
+       BT_MBOX(0, LE_PROF1, 3, 1),
+       BT_MBOX(0, LE_PROF2, 4, 1),
+       BT_MBOX(0, LE_PROF_OTHER, 5, 1),
+       BT_MBOX(0, CHL_SEQ_N, 8, 4),
+       BT_MBOX(0, INBAND_S, 13, 1),
+       BT_MBOX(0, LE_MIN_RSSI, 16, 4),
+       BT_MBOX(0, LE_SCAN, 20, 1),
+       BT_MBOX(0, LE_ADV, 21, 1),
+       BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
+       BT_MBOX(0, OPEN_CON_1, 28, 2),
+};
+
+enum iwl_bt_mxbox_dw1 {
+       BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
+       BT_MBOX(1, IP_SR, 4, 1),
+       BT_MBOX(1, LE_MSTR, 5, 1),
+       BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
+       BT_MBOX(1, MSG_TYPE, 16, 3),
+       BT_MBOX(1, SSN, 19, 2),
+};
+
+enum iwl_bt_mxbox_dw2 {
+       BT_MBOX(2, SNIFF_ACT, 0, 3),
+       BT_MBOX(2, PAG, 3, 1),
+       BT_MBOX(2, INQUIRY, 4, 1),
+       BT_MBOX(2, CONN, 5, 1),
+       BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
+       BT_MBOX(2, DISC, 13, 1),
+       BT_MBOX(2, SCO_TX_ACT, 16, 2),
+       BT_MBOX(2, SCO_RX_ACT, 18, 2),
+       BT_MBOX(2, ESCO_RE_TX, 20, 2),
+       BT_MBOX(2, SCO_DURATION, 24, 6),
+};
+
+enum iwl_bt_mxbox_dw3 {
+       BT_MBOX(3, SCO_STATE, 0, 1),
+       BT_MBOX(3, SNIFF_STATE, 1, 1),
+       BT_MBOX(3, A2DP_STATE, 2, 1),
+       BT_MBOX(3, ACL_STATE, 3, 1),
+       BT_MBOX(3, MSTR_STATE, 4, 1),
+       BT_MBOX(3, OBX_STATE, 5, 1),
+       BT_MBOX(3, OPEN_CON_2, 8, 2),
+       BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
+       BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
+       BT_MBOX(3, INBAND_P, 13, 1),
+       BT_MBOX(3, MSG_TYPE_2, 16, 3),
+       BT_MBOX(3, SSN_2, 19, 2),
+       BT_MBOX(3, UPDATE_REQUEST, 21, 1),
+};
+
+#define BT_MBOX_MSG(_notif, _num, _field)                                   \
+       ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
+       >> BT_MBOX##_num##_##_field##_POS)
+
+enum iwl_bt_activity_grading {
+       BT_OFF                  = 0,
+       BT_ON_NO_CONNECTION     = 1,
+       BT_LOW_TRAFFIC          = 2,
+       BT_HIGH_TRAFFIC         = 3,
+};
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @mbox_msg: message from BT to WiFi
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
+ */
+struct iwl_bt_coex_profile_notif {
+       __le32 mbox_msg[4];
+       __le32 msg_idx;
+       u8 bt_status;
+       u8 bt_open_conn;
+       u8 bt_traffic_load;
+       u8 bt_agg_traffic_load;
+       u8 bt_ci_compliance;
+       u8 reserved[3];
+
+       __le32 primary_ch_lut;
+       __le32 secondary_ch_lut;
+       __le32 bt_activity_grading;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+
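To illustrate the mailbox accessors (hypothetical helper, not part of the patch): BT_MBOX(0, LE_SCAN, 20, 1) expands to BT_MBOX0_LE_SCAN_POS = 20 and BT_MBOX0_LE_SCAN = BITS(1) << 20, and BT_MBOX_MSG() masks and shifts the corresponding dword of @mbox_msg:

static inline u32 iwl_bt_le_scan_active(const struct iwl_bt_coex_profile_notif *notif)
{
	/* returns the 1-bit LE_SCAN field extracted from mbox_msg[0] */
	return BT_MBOX_MSG(notif, 0, LE_SCAN);
}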
+enum iwl_bt_coex_prio_table_event {
+       BT_COEX_PRIO_TBL_EVT_INIT_CALIB1                = 0,
+       BT_COEX_PRIO_TBL_EVT_INIT_CALIB2                = 1,
+       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1        = 2,
+       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2        = 3,
+       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1       = 4,
+       BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2       = 5,
+       BT_COEX_PRIO_TBL_EVT_DTIM                       = 6,
+       BT_COEX_PRIO_TBL_EVT_SCAN52                     = 7,
+       BT_COEX_PRIO_TBL_EVT_SCAN24                     = 8,
+       BT_COEX_PRIO_TBL_EVT_IDLE                       = 9,
+       BT_COEX_PRIO_TBL_EVT_MAX                        = 16,
+}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
+
+enum iwl_bt_coex_prio_table_prio {
+       BT_COEX_PRIO_TBL_DISABLED       = 0,
+       BT_COEX_PRIO_TBL_PRIO_LOW       = 1,
+       BT_COEX_PRIO_TBL_PRIO_HIGH      = 2,
+       BT_COEX_PRIO_TBL_PRIO_BYPASS    = 3,
+       BT_COEX_PRIO_TBL_PRIO_COEX_OFF  = 4,
+       BT_COEX_PRIO_TBL_PRIO_COEX_ON   = 5,
+       BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
+       BT_COEX_PRIO_TBL_MAX            = 8,
+}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
+
+#define BT_COEX_PRIO_TBL_SHRD_ANT_POS     (0)
+#define BT_COEX_PRIO_TBL_PRIO_POS         (1)
+#define BT_COEX_PRIO_TBL_RESERVED_POS     (4)
+
+/**
+ * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
+ * @prio_tbl:
+ */
+struct iwl_bt_coex_prio_tbl_cmd {
+       u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
+} __packed;
+
+#endif /* __fw_api_bt_coex_h__ */
index 8415ff312d0eec8139bae02fcdb1e45e1ee49284..10fcc1a79ebddf3087d7de7c2c29389849a425fa 100644 (file)
@@ -231,11 +231,15 @@ enum iwl_wowlan_wakeup_filters {
        IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT              = BIT(8),
        IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS              = BIT(9),
        IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE        = BIT(10),
-       /* BIT(11) reserved */
+       IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL           = BIT(11),
        IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET          = BIT(12),
+       IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET             = BIT(13),
+       IWL_WOWLAN_WAKEUP_HOST_TIMER                    = BIT(14),
+       IWL_WOWLAN_WAKEUP_RX_FRAME                      = BIT(15),
+       IWL_WOWLAN_WAKEUP_BCN_FILTERING                 = BIT(16),
 }; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
 
-struct iwl_wowlan_config_cmd {
+struct iwl_wowlan_config_cmd_v2 {
        __le32 wakeup_filter;
        __le16 non_qos_seq;
        __le16 qos_seq[8];
@@ -243,6 +247,12 @@ struct iwl_wowlan_config_cmd {
        u8 is_11n_connection;
 } __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
 
+struct iwl_wowlan_config_cmd_v3 {
+       struct iwl_wowlan_config_cmd_v2 common;
+       u8 offloading_tid;
+       u8 reserved[3];
+} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
+
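The v3 command simply wraps the v2 layout, so the shared fields are reached through .common while the new offloading_tid sits alongside them. A hypothetical initializer (the chosen wakeup bit is only an example):

static const struct iwl_wowlan_config_cmd_v3 example_wowlan_cfg = {
	.common.wakeup_filter =
		cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT),
	.offloading_tid = 0,	/* new in API version 3 */
};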
 /*
  * WOWLAN_TSC_RSC_PARAMS
  */
index 884c0872530883ae55646b0c177d9d2ae172e1da..cbbcd8e284e4bec620eb7e272b02941b3805aa0b 100644 (file)
@@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
 
 /* Beacon filtering and beacon abort */
 #define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
 #define IWL_BF_ENERGY_DELTA_MAX 255
 #define IWL_BF_ENERGY_DELTA_MIN 0
 
 #define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
 #define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
 #define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
 
 #define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
 #define IWL_BF_ROAMING_STATE_MAX 255
 #define IWL_BF_ROAMING_STATE_MIN 0
 
 #define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
 #define IWL_BF_TEMP_THRESHOLD_MAX 255
 #define IWL_BF_TEMP_THRESHOLD_MIN 0
 
 #define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
 #define IWL_BF_TEMP_FAST_FILTER_MAX 255
 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
 
 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255
 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
 #define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
 
 #define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
 
 #define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_D0I3 1024
 #define IWL_BF_ESCAPE_TIMER_MAX 1024
 #define IWL_BF_ESCAPE_TIMER_MIN 0
 
 #define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
 #define IWL_BA_ESCAPE_TIMER_D3 9
 #define IWL_BA_ESCAPE_TIMER_MAX 1024
 #define IWL_BA_ESCAPE_TIMER_MIN 0
 
 #define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
 
-#define IWL_BF_CMD_CONFIG_DEFAULTS                                          \
-       .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT),         \
-       .bf_roaming_energy_delta =                                           \
-               cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT),            \
-       .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT),       \
-       .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT),     \
-       .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
-       .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
-       .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT),             \
-       .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),         \
-       .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+#define IWL_BF_CMD_CONFIG(mode)                                             \
+       .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode),          \
+       .bf_roaming_energy_delta =                                            \
+               cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode),             \
+       .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode),        \
+       .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode),      \
+       .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode),  \
+       .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode),  \
+       .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode),              \
+       .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode),          \
+       .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
 
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
 #endif
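A short note on the refactoring above (illustrative, not from the patch): IWL_BF_CMD_CONFIG(mode) token-pastes the mode suffix onto each constant name, so IWL_BF_CMD_CONFIG_DEFAULTS and IWL_BF_CMD_CONFIG_D0I3 expand to designated initializers built from the _DEFAULT and _D0I3 values respectively, for example:

static const struct iwl_beacon_filter_cmd example_bf_cmd = {
	IWL_BF_CMD_CONFIG_D0I3,	/* e.g. .bf_escape_timer = cpu_to_le32(1024) */
};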
index 85057219cc43ff4c4edbd3e606a9254dae9772e5..39148b5bb33262596e1dea348c1ae32c9a2c9166 100644 (file)
@@ -257,7 +257,8 @@ enum {
 
 /* Bit 17-18: (0) SS, (1) SS*2 */
 #define RATE_MCS_STBC_POS              17
-#define RATE_MCS_STBC_MSK              (1 << RATE_MCS_STBC_POS)
+#define RATE_MCS_HT_STBC_MSK           (3 << RATE_MCS_STBC_POS)
+#define RATE_MCS_VHT_STBC_MSK          (1 << RATE_MCS_STBC_POS)
 
 /* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
 #define RATE_MCS_BF_POS                        19
index 1b60fdff6a561098beaf9cd020c9c1d20946dda8..d636478672626e9436c12dc2313d3288d94303ed 100644 (file)
@@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
  * @STA_SLEEP_STATE_AWAKE:
  * @STA_SLEEP_STATE_PS_POLL:
  * @STA_SLEEP_STATE_UAPSD:
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ *     (last) released frame
  */
 enum iwl_sta_sleep_flag {
-       STA_SLEEP_STATE_AWAKE   = 0,
-       STA_SLEEP_STATE_PS_POLL = BIT(0),
-       STA_SLEEP_STATE_UAPSD   = BIT(1),
+       STA_SLEEP_STATE_AWAKE           = 0,
+       STA_SLEEP_STATE_PS_POLL         = BIT(0),
+       STA_SLEEP_STATE_UAPSD           = BIT(1),
+       STA_SLEEP_STATE_MOREDATA        = BIT(2),
 };
 
 /* STA ID and color bits definitions */
@@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
 } __packed; /* ADD_STA_CMD_API_S_VER_5 */
 
 /**
- * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
- * VER_6 of this command is quite similar to VER_5 except
+ * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
+ * VER_7 of this command is quite similar to VER_5 except
  * exclusion of all fields related to the security key installation.
+ * It differs from VER_6 only in the "awake_acs" field, which is
+ * reserved and ignored in VER_6.
  */
-struct iwl_mvm_add_sta_cmd_v6 {
+struct iwl_mvm_add_sta_cmd_v7 {
        u8 add_modify;
-       u8 reserved1;
+       u8 awake_acs;
        __le16 tid_disable_tx;
        __le32 mac_id_n_color;
        u8 addr[ETH_ALEN];      /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
        __le16 assoc_id;
        __le16 beamform_flags;
        __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
 
 /**
  * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
        struct iwl_mvm_wep_key wep_key[0];
 } __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
 
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ *     short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+       __le32 remain_frame_count;
+       __le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
 
 #endif /* __fw_api_sta_h__ */
index b674c2a2b51c8910b47811ad73ed876a58fb22f2..8e122f3a7a74e8a97914a13d9821a7229bc7c2bc 100644 (file)
@@ -76,6 +76,8 @@
  * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
  * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
  * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
+ *     on old firmwares).
  * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
  * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
  *     Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
@@ -107,6 +109,7 @@ enum iwl_tx_flags {
        TX_CMD_FLG_VHT_NDPA             = BIT(8),
        TX_CMD_FLG_HT_NDPA              = BIT(9),
        TX_CMD_FLG_CSI_FDBK2HOST        = BIT(10),
+       TX_CMD_FLG_BT_PRIO_POS          = 11,
        TX_CMD_FLG_BT_DIS               = BIT(12),
        TX_CMD_FLG_SEQ_CTL              = BIT(13),
        TX_CMD_FLG_MORE_FRAG            = BIT(14),
index 989d7dbdca6c31f4a13bb9e70a6cf9a669cf9d33..6e75b52588de3ca68a44c41ca339df1e57eae37f 100644 (file)
@@ -70,7 +70,7 @@
 #include "fw-api-mac.h"
 #include "fw-api-power.h"
 #include "fw-api-d3.h"
-#include "fw-api-bt-coex.h"
+#include "fw-api-coex.h"
 
 /* maximal number of Tx queues in any platform */
 #define IWL_MVM_MAX_QUEUES     20
@@ -95,6 +95,7 @@ enum {
        /* PHY context commands */
        PHY_CONTEXT_CMD = 0x8,
        DBG_CFG = 0x9,
+       ANTENNA_COUPLING_NOTIFICATION = 0xa,
 
        /* station table */
        ADD_STA_KEY = 0x17,
@@ -163,6 +164,7 @@ enum {
        TX_ANT_CONFIGURATION_CMD = 0x98,
        BT_CONFIG = 0x9b,
        STATISTICS_NOTIFICATION = 0x9d,
+       EOSP_NOTIFICATION = 0x9e,
        REDUCE_TX_POWER_CMD = 0x9f,
 
        /* RF-KILL commands and notifications */
@@ -190,6 +192,7 @@ enum {
        REPLY_DEBUG_CMD = 0xf0,
        DEBUG_LOG_MSG = 0xf7,
 
+       BCAST_FILTER_CMD = 0xcf,
        MCAST_FILTER_CMD = 0xd0,
 
        /* D3 commands/notifications */
@@ -197,6 +200,7 @@ enum {
        PROT_OFFLOAD_CONFIG_CMD = 0xd4,
        OFFLOADS_QUERY_CMD = 0xd5,
        REMOTE_WAKE_CONFIG_CMD = 0xd6,
+       D0I3_END_CMD = 0xed,
 
        /* for WoWLAN in particular */
        WOWLAN_PATTERNS = 0xe0,
@@ -313,14 +317,12 @@ enum {
 
 /* Section types for NVM_ACCESS_CMD */
 enum {
-       NVM_SECTION_TYPE_HW = 0,
-       NVM_SECTION_TYPE_SW,
-       NVM_SECTION_TYPE_PAPD,
-       NVM_SECTION_TYPE_BT,
-       NVM_SECTION_TYPE_CALIBRATION,
-       NVM_SECTION_TYPE_PRODUCTION,
-       NVM_SECTION_TYPE_POST_FCS_CALIB,
-       NVM_NUM_OF_SECTIONS,
+       NVM_SECTION_TYPE_SW = 1,
+       NVM_SECTION_TYPE_REGULATORY = 3,
+       NVM_SECTION_TYPE_CALIBRATION = 4,
+       NVM_SECTION_TYPE_PRODUCTION = 5,
+       NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+       NVM_MAX_NUM_SECTIONS = 12,
 };
 
 /**
@@ -412,6 +414,35 @@ struct mvm_alive_resp {
        __le32 scd_base_ptr;            /* SRAM address for SCD */
 } __packed; /* ALIVE_RES_API_S_VER_1 */
 
+struct mvm_alive_resp_ver2 {
+       __le16 status;
+       __le16 flags;
+       u8 ucode_minor;
+       u8 ucode_major;
+       __le16 id;
+       u8 api_minor;
+       u8 api_major;
+       u8 ver_subtype;
+       u8 ver_type;
+       u8 mac;
+       u8 opt;
+       __le16 reserved2;
+       __le32 timestamp;
+       __le32 error_event_table_ptr;   /* SRAM address for error log */
+       __le32 log_event_table_ptr;     /* SRAM address for LMAC event log */
+       __le32 cpu_register_ptr;
+       __le32 dbgm_config_ptr;
+       __le32 alive_counter_ptr;
+       __le32 scd_base_ptr;            /* SRAM address for SCD */
+       __le32 st_fwrd_addr;            /* pointer to Store and forward */
+       __le32 st_fwrd_size;
+       u8 umac_minor;                  /* UMAC version: minor */
+       u8 umac_major;                  /* UMAC version: major */
+       __le16 umac_id;                 /* UMAC version: id */
+       __le32 error_info_addr;         /* SRAM address for UMAC error log */
+       __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_2 */
+
 /* Error response/notification */
 enum {
        FW_ERR_UNKNOWN_CMD = 0x0,
@@ -682,6 +713,7 @@ enum {
        TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
        TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
        TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+       T2_V2_START_IMMEDIATELY = BIT(11),
 
        TE_V2_NOTIF_MSK = 0xff,
 
@@ -1159,6 +1191,90 @@ struct iwl_mcast_filter_cmd {
        u8 addr_list[0];
 } __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
 
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - what the attribute's offset is relative to
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ *     start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+       BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+       BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type:       &enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset:    starting offset of this pattern.
+ * @val:               value to match - big endian (MSB is the first
+ *             byte to match from offset pos).
+ * @mask:      mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+       u8 offset_type;
+       u8 offset;
+       __le16 reserved1;
+       __be32 val;
+       __be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+       BCAST_FILTER_FRAME_TYPE_ALL = 0,
+       BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. a filter is considered matched
+ *     only when all its attributes are matched (i.e. AND relationship)
+ */
+struct iwl_fw_bcast_filter {
+       u8 discard;
+       u8 frame_type;
+       u8 num_attrs;
+       u8 reserved1;
+       struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @attached_filters: bitmap of relevant filters for this mac.
+ */
+struct iwl_fw_bcast_mac {
+       u8 default_discard;
+       u8 reserved1;
+       __le16 attached_filters;
+} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
+
+/**
+ * struct iwl_bcast_filter_cmd - broadcast filtering configuration
+ * @disable: enable (0) / disable (1)
+ * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
+ * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
+ * @filters: broadcast filters
+ * @macs: broadcast filtering configuration per-mac
+ */
+struct iwl_bcast_filter_cmd {
+       u8 disable;
+       u8 max_bcast_filters;
+       u8 max_macs;
+       u8 reserved1;
+       struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+       struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
+
 struct mvm_statistics_dbg {
        __le32 burst_check;
        __le32 burst_count;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h b/drivers/net/wireless/iwlwifi/mvm/fw-error-dump.h
new file mode 100644 (file)
index 0000000..58c8941
--- /dev/null
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_error_dump_h__
+#define __fw_error_dump_h__
+
+#include <linux/types.h>
+
+#define IWL_FW_ERROR_DUMP_BARKER       0x14789632
+
+/**
+ * enum iwl_fw_error_dump_type - types of data in the dump file
+ * @IWL_FW_ERROR_DUMP_SRAM: device SRAM contents
+ * @IWL_FW_ERROR_DUMP_REG: device register values
+ */
+enum iwl_fw_error_dump_type {
+       IWL_FW_ERROR_DUMP_SRAM = 0,
+       IWL_FW_ERROR_DUMP_REG = 1,
+
+       IWL_FW_ERROR_DUMP_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: %enum iwl_fw_error_dump_type
+ * @len: the length of the data starting from %data - must be a multiple of 4.
+ * @data: the data itself, padded to a multiple of 4.
+ */
+struct iwl_fw_error_dump_data {
+       __le32 type;
+       __le32 len;
+       __u8 data[];
+} __packed __aligned(4);
+
+/**
+ * struct iwl_fw_error_dump_file - the layout of the header of the file
+ * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
+ * @file_len: the length of the whole file, starting from %barker
+ * @data: array of %struct iwl_fw_error_dump_data
+ */
+struct iwl_fw_error_dump_file {
+       __le32 barker;
+       __le32 file_len;
+       u8 data[0];
+} __packed __aligned(4);
+
+#endif /* __fw_error_dump_h__ */
index c03d39541f9ee50d7f179982d4a688d3f90c4246..7ce20062f32d443be34fe87865d91afd71a0a014 100644 (file)
@@ -110,18 +110,48 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
        struct mvm_alive_resp *palive;
-
-       palive = (void *)pkt->data;
-
-       mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
-       mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
-       alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
-
-       alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
-       IWL_DEBUG_FW(mvm,
-                    "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
-                    le16_to_cpu(palive->status), palive->ver_type,
-                    palive->ver_subtype, palive->flags);
+       struct mvm_alive_resp_ver2 *palive2;
+
+       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+               palive = (void *)pkt->data;
+
+               mvm->support_umac_log = false;
+               mvm->error_event_table =
+                       le32_to_cpu(palive->error_event_table_ptr);
+               mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+               alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+               alive_data->valid = le16_to_cpu(palive->status) ==
+                                   IWL_ALIVE_STATUS_OK;
+               IWL_DEBUG_FW(mvm,
+                            "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+                            le16_to_cpu(palive->status), palive->ver_type,
+                            palive->ver_subtype, palive->flags);
+       } else {
+               palive2 = (void *)pkt->data;
+
+               mvm->error_event_table =
+                       le32_to_cpu(palive2->error_event_table_ptr);
+               mvm->log_event_table =
+                       le32_to_cpu(palive2->log_event_table_ptr);
+               alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
+               mvm->umac_error_event_table =
+                       le32_to_cpu(palive2->error_info_addr);
+
+               alive_data->valid = le16_to_cpu(palive2->status) ==
+                                   IWL_ALIVE_STATUS_OK;
+               if (mvm->umac_error_event_table)
+                       mvm->support_umac_log = true;
+
+               IWL_DEBUG_FW(mvm,
+                            "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+                            le16_to_cpu(palive2->status), palive2->ver_type,
+                            palive2->ver_subtype, palive2->flags);
+
+               IWL_DEBUG_FW(mvm,
+                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+                            palive2->umac_major, palive2->umac_minor);
+       }
 
        return true;
 }
@@ -292,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        }
 
        /* Send TX valid antennas before triggering calibrations */
-       ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
        if (ret)
                goto error;
 
@@ -328,8 +358,6 @@ out:
                                        GFP_KERNEL);
                if (!mvm->nvm_data)
                        return -ENOMEM;
-               mvm->nvm_data->valid_rx_ant = 1;
-               mvm->nvm_data->valid_tx_ant = 1;
                mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
                mvm->nvm_data->bands[0].n_channels = 1;
                mvm->nvm_data->bands[0].n_bitrates = 1;
@@ -341,8 +369,6 @@ out:
        return ret;
 }
 
-#define UCODE_CALIB_TIMEOUT    (2*HZ)
-
 int iwl_mvm_up(struct iwl_mvm *mvm)
 {
        int ret, i;
@@ -394,7 +420,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
 
-       ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
        if (ret)
                goto error;
 
@@ -439,10 +465,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
-       ret = iwl_mvm_power_update_device_mode(mvm);
+       /* Initialize tx backoffs to the minimal possible */
+       iwl_mvm_tt_tx_backoff(mvm, 0);
+
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
+               ret = iwl_power_legacy_set_cam_mode(mvm);
+               if (ret)
+                       goto error;
+       }
+
+       ret = iwl_mvm_power_update_device(mvm);
        if (ret)
                goto error;
 
+       /* allow FW/transport low power modes if not during restart */
+       if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
  error:
@@ -466,7 +505,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
                goto error;
        }
 
-       ret = iwl_send_tx_ant_cfg(mvm, iwl_fw_valid_tx_ant(mvm->fw));
+       ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
        if (ret)
                goto error;
 
index 6b4ea6bf8ffeaf13273b1f79ec46057290254e31..e3b3cf4dbd77a461916589f3dc59f4f11aead0c5 100644 (file)
@@ -94,6 +94,8 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
        int ret;
 
        switch (mode) {
+       case IWL_LED_BLINK:
+               IWL_ERR(mvm, "Blink LED mode not supported, using default\n");
        case IWL_LED_DEFAULT:
        case IWL_LED_RF_STATE:
                mode = IWL_LED_RF_STATE;
index ba723d50939a5582ea3aaf02c8d1fa4e9ae7388c..9ccec10bba166299cc91cf1706992937127d277a 100644 (file)
@@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
 {
        struct iwl_mvm_mac_iface_iterator_data *data = _data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       u16 min_bi;
 
        /* Skip the interface for which we are trying to assign a tsf_id  */
        if (vif == data->vif)
@@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
        switch (data->vif->type) {
        case NL80211_IFTYPE_STATION:
                /*
-                * The new interface is client, so if the existing one
-                * we're iterating is an AP, and both interfaces have the
-                * same beacon interval, the same TSF should be used to
-                * avoid drift between the new client and existing AP,
-                * the existing AP will get drift updates from the new
-                * client context in this case
+                * The new interface is a client, so if the one we're iterating
+                * is an AP, and the beacon interval of the AP is a multiple or
+                * divisor of the beacon interval of the client, the same TSF
+                * should be used to avoid drift between the new client and
+                * existing AP. The existing AP will get drift updates from the
+                * new client context in this case.
                 */
-               if (vif->type == NL80211_IFTYPE_AP) {
-                       if (data->preferred_tsf == NUM_TSF_IDS &&
-                           test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
-                           (vif->bss_conf.beacon_int ==
-                            data->vif->bss_conf.beacon_int)) {
-                               data->preferred_tsf = mvmvif->tsf_id;
-                               return;
-                       }
+               if (vif->type != NL80211_IFTYPE_AP ||
+                   data->preferred_tsf != NUM_TSF_IDS ||
+                   !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+                       break;
+
+               min_bi = min(data->vif->bss_conf.beacon_int,
+                            vif->bss_conf.beacon_int);
+
+               if (!min_bi)
+                       break;
+
+               if ((data->vif->bss_conf.beacon_int -
+                    vif->bss_conf.beacon_int) % min_bi == 0) {
+                       data->preferred_tsf = mvmvif->tsf_id;
+                       return;
                }
                break;
+
        case NL80211_IFTYPE_AP:
                /*
-                * The new interface is AP/GO, so in case both interfaces
-                * have the same beacon interval, it should get drift
-                * updates from an existing client or use the same
-                * TSF as an existing GO. There's no drift between
-                * TSFs internally but if they used different TSFs
-                * then a new client MAC could update one of them
-                * and cause drift that way.
+                * The new interface is AP/GO, so if its beacon interval is a
+                * multiple or a divisor of the beacon interval of an existing
+                * interface, it should get drift updates from an existing
+                * client or use the same TSF as an existing GO. There's no
+                * drift between TSFs internally but if they used different
+                * TSFs then a new client MAC could update one of them and
+                * cause drift that way.
                 */
-               if (vif->type == NL80211_IFTYPE_STATION ||
-                   vif->type == NL80211_IFTYPE_AP) {
-                       if (data->preferred_tsf == NUM_TSF_IDS &&
-                           test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
-                           (vif->bss_conf.beacon_int ==
-                            data->vif->bss_conf.beacon_int)) {
-                               data->preferred_tsf = mvmvif->tsf_id;
-                               return;
-                       }
+               if ((vif->type != NL80211_IFTYPE_AP &&
+                    vif->type != NL80211_IFTYPE_STATION) ||
+                   data->preferred_tsf != NUM_TSF_IDS ||
+                   !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+                       break;
+
+               min_bi = min(data->vif->bss_conf.beacon_int,
+                            vif->bss_conf.beacon_int);
+
+               if (!min_bi)
+                       break;
+
+               if ((data->vif->bss_conf.beacon_int -
+                    vif->bss_conf.beacon_int) % min_bi == 0) {
+                       data->preferred_tsf = mvmvif->tsf_id;
+                       return;
                }
                break;
        default:
@@ -936,7 +952,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                                             TX_CMD_FLG_TSF);
 
        mvm->mgmt_last_antenna_idx =
-               iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+               iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
                                     mvm->mgmt_last_antenna_idx);
 
        beacon_cmd.tx.rate_n_flags =
index c35b8661b39539403bd5c2503ac7ec6208cb99e0..4dd9ff43b8b68e6f9b509ed0650d8c5d5a31dd8d 100644 (file)
@@ -66,7 +66,9 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ip.h>
+#include <linux/if_arp.h>
 #include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
 #include <net/tcp.h>
 
 #include "iwl-op-mode.h"
@@ -128,6 +130,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
 };
 #endif
 
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * These values will only be used internally by the driver,
+ * and won't make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ *	be the vif's IP address. If the vif doesn't have exactly
+ *	one IP address (none, or more than one), this attribute
+ *	will be skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ *	the LSB bytes of the vif's MAC address.
+ */
+enum {
+       BC_FILTER_MAGIC_NONE = 0,
+       BC_FILTER_MAGIC_IP,
+       BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+       {
+               /* arp */
+               .discard = 0,
+               .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+               .attrs = {
+                       {
+                               /* frame type - arp, hw type - ethernet */
+                               .offset_type =
+                                       BCAST_FILTER_OFFSET_PAYLOAD_START,
+                               .offset = sizeof(rfc1042_header),
+                               .val = cpu_to_be32(0x08060001),
+                               .mask = cpu_to_be32(0xffffffff),
+                       },
+                       {
+                               /* arp dest ip */
+                               .offset_type =
+                                       BCAST_FILTER_OFFSET_PAYLOAD_START,
+                               .offset = sizeof(rfc1042_header) + 2 +
+                                         sizeof(struct arphdr) +
+                                         ETH_ALEN + sizeof(__be32) +
+                                         ETH_ALEN,
+                               .mask = cpu_to_be32(0xffffffff),
+                               /* mark it as special field */
+                               .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+                       },
+               },
+       },
+       {
+               /* dhcp offer bcast */
+               .discard = 0,
+               .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+               .attrs = {
+                       {
+                               /* udp dest port - 68 (bootp client)*/
+                               .offset_type = BCAST_FILTER_OFFSET_IP_END,
+                               .offset = offsetof(struct udphdr, dest),
+                               .val = cpu_to_be32(0x00440000),
+                               .mask = cpu_to_be32(0xffff0000),
+                       },
+                       {
+                               /* dhcp - lsb bytes of client hw address */
+                               .offset_type = BCAST_FILTER_OFFSET_IP_END,
+                               .offset = 38,
+                               .mask = cpu_to_be32(0xffffffff),
+                               /* mark it as special field */
+                               .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+                       },
+               },
+       },
+       /* last filter must be empty */
+       {},
+};
+#endif
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+       if (!iwl_mvm_is_d0i3_supported(mvm))
+               return;
+
+       IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+       WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
+       iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+       if (!iwl_mvm_is_d0i3_supported(mvm))
+               return;
+
+       IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+       WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
+       iwl_trans_unref(mvm->trans);
+}
+
+static void
+iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
+{
+       int i;
+
+       if (!iwl_mvm_is_d0i3_supported(mvm))
+               return;
+
+       for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
+               if (ref == i)
+                       continue;
+
+               IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
+               clear_bit(i, mvm->ref_bitmap);
+               iwl_trans_unref(mvm->trans);
+       }
+}
+
 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
 {
        int i;
@@ -168,6 +281,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        hw->queues = mvm->first_agg_queue;
        hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+       hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+                                   IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+       hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
        hw->rate_control_algorithm = "iwl-mvm-rs";
 
        /*
@@ -179,7 +295,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
            !iwlwifi_mod_params.sw_crypto)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
+       if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
                hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
                hw->uapsd_queues = IWL_UAPSD_AC_INFO;
                hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -203,6 +319,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
                                       REGULATORY_DISABLE_BEACON_HINTS;
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
+               hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
        hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
        hw->wiphy->n_iface_combinations =
                ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -246,7 +365,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
                hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
                hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -256,8 +375,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
-                              NL80211_FEATURE_P2P_GO_OPPPS |
-                              NL80211_FEATURE_LOW_PRIORITY_SCAN;
+                              NL80211_FEATURE_P2P_GO_OPPPS;
 
        mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 
@@ -289,6 +407,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        }
 #endif
 
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+       /* assign default bcast filtering configuration */
+       mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
        ret = iwl_mvm_leds_init(mvm);
        if (ret)
                return ret;
@@ -300,11 +423,55 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        return ret;
 }
 
+static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
+                            struct ieee80211_sta *sta,
+                            struct sk_buff *skb)
+{
+       struct iwl_mvm_sta *mvmsta;
+       bool defer = false;
+
+       /*
+        * double check the IN_D0I3 flag both before and after
+        * taking the spinlock, in order to prevent taking
+        * the spinlock when not needed.
+        */
+       if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
+               return false;
+
+       spin_lock(&mvm->d0i3_tx_lock);
+       /*
+        * testing the flag again ensures the skb dequeue
+        * loop (on d0i3 exit) hasn't run yet.
+        */
+       if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+               goto out;
+
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
+           mvmsta->sta_id != mvm->d0i3_ap_sta_id)
+               goto out;
+
+       __skb_queue_tail(&mvm->d0i3_tx, skb);
+       ieee80211_stop_queues(mvm->hw);
+
+       /* trigger wakeup */
+       iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
+       iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
+
+       defer = true;
+out:
+       spin_unlock(&mvm->d0i3_tx_lock);
+       return defer;
+}
+
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
                           struct ieee80211_tx_control *control,
                           struct sk_buff *skb)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       struct ieee80211_sta *sta = control->sta;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *)skb->data;
 
        if (iwl_mvm_is_radio_killed(mvm)) {
                IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@@ -315,8 +482,18 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
            !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
                goto drop;
 
-       if (control->sta) {
-               if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+       /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
+       if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
+                    ieee80211_is_mgmt(hdr->frame_control) &&
+                    !ieee80211_is_deauth(hdr->frame_control) &&
+                    !ieee80211_is_disassoc(hdr->frame_control) &&
+                    !ieee80211_is_action(hdr->frame_control)))
+               sta = NULL;
+
+       if (sta) {
+               if (iwl_mvm_defer_tx(mvm, sta, skb))
+                       return;
+               if (iwl_mvm_tx_skb(mvm, skb, sta))
                        goto drop;
                return;
        }
@@ -354,6 +531,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
+       bool tx_agg_ref = false;
 
        IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
                     sta->addr, tid, action);
@@ -361,6 +539,23 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
        if (!(mvm->nvm_data->sku_cap_11n_enable))
                return -EACCES;
 
+       /* return from D0i3 before starting a new Tx aggregation */
+       if (action == IEEE80211_AMPDU_TX_START) {
+               iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
+               tx_agg_ref = true;
+
+               /*
+                * wait synchronously until D0i3 exit to get the correct
+                * sequence number for the tid
+                */
+               if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+                         !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
+                       WARN_ON_ONCE(1);
+                       iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+                       return -EIO;
+               }
+       }
+
        mutex_lock(&mvm->mutex);
 
        switch (action) {
@@ -398,6 +593,13 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
        }
        mutex_unlock(&mvm->mutex);
 
+       /*
+        * If the tid is marked as started, we won't use it for offloaded
+        * traffic on the next D0i3 entry. It's safe to unref.
+        */
+       if (tx_agg_ref)
+               iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+
        return ret;
 }
 
@@ -422,6 +624,15 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
 
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
+
+       iwl_mvm_fw_error_dump(mvm);
+
+       /* notify the userspace about the error we had */
+       kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
+#endif
+
        iwl_trans_stop_device(mvm->trans);
 
        mvm->scan_status = IWL_MVM_SCAN_NONE;
@@ -434,6 +645,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
                iwl_mvm_cleanup_iterator, mvm);
 
        mvm->p2p_device_vif = NULL;
+       mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 
        iwl_mvm_reset_phy_ctxts(mvm);
        memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -441,6 +653,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        ieee80211_wake_queues(mvm->hw);
 
+       /* cleanup all stale references (scan, roc), but keep the
+        * ucode_down ref until reconfig is complete */
+       iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
        mvm->vif_count = 0;
        mvm->rx_ba_sessions = 0;
 }
@@ -470,11 +686,15 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
        mutex_lock(&mvm->mutex);
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+       iwl_mvm_d0i3_enable_tx(mvm, NULL);
        ret = iwl_mvm_update_quotas(mvm, NULL);
        if (ret)
                IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
                        ret);
 
+       /* allow transport/FW low power modes */
+       iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
        mutex_unlock(&mvm->mutex);
 }
 
@@ -482,9 +702,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
+       flush_work(&mvm->d0i3_exit_work);
        flush_work(&mvm->async_handlers_wk);
 
        mutex_lock(&mvm->mutex);
+
+       /* disallow low power states when the FW is down */
+       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
        /* async_handlers_wk is now blocked */
 
        /*
@@ -510,14 +735,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
        cancel_work_sync(&mvm->async_handlers_wk);
 }
 
-static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
-                                         struct ieee80211_vif *vif)
-{
-       struct iwl_mvm *mvm = data;
-
-       iwl_mvm_power_update_mode(mvm, vif);
-}
-
 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
 {
        u16 i;
@@ -585,7 +802,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
            vif->type == NL80211_IFTYPE_ADHOC) {
                u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
                ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
-                                              qmask);
+                                              qmask,
+                                              ieee80211_vif_type_p2p(vif));
                if (ret) {
                        IWL_ERR(mvm, "Failed to allocate bcast sta\n");
                        goto out_release;
@@ -599,10 +817,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        if (ret)
                goto out_release;
 
-       iwl_mvm_power_disable(mvm, vif);
+       ret = iwl_mvm_power_update_mac(mvm, vif);
+       if (ret)
+               goto out_release;
 
        /* beacon filtering */
-       ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+       ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
        if (ret)
                goto out_remove_mac;
 
@@ -661,11 +881,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
                mvm->vif_count--;
 
-       /* TODO: remove this when legacy PM will be discarded */
-       ieee80211_iterate_active_interfaces(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_power_update_iterator, mvm);
-
        iwl_mvm_mac_ctxt_release(mvm, vif);
  out_unlock:
        mutex_unlock(&mvm->mutex);
@@ -754,11 +969,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
        if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
                mvm->vif_count--;
 
-       /* TODO: remove this when legacy PM will be discarded */
-       ieee80211_iterate_active_interfaces(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_power_update_iterator, mvm);
-
+       iwl_mvm_power_update_mac(mvm, vif);
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
 out_release:
@@ -876,6 +1087,156 @@ out:
        *total_flags = 0;
 }
 
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+       struct iwl_mvm *mvm;
+       struct iwl_bcast_filter_cmd *cmd;
+       u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+                        const struct iwl_fw_bcast_filter *in_filter,
+                        struct iwl_fw_bcast_filter *out_filter)
+{
+       struct iwl_fw_bcast_filter_attr *attr;
+       int i;
+
+       memcpy(out_filter, in_filter, sizeof(*out_filter));
+
+       for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+               attr = &out_filter->attrs[i];
+
+               if (!attr->mask)
+                       break;
+
+               switch (attr->reserved1) {
+               case cpu_to_le16(BC_FILTER_MAGIC_IP):
+                       if (vif->bss_conf.arp_addr_cnt != 1) {
+                               attr->mask = 0;
+                               continue;
+                       }
+
+                       attr->val = vif->bss_conf.arp_addr_list[0];
+                       break;
+               case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+                       attr->val = *(__be32 *)&vif->addr[2];
+                       break;
+               default:
+                       break;
+               }
+               attr->reserved1 = 0;
+               out_filter->num_attrs++;
+       }
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+                                         struct ieee80211_vif *vif)
+{
+       struct iwl_bcast_iter_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+       struct iwl_bcast_filter_cmd *cmd = data->cmd;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_fw_bcast_mac *bcast_mac;
+       int i;
+
+       if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+               return;
+
+       bcast_mac = &cmd->macs[mvmvif->id];
+
+       /* enable filtering only for associated stations */
+       if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+               return;
+
+       bcast_mac->default_discard = 1;
+
+       /* copy all configured filters */
+       for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+               /*
+                * Make sure we don't exceed our filters limit.
+                * if there is still a valid filter to be configured,
+                * be on the safe side and just allow bcast for this mac.
+                */
+               if (WARN_ON_ONCE(data->current_filter >=
+                                ARRAY_SIZE(cmd->filters))) {
+                       bcast_mac->default_discard = 0;
+                       bcast_mac->attached_filters = 0;
+                       break;
+               }
+
+               iwl_mvm_set_bcast_filter(vif,
+                                        &mvm->bcast_filters[i],
+                                        &cmd->filters[data->current_filter]);
+
+               /* skip current filter if it contains no attributes */
+               if (!cmd->filters[data->current_filter].num_attrs)
+                       continue;
+
+               /* attach the filter to current mac */
+               bcast_mac->attached_filters |=
+                               cpu_to_le16(BIT(data->current_filter));
+
+               data->current_filter++;
+       }
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+                                   struct iwl_bcast_filter_cmd *cmd)
+{
+       struct iwl_bcast_iter_data iter_data = {
+               .mvm = mvm,
+               .cmd = cmd,
+       };
+
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+       cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* use debugfs filters/macs if override is configured */
+       if (mvm->dbgfs_bcast_filtering.override) {
+               memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+                      sizeof(cmd->filters));
+               memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+                      sizeof(cmd->macs));
+               return true;
+       }
+#endif
+
+       /* if no filters are configured, do nothing */
+       if (!mvm->bcast_filters)
+               return false;
+
+       /* configure and attach these filters for each associated sta vif */
+       ieee80211_iterate_active_interfaces(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_bcast_filter_iterator, &iter_data);
+
+       return true;
+}
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif)
+{
+       struct iwl_bcast_filter_cmd cmd;
+
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+               return 0;
+
+       if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+               return 0;
+
+       return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+                                   sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+                                                struct ieee80211_vif *vif)
+{
+       return 0;
+}
+#endif
+
 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                             struct ieee80211_vif *vif,
                                             struct ieee80211_bss_conf *bss_conf,
@@ -928,6 +1289,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 
                        iwl_mvm_sf_update(mvm, vif, false);
                        iwl_mvm_power_vif_assoc(mvm, vif);
+                       if (vif->p2p)
+                               iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
                } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                        /*
                         * If update fails - SF might be running in associated
@@ -940,27 +1303,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                        ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
                        if (ret)
                                IWL_ERR(mvm, "failed to remove AP station\n");
+
+                       if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+                               mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
                        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
                        /* remove quota for this interface */
                        ret = iwl_mvm_update_quotas(mvm, NULL);
                        if (ret)
                                IWL_ERR(mvm, "failed to update quotas\n");
+
+                       if (vif->p2p)
+                               iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
                }
 
                iwl_mvm_recalc_multicast(mvm);
+               iwl_mvm_configure_bcast_filter(mvm, vif);
 
                /* reset rssi values */
                mvmvif->bf_data.ave_beacon_signal = 0;
 
-               if (!(mvm->fw->ucode_capa.flags &
-                                       IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
-                       /* Workaround for FW bug, otherwise FW disables device
-                        * power save upon disassociation
-                        */
-                       ret = iwl_mvm_power_update_mode(mvm, vif);
-                       if (ret)
-                               IWL_ERR(mvm, "failed to update power mode\n");
-               }
                iwl_mvm_bt_coex_vif_change(mvm);
                iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
                                    IEEE80211_SMPS_AUTOMATIC);
@@ -971,9 +1332,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
+               WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
        } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
                              BSS_CHANGED_QOS)) {
-               ret = iwl_mvm_power_update_mode(mvm, vif);
+               ret = iwl_mvm_power_update_mac(mvm, vif);
                if (ret)
                        IWL_ERR(mvm, "failed to update power mode\n");
        }
@@ -987,10 +1349,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
                /* reset cqm events tracking */
                mvmvif->bf_data.last_cqm_event = 0;
-               ret = iwl_mvm_update_beacon_filter(mvm, vif);
+               ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
                if (ret)
                        IWL_ERR(mvm, "failed to update CQM thresholds\n");
        }
+
+       if (changes & BSS_CHANGED_ARP_FILTER) {
+               IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+               iwl_mvm_configure_bcast_filter(mvm, vif);
+       }
 }
 
 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@@ -1024,8 +1391,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        if (ret)
                goto out_remove;
 
-       mvmvif->ap_ibss_active = true;
-
        /* Send the bcast station. At this stage the TBTT and DTIM time events
         * are added and applied to the scheduler */
        ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@@ -1036,8 +1401,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        mvmvif->ap_ibss_active = true;
 
        /* power update needs to be done before quotas */
-       mvm->bound_vif_cnt++;
-       iwl_mvm_power_update_binding(mvm, vif, true);
+       iwl_mvm_power_update_mac(mvm, vif);
 
        ret = iwl_mvm_update_quotas(mvm, vif);
        if (ret)
@@ -1047,14 +1411,15 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        if (vif->p2p && mvm->p2p_device_vif)
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
 
+       iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
        iwl_mvm_bt_coex_vif_change(mvm);
 
        mutex_unlock(&mvm->mutex);
        return 0;
 
 out_quota_failed:
-       mvm->bound_vif_cnt--;
-       iwl_mvm_power_update_binding(mvm, vif, false);
+       iwl_mvm_power_update_mac(mvm, vif);
        mvmvif->ap_ibss_active = false;
        iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
 out_unbind:
@@ -1080,6 +1445,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
 
        iwl_mvm_bt_coex_vif_change(mvm);
 
+       iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
        /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
        if (vif->p2p && mvm->p2p_device_vif)
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -1088,8 +1455,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
        iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
        iwl_mvm_binding_remove_vif(mvm, vif);
 
-       mvm->bound_vif_cnt--;
-       iwl_mvm_power_update_binding(mvm, vif, false);
+       iwl_mvm_power_update_mac(mvm, vif);
 
        iwl_mvm_mac_ctxt_remove(mvm, vif);
 
@@ -1103,26 +1469,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
                                 u32 changes)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
-                                             BSS_CHANGED_HT |
-                                             BSS_CHANGED_BANDWIDTH;
-       int ret;
 
        /* Changes will be applied when the AP/IBSS is started */
        if (!mvmvif->ap_ibss_active)
                return;
 
-       if (changes & ht_change) {
-               ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
-               if (ret)
-                       IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
-       }
+       if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+                      BSS_CHANGED_BANDWIDTH) &&
+           iwl_mvm_mac_ctxt_changed(mvm, vif))
+               IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
 
        /* Need to send a new beacon template to the FW */
-       if (changes & BSS_CHANGED_BEACON) {
-               if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
-                       IWL_WARN(mvm, "Failed updating beacon data\n");
-       }
+       if (changes & BSS_CHANGED_BEACON &&
+           iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+               IWL_WARN(mvm, "Failed updating beacon data\n");
 }
 
 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -1162,13 +1522,30 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-               ret = iwl_mvm_scan_request(mvm, vif, req);
-       else
+       switch (mvm->scan_status) {
+       case IWL_MVM_SCAN_SCHED:
+               ret = iwl_mvm_sched_scan_stop(mvm);
+               if (ret) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+               break;
+       case IWL_MVM_SCAN_NONE:
+               break;
+       default:
                ret = -EBUSY;
+               goto out;
+       }
 
-       mutex_unlock(&mvm->mutex);
+       iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
 
+       ret = iwl_mvm_scan_request(mvm, vif, req);
+       if (ret)
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
+       mutex_unlock(&mvm->mutex);
+       /* make sure to flush the Rx handler before the next scan arrives */
+       iwl_mvm_wait_for_async_handlers(mvm);
        return ret;
 }
 
@@ -1186,20 +1563,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
 
 static void
 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
-                                 struct ieee80211_sta *sta, u16 tid,
+                                 struct ieee80211_sta *sta, u16 tids,
                                  int num_frames,
                                  enum ieee80211_frame_release_type reason,
                                  bool more_data)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-       /* TODO: how do we tell the fw to send frames for a specific TID */
+       /* Called when we need to transmit (a) frame(s) from mac80211 */
 
-       /*
-        * The fw will send EOSP notification when the last frame will be
-        * transmitted.
-        */
-       iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
+       iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+                                         tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+                                   struct ieee80211_sta *sta, u16 tids,
+                                   int num_frames,
+                                   enum ieee80211_frame_release_type reason,
+                                   bool more_data)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       /* Called when we need to transmit (a) frame(s) from agg queue */
+
+       iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+                                         tids, more_data, true);
 }
 
 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1209,11 +1598,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       int tid;
 
        switch (cmd) {
        case STA_NOTIFY_SLEEP:
                if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
                        ieee80211_sta_block_awake(hw, sta, true);
+               spin_lock_bh(&mvmsta->lock);
+               for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+                       struct iwl_mvm_tid_data *tid_data;
+
+                       tid_data = &mvmsta->tid_data[tid];
+                       if (tid_data->state != IWL_AGG_ON &&
+                           tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+                               continue;
+                       if (iwl_mvm_tid_queued(tid_data) == 0)
+                               continue;
+                       ieee80211_sta_set_buffered(sta, tid, true);
+               }
+               spin_unlock_bh(&mvmsta->lock);
                /*
                 * The fw updates the STA to be asleep. Tx packets on the Tx
                 * queues to this station will not be transmitted. The fw will
@@ -1304,12 +1707,14 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
                /* enable beacon filtering */
-               WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+               if (vif->bss_conf.dtim_period)
+                       WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
+                                                            CMD_SYNC));
                ret = 0;
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
                /* disable beacon filtering */
-               WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
+               WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
                ret = 0;
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH) {
@@ -1401,9 +1806,26 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
 
-       if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
-               IWL_DEBUG_SCAN(mvm,
-                              "SCHED SCAN request during internal scan - abort\n");
+       switch (mvm->scan_status) {
+       case IWL_MVM_SCAN_OS:
+               IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
+               ret = iwl_mvm_cancel_scan(mvm);
+               if (ret) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+
+               /*
+                * iwl_mvm_rx_scan_complete() will be called soon but will
+                * not reset the scan status as it won't be IWL_MVM_SCAN_OS
+                * any more since we queue the next scan immediately (below).
+                * We make sure it is called before the next scan starts by
+                * flushing the async-handlers work.
+                */
+               break;
+       case IWL_MVM_SCAN_NONE:
+               break;
+       default:
                ret = -EBUSY;
                goto out;
        }
@@ -1425,17 +1847,23 @@ err:
        mvm->scan_status = IWL_MVM_SCAN_NONE;
 out:
        mutex_unlock(&mvm->mutex);
+       /* make sure to flush the Rx handler before the next scan arrives */
+       iwl_mvm_wait_for_async_handlers(mvm);
        return ret;
 }
 
-static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
-                                       struct ieee80211_vif *vif)
+static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_sched_scan_stop(mvm);
+       ret = iwl_mvm_sched_scan_stop(mvm);
        mutex_unlock(&mvm->mutex);
+       iwl_mvm_wait_for_async_handlers(mvm);
+
+       return ret;
 }
 
 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
@@ -1773,8 +2201,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
         * Power state must be updated before quotas,
         * otherwise fw will complain.
         */
-       mvm->bound_vif_cnt++;
-       iwl_mvm_power_update_binding(mvm, vif, true);
+       iwl_mvm_power_update_mac(mvm, vif);
 
        /* Setting the quota at this stage is only required for monitor
         * interfaces. For the other types, the bss_info changed flow
@@ -1791,8 +2218,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
 
  out_remove_binding:
        iwl_mvm_binding_remove_vif(mvm, vif);
-       mvm->bound_vif_cnt--;
-       iwl_mvm_power_update_binding(mvm, vif, false);
+       iwl_mvm_power_update_mac(mvm, vif);
  out_unlock:
        mutex_unlock(&mvm->mutex);
        if (ret)
@@ -1824,8 +2250,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
        }
 
        iwl_mvm_binding_remove_vif(mvm, vif);
-       mvm->bound_vif_cnt--;
-       iwl_mvm_power_update_binding(mvm, vif, false);
+       iwl_mvm_power_update_mac(mvm, vif);
 
 out_unlock:
        mvmvif->phy_ctxt = NULL;
@@ -1892,8 +2317,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
                        return -EINVAL;
 
                if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
-                       return iwl_mvm_enable_beacon_filter(mvm, vif);
-               return iwl_mvm_disable_beacon_filter(mvm, vif);
+                       return iwl_mvm_enable_beacon_filter(mvm, vif,
+                                                           CMD_SYNC);
+               return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
        }
 
        return -EOPNOTSUPP;
@@ -1914,7 +2340,7 @@ static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
 }
 #endif
 
-struct ieee80211_ops iwl_mvm_hw_ops = {
+const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
        .start = iwl_mvm_mac_start,
@@ -1932,6 +2358,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
        .sta_state = iwl_mvm_mac_sta_state,
        .sta_notify = iwl_mvm_mac_sta_notify,
        .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+       .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
        .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
        .sta_rc_update = iwl_mvm_sta_rc_update,
        .conf_tx = iwl_mvm_mac_conf_tx,
index 2b0ba1fc3c82fb457a897dbb8fe08d528b12d762..d564233a65da6157c1aaf16a099ddf94b3be933e 100644 (file)
@@ -91,9 +91,7 @@ enum iwl_mvm_tx_fifo {
        IWL_MVM_TX_FIFO_MCAST = 5,
 };
 
-extern struct ieee80211_ops iwl_mvm_hw_ops;
-extern const struct iwl_mvm_power_ops pm_legacy_ops;
-extern const struct iwl_mvm_power_ops pm_mac_ops;
+extern const struct ieee80211_ops iwl_mvm_hw_ops;
 
 /**
  * struct iwl_mvm_mod_params - module parameters for iwlmvm
@@ -159,20 +157,6 @@ enum iwl_power_scheme {
                                         IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
 #define IWL_UAPSD_MAX_SP               IEEE80211_WMM_IE_STA_QOSINFO_SP_2
 
-struct iwl_mvm_power_ops {
-       int (*power_update_mode)(struct iwl_mvm *mvm,
-                                struct ieee80211_vif *vif);
-       int (*power_update_device_mode)(struct iwl_mvm *mvm);
-       int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-       void (*power_update_binding)(struct iwl_mvm *mvm,
-                                    struct ieee80211_vif *vif, bool assign);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                               char *buf, int bufsz);
-#endif
-};
-
-
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 enum iwl_dbgfs_pm_mask {
        MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@@ -239,6 +223,19 @@ enum iwl_mvm_smps_type_request {
        NUM_IWL_MVM_SMPS_REQ,
 };
 
+enum iwl_mvm_ref_type {
+       IWL_MVM_REF_UCODE_DOWN,
+       IWL_MVM_REF_SCAN,
+       IWL_MVM_REF_ROC,
+       IWL_MVM_REF_P2P_CLIENT,
+       IWL_MVM_REF_AP_IBSS,
+       IWL_MVM_REF_USER,
+       IWL_MVM_REF_TX,
+       IWL_MVM_REF_TX_AGG,
+
+       IWL_MVM_REF_COUNT,
+};
+
 /**
 * struct iwl_mvm_vif_bf_data - beacon filtering related data
 * @bf_enabled: indicates if beacon filtering is enabled
@@ -269,7 +266,9 @@ struct iwl_mvm_vif_bf_data {
  * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
  *     should get quota etc.
  * @monitor_active: indicates that monitor context is configured, and that the
- * interface should get quota etc.
+ *     interface should get quota etc.
+ * @low_latency: indicates that this interface is in low-latency mode
+ *     (VMACLowLatencyMode)
  * @queue_params: QoS params for this MAC
  * @bcast_sta: station used for broadcast packets. Used by the following
  *  vifs: P2P_DEVICE, GO and AP.
@@ -285,6 +284,7 @@ struct iwl_mvm_vif {
        bool uploaded;
        bool ap_ibss_active;
        bool monitor_active;
+       bool low_latency;
        struct iwl_mvm_vif_bf_data bf_data;
 
        u32 ap_beacon_time;
@@ -319,13 +319,13 @@ struct iwl_mvm_vif {
 
        bool seqno_valid;
        u16 seqno;
+#endif
 
 #if IS_ENABLED(CONFIG_IPV6)
        /* IPv6 addresses for WoWLAN */
        struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
        int num_target_ipv6_addrs;
 #endif
-#endif
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        struct iwl_mvm *mvm;
@@ -333,14 +333,13 @@ struct iwl_mvm_vif {
        struct dentry *dbgfs_slink;
        struct iwl_dbgfs_pm dbgfs_pm;
        struct iwl_dbgfs_bf dbgfs_bf;
+       struct iwl_mac_power_cmd mac_pwr_cmd;
 #endif
 
        enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
 
        /* FW identified misbehaving AP */
        u8 uapsd_misbehaving_bssid[ETH_ALEN];
-
-       bool pm_prevented;
 };
 
 static inline struct iwl_mvm_vif *
@@ -349,6 +348,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
        return (void *)vif->drv_priv;
 }
 
+extern const u8 tid_to_mac80211_ac[];
+
 enum iwl_scan_status {
        IWL_MVM_SCAN_NONE,
        IWL_MVM_SCAN_OS,
@@ -415,6 +416,7 @@ struct iwl_tt_params {
  * @ct_kill_exit: worker to exit thermal kill
  * @dynamic_smps: Has thermal throttling enabled dynamic_smps?
  * @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
  * @params: Parameters to configure the thermal throttling algorithm.
  * @throttle: Is thermal throttling active?
  */
@@ -422,10 +424,33 @@ struct iwl_mvm_tt_mgmt {
        struct delayed_work ct_kill_exit;
        bool dynamic_smps;
        u32 tx_backoff;
+       u32 min_backoff;
        const struct iwl_tt_params *params;
        bool throttle;
 };
 
+#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
+
+struct iwl_mvm_frame_stats {
+       u32 legacy_frames;
+       u32 ht_frames;
+       u32 vht_frames;
+       u32 bw_20_frames;
+       u32 bw_40_frames;
+       u32 bw_80_frames;
+       u32 bw_160_frames;
+       u32 sgi_frames;
+       u32 ngi_frames;
+       u32 siso_frames;
+       u32 mimo2_frames;
+       u32 agg_frames;
+       u32 ampdu_count;
+       u32 success_frames;
+       u32 fail_frames;
+       u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
+       int last_frame_idx;
+};
+
 struct iwl_mvm {
        /* for logger access */
        struct device *dev;
@@ -457,6 +482,8 @@ struct iwl_mvm {
        bool init_ucode_complete;
        u32 error_event_table;
        u32 log_event_table;
+       u32 umac_error_event_table;
+       bool support_umac_log;
 
        u32 ampdu_ref;
 
@@ -470,7 +497,7 @@ struct iwl_mvm {
 
        struct iwl_nvm_data *nvm_data;
        /* NVM sections */
-       struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+       struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
        /* EEPROM MAC addresses */
        struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -494,6 +521,17 @@ struct iwl_mvm {
        /* rx chain antennas set through debugfs for the scan command */
        u8 scan_rx_ant;
 
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+       /* broadcast filters to configure for each associated station */
+       const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct {
+               u32 override; /* u32 for debugfs_create_bool */
+               struct iwl_bcast_filter_cmd cmd;
+       } dbgfs_bcast_filtering;
+#endif
+#endif
+
        /* Internal station */
        struct iwl_mvm_int_sta aux_sta;
 
@@ -506,6 +544,7 @@ struct iwl_mvm {
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        struct dentry *debugfs_dir;
        u32 dbgfs_sram_offset, dbgfs_sram_len;
+       u32 dbgfs_prph_reg_addr;
        bool disable_power_off;
        bool disable_power_off_d3;
 
@@ -513,6 +552,9 @@ struct iwl_mvm {
        struct debugfs_blob_wrapper nvm_sw_blob;
        struct debugfs_blob_wrapper nvm_calib_blob;
        struct debugfs_blob_wrapper nvm_prod_blob;
+
+       struct iwl_mvm_frame_stats drv_rx_stats;
+       spinlock_t drv_stats_lock;
 #endif
 
        struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -526,10 +568,16 @@ struct iwl_mvm {
         */
        unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
 
+       /* A bitmap of reference types taken by the driver. */
+       unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
+
        u8 vif_count;
 
        /* -1 for always, 0 for never, >0 for that many times */
        s8 restart_fw;
+       void *fw_error_dump;
+       void *fw_error_sram;
+       u32 fw_error_sram_len;
 
        struct led_classdev led;
 
@@ -548,17 +596,27 @@ struct iwl_mvm {
 #endif
 #endif
 
+       /* d0i3 */
+       u8 d0i3_ap_sta_id;
+       bool d0i3_offloading;
+       struct work_struct d0i3_exit_work;
+       struct sk_buff_head d0i3_tx;
+       /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+       spinlock_t d0i3_tx_lock;
+       wait_queue_head_t d0i3_exit_waitq;
+
        /* BT-Coex */
        u8 bt_kill_msk;
        struct iwl_bt_coex_profile_notif last_bt_notif;
        struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+       u32 last_ant_isol;
+       u8 last_corun_lut;
+       u8 bt_tx_prio;
 
        /* Thermal Throttling and CTkill */
        struct iwl_mvm_tt_mgmt thermal_throttle;
        s32 temperature;        /* Celsius */
 
-       const struct iwl_mvm_power_ops *pm_ops;
-
 #ifdef CONFIG_NL80211_TESTMODE
        u32 noa_duration;
        struct ieee80211_vif *noa_vif;
@@ -569,10 +627,10 @@ struct iwl_mvm {
        u8 first_agg_queue;
        u8 last_agg_queue;
 
-       u8 bound_vif_cnt;
-
        /* Indicate if device power save is allowed */
-       bool ps_prevented;
+       bool ps_disabled;
+       /* Indicate if device power management is allowed */
+       bool pm_disabled;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -587,6 +645,7 @@ enum iwl_mvm_status {
        IWL_MVM_STATUS_HW_CTKILL,
        IWL_MVM_STATUS_ROC_RUNNING,
        IWL_MVM_STATUS_IN_HW_RESTART,
+       IWL_MVM_STATUS_IN_D0I3,
 };
 
 static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -595,6 +654,30 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
               test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
 }
 
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+       struct ieee80211_sta *sta;
+
+       if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+               return NULL;
+
+       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+                                       lockdep_is_held(&mvm->mutex));
+
+       /* This can happen if the station has just been removed */
+       if (IS_ERR_OR_NULL(sta))
+               return NULL;
+
+       return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
+{
+       return mvm->trans->cfg->d0i3 &&
+              (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -619,7 +702,10 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
                               struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
-void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
+void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+#endif
 u8 first_antenna(u8 mask);
 u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 
@@ -645,6 +731,11 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
+static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
+{
+       flush_work(&mvm->async_handlers_wk);
+}
+
 /* Statistics */
 int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
                                struct iwl_rx_cmd_buffer *rxb,
@@ -661,6 +752,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
 
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+                                   struct iwl_bcast_filter_cmd *cmd);
 
 /*
  * FW notifications / CMD responses handlers
@@ -676,6 +769,9 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                         struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb,
+                                 struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                          struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
@@ -730,7 +826,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                             struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                             struct iwl_device_cmd *cmd);
-void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
 int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -744,7 +840,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
                                       struct cfg80211_sched_scan_request *req);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct cfg80211_sched_scan_request *req);
-void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
 int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
                                  struct iwl_rx_cmd_buffer *rxb,
                                  struct iwl_device_cmd *cmd);
@@ -772,49 +868,24 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 /* rate scaling */
 int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
+                               struct iwl_mvm_frame_stats *stats,
+                               u32 rate, bool agg);
+int rs_pretty_print_rate(char *buf, const u32 rate);
 
-/* power managment */
-static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
-                                           struct ieee80211_vif *vif)
-{
-       return mvm->pm_ops->power_update_mode(mvm, vif);
-}
-
-static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
-                                       struct ieee80211_vif *vif)
-{
-       return mvm->pm_ops->power_disable(mvm, vif);
-}
-
-static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
-{
-       if (mvm->pm_ops->power_update_device_mode)
-               return mvm->pm_ops->power_update_device_mode(mvm);
-       return 0;
-}
+/* power management */
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
 
-static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
-                                               struct ieee80211_vif *vif,
-                                               bool assign)
-{
-       if (mvm->pm_ops->power_update_binding)
-               mvm->pm_ops->power_update_binding(mvm, vif, assign);
-}
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                                char *buf, int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
                                             struct iwl_rx_cmd_buffer *rxb,
                                             struct iwl_device_cmd *cmd);
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
-                                           struct ieee80211_vif *vif,
-                                           char *buf, int bufsz)
-{
-       return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
-}
-#endif
-
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
 void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
 
@@ -840,6 +911,17 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 }
 #endif
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+                               struct iwl_wowlan_config_cmd_v2 *cmd);
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+                              struct ieee80211_vif *vif,
+                              bool disable_offloading,
+                              u32 cmd_flags);
+
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
 
 /* BT Coex */
 int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -850,10 +932,13 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event rssi_event);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
-u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
-                                  struct ieee80211_sta *sta);
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+                               struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta);
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+                          struct ieee80211_tx_info *info, u8 ac);
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
 
 enum iwl_bt_kill_msk {
        BT_KILL_MSK_DEFAULT,
@@ -875,25 +960,53 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
                                         struct iwl_beacon_filter_cmd *cmd)
 {}
 #endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  bool enable, u32 flags);
 int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
-                                struct ieee80211_vif *vif);
+                                struct ieee80211_vif *vif,
+                                u32 flags);
 int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
-                                 struct ieee80211_vif *vif);
-int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
-                                  struct iwl_beacon_filter_cmd *cmd);
+                                 struct ieee80211_vif *vif,
+                                 u32 flags);
 int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
                                struct ieee80211_vif *vif, bool enable);
 int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
-                                 struct ieee80211_vif *vif);
+                                struct ieee80211_vif *vif,
+                                bool force,
+                                u32 flags);
 
 /* SMPS */
 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                enum iwl_mvm_smps_type_request req_type,
                                enum ieee80211_smps_mode smps_request);
 
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                              bool value);
+/* get SystemLowLatencyMode - only needed for beacon threshold? */
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+       /*
+        * should this consider associated/active/... state?
+        *
+        * Normally low-latency should only be active on interfaces
+        * that are active, but at least with debugfs it can also be
+        * enabled on interfaces that aren't active. However, when
+        * interfaces aren't active then they aren't added into the
+        * binding, so this has no real impact. For now, just return
+        * the current desired low-latency state.
+        */
+
+       return mvmvif->low_latency;
+}
+
 /* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
 
index 35b71af78d029d90311b878f640f84c38b9d331d..cf2d09f53782b0227f9a47d9a8761af8b5ea7ffe 100644 (file)
 #include "iwl-eeprom-read.h"
 #include "iwl-nvm-parse.h"
 
-/* list of NVM sections we are allowed/need to read */
-static const int nvm_to_read[] = {
-       NVM_SECTION_TYPE_HW,
-       NVM_SECTION_TYPE_SW,
-       NVM_SECTION_TYPE_CALIBRATION,
-       NVM_SECTION_TYPE_PRODUCTION,
-};
-
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
 #define IWL_MAX_NVM_SECTION_SIZE 7000
@@ -236,24 +228,39 @@ static struct iwl_nvm_data *
 iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 {
        struct iwl_nvm_section *sections = mvm->nvm_sections;
-       const __le16 *hw, *sw, *calib;
+       const __le16 *hw, *sw, *calib, *regulatory, *mac_override;
 
        /* Checking for required sections */
-       if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
-           !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
-               IWL_ERR(mvm, "Can't parse empty NVM sections\n");
-               return NULL;
+       if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+                   !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+                       IWL_ERR(mvm, "Can't parse empty NVM sections\n");
+                       return NULL;
+               }
+       } else {
+               if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data ||
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+                       IWL_ERR(mvm,
+                               "Can't parse empty family 8000 NVM sections\n");
+                       return NULL;
+               }
        }
 
        if (WARN_ON(!mvm->cfg))
                return NULL;
 
-       hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+       hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+       regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+       mac_override =
+               (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+
        return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
-                                 iwl_fw_valid_tx_ant(mvm->fw),
-                                 iwl_fw_valid_rx_ant(mvm->fw));
+                                 regulatory, mac_override,
+                                 mvm->fw->valid_tx_ant,
+                                 mvm->fw->valid_rx_ant);
 }
 
 #define MAX_NVM_FILE_LEN       16384
@@ -293,6 +300,8 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
+#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
+#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
 
        IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
 
@@ -343,8 +352,16 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                        break;
                }
 
-               section_size = 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
-               section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+               if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+                       section_size =
+                               2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+                       section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+               } else {
+                       section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
+                                               le16_to_cpu(file_sec->word2));
+                       section_id = NVM_WORD1_ID_FAMILY_8000(
+                                               le16_to_cpu(file_sec->word1));
+               }
 
                if (section_size > IWL_MAX_NVM_SECTION_SIZE) {
                        IWL_ERR(mvm, "ERROR - section too large (%d)\n",
@@ -367,7 +384,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
                        break;
                }
 
-               if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+               if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
                         "Invalid NVM section ID %d\n", section_id)) {
                        ret = -EINVAL;
                        break;
@@ -414,6 +431,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
 {
        int ret, i, section;
        u8 *nvm_buffer, *temp;
+       int nvm_to_read[NVM_MAX_NUM_SECTIONS];
+       int num_of_sections_to_read;
+
+       if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+               return -EINVAL;
 
        /* load external NVM if configured */
        if (iwlwifi_mod_params.nvm_file) {
@@ -422,6 +444,22 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
                if (ret)
                        return ret;
        } else {
+               /* list of NVM sections we are allowed/need to read */
+               if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+                       nvm_to_read[0] = mvm->cfg->nvm_hw_section_num;
+                       nvm_to_read[1] = NVM_SECTION_TYPE_SW;
+                       nvm_to_read[2] = NVM_SECTION_TYPE_CALIBRATION;
+                       nvm_to_read[3] = NVM_SECTION_TYPE_PRODUCTION;
+                       num_of_sections_to_read = 4;
+               } else {
+                       nvm_to_read[0] = NVM_SECTION_TYPE_SW;
+                       nvm_to_read[1] = NVM_SECTION_TYPE_CALIBRATION;
+                       nvm_to_read[2] = NVM_SECTION_TYPE_PRODUCTION;
+                       nvm_to_read[3] = NVM_SECTION_TYPE_REGULATORY;
+                       nvm_to_read[4] = NVM_SECTION_TYPE_MAC_OVERRIDE;
+                       num_of_sections_to_read = 5;
+               }
+
                /* Read From FW NVM */
                IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
 
@@ -430,7 +468,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
                                     GFP_KERNEL);
                if (!nvm_buffer)
                        return -ENOMEM;
-               for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+               for (i = 0; i < num_of_sections_to_read; i++) {
                        section = nvm_to_read[i];
                        /* we override the constness for initial read */
                        ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
@@ -446,10 +484,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
                        switch (section) {
-                       case NVM_SECTION_TYPE_HW:
-                               mvm->nvm_hw_blob.data = temp;
-                               mvm->nvm_hw_blob.size = ret;
-                               break;
                        case NVM_SECTION_TYPE_SW:
                                mvm->nvm_sw_blob.data = temp;
                                mvm->nvm_sw_blob.size  = ret;
@@ -463,6 +497,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
                                mvm->nvm_prod_blob.size  = ret;
                                break;
                        default:
+                               if (section == mvm->cfg->nvm_hw_section_num) {
+                                       mvm->nvm_hw_blob.data = temp;
+                                       mvm->nvm_hw_blob.size = ret;
+                                       break;
+                               }
                                WARN(1, "section: %d", section);
                        }
 #endif
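
Aside (illustration only, not part of the patch): on the family-8000 path
added above, the two 16-bit header words of an external-NVM section swap
roles and the length word is byte-swapped before being scaled.  A standalone
sketch of just that header decoding, mirroring the macros in the hunk (the
file walking and copying code is omitted):

#include <stdint.h>
#include <stdio.h>

#define NVM_WORD1_LEN(x)                (8 * ((x) & 0x03FF))
#define NVM_WORD2_ID(x)                 ((x) >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x)    (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x)     ((x) >> 4)

/* word1/word2 are the already le16_to_cpu()'d header words of one section */
static void decode_section_header(uint16_t word1, uint16_t word2,
                                  int family_8000)
{
        unsigned int section_size, section_id;

        if (!family_8000) {
                section_size = 2 * NVM_WORD1_LEN(word1);
                section_id = NVM_WORD2_ID(word2);
        } else {
                section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(word2);
                section_id = NVM_WORD1_ID_FAMILY_8000(word1);
        }

        printf("section id %u, %u bytes\n", section_id, section_size);
}

int main(void)
{
        decode_section_header(0x0123, 0x4567, 0);       /* pre-8000 layout */
        decode_section_header(0x0123, 0x4567, 1);       /* family 8000 layout */
        return 0;
}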
diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c
new file mode 100644 (file)
index 0000000..9bfb95e
--- /dev/null
@@ -0,0 +1,215 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include "mvm.h"
+
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+                               struct iwl_wowlan_config_cmd_v2 *cmd)
+{
+       int i;
+
+       /*
+        * For QoS counters, we store the one to use next, so subtract 0x10
+        * since the uCode will add 0x10 *before* using the value while we
+        * increment after using the value (i.e. store the next value to use).
+        */
+       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               u16 seq = mvm_ap_sta->tid_data[i].seq_number;
+               seq -= 0x10;
+               cmd->qos_seq[i] = cpu_to_le16(seq);
+       }
+}
+
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+                              struct ieee80211_vif *vif,
+                              bool disable_offloading,
+                              u32 cmd_flags)
+{
+       union {
+               struct iwl_proto_offload_cmd_v1 v1;
+               struct iwl_proto_offload_cmd_v2 v2;
+               struct iwl_proto_offload_cmd_v3_small v3s;
+               struct iwl_proto_offload_cmd_v3_large v3l;
+       } cmd = {};
+       struct iwl_host_cmd hcmd = {
+               .id = PROT_OFFLOAD_CONFIG_CMD,
+               .flags = cmd_flags,
+               .data[0] = &cmd,
+               .dataflags[0] = IWL_HCMD_DFL_DUP,
+       };
+       struct iwl_proto_offload_cmd_common *common;
+       u32 enabled = 0, size;
+       u32 capa_flags = mvm->fw->ucode_capa.flags;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int i;
+
+       if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+           capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+               struct iwl_ns_config *nsc;
+               struct iwl_targ_addr *addrs;
+               int n_nsc, n_addrs;
+               int c;
+
+               if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+                       nsc = cmd.v3s.ns_config;
+                       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+                       addrs = cmd.v3s.targ_addrs;
+                       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+               } else {
+                       nsc = cmd.v3l.ns_config;
+                       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+                       addrs = cmd.v3l.targ_addrs;
+                       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+               }
+
+               if (mvmvif->num_target_ipv6_addrs)
+                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+               /*
+                * For each address we have (and that will fit) fill a target
+                * address struct and combine for NS offload structs with the
+                * solicited node addresses.
+                */
+               for (i = 0, c = 0;
+                    i < mvmvif->num_target_ipv6_addrs &&
+                    i < n_addrs && c < n_nsc; i++) {
+                       struct in6_addr solicited_addr;
+                       int j;
+
+                       addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+                                                 &solicited_addr);
+                       for (j = 0; j < c; j++)
+                               if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+                                                 &solicited_addr) == 0)
+                                       break;
+                       if (j == c)
+                               c++;
+                       addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+                       addrs[i].config_num = cpu_to_le32(j);
+                       nsc[j].dest_ipv6_addr = solicited_addr;
+                       memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+               }
+
+               if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+                       cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+               else
+                       cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+               if (mvmvif->num_target_ipv6_addrs) {
+                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+                       memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+               }
+
+               BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+                            sizeof(mvmvif->target_ipv6_addrs[0]));
+
+               for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+                                   IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
+                       memcpy(cmd.v2.target_ipv6_addr[i],
+                              &mvmvif->target_ipv6_addrs[i],
+                              sizeof(cmd.v2.target_ipv6_addr[i]));
+       } else {
+               if (mvmvif->num_target_ipv6_addrs) {
+                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+                       memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+               }
+
+               BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+                            sizeof(mvmvif->target_ipv6_addrs[0]));
+
+               for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+                                   IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
+                       memcpy(cmd.v1.target_ipv6_addr[i],
+                              &mvmvif->target_ipv6_addrs[i],
+                              sizeof(cmd.v1.target_ipv6_addr[i]));
+       }
+#endif
+
+       if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+               common = &cmd.v3s.common;
+               size = sizeof(cmd.v3s);
+       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+               common = &cmd.v3l.common;
+               size = sizeof(cmd.v3l);
+       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+               common = &cmd.v2.common;
+               size = sizeof(cmd.v2);
+       } else {
+               common = &cmd.v1.common;
+               size = sizeof(cmd.v1);
+       }
+
+       if (vif->bss_conf.arp_addr_cnt) {
+               enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
+               common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+               memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+       }
+
+       if (!disable_offloading)
+               common->enabled = cpu_to_le32(enabled);
+
+       hcmd.len[0] = size;
+       return iwl_mvm_send_cmd(mvm, &hcmd);
+}
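
Aside (illustration only, not part of the patch): the NS-offload loop in
iwl_mvm_send_proto_offload() above groups each target IPv6 address under its
solicited-node multicast address via the kernel's addrconf_addr_solict_mult().
That address is ff02::1:ffXX:XXXX, i.e. a fixed prefix plus the low 24 bits of
the unicast address.  A small userspace sketch of the derivation, for
reference only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* ff02::1:ffXX:XXXX, where XX:XXXX are the low 24 bits of @addr */
static void solicited_node_mcast(const struct in6_addr *addr,
                                 struct in6_addr *out)
{
        memset(out, 0, sizeof(*out));
        out->s6_addr[0]  = 0xff;
        out->s6_addr[1]  = 0x02;
        out->s6_addr[11] = 0x01;
        out->s6_addr[12] = 0xff;
        memcpy(&out->s6_addr[13], &addr->s6_addr[13], 3);
}

int main(void)
{
        struct in6_addr in, sol;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8::abcd:1234", &in);
        solicited_node_mcast(&in, &sol);
        printf("%s\n", inet_ntop(AF_INET6, &sol, buf, sizeof(buf)));
        /* prints ff02::1:ffcd:1234 */
        return 0;
}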
index a3d43de342d7389b9b8ea2d5fcbfe7a25b18136a..9545d7fdd4bfc69dfb1fb8c4e07de097d58b6ea7 100644 (file)
@@ -61,6 +61,7 @@
  *
  *****************************************************************************/
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <net/mac80211.h>
 
 #include "iwl-notif-wait.h"
@@ -78,6 +79,7 @@
 #include "iwl-prph.h"
 #include "rs.h"
 #include "fw-api-scan.h"
+#include "fw-error-dump.h"
 #include "time-event.h"
 
 /*
@@ -185,9 +187,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
-       iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
-                              APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
-                              ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+       if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+                                      APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+                                      ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
 }
 
 struct iwl_rx_handlers {
@@ -219,13 +222,17 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
        RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
        RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
+       RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
+                  iwl_mvm_rx_ant_coupling_notif, true),
 
        RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
 
+       RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+
        RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
-       RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+       RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
-                  iwl_mvm_rx_scan_offload_complete_notif, false),
+                  iwl_mvm_rx_scan_offload_complete_notif, true),
        RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
                   false),
 
@@ -242,7 +249,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 #undef RX_HANDLER
 #define CMD(x) [x] = #x
 
-static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(MVM_ALIVE),
        CMD(REPLY_ERROR),
        CMD(INIT_COMPLETE_NOTIF),
@@ -284,9 +291,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BEACON_NOTIFICATION),
        CMD(BEACON_TEMPLATE_CMD),
        CMD(STATISTICS_NOTIFICATION),
+       CMD(EOSP_NOTIFICATION),
        CMD(REDUCE_TX_POWER_CMD),
        CMD(TX_ANT_CONFIGURATION_CMD),
        CMD(D3_CONFIG_CMD),
+       CMD(D0I3_END_CMD),
        CMD(PROT_OFFLOAD_CONFIG_CMD),
        CMD(OFFLOADS_QUERY_CMD),
        CMD(REMOTE_WAKE_CONFIG_CMD),
@@ -309,17 +318,37 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BT_PROFILE_NOTIFICATION),
        CMD(BT_CONFIG),
        CMD(MCAST_FILTER_CMD),
+       CMD(BCAST_FILTER_CMD),
        CMD(REPLY_SF_CFG_CMD),
        CMD(REPLY_BEACON_FILTERING_CMD),
        CMD(REPLY_THERMAL_MNG_BACKOFF),
        CMD(MAC_PM_POWER_TABLE),
        CMD(BT_COEX_CI),
        CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+       CMD(ANTENNA_COUPLING_NOTIFICATION),
 };
 #undef CMD
 
 /* this forward declaration avoids having to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+       const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
+
+       if (!pwr_tx_backoff)
+               return 0;
+
+       while (pwr_tx_backoff->pwr) {
+               if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
+                       return pwr_tx_backoff->backoff;
+
+               pwr_tx_backoff++;
+       }
+
+       return 0;
+}
 
 static struct iwl_op_mode *
 iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
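
Aside (illustration only, not part of the patch): calc_min_backoff(), added
above, walks the per-config cfg->pwr_tx_backoffs table and returns the backoff
of the first entry whose power threshold does not exceed the platform's
default power limit; the early return implies the table is ordered from the
highest threshold down and terminated by a zero entry.  A sketch with made-up
values:

#include <stdio.h>

struct pwr_tx_backoff {
        unsigned int pwr;       /* platform power threshold */
        unsigned int backoff;   /* Tx backoff to apply */
};

/* made-up table, ordered from the highest threshold down */
static const struct pwr_tx_backoff example_backoffs[] = {
        { .pwr = 1600, .backoff = 0 },
        { .pwr = 1300, .backoff = 1000 },
        { .pwr = 1000, .backoff = 3000 },
        { .pwr = 0 },                   /* sentinel terminates the walk */
};

static unsigned int example_min_backoff(unsigned int dflt_pwr_limit)
{
        const struct pwr_tx_backoff *p;

        for (p = example_backoffs; p->pwr; p++)
                if (dflt_pwr_limit >= p->pwr)
                        return p->backoff;

        return 0;       /* below every threshold: no backoff entry applies */
}

int main(void)
{
        printf("limit 1700 -> backoff %u\n", example_min_backoff(1700));
        printf("limit 1100 -> backoff %u\n", example_min_backoff(1100));
        return 0;
}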
@@ -333,6 +362,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                TX_CMD,
        };
        int err, scan_size;
+       u32 min_backoff;
+
+       /*
+        * We use IWL_MVM_STATION_COUNT to check the validity of the station
+        * index all over the driver - check that its value corresponds to the
+        * array size.
+        */
+       BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
 
        /********************************
         * 1. Allocating and configuring HW data
@@ -373,6 +410,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
        INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+       INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+
+       spin_lock_init(&mvm->d0i3_tx_lock);
+       skb_queue_head_init(&mvm->d0i3_tx);
+       init_waitqueue_head(&mvm->d0i3_exit_waitq);
 
        SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
 
@@ -421,7 +463,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
                 mvm->cfg->name, mvm->trans->hw_rev);
 
-       iwl_mvm_tt_initialize(mvm);
+       min_backoff = calc_min_backoff(trans, cfg);
+       iwl_mvm_tt_initialize(mvm, min_backoff);
 
        /*
         * If the NVM exists in an external file,
@@ -462,13 +505,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (err)
                goto out_unregister;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
-               mvm->pm_ops = &pm_mac_ops;
-       else
-               mvm->pm_ops = &pm_legacy_ops;
-
        memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
 
+       /* rpm starts with a taken ref. only set the appropriate bit here. */
+       set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
+
        return op_mode;
 
  out_unregister:
@@ -495,6 +536,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        ieee80211_unregister_hw(mvm->hw);
 
        kfree(mvm->scan_cmd);
+       vfree(mvm->fw_error_dump);
+       kfree(mvm->fw_error_sram);
        kfree(mvm->mcast_filter_cmd);
        mvm->mcast_filter_cmd = NULL;
 
@@ -508,7 +551,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        mvm->phy_db = NULL;
 
        iwl_free_nvm_data(mvm->nvm_data);
-       for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+       for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
        ieee80211_free_hw(mvm->hw);
@@ -658,7 +701,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
        wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
 }
 
-static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
@@ -667,9 +710,9 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
        else
                clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
 
-       if (state && mvm->cur_ucode != IWL_UCODE_INIT)
-               iwl_trans_stop_device(mvm->trans);
        wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
+
+       return state && mvm->cur_ucode != IWL_UCODE_INIT;
 }
 
 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
@@ -702,6 +745,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
 {
        iwl_abort_notification_waits(&mvm->notif_wait);
 
+       /*
+        * This is a bit racy, but worst case we tell mac80211 about
+        * a stopped/aborted scan when that was already done which
+        * is not a problem. It is necessary to abort any os scan
+        * here because mac80211 requires having the scan cleared
+        * before restarting.
+        * We'll reset the scan_status to NONE in restart cleanup in
+        * the next start() call from mac80211. If restart isn't called
+        * (no fw restart), the scan status will stay busy.
+        */
+       switch (mvm->scan_status) {
+       case IWL_MVM_SCAN_NONE:
+               break;
+       case IWL_MVM_SCAN_OS:
+               ieee80211_scan_completed(mvm->hw, true);
+               break;
+       case IWL_MVM_SCAN_SCHED:
+               /* Sched scan will be restarted by mac80211 in restart_hw. */
+               if (!mvm->restart_fw)
+                       ieee80211_sched_scan_stopped(mvm->hw);
+               break;
+       }
+
        /*
         * If we're restarting already, don't cycle restarts.
         * If INIT fw asserted, it will likely fail again.
@@ -733,25 +799,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
        } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
-               /*
-                * This is a bit racy, but worst case we tell mac80211 about
-                * a stopped/aborted (sched) scan when that was already done
-                * which is not a problem. It is necessary to abort any scan
-                * here because mac80211 requires having the scan cleared
-                * before restarting.
-                * We'll reset the scan_status to NONE in restart cleanup in
-                * the next start() call from mac80211.
-                */
-               switch (mvm->scan_status) {
-               case IWL_MVM_SCAN_NONE:
-                       break;
-               case IWL_MVM_SCAN_OS:
-                       ieee80211_scan_completed(mvm->hw, true);
-                       break;
-               case IWL_MVM_SCAN_SCHED:
-                       ieee80211_sched_scan_stopped(mvm->hw);
-                       break;
-               }
+               /* don't let the transport/FW power down */
+               iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
                if (mvm->restart_fw > 0)
                        mvm->restart_fw--;
@@ -759,13 +808,52 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
        }
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
+{
+       struct iwl_fw_error_dump_file *dump_file;
+       struct iwl_fw_error_dump_data *dump_data;
+       u32 file_len;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (mvm->fw_error_dump)
+               return;
+
+       file_len = mvm->fw_error_sram_len +
+                  sizeof(*dump_file) +
+                  sizeof(*dump_data);
+
+       dump_file = vmalloc(file_len);
+       if (!dump_file)
+               return;
+
+       mvm->fw_error_dump = dump_file;
+
+       dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+       dump_file->file_len = cpu_to_le32(file_len);
+       dump_data = (void *)dump_file->data;
+       dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
+       dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
+
+       /*
+        * No need for a lock since at this stage the FW isn't loaded, so it
+        * can't assert - we are the only ones who can possibly be accessing
+        * mvm->fw_error_sram right now.
+        */
+       memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
+}
+#endif
+
 static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
 {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 
        iwl_mvm_dump_nic_error_log(mvm);
-       if (!mvm->restart_fw)
-               iwl_mvm_dump_sram(mvm);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       iwl_mvm_fw_error_sram_dump(mvm);
+#endif
 
        iwl_mvm_nic_restart(mvm);
 }
@@ -778,6 +866,323 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
        iwl_mvm_nic_restart(mvm);
 }
 
+struct iwl_d0i3_iter_data {
+       struct iwl_mvm *mvm;
+       u8 ap_sta_id;
+       u8 vif_count;
+       u8 offloading_tid;
+       bool disable_offloading;
+};
+
+static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       struct iwl_d0i3_iter_data *iter_data)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_sta *ap_sta;
+       struct iwl_mvm_sta *mvmsta;
+       u32 available_tids = 0;
+       u8 tid;
+
+       if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+                   mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+               return false;
+
+       ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+       if (IS_ERR_OR_NULL(ap_sta))
+               return false;
+
+       mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+       spin_lock_bh(&mvmsta->lock);
+       for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+               struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+               /*
+                * in case of pending tx packets, don't use this tid
+                * for offloading in order to prevent reuse of the same
+                * qos seq counters.
+                */
+               if (iwl_mvm_tid_queued(tid_data))
+                       continue;
+
+               if (tid_data->state != IWL_AGG_OFF)
+                       continue;
+
+               available_tids |= BIT(tid);
+       }
+       spin_unlock_bh(&mvmsta->lock);
+
+       /*
+        * disallow protocol offloading if we have no available tid
+        * (with no pending frames and no active aggregation,
+        * as we don't handle "holes" properly - the scheduler needs the
+        * frame's seq number and TFD index to match)
+        */
+       if (!available_tids)
+               return true;
+
+       /* for simplicity, just use the first available tid */
+       iter_data->offloading_tid = ffs(available_tids) - 1;
+       return false;
+}
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_d0i3_iter_data *data = _data;
+       struct iwl_mvm *mvm = data->mvm;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+       IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+       if (vif->type != NL80211_IFTYPE_STATION ||
+           !vif->bss_conf.assoc)
+               return;
+
+       /*
+        * in case of pending tx packets or active aggregations,
+        * avoid offloading features in order to prevent reuse of
+        * the same qos seq counters.
+        */
+       if (iwl_mvm_disallow_offloading(mvm, vif, data))
+               data->disable_offloading = true;
+
+       iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+       iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
+
+       /*
+        * on init/association, mvm already configures POWER_TABLE_CMD
+        * and REPLY_MCAST_FILTER_CMD, so currently don't
+        * reconfigure them (we might want to use different
+        * params later on, though).
+        */
+       data->ap_sta_id = mvmvif->ap_sta_id;
+       data->vif_count++;
+}
+
+static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
+                                   struct iwl_wowlan_config_cmd_v3 *cmd,
+                                   struct iwl_d0i3_iter_data *iter_data)
+{
+       struct ieee80211_sta *ap_sta;
+       struct iwl_mvm_sta *mvm_ap_sta;
+
+       if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
+               return;
+
+       rcu_read_lock();
+
+       ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
+       if (IS_ERR_OR_NULL(ap_sta))
+               goto out;
+
+       mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+       cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
+       cmd->offloading_tid = iter_data->offloading_tid;
+
+       /*
+        * The d0i3 uCode takes care of the nonqos counters,
+        * so configure only the qos seq ones.
+        */
+       iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
+out:
+       rcu_read_unlock();
+}
+
+static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+       int ret;
+       struct iwl_d0i3_iter_data d0i3_iter_data = {
+               .mvm = mvm,
+       };
+       struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
+               .common = {
+                       .wakeup_filter =
+                               cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+                                           IWL_WOWLAN_WAKEUP_BEACON_MISS |
+                                           IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+                                           IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+               },
+       };
+       struct iwl_d3_manager_config d3_cfg_cmd = {
+               .min_sleep_time = cpu_to_le32(1000),
+       };
+
+       IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+       /* make sure we have no running tx while configuring the qos */
+       set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+       synchronize_net();
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_enter_d0i3_iterator,
+                                                  &d0i3_iter_data);
+       if (d0i3_iter_data.vif_count == 1) {
+               mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+               mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
+       } else {
+               WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+               mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+               mvm->d0i3_offloading = false;
+       }
+
+       iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
+       ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+                                  sizeof(wowlan_config_cmd),
+                                  &wowlan_config_cmd);
+       if (ret)
+               return ret;
+
+       return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+                                   flags | CMD_MAKE_TRANS_IDLE,
+                                   sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = _data;
+       u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+       IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+       if (vif->type != NL80211_IFTYPE_STATION ||
+           !vif->bss_conf.assoc)
+               return;
+
+       iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
+                                        struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
+           mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+               ieee80211_connection_loss(vif);
+}
+
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
+{
+       struct ieee80211_sta *sta = NULL;
+       struct iwl_mvm_sta *mvm_ap_sta;
+       int i;
+       bool wake_queues = false;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       spin_lock_bh(&mvm->d0i3_tx_lock);
+
+       if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
+               goto out;
+
+       IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+
+       /* get the sta in order to update seq numbers and re-enqueue skbs */
+       sta = rcu_dereference_protected(
+                       mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+                       lockdep_is_held(&mvm->mutex));
+
+       if (IS_ERR_OR_NULL(sta)) {
+               sta = NULL;
+               goto out;
+       }
+
+       if (mvm->d0i3_offloading && qos_seq) {
+               /* update qos seq numbers if offloading was enabled */
+               mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
+               for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+                       u16 seq = le16_to_cpu(qos_seq[i]);
+                       /* firmware stores last-used one, we store next one */
+                       seq += 0x10;
+                       mvm_ap_sta->tid_data[i].seq_number = seq;
+               }
+       }
+out:
+       /* re-enqueue (or drop) all packets */
+       while (!skb_queue_empty(&mvm->d0i3_tx)) {
+               struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
+
+               if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
+                       ieee80211_free_txskb(mvm->hw, skb);
+
+               /* if the skb_queue is not empty, we need to wake queues */
+               wake_queues = true;
+       }
+       clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+       wake_up(&mvm->d0i3_exit_waitq);
+       mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+       if (wake_queues)
+               ieee80211_wake_queues(mvm->hw);
+
+       spin_unlock_bh(&mvm->d0i3_tx_lock);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+       struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+       struct iwl_host_cmd get_status_cmd = {
+               .id = WOWLAN_GET_STATUSES,
+               .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+       };
+       struct iwl_wowlan_status_v6 *status;
+       int ret;
+       u32 disconnection_reasons, wakeup_reasons;
+       __le16 *qos_seq = NULL;
+
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+       if (ret)
+               goto out;
+
+       if (!get_status_cmd.resp_pkt)
+               goto out;
+
+       status = (void *)get_status_cmd.resp_pkt->data;
+       wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+       qos_seq = status->qos_seq_ctr;
+
+       IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+       disconnection_reasons =
+               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+               IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+       if (wakeup_reasons & disconnection_reasons)
+               ieee80211_iterate_active_interfaces(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_d0i3_disconnect_iter, mvm);
+
+       iwl_free_resp(&get_status_cmd);
+out:
+       iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+       mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+       struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+       u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+                   CMD_WAKE_UP_TRANS;
+       int ret;
+
+       IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+       if (ret)
+               goto out;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_mvm_exit_d0i3_iterator,
+                                                  mvm);
+out:
+       schedule_work(&mvm->d0i3_exit_work);
+       return ret;
+}
+
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .start = iwl_op_mode_mvm_start,
        .stop = iwl_op_mode_mvm_stop,
@@ -789,4 +1194,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
        .nic_error = iwl_mvm_nic_error,
        .cmd_queue_full = iwl_mvm_cmd_queue_full,
        .nic_config = iwl_mvm_nic_config,
+       .enter_d0i3 = iwl_mvm_enter_d0i3,
+       .exit_d0i3 = iwl_mvm_exit_d0i3,
 };
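
The qos_seq handling in iwl_mvm_d0i3_enable_tx() above bumps each firmware-reported value by 0x10 because the 802.11 sequence-control field keeps the fragment number in bits 0-3 and the sequence number in bits 4-15, so the driver ends up storing the next sequence number. A minimal standalone sketch of that arithmetic, with made-up values and no driver dependencies:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* sequence-control layout: bits 0-3 fragment, bits 4-15 sequence */
		uint16_t fw_last_used = 0x0350;             /* seq 53, fragment 0 (example) */
		uint16_t driver_next = fw_last_used + 0x10; /* advance sequence by one */

		printf("fw last-used: seq=%u frag=%u\n",
		       fw_last_used >> 4, fw_last_used & 0xf);
		printf("driver next : seq=%u frag=%u\n",
		       driver_next >> 4, driver_next & 0xf);
		return 0;
	}
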
index b7268c0b33339c975a93eca4450d145bfa27137b..237efe0ac1c44dab52d375ced3cb84912aabe082 100644 (file)
@@ -156,13 +156,13 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
        idle_cnt = chains_static;
        active_cnt = chains_dynamic;
 
-       cmd->rxchain_info = cpu_to_le32(iwl_fw_valid_rx_ant(mvm->fw) <<
+       cmd->rxchain_info = cpu_to_le32(mvm->fw->valid_rx_ant <<
                                        PHY_RX_CHAIN_VALID_POS);
        cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
        cmd->rxchain_info |= cpu_to_le32(active_cnt <<
                                         PHY_RX_CHAIN_MIMO_CNT_POS);
 
-       cmd->txchain_info = cpu_to_le32(iwl_fw_valid_tx_ant(mvm->fw));
+       cmd->txchain_info = cpu_to_le32(mvm->fw->valid_tx_ant);
 }
 
 /*
index d9eab3b7bb9f871a37b818e5a63e1320a12d7189..6b636eab33391cbec4957180efe2e74d2ad07388 100644 (file)
 
 #define POWER_KEEP_ALIVE_PERIOD_SEC    25
 
+static
 int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
-                                  struct iwl_beacon_filter_cmd *cmd)
+                                  struct iwl_beacon_filter_cmd *cmd,
+                                  u32 flags)
 {
-       int ret;
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
-                                  sizeof(struct iwl_beacon_filter_cmd), cmd);
-
-       if (!ret) {
-               IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
-                               le32_to_cpu(cmd->ba_enable_beacon_abort));
-               IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
-                               le32_to_cpu(cmd->ba_escape_timer));
-               IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
-                               le32_to_cpu(cmd->bf_debug_flag));
-               IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
-                               le32_to_cpu(cmd->bf_enable_beacon_filter));
-               IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
-                               le32_to_cpu(cmd->bf_energy_delta));
-               IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
-                               le32_to_cpu(cmd->bf_escape_timer));
-               IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
-                               le32_to_cpu(cmd->bf_roaming_energy_delta));
-               IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
-                               le32_to_cpu(cmd->bf_roaming_state));
-               IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
-                               le32_to_cpu(cmd->bf_temp_threshold));
-               IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
-                               le32_to_cpu(cmd->bf_temp_fast_filter));
-               IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
-                               le32_to_cpu(cmd->bf_temp_slow_filter));
-       }
-       return ret;
+       IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+                       le32_to_cpu(cmd->ba_enable_beacon_abort));
+       IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+                       le32_to_cpu(cmd->ba_escape_timer));
+       IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+                       le32_to_cpu(cmd->bf_debug_flag));
+       IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+                       le32_to_cpu(cmd->bf_enable_beacon_filter));
+       IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+                       le32_to_cpu(cmd->bf_energy_delta));
+       IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+                       le32_to_cpu(cmd->bf_escape_timer));
+       IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+                       le32_to_cpu(cmd->bf_roaming_energy_delta));
+       IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+                       le32_to_cpu(cmd->bf_roaming_state));
+       IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+                       le32_to_cpu(cmd->bf_temp_threshold));
+       IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+                       le32_to_cpu(cmd->bf_temp_fast_filter));
+       IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+                       le32_to_cpu(cmd->bf_temp_slow_filter));
+
+       return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+                                   sizeof(struct iwl_beacon_filter_cmd), cmd);
 }
 
 static
@@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
        mvmvif->bf_data.ba_enabled = enable;
        iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
        iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-       return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+       return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
 }
 
 static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
        cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
 
-       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
-           mvm->ps_prevented)
+       if (mvm->ps_disabled)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
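
The keep-alive rounding visible as context at the top of this hunk follows the computation also seen in the legacy code removed later in this patch: the period must cover at least three DTIM intervals, is never shorter than the 25-second default (POWER_KEEP_ALIVE_PERIOD_SEC), and is rounded up to whole seconds. A small illustrative sketch with example numbers, not taken from any real configuration:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
	#define MSEC_PER_SEC            1000
	#define KEEP_ALIVE_DEFAULT_SEC  25      /* POWER_KEEP_ALIVE_PERIOD_SEC */

	int main(void)
	{
		int dtim_period = 3, beacon_int = 100;  /* beacons roughly every 100 ms */
		int dtimper_msec = dtim_period * beacon_int;

		/* at least 3 DTIM intervals, but never below the default */
		int keep_alive = 3 * dtimper_msec;
		if (keep_alive < MSEC_PER_SEC * KEEP_ALIVE_DEFAULT_SEC)
			keep_alive = MSEC_PER_SEC * KEEP_ALIVE_DEFAULT_SEC;

		keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
		printf("keep_alive = %d seconds\n", keep_alive);        /* 25 here */
		return 0;
	}
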
@@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
            mvmvif->dbgfs_pm.disable_power_off)
                cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
 #endif
-       if (!vif->bss_conf.ps || mvmvif->pm_prevented)
+       if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
+           mvm->pm_disabled)
                return;
 
        cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -419,72 +416,44 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 }
 
-static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
 {
-       int ret;
-       bool ba_enable;
        struct iwl_mac_power_cmd cmd = {};
 
        if (vif->type != NL80211_IFTYPE_STATION)
                return 0;
 
        if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS))
+           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM))
                return 0;
 
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
        iwl_mvm_power_log(mvm, &cmd);
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
-                                  sizeof(cmd), &cmd);
-       if (ret)
-               return ret;
-
-       ba_enable = !!(cmd.flags &
-                      cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
-       return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
-                                    struct ieee80211_vif *vif)
-{
-       struct iwl_mac_power_cmd cmd = {};
-       struct iwl_mvm_vif *mvmvif __maybe_unused =
-               iwl_mvm_vif_from_mac80211(vif);
-
-       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
-               return 0;
-
-       cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
-                                                          mvmvif->color));
-
-       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
-               cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
-           mvmvif->dbgfs_pm.disable_power_off)
-               cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+       memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
 #endif
-       iwl_mvm_power_log(mvm, &cmd);
 
-       return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
+       return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
                                    sizeof(cmd), &cmd);
 }
 
-static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
 {
        struct iwl_device_power_cmd cmd = {
                .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
        };
 
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+               return 0;
+
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
                return 0;
 
-       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
-           force_disable)
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+               mvm->ps_disabled = true;
+
+       if (mvm->ps_disabled)
                cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
                                    &cmd);
 }
 
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
-{
-       return _iwl_mvm_power_update_device(mvm, false);
-}
-
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -544,44 +508,176 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
-                                          struct ieee80211_vif *vif)
+struct iwl_power_constraint {
+       struct ieee80211_vif *bf_vif;
+       struct ieee80211_vif *bss_vif;
+       struct ieee80211_vif *p2p_vif;
+       u16 bss_phyctx_id;
+       u16 p2p_phyctx_id;
+       bool pm_disabled;
+       bool ps_disabled;
+       struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_power_iterator(void *_data, u8 *mac,
+                                  struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_mvm *mvm = _data;
-       int ret;
+       struct iwl_power_constraint *power_iterator = _data;
+       struct iwl_mvm *mvm = power_iterator->mvm;
+
+       switch (ieee80211_vif_type_p2p(vif)) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               break;
+
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_AP:
+               /* no BSS power mgmt if we have an active AP */
+               if (mvmvif->ap_ibss_active)
+                       power_iterator->pm_disabled = true;
+               break;
+
+       case NL80211_IFTYPE_MONITOR:
+               /* no BSS power mgmt and no device power save */
+               power_iterator->pm_disabled = true;
+               power_iterator->ps_disabled = true;
+               break;
+
+       case NL80211_IFTYPE_P2P_CLIENT:
+               if (mvmvif->phy_ctxt)
+                       power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
+
+               /* we should have only one P2P vif */
+               WARN_ON(power_iterator->p2p_vif);
+               power_iterator->p2p_vif = vif;
+
+               IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
+                               power_iterator->p2p_phyctx_id,
+                               power_iterator->bss_phyctx_id);
+               if (!(mvm->fw->ucode_capa.flags &
+                     IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM)) {
+                       /* no BSS power mgmt if we have a P2P client */
+                       power_iterator->pm_disabled = true;
+               } else if (power_iterator->p2p_phyctx_id < MAX_PHYS &&
+                          power_iterator->bss_phyctx_id < MAX_PHYS &&
+                          power_iterator->p2p_phyctx_id ==
+                          power_iterator->bss_phyctx_id) {
+                       power_iterator->pm_disabled = true;
+               }
+               break;
+
+       case NL80211_IFTYPE_STATION:
+               if (mvmvif->phy_ctxt)
+                       power_iterator->bss_phyctx_id = mvmvif->phy_ctxt->id;
+
+               /* we should have only one BSS vif */
+               WARN_ON(power_iterator->bss_vif);
+               power_iterator->bss_vif = vif;
+
+               if (mvmvif->bf_data.bf_enabled &&
+                   !WARN_ON(power_iterator->bf_vif))
+                       power_iterator->bf_vif = vif;
+
+               IWL_DEBUG_POWER(mvm, "bss: p2p_id=%d, bss_id=%d\n",
+                               power_iterator->p2p_phyctx_id,
+                               power_iterator->bss_phyctx_id);
+               if (mvm->fw->ucode_capa.flags &
+                   IWL_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM &&
+                       (power_iterator->p2p_phyctx_id < MAX_PHYS &&
+                        power_iterator->bss_phyctx_id < MAX_PHYS &&
+                        power_iterator->p2p_phyctx_id ==
+                        power_iterator->bss_phyctx_id))
+                       power_iterator->pm_disabled = true;
+               break;
+
+       default:
+               break;
+       }
+}
 
-       mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+static void
+iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
+                                   struct iwl_power_constraint *constraint)
+{
+       lockdep_assert_held(&mvm->mutex);
+
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+               constraint->pm_disabled = true;
+               constraint->ps_disabled = true;
+       }
 
-       ret = iwl_mvm_power_mac_update_mode(mvm, vif);
-       WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                           IEEE80211_IFACE_ITER_NORMAL,
+                                           iwl_mvm_power_iterator, constraint);
 }
 
-static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
-                                         struct ieee80211_vif *vif,
-                                         bool assign)
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_power_constraint constraint = {
+                   .p2p_phyctx_id = MAX_PHYS,
+                   .bss_phyctx_id = MAX_PHYS,
+                   .mvm = mvm,
+       };
+       bool ba_enable;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+               return 0;
+
+       iwl_mvm_power_get_global_constraint(mvm, &constraint);
+       mvm->ps_disabled = constraint.ps_disabled;
+       mvm->pm_disabled = constraint.pm_disabled;
+
+       /* don't update device power state unless we add / remove monitor */
        if (vif->type == NL80211_IFTYPE_MONITOR) {
-               int ret = _iwl_mvm_power_update_device(mvm, assign);
-               mvm->ps_prevented = assign;
-               WARN_ONCE(ret, "Failed to update power device state\n");
+               ret = iwl_mvm_power_update_device(mvm);
+               if (ret)
+                       return ret;
        }
 
-       ieee80211_iterate_active_interfaces(mvm->hw,
-                                           IEEE80211_IFACE_ITER_NORMAL,
-                                           iwl_mvm_power_binding_iterator,
-                                           mvm);
+       if (constraint.bss_vif) {
+               ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+               if (ret)
+                       return ret;
+       }
+
+       if (constraint.p2p_vif) {
+               ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
+               if (ret)
+                       return ret;
+       }
+
+       if (!constraint.bf_vif)
+               return 0;
+
+       vif = constraint.bf_vif;
+       mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+                     !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
+
+       return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
 }
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
-                                       struct ieee80211_vif *vif, char *buf,
-                                       int bufsz)
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif, char *buf,
+                                int bufsz)
 {
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mac_power_cmd cmd = {};
        int pos = 0;
 
-       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+       if (WARN_ON(!(mvm->fw->ucode_capa.flags &
+                     IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
+               return 0;
+
+       mutex_lock(&mvm->mutex);
+       memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+       mutex_unlock(&mvm->mutex);
 
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
                pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@@ -685,32 +781,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
 }
 #endif
 
-int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
-                                struct ieee80211_vif *vif)
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+                                        struct ieee80211_vif *vif,
+                                        struct iwl_beacon_filter_cmd *cmd,
+                                        u32 cmd_flags,
+                                        bool d0i3)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_beacon_filter_cmd cmd = {
-               IWL_BF_CMD_CONFIG_DEFAULTS,
-               .bf_enable_beacon_filter = cpu_to_le32(1),
-       };
        int ret;
 
        if (mvmvif != mvm->bf_allowed_vif ||
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
-       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
-       iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-       ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+       if (!d0i3)
+               iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+       ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
 
-       if (!ret)
+       /* don't change bf_enabled in case of temporary d0i3 configuration */
+       if (!ret && !d0i3)
                mvmvif->bf_data.bf_enabled = true;
 
        return ret;
 }
 
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
+                                u32 flags)
+{
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
+       };
+
+       return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
 int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
-                                 struct ieee80211_vif *vif)
+                                 struct ieee80211_vif *vif,
+                                 u32 flags)
 {
        struct iwl_beacon_filter_cmd cmd = {};
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -720,7 +830,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
-       ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+       ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
 
        if (!ret)
                mvmvif->bf_data.bf_enabled = false;
@@ -728,23 +838,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
        return ret;
 }
 
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
-                                struct ieee80211_vif *vif)
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+                                  struct ieee80211_vif *vif,
+                                  bool enable, u32 flags)
 {
+       int ret;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mac_power_cmd cmd = {};
 
-       if (!mvmvif->bf_data.bf_enabled)
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
-       return iwl_mvm_enable_beacon_filter(mvm, vif);
-}
+       if (!vif->bss_conf.assoc)
+               return 0;
 
-const struct iwl_mvm_power_ops pm_mac_ops = {
-       .power_update_mode = iwl_mvm_power_mac_update_mode,
-       .power_update_device_mode = iwl_mvm_power_update_device,
-       .power_disable = iwl_mvm_power_mac_disable,
-       .power_update_binding = _iwl_mvm_power_update_binding,
+       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+       if (enable) {
+               /* configure skip over dtim up to 300 msec */
+               int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+               int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+               if (WARN_ON(!dtimper_msec))
+                       return 0;
+
+               cmd.flags |=
+                       cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd.skip_dtim_periods = 300 / dtimper_msec;
+       }
+       iwl_mvm_power_log(mvm, &cmd);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-       .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+       memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
 #endif
-};
+       ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+                                  sizeof(cmd), &cmd);
+       if (ret)
+               return ret;
+
+       /* configure beacon filtering */
+       if (mvmvif != mvm->bf_allowed_vif)
+               return 0;
+
+       if (enable) {
+               struct iwl_beacon_filter_cmd cmd_bf = {
+                       IWL_BF_CMD_CONFIG_D0I3,
+                       .bf_enable_beacon_filter = cpu_to_le32(1),
+               };
+               ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+                                                   flags, true);
+       } else {
+               if (mvmvif->bf_data.bf_enabled)
+                       ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+               else
+                       ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+       }
+
+       return ret;
+}
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
+                                bool force,
+                                u32 flags)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (mvmvif != mvm->bf_allowed_vif)
+               return 0;
+
+       if (!mvmvif->bf_data.bf_enabled) {
+               /* disable beacon filtering explicitly if force is true */
+               if (force)
+                       return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+               return 0;
+       }
+
+       return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+}
+
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
+{
+       struct iwl_powertable_cmd cmd = {
+               .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
+       };
+
+       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+                                   sizeof(cmd), &cmd);
+}
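
iwl_mvm_update_d0i3_power_mode() above programs skip-over-DTIM for at most 300 ms of sleep; the division treats the beacon interval (nominally in TUs of 1024 us) as roughly one millisecond each. A self-contained sketch of that calculation, with illustrative inputs only:

	#include <stdio.h>

	static int skip_dtim_periods(int dtim_period, int beacon_int)
	{
		int dtimper = dtim_period ? dtim_period : 1;    /* same 0 guard as the driver */
		int dtimper_msec = dtimper * beacon_int;        /* ~ms between DTIM beacons */

		if (!dtimper_msec)
			return 0;
		return 300 / dtimper_msec;      /* sleep across this many DTIM periods */
	}

	int main(void)
	{
		/* DTIM every 3rd beacon, 100 TU interval: ~300 ms per DTIM -> skip 1 */
		printf("skip %d DTIM period(s)\n", skip_dtim_periods(3, 100));
		/* DTIM every beacon, 100 TU interval: ~100 ms per DTIM -> skip 3 */
		printf("skip %d DTIM period(s)\n", skip_dtim_periods(1, 100));
		return 0;
	}
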
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
deleted file mode 100644 (file)
index ef712ae..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- *  Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-debug.h"
-#include "mvm.h"
-#include "iwl-modparams.h"
-#include "fw-api-power.h"
-
-#define POWER_KEEP_ALIVE_PERIOD_SEC    25
-
-static void iwl_mvm_power_log(struct iwl_mvm *mvm,
-                             struct iwl_powertable_cmd *cmd)
-{
-       IWL_DEBUG_POWER(mvm,
-                       "Sending power table command for power level %d, flags = 0x%X\n",
-                       iwlmvm_mod_params.power_scheme,
-                       le16_to_cpu(cmd->flags));
-       IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
-       if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
-               IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
-                               le32_to_cpu(cmd->rx_data_timeout));
-               IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
-                               le32_to_cpu(cmd->tx_data_timeout));
-               if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
-                       IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
-                                       le32_to_cpu(cmd->skip_dtim_periods));
-               if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-                       IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
-                                       le32_to_cpu(cmd->lprx_rssi_threshold));
-       }
-}
-
-static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
-                                   struct ieee80211_vif *vif,
-                                   struct iwl_powertable_cmd *cmd)
-{
-       struct ieee80211_hw *hw = mvm->hw;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       struct ieee80211_channel *chan;
-       int dtimper, dtimper_msec;
-       int keep_alive;
-       bool radar_detect = false;
-       struct iwl_mvm_vif *mvmvif __maybe_unused =
-               iwl_mvm_vif_from_mac80211(vif);
-
-       /*
-        * Regardless of power management state the driver must set
-        * keep alive period. FW will use it for sending keep alive NDPs
-        * immediately after association.
-        */
-       cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
-
-       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
-               return;
-
-       cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-       if (!vif->bss_conf.assoc)
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
-           mvmvif->dbgfs_pm.disable_power_off)
-               cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
-       if (!vif->bss_conf.ps)
-               return;
-
-       cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
-       if (vif->bss_conf.beacon_rate &&
-           (vif->bss_conf.beacon_rate->bitrate == 10 ||
-            vif->bss_conf.beacon_rate->bitrate == 60)) {
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
-               cmd->lprx_rssi_threshold =
-                       cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
-       }
-
-       dtimper = hw->conf.ps_dtim_period ?: 1;
-
-       /* Check if radar detection is required on current channel */
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       WARN_ON(!chanctx_conf);
-       if (chanctx_conf) {
-               chan = chanctx_conf->def.chan;
-               radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
-       }
-       rcu_read_unlock();
-
-       /* Check skip over DTIM conditions */
-       if (!radar_detect && (dtimper <= 10) &&
-           (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
-            mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-               cmd->skip_dtim_periods = cpu_to_le32(3);
-       }
-
-       /* Check that keep alive period is at least 3 * DTIM */
-       dtimper_msec = dtimper * vif->bss_conf.beacon_int;
-       keep_alive = max_t(int, 3 * dtimper_msec,
-                          MSEC_PER_SEC * cmd->keep_alive_seconds);
-       keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
-       cmd->keep_alive_seconds = keep_alive;
-
-       if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
-               cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
-               cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
-       } else {
-               cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
-               cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
-       }
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
-               cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
-               if (mvmvif->dbgfs_pm.skip_over_dtim)
-                       cmd->flags |=
-                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-               else
-                       cmd->flags &=
-                               cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
-       }
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
-               cmd->rx_data_timeout =
-                       cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
-               cmd->tx_data_timeout =
-                       cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
-               cmd->skip_dtim_periods =
-                       cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
-               if (mvmvif->dbgfs_pm.lprx_ena)
-                       cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
-               else
-                       cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
-       }
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
-               cmd->lprx_rssi_threshold =
-                       cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
-#endif /* CONFIG_IWLWIFI_DEBUGFS */
-}
-
-static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
-                                           struct ieee80211_vif *vif)
-{
-       int ret;
-       bool ba_enable;
-       struct iwl_powertable_cmd cmd = {};
-
-       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
-               return 0;
-
-       /*
-        * TODO: The following vif_count verification is temporary condition.
-        * Avoid power mode update if more than one interface is currently
-        * active. Remove this condition when FW will support power management
-        * on multiple MACs.
-        */
-       IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
-                       mvm->vif_count);
-       if (mvm->vif_count > 1)
-               return 0;
-
-       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-       iwl_mvm_power_log(mvm, &cmd);
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
-                                  sizeof(cmd), &cmd);
-       if (ret)
-               return ret;
-
-       ba_enable = !!(cmd.flags &
-                      cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
-       return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
-                                       struct ieee80211_vif *vif)
-{
-       struct iwl_powertable_cmd cmd = {};
-       struct iwl_mvm_vif *mvmvif __maybe_unused =
-               iwl_mvm_vif_from_mac80211(vif);
-
-       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
-               return 0;
-
-       if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
-               cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
-           mvmvif->dbgfs_pm.disable_power_off)
-               cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
-       iwl_mvm_power_log(mvm, &cmd);
-
-       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
-                                   sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
-                                          struct ieee80211_vif *vif, char *buf,
-                                          int bufsz)
-{
-       struct iwl_powertable_cmd cmd = {};
-       int pos = 0;
-
-       iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
-       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
-                        (cmd.flags &
-                        cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
-                        0 : 1);
-       pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
-                        le32_to_cpu(cmd.skip_dtim_periods));
-       pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
-                        iwlmvm_mod_params.power_scheme);
-       pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
-                        le16_to_cpu(cmd.flags));
-       pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
-                        cmd.keep_alive_seconds);
-
-       if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
-               pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
-                                (cmd.flags &
-                                cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
-                                1 : 0);
-               pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
-                                le32_to_cpu(cmd.rx_data_timeout));
-               pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
-                                le32_to_cpu(cmd.tx_data_timeout));
-               if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-                       pos += scnprintf(buf+pos, bufsz-pos,
-                                        "lprx_rssi_threshold = %d\n",
-                                        le32_to_cpu(cmd.lprx_rssi_threshold));
-       }
-       return pos;
-}
-#endif
-
-const struct iwl_mvm_power_ops pm_legacy_ops = {
-       .power_update_mode = iwl_mvm_power_legacy_update_mode,
-       .power_disable = iwl_mvm_power_legacy_disable,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
-#endif
-};
index ce5db6c4ef7e60e3ad557cd0e2d773738419d587..35e86e06dffda924f9f4ba75d36717ac311d3602 100644 (file)
 #include "fw-api.h"
 #include "mvm.h"
 
+#define QUOTA_100      IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
 struct iwl_mvm_quota_iterator_data {
        int n_interfaces[MAX_BINDINGS];
        int colors[MAX_BINDINGS];
+       int low_latency[MAX_BINDINGS];
+       int n_low_latency_bindings;
        struct ieee80211_vif *new_vif;
 };
 
@@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
                if (vif->bss_conf.assoc)
-                       data->n_interfaces[id]++;
-               break;
+                       break;
+               return;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_ADHOC:
                if (mvmvif->ap_ibss_active)
-                       data->n_interfaces[id]++;
-               break;
+                       break;
+               return;
        case NL80211_IFTYPE_MONITOR:
                if (mvmvif->monitor_active)
-                       data->n_interfaces[id]++;
-               break;
+                       break;
+               return;
        case NL80211_IFTYPE_P2P_DEVICE:
-               break;
+               return;
        default:
                WARN_ON_ONCE(1);
-               break;
+               return;
+       }
+
+       data->n_interfaces[id]++;
+
+       if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+               data->n_low_latency_bindings++;
+               data->low_latency[id] = true;
        }
 }
 
@@ -162,7 +174,7 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 {
        struct iwl_time_quota_cmd cmd = {};
-       int i, idx, ret, num_active_macs, quota, quota_rem;
+       int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
        struct iwl_mvm_quota_iterator_data data = {
                .n_interfaces = {},
                .colors = { -1, -1, -1, -1 },
@@ -197,11 +209,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
                num_active_macs += data.n_interfaces[i];
        }
 
-       quota = 0;
-       quota_rem = 0;
-       if (num_active_macs) {
-               quota = IWL_MVM_MAX_QUOTA / num_active_macs;
-               quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
+       n_non_lowlat = num_active_macs;
+
+       if (data.n_low_latency_bindings == 1) {
+               for (i = 0; i < MAX_BINDINGS; i++) {
+                       if (data.low_latency[i]) {
+                               n_non_lowlat -= data.n_interfaces[i];
+                               break;
+                       }
+               }
+       }
+
+       if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+               /*
+                * Reserve quota for the low latency binding in case that
+                * there are several data bindings but only a single
+                * low latency one. Split the rest of the quota equally
+                * between the other data interfaces.
+                */
+               quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+               quota_rem = QUOTA_100 - n_non_lowlat * quota -
+                           QUOTA_LOWLAT_MIN;
+       } else if (num_active_macs) {
+               /*
+                * There are 0 or more than 1 low latency bindings, or all the
+                * data interfaces belong to the single low latency binding.
+                * Split the quota equally between the data interfaces.
+                */
+               quota = QUOTA_100 / num_active_macs;
+               quota_rem = QUOTA_100 % num_active_macs;
+       } else {
+               /* values don't really matter - won't be used */
+               quota = 0;
+               quota_rem = 0;
        }
 
        for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -211,19 +251,37 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
                cmd.quotas[idx].id_and_color =
                        cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
 
-               if (data.n_interfaces[i] <= 0) {
+               if (data.n_interfaces[i] <= 0)
                        cmd.quotas[idx].quota = cpu_to_le32(0);
-                       cmd.quotas[idx].max_duration = cpu_to_le32(0);
-               } else {
+               else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+                        data.low_latency[i])
+                       /*
+                        * There is more than one binding, but only one of the
+                        * bindings is in low latency. For this case, allocate
+                        * the minimal required quota for the low latency
+                        * binding.
+                        */
+                       cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+               else
                        cmd.quotas[idx].quota =
                                cpu_to_le32(quota * data.n_interfaces[i]);
-                       cmd.quotas[idx].max_duration = cpu_to_le32(0);
-               }
+
+               WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
+                         "Binding=%d, quota=%u > max=%u\n",
+                         idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
+
+               cmd.quotas[idx].max_duration = cpu_to_le32(0);
+
                idx++;
        }
 
-       /* Give the remainder of the session to the first binding */
-       le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+       /* Give the remainder of the session to the first data binding */
+       for (i = 0; i < MAX_BINDINGS; i++) {
+               if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
+                       le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
+                       break;
+               }
+       }
 
        iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
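
The new quota logic above reserves a minimum share for a lone low-latency binding and divides what is left equally among the remaining data interfaces, handing any integer remainder to the first data binding. A worked example with placeholder constants (100 quota units total, 35 reserved; these stand in for IWL_MVM_MAX_QUOTA and the low-latency minimum, whose actual values are not shown in this hunk):

	#include <stdio.h>

	int main(void)
	{
		const int quota_100 = 100;        /* placeholder for IWL_MVM_MAX_QUOTA */
		const int quota_lowlat_min = 35;  /* placeholder reserved minimum */
		int n_non_lowlat = 3;             /* interfaces outside the low-latency binding */

		int quota = (quota_100 - quota_lowlat_min) / n_non_lowlat;
		int quota_rem = quota_100 - n_non_lowlat * quota - quota_lowlat_min;

		printf("low-latency binding gets %d\n", quota_lowlat_min);
		printf("each other interface gets %d, remainder %d goes to the first data binding\n",
		       quota, quota_rem);
		return 0;
	}
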
 
index 6abf74e1351f0d97ab8a5161e4d2d81199f67c1b..568abd61b14fb5ebdd83d0e6f3ade7fb2bd054d3 100644 (file)
@@ -166,7 +166,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (sta->smps_mode == IEEE80211_SMPS_STATIC)
                return false;
 
-       if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 2)
+       if (num_of_ant(mvm->fw->valid_tx_ant) < 2)
                return false;
 
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
@@ -211,9 +211,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .next_columns = {
                        RS_COLUMN_LEGACY_ANT_B,
                        RS_COLUMN_SISO_ANT_A,
+                       RS_COLUMN_SISO_ANT_B,
                        RS_COLUMN_MIMO2,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
+                       RS_COLUMN_MIMO2_SGI,
                },
        },
        [RS_COLUMN_LEGACY_ANT_B] = {
@@ -221,10 +221,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .ant = ANT_B,
                .next_columns = {
                        RS_COLUMN_LEGACY_ANT_A,
+                       RS_COLUMN_SISO_ANT_A,
                        RS_COLUMN_SISO_ANT_B,
                        RS_COLUMN_MIMO2,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
+                       RS_COLUMN_MIMO2_SGI,
                },
        },
        [RS_COLUMN_SISO_ANT_A] = {
@@ -234,8 +234,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_B,
                        RS_COLUMN_MIMO2,
                        RS_COLUMN_SISO_ANT_A_SGI,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
+                       RS_COLUMN_SISO_ANT_B_SGI,
+                       RS_COLUMN_MIMO2_SGI,
                },
                .checks = {
                        rs_siso_allow,
@@ -248,8 +248,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_A,
                        RS_COLUMN_MIMO2,
                        RS_COLUMN_SISO_ANT_B_SGI,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
+                       RS_COLUMN_SISO_ANT_A_SGI,
+                       RS_COLUMN_MIMO2_SGI,
                },
                .checks = {
                        rs_siso_allow,
@@ -263,8 +263,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_B_SGI,
                        RS_COLUMN_MIMO2_SGI,
                        RS_COLUMN_SISO_ANT_A,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
+                       RS_COLUMN_SISO_ANT_B,
+                       RS_COLUMN_MIMO2,
                },
                .checks = {
                        rs_siso_allow,
@@ -279,8 +279,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
                        RS_COLUMN_SISO_ANT_A_SGI,
                        RS_COLUMN_MIMO2_SGI,
                        RS_COLUMN_SISO_ANT_B,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
+                       RS_COLUMN_SISO_ANT_A,
+                       RS_COLUMN_MIMO2,
                },
                .checks = {
                        rs_siso_allow,
@@ -292,10 +292,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .ant = ANT_AB,
                .next_columns = {
                        RS_COLUMN_SISO_ANT_A,
+                       RS_COLUMN_SISO_ANT_B,
+                       RS_COLUMN_SISO_ANT_A_SGI,
+                       RS_COLUMN_SISO_ANT_B_SGI,
                        RS_COLUMN_MIMO2_SGI,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_mimo_allow,
@@ -307,10 +307,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
                .sgi = true,
                .next_columns = {
                        RS_COLUMN_SISO_ANT_A_SGI,
+                       RS_COLUMN_SISO_ANT_B_SGI,
+                       RS_COLUMN_SISO_ANT_A,
+                       RS_COLUMN_SISO_ANT_B,
                        RS_COLUMN_MIMO2,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
-                       RS_COLUMN_INVALID,
                },
                .checks = {
                        rs_mimo_allow,
@@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
  * (2.4 GHz) band.
  */
 
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
        7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
 };
 
 /* Expected TpT tables. 4 indexes:
  * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
  */
-static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202, 216, 0},
        {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210, 225, 0},
        {0, 0, 0, 0, 49, 0,  97, 145, 192, 285, 375, 420, 464, 551, 0},
        {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
 };
 
-static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250,  257,  269,  275},
        {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257,  264,  275,  280},
        {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828,  911, 1070, 1173},
        {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
 };
 
-static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 130, 0, 191, 223, 244,  273,  288,  294,  298,  305,  308},
        {0, 0, 0, 0, 138, 0, 200, 231, 251,  279,  293,  298,  302,  308,  312},
        {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
        {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
 };
 
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250,  261, 0},
        {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256,  267, 0},
        {0, 0, 0, 0,  98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
        {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
 };
 
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 123, 0, 182, 214, 235,  264,  279,  285,  289,  296,  300},
        {0, 0, 0, 0, 131, 0, 191, 222, 242,  270,  284,  289,  293,  300,  303},
        {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
        {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
 };
 
-static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
        {0, 0, 0, 0, 182, 0, 240,  264,  278,  299,  308,  311,  313,  317,  319},
        {0, 0, 0, 0, 190, 0, 247,  269,  282,  302,  310,  313,  315,  319,  320},
        {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@@ -503,6 +503,14 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
        window->average_tpt = IWL_INVALID_VALUE;
 }
 
+static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
+{
+       int i;
+
+       for (i = 0; i < IWL_RATE_COUNT; i++)
+               rs_rate_scale_clear_window(&tbl->win[i]);
+}
+
 static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
 {
        return (ant_type & valid_antenna) == ant_type;
@@ -566,19 +574,13 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
  * at this rate.  window->data contains the bitmask of successful
  * packets.
  */
-static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
-                             int scale_index, int attempts, int successes)
+static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+                              int scale_index, int attempts, int successes,
+                              struct iwl_rate_scale_data *window)
 {
-       struct iwl_rate_scale_data *window = NULL;
        static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
        s32 fail_count, tpt;
 
-       if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
-               return -EINVAL;
-
-       /* Select window for current tx bit rate */
-       window = &(tbl->win[scale_index]);
-
        /* Get expected throughput */
        tpt = get_expected_tpt(tbl, scale_index);
 
@@ -636,6 +638,21 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
        return 0;
 }
 
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+                             int scale_index, int attempts, int successes)
+{
+       struct iwl_rate_scale_data *window = NULL;
+
+       if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+               return -EINVAL;
+
+       /* Select window for current tx bit rate */
+       window = &(tbl->win[scale_index]);
+
+       return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+                                  window);
+}
+
 /* Convert rs_rate object into ucode rate bitmask */
 static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
                                  struct rs_rate *rate)
@@ -905,7 +922,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
 
                rate->bw = RATE_MCS_CHAN_WIDTH_20;
 
-               WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+               WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
                             rate->index > IWL_RATE_MCS_9_INDEX);
 
                rate->index = rs_ht_to_legacy[rate->index];
@@ -917,7 +934,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
 
 
        if (num_of_ant(rate->ant) > 1)
-               rate->ant = first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+               rate->ant = first_antenna(mvm->fw->valid_tx_ant);
 
        /* Relevant in both switching to SISO or Legacy */
        rate->sgi = false;
@@ -1169,12 +1186,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
        lq_sta->visited_columns = 0;
 }
 
-static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
                                      const struct rs_tx_column *column,
                                      u32 bw)
 {
        /* Used to choose among HT tables */
-       s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+       const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
 
        if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
                         column->mode != RS_SISO &&
@@ -1262,9 +1279,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
            &(lq_sta->lq_info[lq_sta->active_tbl]);
        s32 active_sr = active_tbl->win[index].success_ratio;
        s32 active_tpt = active_tbl->expected_tpt[index];
-
        /* expected "search" throughput */
-       s32 *tpt_tbl = tbl->expected_tpt;
+       const u16 *tpt_tbl = tbl->expected_tpt;
 
        s32 new_rate, high, low, start_hi;
        u16 high_low;
@@ -1362,7 +1378,6 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
 static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 {
        struct iwl_scale_tbl_info *tbl;
-       int i;
        int active_tbl;
        int flush_interval_passed = 0;
        struct iwl_mvm *mvm;
@@ -1423,9 +1438,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
 
                                IWL_DEBUG_RATE(mvm,
                                               "LQ: stay in table clear win\n");
-                               for (i = 0; i < IWL_RATE_COUNT; i++)
-                                       rs_rate_scale_clear_window(
-                                               &(tbl->win[i]));
+                               rs_rate_scale_clear_tbl_windows(tbl);
                        }
                }
 
@@ -1434,8 +1447,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
                 * "search" table). */
                if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
                        IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
-                       for (i = 0; i < IWL_RATE_COUNT; i++)
-                               rs_rate_scale_clear_window(&(tbl->win[i]));
+                       rs_rate_scale_clear_tbl_windows(tbl);
                }
        }
 }
@@ -1478,8 +1490,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
        const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
        const struct rs_tx_column *next_col;
        allow_column_func_t allow_func;
-       u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
-       s32 *expected_tpt_tbl;
+       u8 valid_ants = mvm->fw->valid_tx_ant;
+       const u16 *expected_tpt_tbl;
        s32 tpt, max_expected_tpt;
 
        for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@@ -1725,7 +1737,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
        int low = IWL_RATE_INVALID;
        int high = IWL_RATE_INVALID;
        int index;
-       int i;
        struct iwl_rate_scale_data *window = NULL;
        int current_tpt = IWL_INVALID_VALUE;
        int low_tpt = IWL_INVALID_VALUE;
@@ -2010,8 +2021,7 @@ lq_update:
                if (lq_sta->search_better_tbl) {
                        /* Access the "search" table, clear its history. */
                        tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-                       for (i = 0; i < IWL_RATE_COUNT; i++)
-                               rs_rate_scale_clear_window(&(tbl->win[i]));
+                       rs_rate_scale_clear_tbl_windows(tbl);
 
                        /* Use new "search" start rate */
                        index = tbl->rate.index;
@@ -2090,7 +2100,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 
        i = lq_sta->last_txrate_idx;
 
-       valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
+       valid_tx_ant = mvm->fw->valid_tx_ant;
 
        if (!lq_sta->search_better_tbl)
                active_tbl = lq_sta->active_tbl;
@@ -2241,6 +2251,73 @@ static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
        }
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm,
+                                     struct iwl_mvm_frame_stats *stats)
+{
+       spin_lock_bh(&mvm->drv_stats_lock);
+       memset(stats, 0, sizeof(*stats));
+       spin_unlock_bh(&mvm->drv_stats_lock);
+}
+
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm,
+                               struct iwl_mvm_frame_stats *stats,
+                               u32 rate, bool agg)
+{
+       u8 nss = 0, mcs = 0;
+
+       spin_lock(&mvm->drv_stats_lock);
+
+       if (agg)
+               stats->agg_frames++;
+
+       stats->success_frames++;
+
+       switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+       case RATE_MCS_CHAN_WIDTH_20:
+               stats->bw_20_frames++;
+               break;
+       case RATE_MCS_CHAN_WIDTH_40:
+               stats->bw_40_frames++;
+               break;
+       case RATE_MCS_CHAN_WIDTH_80:
+               stats->bw_80_frames++;
+               break;
+       default:
+               WARN_ONCE(1, "bad BW. rate 0x%x", rate);
+       }
+
+       if (rate & RATE_MCS_HT_MSK) {
+               stats->ht_frames++;
+               mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
+               nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
+       } else if (rate & RATE_MCS_VHT_MSK) {
+               stats->vht_frames++;
+               mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+               nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
+                      RATE_VHT_MCS_NSS_POS) + 1;
+       } else {
+               stats->legacy_frames++;
+       }
+
+       if (nss == 1)
+               stats->siso_frames++;
+       else if (nss == 2)
+               stats->mimo2_frames++;
+
+       if (rate & RATE_MCS_SGI_MSK)
+               stats->sgi_frames++;
+       else
+               stats->ngi_frames++;
+
+       stats->last_rates[stats->last_frame_idx] = rate;
+       stats->last_frame_idx = (stats->last_frame_idx + 1) %
+               ARRAY_SIZE(stats->last_rates);
+
+       spin_unlock(&mvm->drv_stats_lock);
+}
+#endif
+
 /*
  * Called after adding a new station to initialize rate scaling
  */
@@ -2265,8 +2342,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        lq_sta->lq.sta_id = sta_priv->sta_id;
 
        for (j = 0; j < LQ_SIZE; j++)
-               for (i = 0; i < IWL_RATE_COUNT; i++)
-                       rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+               rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
 
        lq_sta->flush_timer = 0;
 
@@ -2320,7 +2396,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        /* These values will be overridden later */
        lq_sta->lq.single_stream_ant_msk =
-               first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
+               first_antenna(mvm->fw->valid_tx_ant);
        lq_sta->lq.dual_stream_ant_msk = ANT_AB;
 
        /* as default allow aggregation for all tids */
@@ -2335,7 +2411,9 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 #ifdef CONFIG_MAC80211_DEBUGFS
        lq_sta->dbg_fixed_rate = 0;
 #endif
-
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
+#endif
        rs_initialize_lq(mvm, sta, lq_sta, band, init);
 }
 
@@ -2446,7 +2524,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
 
        memcpy(&rate, initial_rate, sizeof(rate));
 
-       valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
+       valid_tx_ant = mvm->fw->valid_tx_ant;
 
        if (is_siso(&rate)) {
                num_rates = RS_INITIAL_SISO_NUM_RATES;
@@ -2523,7 +2601,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
 
        if (sta)
                lq_cmd->agg_time_limit =
-                       cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
+                       cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2547,7 +2625,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
 }
 
 #ifdef CONFIG_MAC80211_DEBUGFS
-static int rs_pretty_print_rate(char *buf, const u32 rate)
+int rs_pretty_print_rate(char *buf, const u32 rate)
 {
 
        char *type, *bw;
@@ -2596,7 +2674,7 @@ static int rs_pretty_print_rate(char *buf, const u32 rate)
        return sprintf(buf, "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s\n",
                       type, rs_pretty_ant(ant), bw, mcs, nss,
                       (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
-                      (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+                      (rate & RATE_MCS_HT_STBC_MSK) ? "STBC " : "",
                       (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
                       (rate & RATE_MCS_BF_MSK) ? "BF " : "",
                       (rate & RATE_MCS_ZLF_MSK) ? "ZLF " : "");
@@ -2677,9 +2755,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
        desc += sprintf(buff+desc, "fixed rate 0x%X\n",
                        lq_sta->dbg_fixed_rate);
        desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
-           (iwl_fw_valid_tx_ant(mvm->fw) & ANT_A) ? "ANT_A," : "",
-           (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
-           (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
+           (mvm->fw->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+           (mvm->fw->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+           (mvm->fw->valid_tx_ant & ANT_C) ? "ANT_C" : "");
        desc += sprintf(buff+desc, "lq type %s\n",
                        (is_legacy(rate)) ? "legacy" :
                        is_vht(rate) ? "VHT" : "HT");
@@ -2815,8 +2893,8 @@ static void rs_rate_init_stub(void *mvm_r,
                              struct ieee80211_sta *sta, void *mvm_sta)
 {
 }
-static struct rate_control_ops rs_mvm_ops = {
-       .module = NULL,
+
+static const struct rate_control_ops rs_mvm_ops = {
        .name = RS_NAME,
        .tx_status = rs_tx_status,
        .get_rate = rs_get_rate,
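
The split of rs_collect_tx_data() above keeps the bounds check in the public wrapper while _rs_collect_tx_data() updates the per-rate history window, whose data field holds a bitmask of recent TX outcomes. A loose, self-contained sketch of that kind of sliding success window follows; the 62-sample depth, the field names and the eviction details are assumptions made for illustration, not the driver's definitions.

/* Illustrative sketch of a sliding TX-success window, modelled loosely on
 * the per-rate history that _rs_collect_tx_data() maintains.  All names
 * and the 62-sample window depth are assumptions for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_WINDOW_SIZE 62          /* assumed window depth */

struct example_window {
	uint64_t data;                  /* bitmask of recent outcomes  */
	int counter;                    /* samples currently held      */
	int success_counter;            /* how many of them succeeded  */
};

/* Push one TX attempt into the window, evicting the oldest sample. */
static void example_window_add(struct example_window *w, int success)
{
	const uint64_t mask = (1ULL << EXAMPLE_WINDOW_SIZE) - 1;
	const uint64_t oldest = 1ULL << (EXAMPLE_WINDOW_SIZE - 1);

	if (w->counter >= EXAMPLE_WINDOW_SIZE) {
		/* the oldest sample is about to fall out of the window */
		if (w->data & oldest)
			w->success_counter--;
	} else {
		w->counter++;
	}

	w->data = (w->data << 1) & mask;
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
}

int main(void)
{
	struct example_window w = { 0 };
	int i;

	/* 70 attempts, every third one fails */
	for (i = 0; i < 70; i++)
		example_window_add(&w, (i % 3) != 0);

	printf("samples=%d successes=%d success%%=%d\n",
	       w.counter, w.success_counter,
	       100 * w.success_counter / w.counter);
	return 0;
}
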
index 7bc6404f6986f4b0babca6c5d8c17bc94343e0f9..3332b396011e3ce6866b9544068266506cb5a811 100644 (file)
@@ -277,7 +277,7 @@ enum rs_column {
 struct iwl_scale_tbl_info {
        struct rs_rate rate;
        enum rs_column column;
-       s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
+       const u16 *expected_tpt;        /* throughput metrics; expected_tpt_G, etc. */
        struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
 };
 
index a85b60f7e67e47e24c842f65f726794dd32dfddd..6061553a5e444956c7b5d626695a2950fb1f3fd1 100644 (file)
@@ -77,6 +77,15 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 
        memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
        mvm->ampdu_ref++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+               spin_lock(&mvm->drv_stats_lock);
+               mvm->drv_rx_stats.ampdu_count++;
+               spin_unlock(&mvm->drv_stats_lock);
+       }
+#endif
+
        return 0;
 }
 
@@ -129,22 +138,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
                              struct ieee80211_rx_status *rx_status)
 {
        int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
-       int rssi_all_band_a, rssi_all_band_b;
-       u32 agc_a, agc_b, max_agc;
+       u32 agc_a, agc_b;
        u32 val;
 
        val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
        agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
        agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
-       max_agc = max_t(u32, agc_a, agc_b);
 
        val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
        rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
        rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
-       rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
-                               IWL_OFDM_RSSI_ALLBAND_A_POS;
-       rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
-                               IWL_OFDM_RSSI_ALLBAND_B_POS;
 
        /*
         * dBm = rssi dB - agc dB - constant.
@@ -364,31 +367,43 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                rx_status.flag |= RX_FLAG_40MHZ;
                break;
        case RATE_MCS_CHAN_WIDTH_80:
-               rx_status.flag |= RX_FLAG_80MHZ;
+               rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
                break;
        case RATE_MCS_CHAN_WIDTH_160:
-               rx_status.flag |= RX_FLAG_160MHZ;
+               rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
                break;
        }
        if (rate_n_flags & RATE_MCS_SGI_MSK)
                rx_status.flag |= RX_FLAG_SHORT_GI;
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                rx_status.flag |= RX_FLAG_HT_GF;
+       if (rate_n_flags & RATE_MCS_LDPC_MSK)
+               rx_status.flag |= RX_FLAG_LDPC;
        if (rate_n_flags & RATE_MCS_HT_MSK) {
+               u8 stbc = (rate_n_flags & RATE_MCS_HT_STBC_MSK) >>
+                               RATE_MCS_STBC_POS;
                rx_status.flag |= RX_FLAG_HT;
                rx_status.rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+               rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
        } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               u8 stbc = (rate_n_flags & RATE_MCS_VHT_STBC_MSK) >>
+                               RATE_MCS_STBC_POS;
                rx_status.vht_nss =
                        ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
                                                RATE_VHT_MCS_NSS_POS) + 1;
                rx_status.rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
                rx_status.flag |= RX_FLAG_VHT;
+               rx_status.flag |= stbc << RX_FLAG_STBC_SHIFT;
        } else {
                rx_status.rate_idx =
                        iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
                                                            rx_status.band);
        }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       iwl_mvm_update_frame_stats(mvm, &mvm->drv_rx_stats, rate_n_flags,
+                                  rx_status.flag & RX_FLAG_AMPDU_DETAILS);
+#endif
        iwl_mvm_pass_packet_to_mac80211(mvm, hdr, len, ampdu_status,
                                        rxb, &rx_status);
        return 0;
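
iwl_mvm_calc_rssi() above now keeps only the per-chain energy and AGC readings and drops the unused all-band values; its in-code comment gives the relationship dBm = rssi dB - agc dB - constant. The following is a minimal sketch of that arithmetic with a made-up calibration offset and fabricated chain readings; it is not the driver's actual conversion.

/* Illustrative only: per-chain RSSI-to-dBm conversion following the
 * "dBm = rssi dB - agc dB - constant" relationship noted in
 * iwl_mvm_calc_rssi().  EXAMPLE_RSSI_OFFSET and the chain readings are
 * placeholders, not the driver's calibration values.
 */
#include <stdio.h>

#define EXAMPLE_RSSI_OFFSET 44   /* assumed constant for the example */

static int example_rssi_to_dbm(int rssi, int agc)
{
	return rssi - agc - EXAMPLE_RSSI_OFFSET;
}

int main(void)
{
	int rssi_a = 30, agc_a = 36;   /* fabricated chain-A readings */
	int rssi_b = 25, agc_b = 40;   /* fabricated chain-B readings */
	int a = example_rssi_to_dbm(rssi_a, agc_a);
	int b = example_rssi_to_dbm(rssi_b, agc_b);

	/* the stronger chain is what gets reported as the signal level */
	printf("chain A %d dBm, chain B %d dBm, max %d dBm\n",
	       a, b, a > b ? a : b);
	return 0;
}
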
index 742afc429c946c0c5defa31de5f7d720071d2485..c91dc8498852c46653cc43fddb57c382d3d7f3f0 100644 (file)
 
 #define IWL_PLCP_QUIET_THRESH 1
 #define IWL_ACTIVE_QUIET_TIME 10
-#define LONG_OUT_TIME_PERIOD 600
-#define SHORT_OUT_TIME_PERIOD 200
-#define SUSPEND_TIME_PERIOD 100
+
+struct iwl_mvm_scan_params {
+       u32 max_out_time;
+       u32 suspend_time;
+       bool passive_fragmented;
+       struct _dwell {
+               u16 passive;
+               u16 active;
+       } dwell[IEEE80211_NUM_BANDS];
+};
 
 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 {
@@ -82,7 +89,7 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
        if (mvm->scan_rx_ant != ANT_NONE)
                rx_ant = mvm->scan_rx_ant;
        else
-               rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+               rx_ant = mvm->fw->valid_rx_ant;
        rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
        rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
        rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -90,24 +97,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
        return cpu_to_le16(rx_chain);
 }
 
-static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
-                                              u32 flags, bool is_assoc)
-{
-       if (!is_assoc)
-               return 0;
-       if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
-               return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
-       return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
-}
-
-static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
-                                              bool is_assoc)
-{
-       if (!is_assoc)
-               return 0;
-       return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
-}
-
 static inline __le32
 iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
 {
@@ -124,7 +113,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
        u32 tx_ant;
 
        mvm->scan_last_antenna_idx =
-               iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+               iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
                                     mvm->scan_last_antenna_idx);
        tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
 
@@ -181,15 +170,14 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
 
 static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
                                       struct cfg80211_scan_request *req,
-                                      bool basic_ssid)
+                                      bool basic_ssid,
+                                      struct iwl_mvm_scan_params *params)
 {
-       u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
-       u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
-                                                   req->n_ssids);
        struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
                (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
        int i;
        int type = BIT(req->n_ssids) - 1;
+       enum ieee80211_band band = req->channels[0]->band;
 
        if (!basic_ssid)
                type |= BIT(req->n_ssids);
@@ -199,8 +187,8 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
                chan->type = cpu_to_le32(type);
                if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
                        chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
-               chan->active_dwell = cpu_to_le16(active_dwell);
-               chan->passive_dwell = cpu_to_le16(passive_dwell);
+               chan->active_dwell = cpu_to_le16(params->dwell[band].active);
+               chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
                chan->iteration_count = cpu_to_le16(1);
                chan++;
        }
@@ -267,13 +255,76 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
        return (u16)len;
 }
 
-static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
-                                      struct ieee80211_vif *vif)
+static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
+                                           struct ieee80211_vif *vif)
 {
-       bool *is_assoc = data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       bool *global_bound = data;
+
+       if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
+               *global_bound = true;
+}
+
+static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
+                                    struct ieee80211_vif *vif,
+                                    int n_ssids,
+                                    struct iwl_mvm_scan_params *params)
+{
+       bool global_bound = false;
+       enum ieee80211_band band;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                           IEEE80211_IFACE_ITER_NORMAL,
+                                           iwl_mvm_scan_condition_iterator,
+                                           &global_bound);
+       /*
+        * Under low-latency traffic, passive scan is fragmented, i.e. the
+        * dwell on a particular channel is split into fragments. Each
+        * fragment dwells for 20ms and the fragment period is 105ms;
+        * skipping to the next channel is delayed by that same 105ms.
+        * The suspend_time parameter, which covers both the fragment and
+        * the channel-skipping periods, is therefore set to 105ms. This
+        * value keeps the overall passive scan duration reasonably short.
+        * max_out_time is set to 70ms in this case, so for active scanning
+        * the operating channel is left for 70ms, while for passive
+        * scanning it is still left for only 20ms (one fragment dwell).
+        */
+       if (global_bound) {
+               if (!iwl_mvm_low_latency(mvm)) {
+                       params->suspend_time = ieee80211_tu_to_usec(100);
+                       params->max_out_time = ieee80211_tu_to_usec(600);
+               } else {
+                       params->suspend_time = ieee80211_tu_to_usec(105);
+                       /* P2P doesn't support fragmented passive scan, so
+                        * configure max_out_time to be at least longest dwell
+                        * time for passive scan.
+                        */
+                       if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+                               params->max_out_time = ieee80211_tu_to_usec(70);
+                               params->passive_fragmented = true;
+                       } else {
+                               u32 passive_dwell;
 
-       if (vif->bss_conf.assoc)
-               *is_assoc = true;
+                               /*
+                                * Use band G so that passive channel dwell time
+                                * will be assigned with maximum value.
+                                */
+                               band = IEEE80211_BAND_2GHZ;
+                               passive_dwell = iwl_mvm_get_passive_dwell(band);
+                               params->max_out_time =
+                                       ieee80211_tu_to_usec(passive_dwell);
+                       }
+               }
+       }
+
+       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+               if (params->passive_fragmented)
+                       params->dwell[band].passive = 20;
+               else
+                       params->dwell[band].passive =
+                               iwl_mvm_get_passive_dwell(band);
+               params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+                                                                     n_ssids);
+       }
 }
 
 int iwl_mvm_scan_request(struct iwl_mvm *mvm,
@@ -288,13 +339,13 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
        };
        struct iwl_scan_cmd *cmd = mvm->scan_cmd;
-       bool is_assoc = false;
        int ret;
        u32 status;
        int ssid_len = 0;
        u8 *ssid = NULL;
        bool basic_ssid = !(mvm->fw->ucode_capa.flags &
                           IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
+       struct iwl_mvm_scan_params params = {};
 
        lockdep_assert_held(&mvm->mutex);
        BUG_ON(mvm->scan_cmd == NULL);
@@ -304,17 +355,18 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
        memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
               mvm->fw->ucode_capa.max_probe_length +
               (MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
-       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-                                           IEEE80211_IFACE_ITER_NORMAL,
-                                           iwl_mvm_vif_assoc_iterator,
-                                           &is_assoc);
+
        cmd->channel_count = (u8)req->n_channels;
        cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
        cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
        cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
-       cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
-                                                     is_assoc);
-       cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
+
+       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+       cmd->max_out_time = cpu_to_le32(params.max_out_time);
+       cmd->suspend_time = cpu_to_le32(params.suspend_time);
+       if (params.passive_fragmented)
+               cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
+
        cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
        cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
                                        MAC_FILTER_IN_BEACON);
@@ -360,7 +412,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                            req->ie, req->ie_len,
                            mvm->fw->ucode_capa.max_probe_length));
 
-       iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
+       iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
 
        cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
                le16_to_cpu(cmd->tx_cmd.len) +
@@ -402,12 +454,17 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scan_complete_notif *notif = (void *)pkt->data;
 
+       lockdep_assert_held(&mvm->mutex);
+
        IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
                       notif->status, notif->scanned_channels);
 
-       mvm->scan_status = IWL_MVM_SCAN_NONE;
+       if (mvm->scan_status == IWL_MVM_SCAN_OS)
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
        ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
 
+       iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
        return 0;
 }
 
@@ -464,7 +521,7 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
        };
 }
 
-void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
+int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_scan_abort;
        static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
@@ -472,12 +529,13 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
        int ret;
 
        if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-               return;
+               return 0;
 
        if (iwl_mvm_is_radio_killed(mvm)) {
                ieee80211_scan_completed(mvm->hw, true);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
                mvm->scan_status = IWL_MVM_SCAN_NONE;
-               return;
+               return 0;
        }
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
@@ -488,18 +546,15 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
        ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
-               /* mac80211's state will be cleaned in the fw_restart flow */
+               /* mac80211's state will be cleaned in the nic_restart flow */
                goto out_remove_notif;
        }
 
-       ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
-       if (ret)
-               IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
-
-       return;
+       return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
 
 out_remove_notif:
        iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
+       return ret;
 }
 
 int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
@@ -509,12 +564,18 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
 
+       /* scan status must be locked for proper checking */
+       lockdep_assert_held(&mvm->mutex);
+
        IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
                       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                       "completed" : "aborted");
 
-       mvm->scan_status = IWL_MVM_SCAN_NONE;
-       ieee80211_sched_scan_stopped(mvm->hw);
+       /* only call mac80211 completion if the stop was initiated by FW */
+       if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
+               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               ieee80211_sched_scan_stopped(mvm->hw);
+       }
 
        return 0;
 }
@@ -545,14 +606,9 @@ static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
 static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
                               struct ieee80211_vif *vif,
                               struct cfg80211_sched_scan_request *req,
-                              struct iwl_scan_offload_cmd *scan)
+                              struct iwl_scan_offload_cmd *scan,
+                              struct iwl_mvm_scan_params *params)
 {
-       bool is_assoc = false;
-
-       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
-                                           IEEE80211_IFACE_ITER_NORMAL,
-                                           iwl_mvm_vif_assoc_iterator,
-                                           &is_assoc);
        scan->channel_count =
                mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
                mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
@@ -560,13 +616,17 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
        scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
        scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
        scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
-       scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
-                                                      is_assoc);
-       scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
+
+       scan->max_out_time = cpu_to_le32(params->max_out_time);
+       scan->suspend_time = cpu_to_le32(params->suspend_time);
+
        scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
                                          MAC_FILTER_IN_BEACON);
        scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
        scan->rep_count = cpu_to_le32(1);
+
+       if (params->passive_fragmented)
+               scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
 }
 
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -596,6 +656,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
         * config match list.
         */
        for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+               /* skip empty SSID matchsets */
+               if (!req->match_sets[i].ssid.ssid_len)
+                       continue;
                scan->direct_scan[i].id = WLAN_EID_SSID;
                scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
                memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
@@ -628,12 +691,11 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
                                  struct iwl_scan_channel_cfg *channels,
                                  enum ieee80211_band band,
                                  int *head, int *tail,
-                                 u32 ssid_bitmap)
+                                 u32 ssid_bitmap,
+                                 struct iwl_mvm_scan_params *params)
 {
        struct ieee80211_supported_band *s_band;
-       int n_probes = req->n_ssids;
        int n_channels = req->n_channels;
-       u8 active_dwell, passive_dwell;
        int i, j, index = 0;
        bool partial;
 
@@ -643,8 +705,6 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
         * to scan. So add requested channels to head of the list and others to
         * the end.
        */
-       active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
-       passive_dwell = iwl_mvm_get_passive_dwell(band);
        s_band = &mvm->nvm_data->bands[band];
 
        for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
@@ -668,8 +728,8 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
                channels->channel_number[index] =
                        cpu_to_le16(ieee80211_frequency_to_channel(
                                        s_band->channels[i].center_freq));
-               channels->dwell_time[index][0] = active_dwell;
-               channels->dwell_time[index][1] = passive_dwell;
+               channels->dwell_time[index][0] = params->dwell[band].active;
+               channels->dwell_time[index][1] = params->dwell[band].passive;
 
                channels->iter_count[index] = cpu_to_le16(1);
                channels->iter_interval[index] = 0;
@@ -698,7 +758,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                              struct cfg80211_sched_scan_request *req,
                              struct ieee80211_sched_scan_ies *ies)
 {
-       int supported_bands = 0;
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
@@ -712,22 +771,19 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                .id = SCAN_OFFLOAD_CONFIG_CMD,
                .flags = CMD_SYNC,
        };
+       struct iwl_mvm_scan_params params = {};
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (band_2ghz)
-               supported_bands++;
-       if (band_5ghz)
-               supported_bands++;
-
        cmd_len = sizeof(struct iwl_scan_offload_cfg) +
-                               supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+                 2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
 
        scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
        if (!scan_cfg)
                return -ENOMEM;
 
-       iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
+       iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
        scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
 
        iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
@@ -739,7 +795,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                                              scan_cfg->data);
                iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
                                      IEEE80211_BAND_2GHZ, &head, &tail,
-                                     ssid_bitmap);
+                                     ssid_bitmap, &params);
        }
        if (band_5ghz) {
                iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
@@ -749,7 +805,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                                                SCAN_OFFLOAD_PROBE_REQ_SIZE);
                iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
                                      IEEE80211_BAND_5GHZ, &head, &tail,
-                                     ssid_bitmap);
+                                     ssid_bitmap, &params);
        }
 
        cmd.data[0] = scan_cfg;
@@ -889,26 +945,49 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
                 * microcode has notified us that a scan is completed.
                 */
                IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
-               ret = -EIO;
+               ret = -ENOENT;
        }
 
        return ret;
 }
 
-void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
 {
        int ret;
+       struct iwl_notification_wait wait_scan_done;
+       static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
 
        lockdep_assert_held(&mvm->mutex);
 
        if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
                IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
-               return;
+               return 0;
        }
 
+       iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+                                  scan_done_notif,
+                                  ARRAY_SIZE(scan_done_notif),
+                                  NULL, NULL);
+
        ret = iwl_mvm_send_sched_scan_abort(mvm);
-       if (ret)
+       if (ret) {
                IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
-       else
-               IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+               iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+               return ret;
+       }
+
+       IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+
+       ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+       if (ret)
+               return ret;
+
+       /*
+        * Clear the scan status so the next scan requests will succeed. This
+        * also ensures the Rx handler doesn't do anything, as the scan was
+        * stopped from above.
+        */
+       mvm->scan_status = IWL_MVM_SCAN_NONE;
+
+       return 0;
 }
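
iwl_mvm_scan_calc_params() above expresses max_out_time and suspend_time in microseconds via ieee80211_tu_to_usec(). One 802.11 time unit (TU) is 1024 microseconds, so the values chosen for the normal and low-latency cases come out as shown below; the local helper merely mirrors that conversion for illustration and is not the mac80211 function itself.

/* Illustration of the TU values chosen in iwl_mvm_scan_calc_params().
 * One 802.11 time unit (TU) is 1024 microseconds; this local helper
 * stands in for mac80211's ieee80211_tu_to_usec().
 */
#include <stdio.h>

static unsigned int example_tu_to_usec(unsigned int tu)
{
	return tu * 1024;
}

int main(void)
{
	/* bound interface, normal traffic */
	printf("suspend_time 100 TU = %u us\n", example_tu_to_usec(100));
	printf("max_out_time 600 TU = %u us\n", example_tu_to_usec(600));

	/* bound interface, low-latency traffic (fragmented passive scan) */
	printf("suspend_time 105 TU = %u us\n", example_tu_to_usec(105));
	printf("max_out_time  70 TU = %u us\n", example_tu_to_usec(70));

	return 0;
}
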
index 3397f59cd4e4deb532be48e31c2906b45101f6a5..f339ef8842508774e2ff7d51c9bdce069e014a1b 100644 (file)
 #include "sta.h"
 #include "rs.h"
 
-static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
                                         struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
 {
        memset(cmd_v5, 0, sizeof(*cmd_v5));
 
-       cmd_v5->add_modify = cmd_v6->add_modify;
-       cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
-       cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
-       memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
-       cmd_v5->sta_id = cmd_v6->sta_id;
-       cmd_v5->modify_mask = cmd_v6->modify_mask;
-       cmd_v5->station_flags = cmd_v6->station_flags;
-       cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
-       cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
-       cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
-       cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
-       cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
-       cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
-       cmd_v5->assoc_id = cmd_v6->assoc_id;
-       cmd_v5->beamform_flags = cmd_v6->beamform_flags;
-       cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+       cmd_v5->add_modify = cmd_v7->add_modify;
+       cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
+       cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
+       memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
+       cmd_v5->sta_id = cmd_v7->sta_id;
+       cmd_v5->modify_mask = cmd_v7->modify_mask;
+       cmd_v5->station_flags = cmd_v7->station_flags;
+       cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
+       cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
+       cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
+       cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
+       cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
+       cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
+       cmd_v5->assoc_id = cmd_v7->assoc_id;
+       cmd_v5->beamform_flags = cmd_v7->beamform_flags;
+       cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
 }
 
 static void
@@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
 }
 
 static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
-                                          struct iwl_mvm_add_sta_cmd_v6 *cmd,
+                                          struct iwl_mvm_add_sta_cmd_v7 *cmd,
                                           int *status)
 {
        struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
                return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
                                                   cmd, status);
 
-       iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+       iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
 
        return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
                                           &cmd_v5, status);
 }
 
 static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
-                                   struct iwl_mvm_add_sta_cmd_v6 *cmd)
+                                   struct iwl_mvm_add_sta_cmd_v7 *cmd)
 {
        struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
 
@@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
                return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
                                            sizeof(*cmd), cmd);
 
-       iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+       iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
 
        return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
                                    &cmd_v5);
@@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
                                    &sta_cmd);
 }
 
-static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+                                   enum nl80211_iftype iftype)
 {
        int sta_id;
+       u32 reserved_ids = 0;
 
+       BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
        WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
 
        lockdep_assert_held(&mvm->mutex);
 
+       /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
+       if (iftype != NL80211_IFTYPE_STATION)
+               reserved_ids = BIT(0);
+
        /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
-       for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+       for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+               if (BIT(sta_id) & reserved_ids)
+                       continue;
+
                if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                               lockdep_is_held(&mvm->mutex)))
                        return sta_id;
+       }
        return IWL_MVM_STATION_COUNT;
 }
 
@@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
+       struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;
@@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
-               sta_id = iwl_mvm_find_free_sta_id(mvm);
+               sta_id = iwl_mvm_find_free_sta_id(mvm,
+                                                 ieee80211_vif_type_p2p(vif));
        else
                sta_id = mvm_sta->sta_id;
 
@@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
 {
-       struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
        int ret;
        u32 status;
 
@@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 
                /* unassoc - go ahead - remove the AP STA now */
                mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+               /* clear d0i3_ap_sta_id if no longer relevant */
+               if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+                       mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
        }
 
        /*
@@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
-                            u32 qmask)
+                            u32 qmask, enum nl80211_iftype iftype)
 {
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
-               sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+               sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
                if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
                        return -ENOSPC;
        }
@@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
 {
-       struct iwl_mvm_add_sta_cmd_v6 cmd;
+       struct iwl_mvm_add_sta_cmd_v7 cmd;
        int ret;
        u32 status;
 
        lockdep_assert_held(&mvm->mutex);
 
-       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
+       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
        cmd.sta_id = sta->sta_id;
        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                             color));
@@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
        lockdep_assert_held(&mvm->mutex);
 
        /* Add the aux station, but without any queues */
-       ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+       ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
+                                      NL80211_IFTYPE_UNSPECIFIED);
        if (ret)
                return ret;
 
@@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        lockdep_assert_held(&mvm->mutex);
 
        qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
-       ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+       ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
+                                      ieee80211_vif_type_p2p(vif));
        if (ret)
                return ret;
 
@@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
        int ret;
        u32 status;
 
@@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                              int tid, u8 queue, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+       struct iwl_mvm_add_sta_cmd_v7 cmd = {};
        int ret;
        u32 status;
 
@@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        return ret;
 }
 
-static const u8 tid_to_ac[] = {
+const u8 tid_to_mac80211_ac[] = {
        IEEE80211_AC_BE,
        IEEE80211_AC_BK,
        IEEE80211_AC_BK,
@@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
        IEEE80211_AC_VO,
 };
 
+static const u8 tid_to_ucode_ac[] = {
+       AC_BE,
+       AC_BK,
+       AC_BK,
+       AC_BE,
+       AC_VI,
+       AC_VI,
+       AC_VO,
+       AC_VO,
+};
+
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta, u16 tid, u16 *ssn)
 {
@@ -873,10 +902,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return -EIO;
        }
 
+       spin_lock_bh(&mvmsta->lock);
+
+       /* possible race condition - we entered D0i3 while starting agg */
+       if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
+               spin_unlock_bh(&mvmsta->lock);
+               IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
+               return -EIO;
+       }
+
        /* the new tx queue is still connected to the same mac80211 queue */
-       mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+       mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
 
-       spin_lock_bh(&mvmsta->lock);
        tid_data = &mvmsta->tid_data[tid];
        tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
        tid_data->txq_id = txq_id;
@@ -916,7 +953,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        tid_data->ssn = 0xffff;
        spin_unlock_bh(&mvmsta->lock);
 
-       fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+       fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
        if (ret)
@@ -1411,7 +1448,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd_v6 cmd = {
+       struct iwl_mvm_add_sta_cmd_v7 cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1427,28 +1464,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
-                                      u16 cnt)
+                                      u16 cnt, u16 tids, bool more_data,
+                                      bool agg)
 {
-       u16 sleep_state_flags =
-               (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
-                       STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_mvm_add_sta_cmd_v6 cmd = {
+       struct iwl_mvm_add_sta_cmd_v7 cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
                .sleep_tx_count = cpu_to_le16(cnt),
                .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
-               /*
-                * Same modify mask for sleep_tx_count and sleep_state_flags so
-                * we must set the sleep_state_flags too.
-                */
-               .sleep_state_flags = cpu_to_le16(sleep_state_flags),
        };
-       int ret;
+       int tid, ret;
+       unsigned long _tids = tids;
+
+       /* convert TIDs to ACs - we don't support TSPEC so that's OK
+        * Note that this field is reserved and unused by firmware not
+        * supporting GO uAPSD, so it's safe to always do this.
+        */
+       for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+               cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+       /* If we're releasing frames from aggregation queues then check if
+        * all the queues combined that we're releasing frames from have
+        *  - more frames than the service period, in which case more_data
+        *    needs to be set
+        *  - fewer than 'cnt' frames, in which case we need to adjust the
+        *    firmware command (but do that unconditionally)
+        */
+       if (agg) {
+               int remaining = cnt;
+
+               spin_lock_bh(&mvmsta->lock);
+               for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+                       struct iwl_mvm_tid_data *tid_data;
+                       u16 n_queued;
+
+                       tid_data = &mvmsta->tid_data[tid];
+                       if (WARN(tid_data->state != IWL_AGG_ON &&
+                                tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
+                                "TID %d state is %d\n",
+                                tid, tid_data->state)) {
+                               spin_unlock_bh(&mvmsta->lock);
+                               ieee80211_sta_eosp(sta);
+                               return;
+                       }
+
+                       n_queued = iwl_mvm_tid_queued(tid_data);
+                       if (n_queued > remaining) {
+                               more_data = true;
+                               remaining = 0;
+                               break;
+                       }
+                       remaining -= n_queued;
+               }
+               spin_unlock_bh(&mvmsta->lock);
+
+               cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
+               if (WARN_ON(cnt - remaining == 0)) {
+                       ieee80211_sta_eosp(sta);
+                       return;
+               }
+       }
+
+       /* Note: this is ignored by firmware not supporting GO uAPSD */
+       if (more_data)
+               cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+
+       if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+               mvmsta->next_status_eosp = true;
+               cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+       } else {
+               cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+       }
 
-       /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
        ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
+
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                         struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+       struct ieee80211_sta *sta;
+       u32 sta_id = le32_to_cpu(notif->sta_id);
+
+       if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+               return 0;
+
+       rcu_read_lock();
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+       if (!IS_ERR_OR_NULL(sta))
+               ieee80211_sta_eosp(sta);
+       rcu_read_unlock();
+
+       return 0;
+}
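
The accounting added to iwl_mvm_sta_modify_sleep_tx_count() above walks the TIDs being released, subtracts what is queued from the requested count, and raises more_data when a queue holds more than what remains of the service period. Below is a self-contained sketch of that bookkeeping with simplified, stand-in types and fabricated queue depths; it is not the driver's code.

/* Standalone sketch of the "remaining vs. queued" accounting used when
 * releasing frames from aggregation queues for a sleeping station.
 * Types and numbers are illustrative stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_TID_COUNT 8

/* frames currently queued per TID (fabricated numbers) */
static const unsigned int example_queued[EXAMPLE_TID_COUNT] = {
	3, 0, 0, 5, 0, 0, 2, 0
};

/* Decide how many frames to actually release and whether the peer must
 * be told that more data remains buffered after the service period.
 */
static unsigned int example_release(unsigned int tids, unsigned int cnt,
				    bool *more_data)
{
	unsigned int remaining = cnt;
	int tid;

	for (tid = 0; tid < EXAMPLE_TID_COUNT; tid++) {
		if (!(tids & (1u << tid)))
			continue;
		if (example_queued[tid] > remaining) {
			*more_data = true;
			remaining = 0;
			break;
		}
		remaining -= example_queued[tid];
	}

	/* release at most what is actually queued */
	return cnt - remaining;
}

int main(void)
{
	bool more_data = false;
	/* peer asks for 4 frames from TIDs 0 and 3 (8 frames queued) */
	unsigned int released = example_release((1u << 0) | (1u << 3), 4,
						&more_data);

	printf("release %u frame(s), more_data=%d\n", released, more_data);
	return 0;
}
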
index 4968d0237dc5455d6aa29f3fa5f03e470effd596..2ed84c421481c79dfa1e847b1940ac78da8a9d9c 100644 (file)
@@ -195,24 +195,33 @@ struct iwl_mvm;
 /**
  * DOC: AP mode - PS
  *
- * When a station is asleep, the fw will set it as "asleep". All the
- * non-aggregation frames to that station will be dropped by the fw
- * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
  * AMPDUs are in a separate queue that is stopped by the fw. We just need to
- * let mac80211 know how many frames we have in these queues so that it can
+ * let mac80211 know when there are frames in these queues so that it can
  * properly handle trigger frames.
- * When the a trigger frame is received, mac80211 tells the driver to send
- * frames from the AMPDU queues or AC queue depending on which queue are
- * delivery-enabled and what TID has frames to transmit (Note that mac80211 has
- * all the knowledege since all the non-agg frames are buffered / filtered, and
- * the driver tells mac80211 about agg frames). The driver needs to tell the fw
- * to let frames out even if the station is asleep. This is done by
- * %iwl_mvm_sta_modify_sleep_tx_count.
- * When we receive a frame from that station with PM bit unset, the
- * driver needs to let the fw know that this station isn't alseep any more.
- * This is done by %iwl_mvm_sta_modify_ps_wake.
- *
- * TODO - EOSP handling
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledege since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames). The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this (and all other cases) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
  */
 
 /**
@@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
        u16 ssn;
 };
 
+static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
+{
+       return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+                               tid_data->next_reclaimed);
+}
+
 /**
  * struct iwl_mvm_sta - representation of a station in the driver
  * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
  * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
  *     tid.
  * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
+ *     by debugfs.
  * @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ *     we need to signal the EOSP
  * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@@ -287,7 +306,9 @@ struct iwl_mvm_sta {
        u32 mac_id_n_color;
        u16 tid_disable_agg;
        u8 max_agg_bufsize;
+       bool bt_reduced_txpower_dbg;
        bool bt_reduced_txpower;
+       bool next_status_eosp;
        spinlock_t lock;
        struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
        struct iwl_lq_sta lq_sta;
@@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
                             struct ieee80211_sta *sta, u32 iv32,
                             u16 *phase1key);
 
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                         struct iwl_rx_cmd_buffer *rxb,
+                         struct iwl_device_cmd *cmd);
+
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start);
@@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
-                            u32 qmask);
+                            u32 qmask, enum nl80211_iftype iftype);
 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
                             struct iwl_mvm_int_sta *sta);
 int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                                       struct ieee80211_sta *sta,
                                       enum ieee80211_frame_release_type reason,
-                                      u16 cnt);
+                                      u16 cnt, u16 tids, bool more_data,
+                                      bool agg);
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain);
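
The iwl_mvm_tid_queued() helper added above relies on 12-bit IEEE 802.11 sequence-number arithmetic, so the queued-frame count stays correct across the 4095 -> 0 wraparound. A stand-alone sketch of that arithmetic follows; the constants mirror mac80211's IEEE80211_SN_* helpers but are redefined locally for illustration.

/* Stand-alone sketch of the sequence-number distance behind iwl_mvm_tid_queued(). */
#include <stdint.h>
#include <stdio.h>

#define SN_MASK 0x0fff                      /* sequence numbers are 12 bits */

static uint16_t sn_sub(uint16_t a, uint16_t b)
{
        return (a - b) & SN_MASK;           /* distance modulo 4096 */
}

static uint16_t seq_to_sn(uint16_t seq_ctrl)
{
        return (seq_ctrl >> 4) & SN_MASK;   /* drop the 4-bit fragment field */
}

int main(void)
{
        uint16_t next_reclaimed = 4090;
        uint16_t seq_number = seq_to_sn(10 << 4);   /* wrapped past 4095 */

        /* 4090..4095 plus 0..9 -> 16 frames still queued */
        printf("queued = %u\n", (unsigned)sn_sub(seq_number, next_reclaimed));
        return 0;
}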
 
index b4c2abaa297bd6cf760d4cd3adaa4f0468dcdad0..61331245ad9324f29ec5a86f12a3239725619673 100644 (file)
@@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
         * in iwl_mvm_te_handle_notif).
         */
        clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+       iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
 
        /*
         * Of course, our status bit is just as racy as mac80211, so in
@@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
 
                if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
                        set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+                       iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
                        ieee80211_ready_on_channel(mvm->hw);
                }
        } else {
@@ -436,7 +438,8 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
        time_cmd.duration = cpu_to_le32(duration);
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
-                                     TE_V2_NOTIF_HOST_EVENT_END);
+                                     TE_V2_NOTIF_HOST_EVENT_END |
+                                     T2_V2_START_IMMEDIATELY);
 
        iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
@@ -551,7 +554,8 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
        time_cmd.repeat = 1;
        time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
-                                     TE_V2_NOTIF_HOST_EVENT_END);
+                                     TE_V2_NOTIF_HOST_EVENT_END |
+                                     T2_V2_START_IMMEDIATELY);
 
        return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
 }
index 3afa6b6bf83571734eae30e9042c798a0bdb65ed..7a99fa361954e0bc1d5e9e82bf94130b0692ac6f 100644 (file)
@@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
        }
 }
 
-static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
 {
        struct iwl_host_cmd cmd = {
                .id = REPLY_THERMAL_MNG_BACKOFF,
@@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
                .flags = CMD_SYNC,
        };
 
+       backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
        if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
                IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
                               backoff);
@@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
        .support_tx_backoff = true,
 };
 
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
 {
        struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
 
@@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
                tt->params = &iwl7000_tt_params;
 
        tt->throttle = false;
+       tt->min_backoff = min_backoff;
        INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
 }
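
The min_backoff floor added above amounts to a simple clamp: whatever backoff the thermal code requests is raised so it never drops below the platform-provided minimum. A tiny sketch, with an invented helper name and arbitrary values:

/* Sketch of the thermal Tx backoff clamp; names and values are illustrative. */
#include <stdio.h>

static unsigned int clamp_backoff(unsigned int requested, unsigned int min_backoff)
{
        return requested > min_backoff ? requested : min_backoff;
}

int main(void)
{
        unsigned int min_backoff = 4;       /* platform-provided floor */

        printf("requested 0 -> %u\n", clamp_backoff(0, min_backoff));
        printf("requested 9 -> %u\n", clamp_backoff(9, min_backoff));
        return 0;
}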
 
index 76ee486039d7a082b9081f835e7b132bf08ecbe5..879aeac46cc103112fef914bcc2b38df9f028b06 100644 (file)
@@ -79,6 +79,7 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        __le16 fc = hdr->frame_control;
        u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
        u32 len = skb->len + FCS_LEN;
+       u8 ac;
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                tx_flags |= TX_CMD_FLG_ACK;
@@ -90,13 +91,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        else if (ieee80211_is_back_req(fc))
                tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
-       /* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
-       if (info->band == IEEE80211_BAND_2GHZ &&
-           (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
-            is_multicast_ether_addr(hdr->addr1) ||
-            ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
-               tx_flags |= TX_CMD_FLG_BT_DIS;
-
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG;
 
@@ -112,6 +106,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
        }
 
+       /* tid_tspec will default to 0 = BE when QOS isn't enabled */
+       ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+       tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
+                       TX_CMD_FLG_BT_PRIO_POS;
+
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
                        tx_cmd->pm_frame_timeout = cpu_to_le16(3);
@@ -122,15 +121,12 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                 * it
                 */
                WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
-       } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+       } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
                tx_cmd->pm_frame_timeout = cpu_to_le16(2);
        } else {
                tx_cmd->pm_frame_timeout = 0;
        }
 
-       if (info->flags & IEEE80211_TX_CTL_AMPDU)
-               tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
-
        if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
            !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
                tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
@@ -207,7 +203,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
        rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
 
        mvm->mgmt_last_antenna_idx =
-               iwl_mvm_next_antenna(mvm, iwl_fw_valid_tx_ant(mvm->fw),
+               iwl_mvm_next_antenna(mvm, mvm->fw->valid_tx_ant,
                                     mvm->mgmt_last_antenna_idx);
        rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
 
@@ -377,6 +373,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
        /* From now on, we cannot access info->control */
 
+       /*
+        * we handle that entirely ourselves -- for uAPSD the firmware
+        * will always send a notification, and for PS-Poll responses
+        * we'll notify mac80211 when getting frame status
+        */
+       info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
        spin_lock(&mvmsta->lock);
 
        if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@@ -437,6 +440,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvmsta->lock);
 
+       if ((tid_data->state == IWL_AGG_ON ||
+            tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+           iwl_mvm_tid_queued(tid_data) == 0) {
+               /*
+                * Now that this aggregation queue is empty tell mac80211 so it
+                * knows we no longer have frames buffered for the station on
+                * this TID (for the TIM bitmap calculation.)
+                */
+               ieee80211_sta_set_buffered(sta, tid, false);
+       }
+
        if (tid_data->ssn != tid_data->next_reclaimed)
                return;
 
@@ -680,6 +694,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        iwl_mvm_check_ratid_empty(mvm, sta, tid);
                        spin_unlock_bh(&mvmsta->lock);
                }
+
+               if (mvmsta->next_status_eosp) {
+                       mvmsta->next_status_eosp = false;
+                       ieee80211_sta_eosp(sta);
+               }
        } else {
                mvmsta = NULL;
        }
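
The BT-coex change above replaces the single TX_CMD_FLG_BT_DIS bit with a small priority field shifted to TX_CMD_FLG_BT_PRIO_POS. The sketch below shows the bit-packing shape only; the bit position and the priority values are assumptions for illustration, not the firmware's real layout.

/* Hypothetical sketch of packing a 2-bit BT-coex priority into tx_flags. */
#include <stdint.h>
#include <stdio.h>

#define TX_CMD_FLG_BT_PRIO_POS 12           /* assumed position for the sketch */

enum bt_prio { BT_PRIO_LOW = 0, BT_PRIO_MED = 1, BT_PRIO_HIGH = 2, BT_PRIO_MAX = 3 };

static uint32_t set_bt_prio(uint32_t tx_flags, enum bt_prio prio)
{
        /* clear the 2-bit field, then OR in the new priority */
        tx_flags &= ~(0x3u << TX_CMD_FLG_BT_PRIO_POS);
        return tx_flags | ((uint32_t)prio << TX_CMD_FLG_BT_PRIO_POS);
}

int main(void)
{
        uint32_t flags = set_bt_prio(0, BT_PRIO_HIGH);

        printf("tx_flags = 0x%08x\n", (unsigned)flags);
        return 0;
}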
index 86989df693566aa5b604be092a7910244cf04aee..d619851745a19ba6d3bf605555fcdbd5a09f8341 100644 (file)
@@ -289,8 +289,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
        return last_idx;
 }
 
-static struct {
-       char *name;
+static const struct {
+       const char *name;
        u8 num;
 } advanced_lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
@@ -376,9 +376,67 @@ struct iwl_error_event_table {
        u32 flow_handler;       /* FH read/write pointers, RX credit */
 } __packed;
 
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+       u32 valid;              /* (nonzero) valid, (0) log is empty */
+       u32 error_id;           /* type of error */
+       u32 pc;                 /* program counter */
+       u32 blink1;             /* branch link */
+       u32 blink2;             /* branch link */
+       u32 ilink1;             /* interrupt link */
+       u32 ilink2;             /* interrupt link */
+       u32 data1;              /* error-specific data */
+       u32 data2;              /* error-specific data */
+       u32 line;               /* source code line of error */
+       u32 umac_ver;           /* umac version */
+} __packed;
+
 #define ERROR_START_OFFSET  (1 * sizeof(u32))
 #define ERROR_ELEM_SIZE     (7 * sizeof(u32))
 
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+       struct iwl_trans *trans = mvm->trans;
+       struct iwl_umac_error_event_table table;
+       u32 base;
+
+       base = mvm->umac_error_event_table;
+
+       if (base < 0x800000 || base >= 0x80C000) {
+               IWL_ERR(mvm,
+                       "Not valid error log pointer 0x%08X for %s uCode\n",
+                       base,
+                       (mvm->cur_ucode == IWL_UCODE_INIT)
+                                       ? "Init" : "RT");
+               return;
+       }
+
+       iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+       if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+               IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+               IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+                       mvm->status, table.valid);
+       }
+
+       IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+               desc_lookup(table.error_id));
+       IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
+       IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+       IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+       IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+       IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+       IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+       IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+       IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
+}
+
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 {
        struct iwl_trans *trans = mvm->trans;
@@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
                        base = mvm->fw->inst_errlog_ptr;
        }
 
-       if (base < 0x800000 || base >= 0x80C000) {
+       if (base < 0x800000) {
                IWL_ERR(mvm,
                        "Not valid error log pointer 0x%08X for %s uCode\n",
                        base,
@@ -453,29 +511,31 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
        IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+       if (mvm->support_umac_log)
+               iwl_mvm_dump_umac_error_log(mvm);
 }
 
-void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
 {
        const struct fw_img *img;
-       int ofs, len = 0;
-       u8 *buf;
+       u32 ofs, sram_len;
+       void *sram;
 
-       if (!mvm->ucode_loaded)
+       if (!mvm->ucode_loaded || mvm->fw_error_sram)
                return;
 
        img = &mvm->fw->img[mvm->cur_ucode];
        ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
-       len = img->sec[IWL_UCODE_SECTION_DATA].len;
+       sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
-       buf = kzalloc(len, GFP_ATOMIC);
-       if (!buf)
+       sram = kzalloc(sram_len, GFP_ATOMIC);
+       if (!sram)
                return;
 
-       iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
-       iwl_print_hex_error(mvm->trans, buf, len);
-
-       kfree(buf);
+       iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len);
+       mvm->fw_error_sram = sram;
+       mvm->fw_error_sram_len = sram_len;
 }
 
 /**
@@ -516,15 +576,20 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                         enum ieee80211_smps_mode smps_request)
 {
        struct iwl_mvm_vif *mvmvif;
-       enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+       enum ieee80211_smps_mode smps_mode;
        int i;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* SMPS is irrelevant for NICs that don't have at least 2 RX antenna */
-       if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
+       if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
                return;
 
+       if (vif->type == NL80211_IFTYPE_AP)
+               smps_mode = IEEE80211_SMPS_OFF;
+       else
+               smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
        mvmvif->smps_requests[req_type] = smps_request;
        for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@@ -538,3 +603,44 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        ieee80211_request_smps(vif, smps_mode);
 }
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                              bool value)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int res;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (mvmvif->low_latency == value)
+               return 0;
+
+       mvmvif->low_latency = value;
+
+       res = iwl_mvm_update_quotas(mvm, NULL);
+       if (res)
+               return res;
+
+       iwl_mvm_bt_coex_vif_change(mvm);
+
+       return iwl_mvm_power_update_mac(mvm, vif);
+}
+
+static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+       bool *result = _data;
+
+       if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
+               *result = true;
+}
+
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
+{
+       bool result = false;
+
+       ieee80211_iterate_active_interfaces_atomic(
+                       mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_ll_iter, &result);
+
+       return result;
+}
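
iwl_mvm_low_latency() above uses the usual mac80211 iterator pattern: a callback runs once per active interface and ORs its per-vif flag into a shared result, so any single low-latency vif makes the whole device low-latency. A stand-alone sketch with the interface list and iterator mocked out:

/* Mocked-up sketch of the "iterate and OR into a bool" pattern. */
#include <stdbool.h>
#include <stdio.h>

struct vif { bool low_latency; };

static void for_each_active_vif(struct vif *vifs, int n,
                                void (*iter)(void *data, struct vif *vif),
                                void *data)
{
        for (int i = 0; i < n; i++)
                iter(data, &vifs[i]);
}

static void ll_iter(void *data, struct vif *vif)
{
        bool *result = data;

        if (vif->low_latency)
                *result = true;             /* never cleared: one vif suffices */
}

int main(void)
{
        struct vif vifs[] = { { false }, { true }, { false } };
        bool low_latency = false;

        for_each_active_vif(vifs, 3, ll_iter, &low_latency);
        printf("low latency: %s\n", low_latency ? "yes" : "no");
        return 0;
}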
index 3872ead75488d6ac485a5cb147a4037988f7ee89..edb015c99049315a9a5ab5d6b203b95fe1c68dd2 100644 (file)
@@ -66,6 +66,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/acpi.h>
 
 #include "iwl-trans.h"
 #include "iwl-drv.h"
@@ -389,12 +390,92 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+       {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
 };
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 
+#ifdef CONFIG_ACPI
+#define SPL_METHOD             "SPLC"
+#define SPL_DOMAINTYPE_MODULE  BIT(0)
+#define SPL_DOMAINTYPE_WIFI    BIT(1)
+#define SPL_DOMAINTYPE_WIGIG   BIT(2)
+#define SPL_DOMAINTYPE_RFEM    BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+       union acpi_object *limits, *domain_type, *power_limit;
+
+       if (splx->type != ACPI_TYPE_PACKAGE ||
+           splx->package.count != 2 ||
+           splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+           splx->package.elements[0].integer.value != 0) {
+               IWL_ERR(trans, "Unsupported splx structure");
+               return 0;
+       }
+
+       limits = &splx->package.elements[1];
+       if (limits->type != ACPI_TYPE_PACKAGE ||
+           limits->package.count < 2 ||
+           limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+           limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+               IWL_ERR(trans, "Invalid limits element");
+               return 0;
+       }
+
+       domain_type = &limits->package.elements[0];
+       power_limit = &limits->package.elements[1];
+       if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+               IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+               return 0;
+       }
+
+       return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+       acpi_handle pxsx_handle;
+       acpi_handle handle;
+       struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+       acpi_status status;
+
+       pxsx_handle = ACPI_HANDLE(&pdev->dev);
+       if (!pxsx_handle) {
+               IWL_DEBUG_INFO(trans,
+                              "Could not retrieve root port ACPI handle");
+               return;
+       }
+
+       /* Get the method's handle */
+       status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+       if (ACPI_FAILURE(status)) {
+               IWL_DEBUG_INFO(trans, "SPL method not found");
+               return;
+       }
+
+       /* Call SPLC with no arguments */
+       status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+       if (ACPI_FAILURE(status)) {
+               IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+               return;
+       }
+
+       trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+       IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+                      trans->dflt_pwr_limit);
+       kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
 
@@ -419,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_trans;
        }
 
+       set_dflt_pwr_limit(iwl_trans, pdev);
+
        /* register transport layer debugfs here */
        ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
        if (ret)
@@ -477,7 +560,7 @@ static int iwl_pci_resume(struct device *device)
        iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
-       iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
        return 0;
 }
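
The SPLX parsing added above walks a two-level ACPI package and only honors the limit when the WiFi domain bit is set. The sketch below mirrors those checks with plain structs standing in for union acpi_object; the field names ("revision", "limits") and the layout of the mock types are my own, not the real ACPI API.

/* Mocked-up sketch of the SPLX power-limit validation logic. */
#include <stdint.h>
#include <stdio.h>

#define DOMAIN_WIFI  (1u << 1)

struct limits_pkg {
        uint64_t domain_type;
        uint64_t power_limit;
};

struct splx_pkg {
        uint64_t revision;                  /* must be 0, per the check above */
        struct limits_pkg limits;
};

static uint64_t splx_get_pwr_limit(const struct splx_pkg *splx)
{
        if (splx->revision != 0)
                return 0;                   /* unsupported splx structure */

        if (!(splx->limits.domain_type & DOMAIN_WIFI))
                return 0;                   /* WiFi power is not limited */

        return splx->limits.power_limit;
}

int main(void)
{
        struct splx_pkg splx = { 0, { DOMAIN_WIFI, 1600 } };

        printf("default power limit: %llu\n",
               (unsigned long long)splx_get_pwr_limit(&splx));
        return 0;
}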
index e851f26fd44c1644c81b24f9a91b690ab2fb418e..9091513ea7388ce11f2294fbb609b3581073e2a0 100644 (file)
@@ -304,7 +304,7 @@ struct iwl_trans_pcie {
        bool bc_table_dword;
        u32 rx_page_order;
 
-       const char **command_names;
+       const char *const *command_names;
 
        /* queue watchdog */
        unsigned long wd_timeout;
@@ -488,4 +488,6 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
        __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
 }
 
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
 #endif /* __iwl_trans_int_pcie_h__ */
index 08c23d497a02aae938f321fcf751b61e0642b452..fdfa3969cac986c1824bd65c41512a9ac4ba7b39 100644 (file)
@@ -155,37 +155,26 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
        if (rxq->need_update == 0)
                goto exit_unlock;
 
-       if (trans->cfg->base_params->shadow_reg_enable) {
-               /* shadow register enabled */
-               /* Device expects a multiple of 8 */
-               rxq->write_actual = (rxq->write & ~0x7);
-               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
-       } else {
-               /* If power-saving is in use, make sure device is awake */
-               if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
-                       reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
-                       if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                               IWL_DEBUG_INFO(trans,
-                                       "Rx queue requesting wakeup,"
-                                       " GP1 = 0x%x\n", reg);
-                               iwl_set_bit(trans, CSR_GP_CNTRL,
-                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                               goto exit_unlock;
-                       }
-
-                       rxq->write_actual = (rxq->write & ~0x7);
-                       iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
-                                          rxq->write_actual);
-
-               /* Else device is assumed to be awake */
-               } else {
-                       /* Device expects a multiple of 8 */
-                       rxq->write_actual = (rxq->write & ~0x7);
-                       iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
-                                          rxq->write_actual);
+       /*
+        * explicitly wake up the NIC if:
+        * 1. shadow registers aren't enabled
+        * 2. there is a chance that the NIC is asleep
+        */
+       if (!trans->cfg->base_params->shadow_reg_enable &&
+           test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+               reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+                                      reg);
+                       iwl_set_bit(trans, CSR_GP_CNTRL,
+                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       goto exit_unlock;
                }
        }
+
+       rxq->write_actual = round_down(rxq->write, 8);
+       iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
        rxq->need_update = 0;
 
  exit_unlock:
@@ -802,10 +791,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 
 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta;
 
-       lockdep_assert_held(&trans_pcie->irq_lock);
+       lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
 
        trace_iwlwifi_dev_irq(trans->dev);
 
@@ -1006,7 +994,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
                isr_stats->rfkill++;
 
-               iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+               iwl_trans_pcie_rf_kill(trans, hw_rfkill);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
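
The RX write-pointer hunk above replaces the open-coded "(write & ~0x7)" with round_down(rxq->write, 8); the device expects the pointer to be a multiple of 8, and collapsing the three branches works because the two expressions are equivalent for power-of-two alignment. A quick sketch (round_down re-implemented locally):

/* Sketch showing round_down(x, 8) == (x & ~0x7) for power-of-two alignment. */
#include <stdint.h>
#include <stdio.h>

static uint32_t round_down_pow2(uint32_t x, uint32_t align)
{
        return x & ~(align - 1);            /* valid only for power-of-two align */
}

int main(void)
{
        for (uint32_t w = 0; w < 20; w++)
                if (round_down_pow2(w, 8) != (w & ~0x7u))
                        printf("mismatch at %u\n", (unsigned)w);

        printf("write=13 -> write_actual=%u\n", (unsigned)round_down_pow2(13, 8));
        return 0;
}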
index f9507807b4865ad210312e6999bb120d831ebf0e..dcfd6d866d095081d7001795c4ec802c3044926f 100644 (file)
 #include "iwl-agn-hw.h"
 #include "internal.h"
 
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+       iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+                   ((reg & 0x0000ffff) | (2 << 28)));
+       return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+       iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+       iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+                   ((reg & 0x0000ffff) | (3 << 28)));
+}
+
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -89,6 +103,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
+#define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
@@ -132,8 +147,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
         */
 
        /* Disable L0S exit timer (platform NMI Work/Around) */
-       iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
-                   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+                           CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
 
        /*
         * Disable L0s without affecting L1;
@@ -203,19 +219,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
        /*
         * Enable DMA clock and wait for it to stabilize.
         *
-        * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
-        * do not disable clocks.  This preserves any hardware bits already
-        * set by default in "CLK_CTRL_REG" after reset.
+        * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+        * bits do not disable clocks.  This preserves any hardware
+        * bits already set by default in "CLK_CTRL_REG" after reset.
         */
-       iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
-       udelay(20);
+       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               iwl_write_prph(trans, APMG_CLK_EN_REG,
+                              APMG_CLK_VAL_DMA_CLK_RQT);
+               udelay(20);
 
-       /* Disable L1-Active */
-       iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
-                         APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+               /* Disable L1-Active */
+               iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+                                 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
-       /* Clear the interrupt in APMG if the NIC is in RFKILL */
-       iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+               /* Clear the interrupt in APMG if the NIC is in RFKILL */
+               iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+                              APMG_RTC_INT_STT_RFKILL);
+       }
 
        set_bit(STATUS_DEVICE_ENABLED, &trans->status);
 
@@ -223,6 +243,116 @@ out:
        return ret;
 }
 
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+       int ret;
+       u32 apmg_gp1_reg;
+       u32 apmg_xtal_cfg_reg;
+       u32 dl_cfg_reg;
+
+       /* Force XTAL ON */
+       __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+                                CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+       /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+       iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+       udelay(10);
+
+       /*
+        * Set "initialization complete" bit to move adapter from
+        * D0U* --> D0A* (powered-up active) state.
+        */
+       iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       /*
+        * Wait for clock stabilization; once stabilized, access to
+        * device-internal resources is possible.
+        */
+       ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+                          CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                          CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+                          25000);
+       if (WARN_ON(ret < 0)) {
+               IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+               /* Release XTAL ON request */
+               __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                          CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+               return;
+       }
+
+       /*
+        * Clear "disable persistence" to avoid LP XTAL resetting when
+        * SHRD_HW_RST is applied in S3.
+        */
+       iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+                                   APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+       /*
+        * Force APMG XTAL to be active to prevent its disabling by HW
+        * caused by APMG idle state.
+        */
+       apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+                                                   SHR_APMG_XTAL_CFG_REG);
+       iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+                                apmg_xtal_cfg_reg |
+                                SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+       /*
+        * Reset entire device again - do controller reset (results in
+        * SHRD_HW_RST). Turn MAC off before proceeding.
+        */
+       iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+       udelay(10);
+
+       /* Enable LP XTAL by indirect access through CSR */
+       apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+       iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+                                SHR_APMG_GP1_WF_XTAL_LP_EN |
+                                SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+       /* Clear delay line clock power up */
+       dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+       iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+                                ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+       /*
+        * Enable persistence mode to avoid LP XTAL resetting when
+        * SHRD_HW_RST is applied in S3.
+        */
+       iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                   CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+       /*
+        * Clear "initialization complete" bit to move adapter from
+        * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+        */
+       iwl_clear_bit(trans, CSR_GP_CNTRL,
+                     CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       /* Activates XTAL resources monitor */
+       __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+                                CSR_MONITOR_XTAL_RESOURCES);
+
+       /* Release XTAL ON request */
+       __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+                                  CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+       udelay(10);
+
+       /* Release APMG XTAL */
+       iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+                                apmg_xtal_cfg_reg &
+                                ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
 static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
 {
        int ret = 0;
@@ -250,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);
 
+       if (trans->cfg->lp_xtal_workaround) {
+               iwl_pcie_apm_lp_xtal_enable(trans);
+               return;
+       }
+
        /* Reset the entire device */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
 
@@ -273,7 +408,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 
        spin_unlock(&trans_pcie->irq_lock);
 
-       iwl_pcie_set_pwr(trans, false);
+       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               iwl_pcie_set_pwr(trans, false);
 
        iwl_op_mode_nic_config(trans->op_mode);
 
@@ -435,78 +571,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
        return ret;
 }
 
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+                                             const struct fw_img *image,
+                                             int cpu,
+                                             int *first_ucode_section)
 {
        int shift_param;
-       u32 address;
-       int ret = 0;
+       int i, ret = 0;
+       u32 last_read_idx = 0;
 
        if (cpu == 1) {
                shift_param = 0;
-               address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+               *first_ucode_section = 0;
        } else {
                shift_param = 16;
-               address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+               (*first_ucode_section)++;
        }
 
-       /* set CPU to started */
-       iwl_trans_set_bits_mask(trans,
-                               CSR_UCODE_LOAD_STATUS_ADDR,
-                               CSR_CPU_STATUS_LOADING_STARTED << shift_param,
-                               1);
-
-       /* set last complete descriptor number */
-       iwl_trans_set_bits_mask(trans,
-                               CSR_UCODE_LOAD_STATUS_ADDR,
-                               CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
-                               << shift_param,
-                               1);
-
-       /* set last loaded block */
-       iwl_trans_set_bits_mask(trans,
-                               CSR_UCODE_LOAD_STATUS_ADDR,
-                               CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
-                               << shift_param,
-                               1);
+       for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+               last_read_idx = i;
 
+               if (!image->sec[i].data ||
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                       IWL_DEBUG_FW(trans,
+                                    "Break since Data not valid or Empty section, sec = %d\n",
+                                    i);
+                       break;
+               }
+
+               if (i == (*first_ucode_section) + 1)
+                       /* set CPU to started */
+                       iwl_set_bits_prph(trans,
+                                         CSR_UCODE_LOAD_STATUS_ADDR,
+                                         LMPM_CPU_HDRS_LOADING_COMPLETED
+                                         << shift_param);
+
+               ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+               if (ret)
+                       return ret;
+       }
        /* image loading complete */
-       iwl_trans_set_bits_mask(trans,
-                               CSR_UCODE_LOAD_STATUS_ADDR,
-                               CSR_CPU_STATUS_LOADING_COMPLETED
-                               << shift_param,
-                               1);
-
-       /* set FH_TCSR_0_REG  */
-       iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
-
-       /* verify image verification started  */
-       ret = iwl_poll_bit(trans, address,
-                          CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
-                          CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
-                          CSR_SECURE_TIME_OUT);
-       if (ret < 0) {
-               IWL_ERR(trans, "secure boot process didn't start\n");
-               return ret;
+       iwl_set_bits_prph(trans,
+                         CSR_UCODE_LOAD_STATUS_ADDR,
+                         LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+       *first_ucode_section = last_read_idx;
+
+       return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+                                     const struct fw_img *image,
+                                     int cpu,
+                                     int *first_ucode_section)
+{
+       int shift_param;
+       int i, ret = 0;
+       u32 last_read_idx = 0;
+
+       if (cpu == 1) {
+               shift_param = 0;
+               *first_ucode_section = 0;
+       } else {
+               shift_param = 16;
+               (*first_ucode_section)++;
        }
 
-       /* wait for image verification to complete  */
-       ret = iwl_poll_bit(trans, address,
-                          CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
-                          CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
-                          CSR_SECURE_TIME_OUT);
+       for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+               last_read_idx = i;
 
-       if (ret < 0) {
-               IWL_ERR(trans, "Time out on secure boot process\n");
-               return ret;
+               if (!image->sec[i].data ||
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                       IWL_DEBUG_FW(trans,
+                                    "Break since Data not valid or Empty section, sec = %d\n",
+                                    i);
+                       break;
+               }
+
+               ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+               if (ret)
+                       return ret;
        }
 
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               iwl_set_bits_prph(trans,
+                                 CSR_UCODE_LOAD_STATUS_ADDR,
+                                 (LMPM_CPU_UCODE_LOADING_COMPLETED |
+                                  LMPM_CPU_HDRS_LOADING_COMPLETED |
+                                  LMPM_CPU_UCODE_LOADING_STARTED) <<
+                                       shift_param);
+
+       *first_ucode_section = last_read_idx;
+
        return 0;
 }
 
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                const struct fw_img *image)
 {
-       int i, ret = 0;
+       int ret = 0;
+       int first_ucode_section;
 
        IWL_DEBUG_FW(trans,
                     "working with %s image\n",
@@ -518,53 +682,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
        /* configure the ucode to be ready to get the secured image */
        if (image->is_secure) {
                /* set secure boot inspector addresses */
-               iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
-               iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
-               /* release CPU1 reset if secure inspector image burned in OTP */
-               iwl_write32(trans, CSR_RESET, 0);
-       }
-
-       /* load to FW the binary sections of CPU1 */
-       IWL_DEBUG_INFO(trans, "Loading CPU1\n");
-       for (i = 0;
-            i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
-            i++) {
-               if (!image->sec[i].data)
-                       break;
-               ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+               iwl_write_prph(trans,
+                              LMPM_SECURE_INSPECTOR_CODE_ADDR,
+                              LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+               iwl_write_prph(trans,
+                              LMPM_SECURE_INSPECTOR_DATA_ADDR,
+                              LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+               /* set CPU1 header address */
+               iwl_write_prph(trans,
+                              LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+                              LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+               /* load to FW the binary Secured sections of CPU1 */
+               ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+                                                        &first_ucode_section);
                if (ret)
                        return ret;
-       }
 
-       /* configure the ucode to start secure process on CPU1 */
-       if (image->is_secure) {
-               /* config CPU1 to start secure protocol */
-               ret = iwl_pcie_secure_set(trans, 1);
+       } else {
+               /* load to FW the binary Non secured sections of CPU1 */
+               ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+                                                &first_ucode_section);
                if (ret)
                        return ret;
-       } else {
-               /* Remove all resets to allow NIC to operate */
-               iwl_write32(trans, CSR_RESET, 0);
        }
 
        if (image->is_dual_cpus) {
+               /* set CPU2 header address */
+               iwl_write_prph(trans,
+                              LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+                              LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
                /* load to FW the binary sections of CPU2 */
-               IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
-               for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
-                       i < IWL_UCODE_SECTION_MAX; i++) {
-                       if (!image->sec[i].data)
-                               break;
-                       ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
-                       if (ret)
-                               return ret;
-               }
+               if (image->is_secure)
+                       ret = iwl_pcie_load_cpu_secured_sections(
+                                                       trans, image, 2,
+                                                       &first_ucode_section);
+               else
+                       ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+                                                        &first_ucode_section);
+               if (ret)
+                       return ret;
+       }
+
+       /* release CPU reset */
+       if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+       else
+               iwl_write32(trans, CSR_RESET, 0);
 
-               if (image->is_secure) {
-                       /* set CPU2 for secure protocol */
-                       ret = iwl_pcie_secure_set(trans, 2);
-                       if (ret)
-                               return ret;
+       if (image->is_secure) {
+               /* wait for image verification to complete  */
+               ret = iwl_poll_prph_bit(trans,
+                                       LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+                                       LMPM_SECURE_BOOT_STATUS_SUCCESS,
+                                       LMPM_SECURE_BOOT_STATUS_SUCCESS,
+                                       LMPM_SECURE_TIME_OUT);
+
+               if (ret < 0) {
+                       IWL_ERR(trans, "Time out on secure boot process\n");
+                       return ret;
                }
        }
 
@@ -591,7 +770,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
-       iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
        if (hw_rfkill && !run_in_rfkill)
                return -ERFKILL;
 
@@ -706,7 +885,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        if (hw_rfkill != was_hw_rfkill)
-               iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+               iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+       if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+               iwl_trans_pcie_stop_device(trans);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -815,7 +1000,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
-       iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
        return 0;
 }
@@ -1158,6 +1343,7 @@ static const char *get_csr_string(int cmd)
        IWL_CMD(CSR_GIO_CHICKEN_BITS);
        IWL_CMD(CSR_ANA_PLL_CFG);
        IWL_CMD(CSR_HW_REV_WA_REG);
+       IWL_CMD(CSR_MONITOR_STATUS_REG);
        IWL_CMD(CSR_DBG_HPET_MEM_REG);
        default:
                return "UNKNOWN";
@@ -1190,6 +1376,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
                CSR_DRAM_INT_TBL_REG,
                CSR_GIO_CHICKEN_BITS,
                CSR_ANA_PLL_CFG,
+               CSR_MONITOR_STATUS_REG,
                CSR_HW_REV_WA_REG,
                CSR_DBG_HPET_MEM_REG
        };
@@ -1407,16 +1594,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 {
        struct iwl_trans *trans = file->private_data;
        char *buf = NULL;
-       int pos = 0;
-       ssize_t ret = -EFAULT;
-
-       ret = pos = iwl_dump_fh(trans, &buf);
-       if (buf) {
-               ret = simple_read_from_buffer(user_buf,
-                                             count, ppos, buf, pos);
-               kfree(buf);
-       }
+       ssize_t ret;
 
+       ret = iwl_dump_fh(trans, &buf);
+       if (ret < 0)
+               return ret;
+       if (!buf)
+               return -EINVAL;
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+       kfree(buf);
        return ret;
 }
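
The new iwl_trans_pcie_read_shr()/write_shr() helpers above implement an indirect register access: an opcode (2 for read, 3 for write) plus the low 16 bits of the target address go into a control register, while the payload flows through a separate data register. The sketch below models that handshake with a mocked "device"; only the opcode/address encoding is taken from the hunk above, everything else is invented for illustration.

/* Stand-alone sketch of the indirect shared-register access pattern. */
#include <stdint.h>
#include <stdio.h>

#define OP_READ  2u
#define OP_WRITE 3u

struct mock_dev {
        uint32_t shr[0x10000];              /* shared register space */
        uint32_t data;                      /* data register stand-in */
};

static void write_ctrl(struct mock_dev *d, uint32_t val)
{
        uint32_t reg = val & 0xffff;
        uint32_t op = val >> 28;

        if (op == OP_READ)
                d->data = d->shr[reg];
        else if (op == OP_WRITE)
                d->shr[reg] = d->data;
}

static uint32_t shr_read(struct mock_dev *d, uint32_t reg)
{
        write_ctrl(d, (reg & 0xffff) | (OP_READ << 28));
        return d->data;
}

static void shr_write(struct mock_dev *d, uint32_t reg, uint32_t val)
{
        d->data = val;
        write_ctrl(d, (reg & 0xffff) | (OP_WRITE << 28));
}

int main(void)
{
        static struct mock_dev dev;

        shr_write(&dev, 0x1c, 0xabcd0001);
        printf("SHR[0x1c] = 0x%08x\n", (unsigned)shr_read(&dev, 0x1c));
        return 0;
}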
 
index 3d549008b3e2db714df564e16060948e3336ee41..3b0c72c1005446d2d93f7eb15ed6d3207e1c3ea1 100644 (file)
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
                IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
                        le32_to_cpu(txq->scratchbufs[i].scratch));
 
-       iwl_trans_fw_error(trans);
+       iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
 }
 
 /*
@@ -296,43 +296,38 @@ void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
        if (txq->need_update == 0)
                return;
 
-       if (trans->cfg->base_params->shadow_reg_enable ||
-           txq_id == trans_pcie->cmd_queue) {
-               /* shadow register enabled */
-               iwl_write32(trans, HBUS_TARG_WRPTR,
-                           txq->q.write_ptr | (txq_id << 8));
-       } else {
-               /* if we're trying to save power */
-               if (test_bit(STATUS_TPOWER_PMI, &trans->status)) {
-                       /* wake up nic if it's powered down ...
-                        * uCode will wake up, and interrupt us again, so next
-                        * time we'll skip this part. */
-                       reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
-
-                       if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-                               IWL_DEBUG_INFO(trans,
-                                       "Tx queue %d requesting wakeup,"
-                                       " GP1 = 0x%x\n", txq_id, reg);
-                               iwl_set_bit(trans, CSR_GP_CNTRL,
-                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                               return;
-                       }
-
-                       IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id,
-                                    txq->q.write_ptr);
-
-                       iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-                                    txq->q.write_ptr | (txq_id << 8));
-
+       /*
+        * explicitly wake up the NIC if:
+        * 1. shadow registers aren't enabled
+        * 2. NIC is woken up for CMD regardless of shadow outside this function
+        * 3. there is a chance that the NIC is asleep
+        */
+       if (!trans->cfg->base_params->shadow_reg_enable &&
+           txq_id != trans_pcie->cmd_queue &&
+           test_bit(STATUS_TPOWER_PMI, &trans->status)) {
                /*
-                * else not in power-save mode,
-                * uCode will never sleep when we're
-                * trying to tx (during RFKILL, we're not trying to tx).
+                * wake up nic if it's powered down ...
+                * uCode will wake up, and interrupt us again, so next
+                * time we'll skip this part.
                 */
-               } else
-                       iwl_write32(trans, HBUS_TARG_WRPTR,
-                                   txq->q.write_ptr | (txq_id << 8));
+               reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+               if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+                       IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+                                      txq_id, reg);
+                       iwl_set_bit(trans, CSR_GP_CNTRL,
+                                   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+                       return;
+               }
        }
+
+       /*
+        * if not in power-save mode, uCode will never sleep when we're
+        * trying to tx (during RFKILL, we're not trying to tx).
+        */
+       IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->q.write_ptr);
+       iwl_write32(trans, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
        txq->need_update = 0;
 }
 
@@ -705,8 +700,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
                           reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
        /* Enable L1-Active */
-       iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
-                           APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+               iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+                                   APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
 
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
@@ -1028,7 +1024,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
                if (nfreed++ > 0) {
                        IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
                                idx, q->write_ptr, q->read_ptr);
-                       iwl_trans_fw_error(trans);
+                       iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
                }
        }
 
@@ -1587,6 +1583,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                               get_cmd_string(trans_pcie, cmd->id));
                ret = -ETIMEDOUT;
 
+               iwl_write_prph(trans, DEVICE_SET_NMI_REG, 1);
                iwl_trans_fw_error(trans);
 
                goto cancel;
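
The simplified TX write-pointer path above ends with a single HBUS_TARG_WRPTR write that packs the queue index into bits 8 and up alongside the ring write pointer. The 8-bit split below is taken from the "write_ptr | (txq_id << 8)" expression in the hunk; the exact field widths are otherwise assumed for illustration.

/* Sketch of the HBUS_TARG_WRPTR value packing. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_wrptr(uint32_t txq_id, uint32_t write_ptr)
{
        return (write_ptr & 0xff) | (txq_id << 8);
}

int main(void)
{
        uint32_t val = pack_wrptr(9, 42);

        printf("reg = 0x%08x (queue %u, ptr %u)\n",
               (unsigned)val, (unsigned)((val >> 8) & 0xff), (unsigned)(val & 0xff));
        return 0;
}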
index cb6d189bc3e60fd9bfeb2e3e696caf822ebb9748..54e344aed6e05d097c3eb60462d74064757b81fc 100644 (file)
@@ -1766,7 +1766,8 @@ static void lbs_join_post(struct lbs_private *priv,
        memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
        priv->wdev->ssid_len = params->ssid_len;
 
-       cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+       cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
+                            GFP_KERNEL);
 
        /* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
        priv->connect_status = LBS_CONNECTED;
index 58c6ee5de98f9256b2e3db4735adbfe1fed299bd..33ceda296c9c61ece7d14a8484cd551b50026dfa 100644 (file)
@@ -498,7 +498,7 @@ static int if_sdio_prog_helper(struct if_sdio_card *card,
                 */
                mdelay(2);
 
-               chunk_size = min(size, (size_t)60);
+               chunk_size = min_t(size_t, size, 60);
 
                *((__le32*)chunk_buffer) = cpu_to_le32(chunk_size);
                memcpy(chunk_buffer + 4, firmware, chunk_size);
@@ -639,7 +639,7 @@ static int if_sdio_prog_real(struct if_sdio_card *card,
                        req_size = size;
 
                while (req_size) {
-                       chunk_size = min(req_size, (size_t)512);
+                       chunk_size = min_t(size_t, req_size, 512);
 
                        memcpy(chunk_buffer, firmware, chunk_size);
 /*
index 69d4c3179d042159122911539f41619ae7e71597..9d7a52f5a4102abedd2dbebc03c26c3866da2a64 100644 (file)
@@ -57,6 +57,10 @@ static bool rctbl = false;
 module_param(rctbl, bool, 0444);
 MODULE_PARM_DESC(rctbl, "Handle rate control table");
 
+static bool support_p2p_device = true;
+module_param(support_p2p_device, bool, 0444);
+MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+
 /**
  * enum hwsim_regtest - the type of regulatory tests we offer
  *
@@ -335,7 +339,8 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
 #endif
                                 BIT(NL80211_IFTYPE_AP) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
-       { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+       /* must be last, see hwsim_if_comb */
+       { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
 };
 
 static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
@@ -343,6 +348,27 @@ static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
 };
 
 static const struct ieee80211_iface_combination hwsim_if_comb[] = {
+       {
+               .limits = hwsim_if_limits,
+               /* remove the last entry which is P2P_DEVICE */
+               .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
+               .max_interfaces = 2048,
+               .num_different_channels = 1,
+       },
+       {
+               .limits = hwsim_if_dfs_limits,
+               .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+               .max_interfaces = 8,
+               .num_different_channels = 1,
+               .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                                      BIT(NL80211_CHAN_WIDTH_20) |
+                                      BIT(NL80211_CHAN_WIDTH_40) |
+                                      BIT(NL80211_CHAN_WIDTH_80) |
+                                      BIT(NL80211_CHAN_WIDTH_160),
+       }
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
        {
                .limits = hwsim_if_limits,
                .n_limits = ARRAY_SIZE(hwsim_if_limits),
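
The first combination added here reuses hwsim_if_limits but sets .n_limits to ARRAY_SIZE(hwsim_if_limits) - 1, which silently drops the P2P_DEVICE limit and is why the earlier hunk adds the "must be last" comment to that entry. A minimal sketch of the pattern, with made-up limit values rather than the real hwsim table:

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct iface_limit {
	int max;
	unsigned int types;	/* bitmask of interface types */
};

/* Hypothetical limits; the optional entry must stay last to be trimmable. */
static const struct iface_limit limits[] = {
	{ .max = 1, .types = 1u << 0 },
	{ .max = 2, .types = 1u << 1 },
	{ .max = 1, .types = 1u << 2 },	/* optional trailing entry */
};

int main(void)
{
	size_t with_last = ARRAY_SIZE(limits);
	size_t without_last = ARRAY_SIZE(limits) - 1;

	printf("full table: %zu entries, trimmed table: %zu entries\n",
	       with_last, without_last);
	return 0;
}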
@@ -385,6 +411,7 @@ struct mac80211_hwsim_data {
 
        struct mac_address addresses[2];
        int channels, idx;
+       bool use_chanctx;
 
        struct ieee80211_channel *tmp_chan;
        struct delayed_work roc_done;
@@ -451,7 +478,7 @@ static struct genl_family hwsim_genl_family = {
 
 /* MAC80211_HWSIM netlink policy */
 
-static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
        [HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
        [HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
        [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
@@ -468,6 +495,7 @@ static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
        [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
        [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
        [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
+       [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
 };
 
 static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1035,32 +1063,6 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
                        ack = true;
 
                rx_status.mactime = now + data2->tsf_offset;
-#if 0
-               /*
-                * Don't enable this code by default as the OUI 00:00:00
-                * is registered to Xerox so we shouldn't use it here, it
-                * might find its way into pcap files.
-                * Note that this code requires the headroom in the SKB
-                * that was allocated earlier.
-                */
-               rx_status.vendor_radiotap_oui[0] = 0x00;
-               rx_status.vendor_radiotap_oui[1] = 0x00;
-               rx_status.vendor_radiotap_oui[2] = 0x00;
-               rx_status.vendor_radiotap_subns = 127;
-               /*
-                * Radiotap vendor namespaces can (and should) also be
-                * split into fields by using the standard radiotap
-                * presence bitmap mechanism. Use just BIT(0) here for
-                * the presence bitmap.
-                */
-               rx_status.vendor_radiotap_bitmap = BIT(0);
-               /* We have 8 bytes of (dummy) data */
-               rx_status.vendor_radiotap_len = 8;
-               /* For testing, also require it to be aligned */
-               rx_status.vendor_radiotap_align = 8;
-               /* push the data */
-               memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
-#endif
 
                memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
                ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -1087,7 +1089,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
                return;
        }
 
-       if (data->channels == 1) {
+       if (!data->use_chanctx) {
                channel = data->channel;
        } else if (txi->hw_queue == 4) {
                channel = data->tmp_chan;
@@ -1275,6 +1277,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
 
        mac80211_hwsim_tx_frame(hw, skb,
                                rcu_dereference(vif->chanctx_conf)->def.chan);
+
+       if (vif->csa_active && ieee80211_csa_is_complete(vif))
+               ieee80211_csa_finish(vif);
 }
 
 static enum hrtimer_restart
@@ -1350,7 +1355,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
 
        data->channel = conf->chandef.chan;
 
-       WARN_ON(data->channel && data->channels > 1);
+       WARN_ON(data->channel && data->use_chanctx);
 
        data->power_level = conf->power_level;
        if (!data->started || !data->beacon_int)
@@ -1936,7 +1941,8 @@ static struct ieee80211_ops mac80211_hwsim_mchan_ops;
 
 static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
                                       const struct ieee80211_regdomain *regd,
-                                      bool reg_strict)
+                                      bool reg_strict, bool p2p_device,
+                                      bool use_chanctx)
 {
        int err;
        u8 addr[ETH_ALEN];
@@ -1946,11 +1952,14 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
        int idx;
 
+       if (WARN_ON(channels > 1 && !use_chanctx))
+               return -EINVAL;
+
        spin_lock_bh(&hwsim_radio_lock);
        idx = hwsim_radio_idx++;
        spin_unlock_bh(&hwsim_radio_lock);
 
-       if (channels > 1)
+       if (use_chanctx)
                ops = &mac80211_hwsim_mchan_ops;
        hw = ieee80211_alloc_hw(sizeof(*data), ops);
        if (!hw) {
@@ -1991,17 +2000,25 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        hw->wiphy->addresses = data->addresses;
 
        data->channels = channels;
+       data->use_chanctx = use_chanctx;
        data->idx = idx;
 
-       if (data->channels > 1) {
+       if (data->use_chanctx) {
                hw->wiphy->max_scan_ssids = 255;
                hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
                hw->wiphy->max_remain_on_channel_duration = 1000;
                /* For channels > 1 DFS is not allowed */
                hw->wiphy->n_iface_combinations = 1;
                hw->wiphy->iface_combinations = &data->if_combination;
-               data->if_combination = hwsim_if_comb[0];
+               if (p2p_device)
+                       data->if_combination = hwsim_if_comb_p2p_dev[0];
+               else
+                       data->if_combination = hwsim_if_comb[0];
                data->if_combination.num_different_channels = data->channels;
+       } else if (p2p_device) {
+               hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
+               hw->wiphy->n_iface_combinations =
+                       ARRAY_SIZE(hwsim_if_comb_p2p_dev);
        } else {
                hw->wiphy->iface_combinations = hwsim_if_comb;
                hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
@@ -2017,8 +2034,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
                                     BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                     BIT(NL80211_IFTYPE_P2P_GO) |
                                     BIT(NL80211_IFTYPE_ADHOC) |
-                                    BIT(NL80211_IFTYPE_MESH_POINT) |
-                                    BIT(NL80211_IFTYPE_P2P_DEVICE);
+                                    BIT(NL80211_IFTYPE_MESH_POINT);
+
+       if (p2p_device)
+               hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
 
        hw->flags = IEEE80211_HW_MFP_CAPABLE |
                    IEEE80211_HW_SIGNAL_DBM |
@@ -2027,13 +2046,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
                    IEEE80211_HW_AMPDU_AGGREGATION |
                    IEEE80211_HW_WANT_MONITOR_VIF |
                    IEEE80211_HW_QUEUE_CONTROL |
-                   IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+                   IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+                   IEEE80211_HW_CHANCTX_STA_CSA;
        if (rctbl)
                hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
 
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
                            WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
-                           WIPHY_FLAG_AP_UAPSD;
+                           WIPHY_FLAG_AP_UAPSD |
+                           WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
        /* ask mac80211 to reserve space for magic */
@@ -2141,7 +2162,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
        debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps);
        debugfs_create_file("group", 0666, data->debugfs, data,
                            &hwsim_fops_group);
-       if (data->channels == 1)
+       if (!data->use_chanctx)
                debugfs_create_file("dfs_simulate_radar", 0222,
                                    data->debugfs,
                                    data, &hwsim_simulate_radar);
@@ -2407,10 +2428,17 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
        const char *alpha2 = NULL;
        const struct ieee80211_regdomain *regd = NULL;
        bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+       bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+       bool use_chanctx;
 
        if (info->attrs[HWSIM_ATTR_CHANNELS])
                chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+       if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
+               use_chanctx = true;
+       else
+               use_chanctx = (chans > 1);
+
        if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
                alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
 
@@ -2422,7 +2450,8 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
                regd = hwsim_world_regdom_custom[idx];
        }
 
-       return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+       return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
+                                          p2p_device, use_chanctx);
 }
 
 static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -2640,7 +2669,9 @@ static int __init init_mac80211_hwsim(void)
                }
 
                err = mac80211_hwsim_create_radio(channels, reg_alpha2,
-                                                 regd, reg_strict);
+                                                 regd, reg_strict,
+                                                 support_p2p_device,
+                                                 channels > 1);
                if (err < 0)
                        goto out_free_radios;
        }
index 2747cce5a269e46d42d1b3f6f819b59753b8aa8b..c9d0315575bab27035378378d396a46db828bf4c 100644 (file)
@@ -107,6 +107,10 @@ enum {
  *     (nla string, length 2)
  * @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
  * @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
+ * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag)
+ * @HWSIM_ATTR_USE_CHANCTX: used with the %HWSIM_CMD_CREATE_RADIO
+ *     command to force use of channel contexts even when only a
+ *     single channel is supported
  * @__HWSIM_ATTR_MAX: enum limit
  */
 
@@ -126,6 +130,8 @@ enum {
        HWSIM_ATTR_REG_HINT_ALPHA2,
        HWSIM_ATTR_REG_CUSTOM_REG,
        HWSIM_ATTR_REG_STRICT_REG,
+       HWSIM_ATTR_SUPPORT_P2P_DEVICE,
+       HWSIM_ATTR_USE_CHANCTX,
        __HWSIM_ATTR_MAX,
 };
 #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
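
The new attributes are appended just above the __HWSIM_ATTR_MAX sentinel, so HWSIM_ATTR_MAX and every table sized HWSIM_ATTR_MAX + 1 (such as the nla_policy array earlier in this patch) grow automatically. The sentinel pattern in isolation, with a hypothetical attribute set rather than the real hwsim one:

#include <stdio.h>

/* Hypothetical attribute enum following the netlink sentinel convention. */
enum {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_CHANNELS,
	DEMO_ATTR_SUPPORT_P2P_DEVICE,
	DEMO_ATTR_USE_CHANCTX,	/* new attributes go right above the sentinel */
	__DEMO_ATTR_MAX,
};
#define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

/* Any per-attribute table sized this way stays in sync with the enum. */
static const char *demo_attr_types[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_CHANNELS]		= "u32",
	[DEMO_ATTR_SUPPORT_P2P_DEVICE]	= "flag",
	[DEMO_ATTR_USE_CHANCTX]		= "flag",
};

int main(void)
{
	for (int i = 0; i <= DEMO_ATTR_MAX; i++)
		printf("attr %d -> %s\n", i,
		       demo_attr_types[i] ? demo_attr_types[i] : "unspec");
	return 0;
}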
index 5d9a8084665d5176fc3b54afd62c1b821c7278df..c92f27aa71ede1f049c101a0ba185f31d982aaa3 100644 (file)
 #include "main.h"
 #include "11ac.h"
 
+/* Tables of the MCS map to the highest data rate (in Mbps) supported
+ * for long GI.
+ */
+static const u16 max_rate_lgi_80MHZ[8][3] = {
+       {0x124, 0x15F, 0x186},  /* NSS = 1 */
+       {0x249, 0x2BE, 0x30C},  /* NSS = 2 */
+       {0x36D, 0x41D, 0x492},  /* NSS = 3 */
+       {0x492, 0x57C, 0x618},  /* NSS = 4 */
+       {0x5B6, 0x6DB, 0x79E},  /* NSS = 5 */
+       {0x6DB, 0x83A, 0x0},    /* NSS = 6 */
+       {0x7FF, 0x999, 0xAAA},  /* NSS = 7 */
+       {0x924, 0xAF8, 0xC30}   /* NSS = 8 */
+};
+
+static const u16 max_rate_lgi_160MHZ[8][3] = {
+       {0x249, 0x2BE, 0x30C},   /* NSS = 1 */
+       {0x492, 0x57C, 0x618},   /* NSS = 2 */
+       {0x6DB, 0x83A, 0x0},     /* NSS = 3 */
+       {0x924, 0xAF8, 0xC30},   /* NSS = 4 */
+       {0xB6D, 0xDB6, 0xF3C},   /* NSS = 5 */
+       {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
+       {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
+       {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
+};
+
 /* This function converts the 2-bit MCS map to the highest long GI
  * VHT data rate.
  */
@@ -30,33 +55,10 @@ static u16
 mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
                                  u8 bands, u16 mcs_map)
 {
-       u8 i, nss, max_mcs;
+       u8 i, nss, mcs;
        u16 max_rate = 0;
        u32 usr_vht_cap_info = 0;
        struct mwifiex_adapter *adapter = priv->adapter;
-       /* tables of the MCS map to the highest data rate (in Mbps)
-        * supported for long GI
-        */
-       u16 max_rate_lgi_80MHZ[8][3] = {
-               {0x124, 0x15F, 0x186},  /* NSS = 1 */
-               {0x249, 0x2BE, 0x30C},  /* NSS = 2 */
-               {0x36D, 0x41D, 0x492},  /* NSS = 3 */
-               {0x492, 0x57C, 0x618},  /* NSS = 4 */
-               {0x5B6, 0x6DB, 0x79E},  /* NSS = 5 */
-               {0x6DB, 0x83A, 0x0},    /* NSS = 6 */
-               {0x7FF, 0x999, 0xAAA},  /* NSS = 7 */
-               {0x924, 0xAF8, 0xC30}   /* NSS = 8 */
-       };
-       u16 max_rate_lgi_160MHZ[8][3] = {
-               {0x249, 0x2BE, 0x30C},   /* NSS = 1 */
-               {0x492, 0x57C, 0x618},   /* NSS = 2 */
-               {0x6DB, 0x83A, 0x0},     /* NSS = 3 */
-               {0x924, 0xAF8, 0xC30},   /* NSS = 4 */
-               {0xB6D, 0xDB6, 0xF3C},   /* NSS = 5 */
-               {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
-               {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
-               {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
-       };
 
        if (bands & BAND_AAC)
                usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
@@ -64,29 +66,29 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
                usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
 
        /* find the max NSS supported */
-       nss = 0;
-       for (i = 0; i < 8; i++) {
-               max_mcs = (mcs_map >> (2 * i)) & 0x3;
-               if (max_mcs < 3)
+       nss = 1;
+       for (i = 1; i <= 8; i++) {
+               mcs = GET_VHTNSSMCS(mcs_map, i);
+               if (mcs < IEEE80211_VHT_MCS_NOT_SUPPORTED)
                        nss = i;
        }
-       max_mcs = (mcs_map >> (2 * nss)) & 0x3;
+       mcs = GET_VHTNSSMCS(mcs_map, nss);
 
-       /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */
-       if (max_mcs >= 3)
-               max_mcs = 2;
+       /* if mcs is 3, nss must be 1 (NSS = 1). Default mcs to MCS 0~9 */
+       if (mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+               mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
 
        if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
                /* support 160 MHz */
-               max_rate = max_rate_lgi_160MHZ[nss][max_mcs];
+               max_rate = max_rate_lgi_160MHZ[nss - 1][mcs];
                if (!max_rate)
                        /* MCS9 is not supported in NSS6 */
-                       max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1];
+                       max_rate = max_rate_lgi_160MHZ[nss - 1][mcs - 1];
        } else {
-               max_rate = max_rate_lgi_80MHZ[nss][max_mcs];
+               max_rate = max_rate_lgi_80MHZ[nss - 1][mcs];
                if (!max_rate)
                        /* MCS9 is not supported in NSS3 */
-                       max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1];
+                       max_rate = max_rate_lgi_80MHZ[nss - 1][mcs - 1];
        }
 
        return max_rate;
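
The rewritten conversion walks the 16-bit VHT MCS map two bits per spatial stream, keeps the highest NSS whose field is not IEEE80211_VHT_MCS_NOT_SUPPORTED (3), clamps an unsupported MCS field to the 0-9 range, and indexes the long-GI rate tables with nss - 1. A self-contained sanity check of that walk, assuming the usual GET_VHTNSSMCS layout (NSS 1 in the lowest two bits) and reusing the 80 MHz table values added above:

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9	2
#define VHT_MCS_NOT_SUPPORTED	3

/* Assumed layout: two bits per spatial stream, NSS 1 in bits 0..1. */
#define GET_VHTNSSMCS(map, nss)	(((map) >> (2 * ((nss) - 1))) & 0x3)

/* Long-GI 80 MHz rates (Mbps, hex) as in the table added above. */
static const uint16_t max_rate_lgi_80mhz[8][3] = {
	{0x124, 0x15F, 0x186}, {0x249, 0x2BE, 0x30C},
	{0x36D, 0x41D, 0x492}, {0x492, 0x57C, 0x618},
	{0x5B6, 0x6DB, 0x79E}, {0x6DB, 0x83A, 0x0},
	{0x7FF, 0x999, 0xAAA}, {0x924, 0xAF8, 0xC30},
};

static uint16_t mcsmap_to_maxrate_80mhz(uint16_t mcs_map)
{
	int nss = 1, mcs, i;
	uint16_t max_rate;

	/* Highest NSS whose 2-bit field is not "not supported". */
	for (i = 1; i <= 8; i++)
		if (GET_VHTNSSMCS(mcs_map, i) < VHT_MCS_NOT_SUPPORTED)
			nss = i;

	mcs = GET_VHTNSSMCS(mcs_map, nss);
	if (mcs == VHT_MCS_NOT_SUPPORTED)
		mcs = VHT_MCS_SUPPORT_0_9;

	max_rate = max_rate_lgi_80mhz[nss - 1][mcs];
	if (!max_rate)	/* e.g. MCS9 is not defined for NSS 6 at 80 MHz */
		max_rate = max_rate_lgi_80mhz[nss - 1][mcs - 1];

	return max_rate;
}

int main(void)
{
	/* Two streams with MCS 0-9, the rest not supported: map 0xfffa. */
	printf("max rate: %u Mbps\n", mcsmap_to_maxrate_80mhz(0xfffa));
	return 0;
}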
@@ -94,21 +96,20 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
 
 static void
 mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
-                         struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+                         struct ieee80211_vht_cap *vht_cap, u8 bands)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
 
        if (bands & BAND_A)
-               vht_cap->vht_cap.vht_cap_info =
+               vht_cap->vht_cap_info =
                                cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
        else
-               vht_cap->vht_cap.vht_cap_info =
+               vht_cap->vht_cap_info =
                                cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
 }
 
-static void
-mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
-                        struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+                             struct ieee80211_vht_cap *vht_cap, u8 bands)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
        u16 mcs_map_user, mcs_map_resp, mcs_map_result;
@@ -119,46 +120,48 @@ mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
 
        /* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
        mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
-       mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map);
+       mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
        mcs_map_result = 0;
 
        for (nss = 1; nss <= 8; nss++) {
                mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
                mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
 
-               if ((mcs_user == NO_NSS_SUPPORT) ||
-                   (mcs_resp == NO_NSS_SUPPORT))
-                       SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+               if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+                   (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+                       SET_VHTNSSMCS(mcs_map_result, nss,
+                                     IEEE80211_VHT_MCS_NOT_SUPPORTED);
                else
                        SET_VHTNSSMCS(mcs_map_result, nss,
                                      min(mcs_user, mcs_resp));
        }
 
-       vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
+       vht_cap->supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
 
        tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
-       vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp);
+       vht_cap->supp_mcs.rx_highest = cpu_to_le16(tmp);
 
        /* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
        mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
-       mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map);
+       mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
        mcs_map_result = 0;
 
        for (nss = 1; nss <= 8; nss++) {
                mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
                mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
-               if ((mcs_user == NO_NSS_SUPPORT) ||
-                   (mcs_resp == NO_NSS_SUPPORT))
-                       SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+               if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+                   (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+                       SET_VHTNSSMCS(mcs_map_result, nss,
+                                     IEEE80211_VHT_MCS_NOT_SUPPORTED);
                else
                        SET_VHTNSSMCS(mcs_map_result, nss,
                                      min(mcs_user, mcs_resp));
        }
 
-       vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
+       vht_cap->supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
 
        tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
-       vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp);
+       vht_cap->supp_mcs.tx_highest = cpu_to_le16(tmp);
 
        return;
 }
@@ -192,7 +195,8 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
                       (u8 *)bss_desc->bcn_vht_cap,
                       le16_to_cpu(vht_cap->header.len));
 
-               mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+               mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap,
+                                        bss_desc->bss_band);
                *buffer += sizeof(*vht_cap);
                ret_len += sizeof(*vht_cap);
        }
@@ -299,3 +303,81 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
 
        return;
 }
+
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv)
+{
+       struct mwifiex_bssdescriptor *bss_desc;
+       struct ieee80211_vht_operation *vht_oper;
+
+       bss_desc = &priv->curr_bss_params.bss_descriptor;
+       vht_oper = bss_desc->bcn_vht_oper;
+
+       if (!bss_desc->bcn_vht_cap || !vht_oper)
+               return false;
+
+       if (vht_oper->chan_width == IEEE80211_VHT_CHANWIDTH_USE_HT)
+               return false;
+
+       return true;
+}
+
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+                                u32 pri_chan, u8 chan_bw)
+{
+       u8 center_freq_idx = 0;
+
+       if (band & BAND_AAC) {
+               switch (pri_chan) {
+               case 36:
+               case 40:
+               case 44:
+               case 48:
+                       if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+                               center_freq_idx = 42;
+                       break;
+               case 52:
+               case 56:
+               case 60:
+               case 64:
+                       if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+                               center_freq_idx = 58;
+                       else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+                               center_freq_idx = 50;
+                       break;
+               case 100:
+               case 104:
+               case 108:
+               case 112:
+                       if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+                               center_freq_idx = 106;
+                       break;
+               case 116:
+               case 120:
+               case 124:
+               case 128:
+                       if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+                               center_freq_idx = 122;
+                       else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+                               center_freq_idx = 114;
+                       break;
+               case 132:
+               case 136:
+               case 140:
+               case 144:
+                       if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+                               center_freq_idx = 138;
+                       break;
+               case 149:
+               case 153:
+               case 157:
+               case 161:
+                       if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+                               center_freq_idx = 155;
+                       break;
+               default:
+                       center_freq_idx = 42;
+               }
+       }
+
+       return center_freq_idx;
+}
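
For 80 MHz operation the switch above maps a primary 20 MHz channel to the center-segment index of its 80 MHz block (36-48 -> 42, 52-64 -> 58, ..., 149-161 -> 155). As a quick cross-check only, not driver code, the same 80 MHz cases can be expressed arithmetically, since each block spans 16 channel numbers and its center sits 6 above the block's first channel:

#include <stdio.h>

/*
 * Hypothetical helper: 80 MHz center index for a 5 GHz primary channel.
 * Channels 36..144 are laid out from 36; 149..161 from 149.
 */
static int vht80_center_chan(int pri_chan)
{
	int base = (pri_chan >= 149) ? 149 : 36;

	return pri_chan - ((pri_chan - base) % 16) + 6;
}

int main(void)
{
	const int chans[] = { 36, 44, 64, 100, 128, 144, 157 };

	for (unsigned i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
		printf("primary %3d -> center %d\n",
		       chans[i], vht80_center_chan(chans[i]));
	return 0;	/* expected: 42 42 58 106 122 138 155 */
}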
index 7c2c69b5b3eb47e3af4505516a9b2b9dc7355f2f..0b02cb6cfcb4d25ea496b21c2f737d94a0017e0c 100644 (file)
@@ -40,4 +40,6 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
 int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
                         struct host_cmd_ds_command *cmd, u16 cmd_action,
                         struct mwifiex_11ac_vht_cfg *cfg);
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+                             struct ieee80211_vht_cap *vht_cap, u8 bands);
 #endif /* _MWIFIEX_11AC_H_ */
index 8d683070bdb30df702bd1384281e17f639e755da..e76b0db4e3e6392236489affbfcba48b98468348 100644 (file)
@@ -73,8 +73,8 @@ static int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag)
 {
        u32 enable = flag;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                                    HostCmd_ACT_GEN_SET, DOT11H_I, &enable);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                               HostCmd_ACT_GEN_SET, DOT11H_I, &enable, true);
 }
 
 /* This functions processes TLV buffer for a pending BSS Join command.
index 7db1a89fdd9559fda27bf19a17c114e2b4643d56..d14ead8beca860dba6c984d26df095b104bb1375 100644 (file)
  *
  * RD responder bit to set to clear in the extended capability header.
  */
-void
-mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
-                     struct mwifiex_ie_types_htcap *ht_cap)
+int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
+                         struct ieee80211_ht_cap *ht_cap)
 {
-       uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
+       uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
        struct ieee80211_supported_band *sband =
                                        priv->wdev->wiphy->bands[radio_type];
 
-       ht_cap->ht_cap.ampdu_params_info =
+       if (WARN_ON_ONCE(!sband)) {
+               dev_err(priv->adapter->dev, "Invalid radio type!\n");
+               return -EINVAL;
+       }
+
+       ht_cap->ampdu_params_info =
                (sband->ht_cap.ampdu_factor &
                 IEEE80211_HT_AMPDU_PARM_FACTOR) |
                ((sband->ht_cap.ampdu_density <<
                 IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
                 IEEE80211_HT_AMPDU_PARM_DENSITY);
 
-       memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
+       memcpy((u8 *)&ht_cap->mcs, &sband->ht_cap.mcs,
               sizeof(sband->ht_cap.mcs));
 
        if (priv->bss_mode == NL80211_IFTYPE_STATION ||
@@ -57,13 +61,18 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
             (priv->adapter->sec_chan_offset !=
                                        IEEE80211_HT_PARAM_CHA_SEC_NONE)))
                /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
-               SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
+               SETHT_MCS32(ht_cap->mcs.rx_mask);
 
        /* Clear RD responder bit */
        ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
 
-       ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap);
-       ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+       ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
+       ht_cap->extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+
+       if (ISSUPP_BEAMFORMING(priv->adapter->hw_dot_11n_dev_cap))
+               ht_cap->tx_BF_cap_info = cpu_to_le32(MWIFIEX_DEF_11N_TX_BF_CAP);
+
+       return 0;
 }
 
 /*
@@ -150,28 +159,34 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
        int tid;
        struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
        struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
+       u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
        add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
                        & SSN_MASK);
 
-       tid = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
-               & IEEE80211_ADDBA_PARAM_TID_MASK)
-               >> BLOCKACKPARAM_TID_POS;
-       if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
-               tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid,
-                                               add_ba_rsp->peer_mac_addr);
-               if (tx_ba_tbl) {
-                       dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
-                       tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
-               } else {
-                       dev_err(priv->adapter->dev, "BA stream not created\n");
-               }
-       } else {
+       tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
+              >> BLOCKACKPARAM_TID_POS;
+       if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
                mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr,
                                   TYPE_DELBA_SENT, true);
                if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT)
                        priv->aggr_prio_tbl[tid].ampdu_ap =
                                BA_STREAM_NOT_ALLOWED;
+               return 0;
+       }
+
+       tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
+       if (tx_ba_tbl) {
+               dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+               tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
+               if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
+                   priv->add_ba_param.tx_amsdu &&
+                   (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+                       tx_ba_tbl->amsdu = true;
+               else
+                       tx_ba_tbl->amsdu = false;
+       } else {
+               dev_err(priv->adapter->dev, "BA stream not created\n");
        }
 
        return 0;
@@ -311,7 +326,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
                       (u8 *)bss_desc->bcn_ht_cap,
                       le16_to_cpu(ht_cap->header.len));
 
-               mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+               mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
 
                *buffer += sizeof(struct mwifiex_ie_types_htcap);
                ret_len += sizeof(struct mwifiex_ie_types_htcap);
@@ -527,16 +542,39 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
 int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
 {
        struct host_cmd_ds_11n_addba_req add_ba_req;
+       struct mwifiex_sta_node *sta_ptr;
+       u32 tx_win_size = priv->add_ba_param.tx_win_size;
        static u8 dialog_tok;
        int ret;
+       u16 block_ack_param_set;
 
        dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
 
-       add_ba_req.block_ack_param_set = cpu_to_le16(
-               (u16) ((tid << BLOCKACKPARAM_TID_POS) |
-                        (priv->add_ba_param.
-                         tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
-                        IMMEDIATE_BLOCK_ACK));
+       if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+           ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+           priv->adapter->is_hw_11ac_capable &&
+           memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
+               sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
+               if (!sta_ptr) {
+                       dev_warn(priv->adapter->dev,
+                                "BA setup with unknown TDLS peer %pM!\n",
+                               peer_mac);
+                       return -1;
+               }
+               if (sta_ptr->is_11ac_enabled)
+                       tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+       }
+
+       block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
+                                   tx_win_size << BLOCKACKPARAM_WINSIZE_POS |
+                                   IMMEDIATE_BLOCK_ACK);
+
+       /* enable AMSDU inside AMPDU */
+       if (priv->add_ba_param.tx_amsdu &&
+           (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+               block_ack_param_set |= BLOCKACKPARAM_AMSDU_SUPP_MASK;
+
+       add_ba_req.block_ack_param_set = cpu_to_le16(block_ack_param_set);
        add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
 
        ++dialog_tok;
@@ -548,8 +586,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
        memcpy(&add_ba_req.peer_mac_addr, peer_mac, ETH_ALEN);
 
        /* We don't wait for the response of this command */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_REQ,
-                                    0, 0, &add_ba_req);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_REQ,
+                              0, 0, &add_ba_req, false);
 
        return ret;
 }
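
mwifiex_send_addba() now builds block_ack_param_set in a local variable so the TDLS-specific window size and the new A-MSDU-in-A-MPDU bit can be folded in before the cpu_to_le16() conversion. A stand-alone sketch of the same bit packing, assuming the usual ADDBA parameter-set layout (bit 0 A-MSDU supported, bit 1 immediate block ack, bits 2-5 TID, bits 6-15 buffer size), which is what the BLOCKACKPARAM_* constants in this driver appear to encode:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed ADDBA "Block Ack Parameter Set" layout. */
#define ADDBA_AMSDU_SUPP	0x0001	/* bit 0 */
#define ADDBA_IMMEDIATE_BA	0x0002	/* bit 1 */
#define ADDBA_TID_POS		2	/* bits 2..5 */
#define ADDBA_WINSIZE_POS	6	/* bits 6..15 */

static uint16_t build_addba_param_set(int tid, int tx_win_size, bool tx_amsdu)
{
	uint16_t param = (uint16_t)((tid << ADDBA_TID_POS) |
				    (tx_win_size << ADDBA_WINSIZE_POS) |
				    ADDBA_IMMEDIATE_BA);

	if (tx_amsdu)
		param |= ADDBA_AMSDU_SUPP;

	return param;
}

int main(void)
{
	/* TID 6, 64-frame window, A-MSDU aggregation allowed. */
	uint16_t param = build_addba_param_set(6, 64, true);

	printf("param set: 0x%04x (tid %u, win %u)\n", param,
	       (param >> ADDBA_TID_POS) & 0xf, param >> ADDBA_WINSIZE_POS);
	return 0;
}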
@@ -576,8 +614,8 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
        memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
 
        /* We don't wait for the response of this command */
-       ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA,
-                                    HostCmd_ACT_GEN_SET, 0, &delba);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA,
+                              HostCmd_ACT_GEN_SET, 0, &delba, false);
 
        return ret;
 }
@@ -651,6 +689,7 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
                dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
                        __func__, rx_reo_tbl->tid);
                memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
+               rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
                rx_reo_tbl++;
                count++;
                if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
@@ -706,5 +745,8 @@ void mwifiex_set_ba_params(struct mwifiex_private *priv)
                                                MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
        }
 
+       priv->add_ba_param.tx_amsdu = true;
+       priv->add_ba_param.rx_amsdu = true;
+
        return;
 }
index 375db01442bfd561e08a46a0d47572f5a1d149cd..40b007a00f4bd9e786c24f8f53059c38e29e79a4 100644 (file)
@@ -34,8 +34,8 @@ int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
 int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
                               struct mwifiex_bssdescriptor *bss_desc,
                               u8 **buffer);
-void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
-                          struct mwifiex_ie_types_htcap *);
+int mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
+                         struct ieee80211_ht_cap *);
 int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
                                  u16 action, int *htcap_cfg);
 void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
@@ -64,14 +64,46 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
                                struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
 void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
 
-/*
- * This function checks whether AMPDU is allowed or not for a particular TID.
- */
 static inline u8
-mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
+mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
+                                struct mwifiex_ra_list_tbl *ptr, int tid)
 {
-       return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
-               ? true : false);
+       struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
+
+       if (unlikely(!node))
+               return false;
+
+       return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
+}
+
+/* This function checks whether AMSDU is allowed for BA stream. */
+static inline u8
+mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv,
+                                 struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+       struct mwifiex_tx_ba_stream_tbl *tx_tbl;
+
+       tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra);
+       if (tx_tbl)
+               return tx_tbl->amsdu;
+
+       return false;
+}
+
+/* This function checks whether AMPDU is allowed or not for a particular TID. */
+static inline u8
+mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
+                        struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+       if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+               return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+       } else {
+               if (ptr->tdls_link)
+                       return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+
+               return (priv->aggr_prio_tbl[tid].ampdu_ap !=
+                       BA_STREAM_NOT_ALLOWED) ? true : false;
+       }
 }
 
 /*
@@ -165,4 +197,14 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
 
        return node->is_11n_enabled;
 }
+
+static inline u8
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+{
+       struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
+       if (node)
+               return node->is_11n_enabled;
+
+       return false;
+}
 #endif /* !_MWIFIEX_11N_H_ */
index ada809f576fe56c290a9150728b5b08e358c2d53..0c3571f830b0d70cc609e64d9b0b6fde3cf17209 100644 (file)
 #include "11n.h"
 #include "11n_rxreorder.h"
 
+/* This function will dispatch amsdu packet and forward it to kernel/upper
+ * layer.
+ */
+static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
+                                         struct sk_buff *skb)
+{
+       struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
+       int ret;
+
+       if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
+               struct sk_buff_head list;
+               struct sk_buff *rx_skb;
+
+               __skb_queue_head_init(&list);
+
+               skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
+               skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
+
+               ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
+                                        priv->wdev->iftype, 0, false);
+
+               while (!skb_queue_empty(&list)) {
+                       rx_skb = __skb_dequeue(&list);
+                       ret = mwifiex_recv_packet(priv, rx_skb);
+                       if (ret == -1)
+                               dev_err(priv->adapter->dev,
+                                       "Rx of A-MSDU failed");
+               }
+               return 0;
+       }
+
+       return -1;
+}
+
+/* This function will process the rx packet and forward it to kernel/upper
+ * layer.
+ */
+static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
+{
+       int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
+
+       if (!ret)
+               return 0;
+
+       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+               return mwifiex_handle_uap_rx_forward(priv, payload);
+
+       return mwifiex_process_rx_packet(priv, payload);
+}
+
 /*
  * This function dispatches all packets in the Rx reorder table until the
  * start window.
@@ -35,8 +85,9 @@
  * circular buffer.
  */
 static void
-mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
-                        struct mwifiex_rx_reorder_tbl *tbl, int start_win)
+mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
+                                        struct mwifiex_rx_reorder_tbl *tbl,
+                                        int start_win)
 {
        int pkt_to_send, i;
        void *rx_tmp_ptr;
@@ -54,12 +105,8 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
                        tbl->rx_reorder_ptr[i] = NULL;
                }
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-               if (rx_tmp_ptr) {
-                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
-                               mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
-                       else
-                               mwifiex_process_rx_packet(priv, rx_tmp_ptr);
-               }
+               if (rx_tmp_ptr)
+                       mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -101,11 +148,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                tbl->rx_reorder_ptr[i] = NULL;
                spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
-               if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
-                       mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
-               else
-                       mwifiex_process_rx_packet(priv, rx_tmp_ptr);
+               mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
        }
 
        spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -135,14 +178,15 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
                             struct mwifiex_rx_reorder_tbl *tbl)
 {
        unsigned long flags;
+       int start_win;
 
        if (!tbl)
                return;
 
-       mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) &
-                                           (MAX_TID_VALUE - 1));
+       start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
+       mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
 
-       del_timer(&tbl->timer_context.timer);
+       del_timer_sync(&tbl->timer_context.timer);
 
        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
        list_del(&tbl->list);
@@ -228,17 +272,17 @@ mwifiex_flush_data(unsigned long context)
 {
        struct reorder_tmr_cnxt *ctx =
                (struct reorder_tmr_cnxt *) context;
-       int start_win;
+       int start_win, seq_num;
 
-       start_win = mwifiex_11n_find_last_seq_num(ctx->ptr);
+       seq_num = mwifiex_11n_find_last_seq_num(ctx->ptr);
 
-       if (start_win < 0)
+       if (seq_num < 0)
                return;
 
-       dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win);
-       mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr,
-                                (ctx->ptr->start_win + start_win + 1) &
-                                (MAX_TID_VALUE - 1));
+       dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+       start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
+       mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
+                                                start_win);
 }
 
 /*
@@ -267,7 +311,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
         */
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (tbl) {
-               mwifiex_11n_dispatch_pkt(priv, tbl, seq_num);
+               mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
                return;
        }
        /* if !tbl then create one */
@@ -279,6 +323,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        new_node->tid = tid;
        memcpy(new_node->ta, ta, ETH_ALEN);
        new_node->start_win = seq_num;
+       new_node->init_win = seq_num;
+       new_node->flags = 0;
 
        if (mwifiex_queuing_ra_based(priv)) {
                dev_dbg(priv->adapter->dev,
@@ -290,15 +336,20 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
                                last_seq = node->rx_seq[tid];
                }
        } else {
-               last_seq = priv->rx_seq[tid];
+               node = mwifiex_get_sta_entry(priv, ta);
+               if (node)
+                       last_seq = node->rx_seq[tid];
+               else
+                       last_seq = priv->rx_seq[tid];
        }
 
        if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
-           last_seq >= new_node->start_win)
+           last_seq >= new_node->start_win) {
                new_node->start_win = last_seq + 1;
+               new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
+       }
 
        new_node->win_size = win_size;
-       new_node->flags = 0;
 
        new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
                                        GFP_KERNEL);
@@ -358,10 +409,28 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
                                  *cmd_addba_req)
 {
        struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
+       struct mwifiex_sta_node *sta_ptr;
+       u32 rx_win_size = priv->add_ba_param.rx_win_size;
        u8 tid;
        int win_size;
        uint16_t block_ack_param_set;
 
+       if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+           ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+           priv->adapter->is_hw_11ac_capable &&
+           memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+               sta_ptr = mwifiex_get_sta_entry(priv,
+                                               cmd_addba_req->peer_mac_addr);
+               if (!sta_ptr) {
+                       dev_warn(priv->adapter->dev,
+                                "BA setup with unknown TDLS peer %pM!\n",
+                                cmd_addba_req->peer_mac_addr);
+                       return -1;
+               }
+               if (sta_ptr->is_11ac_enabled)
+                       rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+       }
+
        cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
        cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
 
@@ -376,10 +445,12 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
                >> BLOCKACKPARAM_TID_POS;
        add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
        block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
-       /* We donot support AMSDU inside AMPDU, hence reset the bit */
-       block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
-       block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
-                                            BLOCKACKPARAM_WINSIZE_POS);
+
+       /* If we don't support AMSDU inside AMPDU, reset the bit */
+       if (!priv->add_ba_param.rx_amsdu ||
+           (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
+               block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
+       block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
        add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
        win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
                                        & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
@@ -431,33 +502,46 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
        struct mwifiex_rx_reorder_tbl *tbl;
        int start_win, end_win, win_size;
        u16 pkt_index;
+       bool init_window_shift = false;
 
        tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
        if (!tbl) {
-               if (pkt_type != PKT_TYPE_BAR) {
-                       if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
-                               mwifiex_handle_uap_rx_forward(priv, payload);
-                       else
-                               mwifiex_process_rx_packet(priv, payload);
-               }
+               if (pkt_type != PKT_TYPE_BAR)
+                       mwifiex_11n_dispatch_pkt(priv, payload);
                return 0;
        }
+
+       if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+               mwifiex_11n_dispatch_pkt(priv, payload);
+               return 0;
+       }
+
        start_win = tbl->start_win;
        win_size = tbl->win_size;
        end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
-       del_timer(&tbl->timer_context.timer);
+       if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
+               init_window_shift = true;
+               tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
+       }
        mod_timer(&tbl->timer_context.timer,
                  jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
 
-       /*
-        * If seq_num is less then starting win then ignore and drop the
-        * packet
-        */
        if (tbl->flags & RXREOR_FORCE_NO_DROP) {
                dev_dbg(priv->adapter->dev,
                        "RXREOR_FORCE_NO_DROP when HS is activated\n");
                tbl->flags &= ~RXREOR_FORCE_NO_DROP;
+       } else if (init_window_shift && seq_num < start_win &&
+                  seq_num >= tbl->init_win) {
+               dev_dbg(priv->adapter->dev,
+                       "Sender TID sequence number reset %d->%d for SSN %d\n",
+                       start_win, seq_num, tbl->init_win);
+               tbl->start_win = start_win = seq_num;
+               end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
        } else {
+               /*
+                * If seq_num is less then starting win then ignore and drop
+                * the packet
+                */
                if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
                        if (seq_num >= ((start_win + TWOPOW11) &
                                        (MAX_TID_VALUE - 1)) &&
@@ -485,7 +569,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
                        start_win = (end_win - win_size) + 1;
                else
                        start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
-               mwifiex_11n_dispatch_pkt(priv, tbl, start_win);
+               mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
        }
 
        if (pkt_type != PKT_TYPE_BAR) {
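
The reorder path above works on 12-bit sequence numbers, so the window [start_win, start_win + win_size - 1] wraps modulo MAX_TID_VALUE and each frame is either before the window (dropped, unless the new init-window-shift case applies), inside it (buffered and dispatched in order), or beyond it (the window is slid forward first); the TWOPOW11 test in the hunk is the wrap-aware "is this stale?" half-space check. A small model of that classification, assuming MAX_TID_VALUE is 4096 as in this driver:

#include <stdio.h>

#define SEQ_MODULO	4096	/* assumed MAX_TID_VALUE */

enum win_pos { BEFORE_WINDOW, IN_WINDOW, BEYOND_WINDOW };

/* Distance from start_win to seq, walking forward modulo SEQ_MODULO. */
static int seq_offset(int start_win, int seq)
{
	return (seq - start_win + SEQ_MODULO) % SEQ_MODULO;
}

static enum win_pos classify(int start_win, int win_size, int seq)
{
	int off = seq_offset(start_win, seq);

	if (off < win_size)
		return IN_WINDOW;
	/* Treat the trailing half of the sequence space as stale frames. */
	if (off >= SEQ_MODULO / 2)
		return BEFORE_WINDOW;
	return BEYOND_WINDOW;
}

int main(void)
{
	static const char *names[] = { "before", "in", "beyond" };
	int start_win = 4090, win_size = 64;
	int seqs[] = { 4085, 4095, 3, 60, 200 };

	for (unsigned i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++)
		printf("seq %4d: %s window\n", seqs[i],
		       names[classify(start_win, win_size, seqs[i])]);
	return 0;
}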
@@ -576,16 +660,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
         * Check if we had rejected the ADDBA, if yes then do not create
         * the stream
         */
-       if (le16_to_cpu(add_ba_rsp->status_code) == BA_RESULT_SUCCESS) {
-               win_size = (block_ack_param_set &
-                       IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
-                       >> BLOCKACKPARAM_WINSIZE_POS;
-
-               dev_dbg(priv->adapter->dev,
-                       "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
-                       add_ba_rsp->peer_mac_addr, tid,
-                       add_ba_rsp->ssn, win_size);
-       } else {
+       if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
                dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
                        add_ba_rsp->peer_mac_addr, tid);
 
@@ -593,8 +668,28 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                                                     add_ba_rsp->peer_mac_addr);
                if (tbl)
                        mwifiex_del_rx_reorder_entry(priv, tbl);
+
+               return 0;
        }
 
+       win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
+                   >> BLOCKACKPARAM_WINSIZE_POS;
+
+       tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
+                                            add_ba_rsp->peer_mac_addr);
+       if (tbl) {
+               if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
+                   priv->add_ba_param.rx_amsdu &&
+                   (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
+                       tbl->amsdu = true;
+               else
+                       tbl->amsdu = false;
+       }
+
+       dev_dbg(priv->adapter->dev,
+               "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+               add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
+
        return 0;
 }
 
@@ -615,7 +710,7 @@ void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
        delba.del_ba_param_set |= cpu_to_le16(
                (u16) event->origninator << DELBA_INITIATOR_POS);
        delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
-       mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba);
+       mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
 }
 
 /*
index 4064041ac852737602b8970eff8c40f4070c9117..0fc76e4a60f886c32d3e46cf885cbcc809ee9893 100644 (file)
@@ -42,7 +42,8 @@
 #define BA_SETUP_PACKET_OFFSET         16
 
 enum mwifiex_rxreor_flags {
-       RXREOR_FORCE_NO_DROP    = 1<<0,
+       RXREOR_FORCE_NO_DROP            = 1<<0,
+       RXREOR_INIT_WINDOW_SHIFT        = 1<<1,
 };
 
 static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
index a42a506fd32b6b0a211607c3ae0e224f6e6c8106..2aa208ffbe233eefc06f9cdb24c64db9c56f3188 100644 (file)
@@ -41,6 +41,7 @@ mwifiex-y += uap_txrx.o
 mwifiex-y += cfg80211.o
 mwifiex-y += ethtool.o
 mwifiex-y += 11h.o
+mwifiex-y += tdls.o
 mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
 obj-$(CONFIG_MWIFIEX) += mwifiex.o
 
index 3d64613ebb2979426d9fd1652b9fc17325cf8676..b9242c3dca435ee9a4d5123fd57ad0733a96a24d 100644 (file)
@@ -131,7 +131,7 @@ info
        hs_configured = <0/1, host sleep not configured/configured>
        hs_activated = <0/1, extended host sleep not activated/activated>
        num_tx_timeout = <number of Tx timeout>
-       num_cmd_timeout = <number of timeout commands>
+       is_cmd_timedout = <0/1 command timeout not occurred/occurred>
        timeout_cmd_id = <command id of the last timeout command>
        timeout_cmd_act = <command action of the last timeout command>
        last_cmd_id = <command id of the last several commands sent to device>
index 8bfc07cd330e744f8fb68065b551181aa5857688..21ee27ab7b745261f9a398bcaacc92b24ce7c8f7 100644 (file)
@@ -252,9 +252,9 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
 
        if (mask != priv->mgmt_frame_mask) {
                priv->mgmt_frame_mask = mask;
-               mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
-                                      HostCmd_ACT_GEN_SET, 0,
-                                      &priv->mgmt_frame_mask);
+               mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
+                                HostCmd_ACT_GEN_SET, 0,
+                                &priv->mgmt_frame_mask, false);
                wiphy_dbg(wiphy, "info: mgmt frame registered\n");
        }
 }
@@ -515,8 +515,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
-       if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
-                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+                            HostCmd_ACT_GEN_SET, 0, NULL, false)) {
                wiphy_err(wiphy, "11D: setting domain info in FW\n");
                return -1;
        }
@@ -580,9 +580,9 @@ mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr)
            frag_thr > MWIFIEX_FRAG_MAX_VALUE)
                frag_thr = MWIFIEX_FRAG_MAX_VALUE;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                                    HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
-                                    &frag_thr);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                               HostCmd_ACT_GEN_SET, FRAG_THRESH_I,
+                               &frag_thr, true);
 }
 
 /*
@@ -597,9 +597,9 @@ mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr)
        if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE)
                rts_thr = MWIFIEX_RTS_MAX_VALUE;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                                   HostCmd_ACT_GEN_SET, RTS_THRESH_I,
-                                   &rts_thr);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                               HostCmd_ACT_GEN_SET, RTS_THRESH_I,
+                               &rts_thr, true);
 }
 
 /*
@@ -637,20 +637,19 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 
                        bss_started = priv->bss_started;
 
-                       ret = mwifiex_send_cmd_sync(priv,
-                                                   HostCmd_CMD_UAP_BSS_STOP,
-                                                   HostCmd_ACT_GEN_SET, 0,
-                                                   NULL);
+                       ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              NULL, true);
                        if (ret) {
                                wiphy_err(wiphy, "Failed to stop the BSS\n");
                                kfree(bss_cfg);
                                return ret;
                        }
 
-                       ret = mwifiex_send_cmd_async(priv,
-                                                    HostCmd_CMD_UAP_SYS_CONFIG,
-                                                    HostCmd_ACT_GEN_SET,
-                                                    UAP_BSS_PARAMS_I, bss_cfg);
+                       ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+                                              HostCmd_ACT_GEN_SET,
+                                              UAP_BSS_PARAMS_I, bss_cfg,
+                                              false);
 
                        kfree(bss_cfg);
 
@@ -662,10 +661,9 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
                        if (!bss_started)
                                break;
 
-                       ret = mwifiex_send_cmd_async(priv,
-                                                    HostCmd_CMD_UAP_BSS_START,
-                                                    HostCmd_ACT_GEN_SET, 0,
-                                                    NULL);
+                       ret = mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              NULL, false);
                        if (ret) {
                                wiphy_err(wiphy, "Failed to start BSS\n");
                                return ret;
@@ -700,8 +698,8 @@ mwifiex_cfg80211_deinit_p2p(struct mwifiex_private *priv)
        if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA)
                mwifiex_set_bss_role(priv, MWIFIEX_BSS_ROLE_STA);
 
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
-                                 HostCmd_ACT_GEN_SET, 0, &mode))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+                            HostCmd_ACT_GEN_SET, 0, &mode, true))
                return -1;
 
        return 0;
@@ -721,13 +719,13 @@ mwifiex_cfg80211_init_p2p_client(struct mwifiex_private *priv)
                return -1;
 
        mode = P2P_MODE_DEVICE;
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
-                                 HostCmd_ACT_GEN_SET, 0, &mode))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+                            HostCmd_ACT_GEN_SET, 0, &mode, true))
                return -1;
 
        mode = P2P_MODE_CLIENT;
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
-                                 HostCmd_ACT_GEN_SET, 0, &mode))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+                            HostCmd_ACT_GEN_SET, 0, &mode, true))
                return -1;
 
        return 0;
@@ -747,13 +745,13 @@ mwifiex_cfg80211_init_p2p_go(struct mwifiex_private *priv)
                return -1;
 
        mode = P2P_MODE_DEVICE;
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
-                                 HostCmd_ACT_GEN_SET, 0, &mode))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+                            HostCmd_ACT_GEN_SET, 0, &mode, true))
                return -1;
 
        mode = P2P_MODE_GO;
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_P2P_MODE_CFG,
-                                 HostCmd_ACT_GEN_SET, 0, &mode))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_P2P_MODE_CFG,
+                            HostCmd_ACT_GEN_SET, 0, &mode, true))
                return -1;
 
        if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP)
@@ -853,8 +851,8 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
 
        priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM;
 
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
-                                   HostCmd_ACT_GEN_SET, 0, NULL);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+                              HostCmd_ACT_GEN_SET, 0, NULL, true);
 
        return ret;
 }
@@ -942,8 +940,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
                        STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
 
        /* Get signal information from the firmware */
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
-                                 HostCmd_ACT_GEN_GET, 0, NULL)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+                            HostCmd_ACT_GEN_GET, 0, NULL, true)) {
                dev_err(priv->adapter->dev, "failed to get signal information\n");
                return -EFAULT;
        }
@@ -954,9 +952,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
        }
 
        /* Get DTIM period information from firmware */
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                             HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
-                             &priv->dtim_period);
+       mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                        HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
+                        &priv->dtim_period, true);
 
        mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
 
@@ -1160,9 +1158,10 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
        u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
        enum ieee80211_band band;
+       struct mwifiex_adapter *adapter = priv->adapter;
 
        if (!priv->media_connected) {
-               dev_err(priv->adapter->dev,
+               dev_err(adapter->dev,
                        "Can not set Tx data rate in disconnected state\n");
                return -EINVAL;
        }
@@ -1183,11 +1182,18 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
 
        /* Fill HT MCS rates */
        bitmap_rates[2] = mask->control[band].ht_mcs[0];
-       if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+       if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
                bitmap_rates[2] |= mask->control[band].ht_mcs[1] << 8;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
-                                    HostCmd_ACT_GEN_SET, 0, bitmap_rates);
+       /* Fill VHT MCS rates */
+       if (adapter->fw_api_ver == MWIFIEX_FW_V15) {
+               bitmap_rates[10] = mask->control[band].vht_mcs[0];
+               if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+                       bitmap_rates[11] = mask->control[band].vht_mcs[1];
+       }
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
+                               HostCmd_ACT_GEN_SET, 0, bitmap_rates, true);
 }
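The VHT branch above assumes the TX_RATE_CFG bitmap layout used by this function: index 2 carries the HT MCS mask for stream 1 (with stream 2 in the high byte on 2x2 hardware), and on MWIFIEX_FW_V15 firmware indices 10 and 11 carry the VHT MCS masks for NSS1 and NSS2. A hedged sketch of the same guard pulled into a helper; mwifiex_fill_vht_rate_bitmap is a hypothetical name, not in the source.

        /* hypothetical helper; mirrors the logic added in the hunk above */
        static void mwifiex_fill_vht_rate_bitmap(struct mwifiex_adapter *adapter,
                                                 const struct cfg80211_bitrate_mask *mask,
                                                 enum ieee80211_band band,
                                                 u16 *bitmap_rates)
        {
                if (adapter->fw_api_ver != MWIFIEX_FW_V15)
                        return;         /* pre-v15 firmware has no VHT rate words */

                bitmap_rates[10] = mask->control[band].vht_mcs[0];
                if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
                        bitmap_rates[11] = mask->control[band].vht_mcs[1];
        }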
 
 /*
@@ -1216,14 +1222,14 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
                subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
                subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
                subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
-               return mwifiex_send_cmd_sync(priv,
-                                            HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
-                                            0, 0, &subsc_evt);
+               return mwifiex_send_cmd(priv,
+                                       HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+                                       0, 0, &subsc_evt, true);
        } else {
                subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
-               return mwifiex_send_cmd_sync(priv,
-                                            HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
-                                            0, 0, &subsc_evt);
+               return mwifiex_send_cmd(priv,
+                                       HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+                                       0, 0, &subsc_evt, true);
        }
 
        return 0;
@@ -1276,10 +1282,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
        if (!mac || is_broadcast_ether_addr(mac)) {
                wiphy_dbg(wiphy, "%s: NULL/broadcast mac address\n", __func__);
                list_for_each_entry(sta_node, &priv->sta_list, list) {
-                       if (mwifiex_send_cmd_sync(priv,
-                                                 HostCmd_CMD_UAP_STA_DEAUTH,
-                                                 HostCmd_ACT_GEN_SET, 0,
-                                                 sta_node->mac_addr))
+                       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+                                            HostCmd_ACT_GEN_SET, 0,
+                                            sta_node->mac_addr, true))
                                return -1;
                        mwifiex_uap_del_sta_data(priv, sta_node);
                }
@@ -1289,10 +1294,9 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
                sta_node = mwifiex_get_sta_entry(priv, mac);
                spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
                if (sta_node) {
-                       if (mwifiex_send_cmd_sync(priv,
-                                                 HostCmd_CMD_UAP_STA_DEAUTH,
-                                                 HostCmd_ACT_GEN_SET, 0,
-                                                 sta_node->mac_addr))
+                       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
+                                            HostCmd_ACT_GEN_SET, 0,
+                                            sta_node->mac_addr, true))
                                return -1;
                        mwifiex_uap_del_sta_data(priv, sta_node);
                }
@@ -1328,13 +1332,40 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
                        tx_ant = RF_ANTENNA_AUTO;
                        rx_ant = RF_ANTENNA_AUTO;
                }
+       } else {
+               struct ieee80211_sta_ht_cap *ht_info;
+               int rx_mcs_supp;
+               enum ieee80211_band band;
+
+               if (tx_ant == 0x1 && rx_ant == 0x1) {
+                       adapter->user_dev_mcs_support = HT_STREAM_1X1;
+                       if (adapter->is_hw_11ac_capable)
+                               adapter->usr_dot_11ac_mcs_support =
+                                               MWIFIEX_11AC_MCS_MAP_1X1;
+               } else {
+                       adapter->user_dev_mcs_support = HT_STREAM_2X2;
+                       if (adapter->is_hw_11ac_capable)
+                               adapter->usr_dot_11ac_mcs_support =
+                                               MWIFIEX_11AC_MCS_MAP_2X2;
+               }
+
+               for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+                       if (!adapter->wiphy->bands[band])
+                               continue;
+
+                       ht_info = &adapter->wiphy->bands[band]->ht_cap;
+                       rx_mcs_supp =
+                               GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+                       memset(&ht_info->mcs, 0, adapter->number_of_antenna);
+                       memset(&ht_info->mcs, 0xff, rx_mcs_supp);
+               }
        }
 
        ant_cfg.tx_ant = tx_ant;
        ant_cfg.rx_ant = rx_ant;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA,
-                                    HostCmd_ACT_GEN_SET, 0, &ant_cfg);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA,
+                               HostCmd_ACT_GEN_SET, 0, &ant_cfg, true);
 }
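The new else branch keys the advertised HT MCS set off the requested antenna configuration: a 1x1 selection drops user_dev_mcs_support (and, on 11ac hardware, usr_dot_11ac_mcs_support) to single-stream values and then rewrites each band's MCS set so only the supported streams advertise rates. Below is a hedged sketch of that per-band rewrite as a standalone helper, assuming GET_RXMCSSUPP() returns the number of valid rx_mask bytes (1 for HT_STREAM_1X1, 2 for HT_STREAM_2X2); the helper name is hypothetical and it clears the whole mcs struct rather than number_of_antenna bytes as the in-tree loop does.

        /* hypothetical helper mirroring the loop in the hunk above */
        static void mwifiex_update_user_mcs_mask(struct mwifiex_adapter *adapter)
        {
                struct ieee80211_sta_ht_cap *ht_info;
                int rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
                enum ieee80211_band band;

                for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
                        if (!adapter->wiphy->bands[band])
                                continue;

                        ht_info = &adapter->wiphy->bands[band]->ht_cap;
                        /* advertise MCS 0-7 for 1x1, MCS 0-15 for 2x2 */
                        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
                        memset(ht_info->mcs.rx_mask, 0xff, rx_mcs_supp);
                }
        }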
 
 /* cfg80211 operation handler for stop ap.
@@ -1349,8 +1380,8 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
 
        priv->ap_11n_enabled = 0;
 
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
-                                 HostCmd_ACT_GEN_SET, 0, NULL)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+                            HostCmd_ACT_GEN_SET, 0, NULL, true)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
                return -1;
        }
@@ -1416,9 +1447,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
                if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
                        config_bands |= BAND_GN;
-
-               if (params->chandef.width > NL80211_CHAN_WIDTH_40)
-                       config_bands |= BAND_GAC;
        } else {
                bss_cfg->band_cfg = BAND_CONFIG_A;
                config_bands = BAND_A;
@@ -1464,16 +1492,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
                bss_cfg->ps_sta_ao_timer = 10 * params->inactivity_timeout;
        }
 
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
-                                 HostCmd_ACT_GEN_SET, 0, NULL)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+                            HostCmd_ACT_GEN_SET, 0, NULL, true)) {
                wiphy_err(wiphy, "Failed to stop the BSS\n");
                kfree(bss_cfg);
                return -1;
        }
 
-       if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
-                                  HostCmd_ACT_GEN_SET,
-                                  UAP_BSS_PARAMS_I, bss_cfg)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+                            HostCmd_ACT_GEN_SET,
+                            UAP_BSS_PARAMS_I, bss_cfg, false)) {
                wiphy_err(wiphy, "Failed to set the SSID\n");
                kfree(bss_cfg);
                return -1;
@@ -1481,8 +1509,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
        kfree(bss_cfg);
 
-       if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_BSS_START,
-                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
+                            HostCmd_ACT_GEN_SET, 0, NULL, false)) {
                wiphy_err(wiphy, "Failed to start the BSS\n");
                return -1;
        }
@@ -1492,9 +1520,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
        else
                priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
 
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
-                                 HostCmd_ACT_GEN_SET, 0,
-                                 &priv->curr_pkt_filter))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+                            HostCmd_ACT_GEN_SET, 0,
+                            &priv->curr_pkt_filter, true))
                return -1;
 
        return 0;
@@ -1583,8 +1611,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
  * the function notifies the CFG802.11 subsystem of the new BSS connection.
  */
 static int
-mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
-                      u8 *bssid, int mode, struct ieee80211_channel *channel,
+mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+                      const u8 *ssid, const u8 *bssid, int mode,
+                      struct ieee80211_channel *channel,
                       struct cfg80211_connect_params *sme, bool privacy)
 {
        struct cfg80211_ssid req_ssid;
@@ -1881,7 +1910,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
                                     params->privacy);
 done:
        if (!ret) {
-               cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+               cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
+                                    params->chandef.chan, GFP_KERNEL);
                dev_dbg(priv->adapter->dev,
                        "info: joined/created adhoc network with bssid"
                        " %pM successfully\n", priv->cfg_bssid);
@@ -2070,10 +2100,10 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
        else
                ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
 
-       if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap))
-               ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
+       if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
+               ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
        else
-               ht_info->cap &= ~(3 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+               ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
 
        if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap))
                ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
@@ -2098,8 +2128,8 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
        ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
        ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
 
-       rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support);
-       /* Set MCS for 1x1 */
+       rx_mcs_supp = GET_RXMCSSUPP(adapter->user_dev_mcs_support);
+       /* Set MCS for 1x1/2x2 */
        memset(mcs, 0xff, rx_mcs_supp);
        /* Clear all the other values */
        memset(&mcs[rx_mcs_supp], 0,
@@ -2460,9 +2490,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                                   MWIFIEX_CRITERIA_UNICAST |
                                   MWIFIEX_CRITERIA_MULTICAST;
 
-       ret =  mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG,
-                                    HostCmd_ACT_GEN_SET, 0,
-                                    &mef_cfg);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
+                              HostCmd_ACT_GEN_SET, 0, &mef_cfg, true);
 
        kfree(mef_entry);
        return ret;
@@ -2574,9 +2603,9 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
        if (!coalesce) {
                dev_dbg(adapter->dev,
                        "Disable coalesce and reset all previous rules\n");
-               return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
-                                            HostCmd_ACT_GEN_SET, 0,
-                                            &coalesce_cfg);
+               return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
+                                       HostCmd_ACT_GEN_SET, 0,
+                                       &coalesce_cfg, true);
        }
 
        coalesce_cfg.num_of_rules = coalesce->n_rules;
@@ -2591,8 +2620,172 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
                }
        }
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
-                                    HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
+                               HostCmd_ACT_GEN_SET, 0, &coalesce_cfg, true);
+}
+
+/* cfg80211 ops handler for tdls_mgmt.
+ * Function prepares TDLS action frame packets and forwards them to FW
+ */
+static int
+mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+                          u8 *peer, u8 action_code, u8 dialog_token,
+                          u16 status_code, u32 peer_capability,
+                          const u8 *extra_ies, size_t extra_ies_len)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       int ret;
+
+       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+               return -ENOTSUPP;
+
+       /* make sure we are in station mode and connected */
+       if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+               return -ENOTSUPP;
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+               dev_dbg(priv->adapter->dev,
+                       "Send TDLS Setup Request to %pM status_code=%d\n", peer,
+                        status_code);
+               ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  extra_ies, extra_ies_len);
+               break;
+       case WLAN_TDLS_SETUP_RESPONSE:
+               dev_dbg(priv->adapter->dev,
+                       "Send TDLS Setup Response to %pM status_code=%d\n",
+                       peer, status_code);
+               ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  extra_ies, extra_ies_len);
+               break;
+       case WLAN_TDLS_SETUP_CONFIRM:
+               dev_dbg(priv->adapter->dev,
+                       "Send TDLS Confirm to %pM status_code=%d\n", peer,
+                       status_code);
+               ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  extra_ies, extra_ies_len);
+               break;
+       case WLAN_TDLS_TEARDOWN:
+               dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
+                       peer);
+               ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  extra_ies, extra_ies_len);
+               break;
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               dev_dbg(priv->adapter->dev,
+                       "Send TDLS Discovery Request to %pM\n", peer);
+               ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  extra_ies, extra_ies_len);
+               break;
+       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+               dev_dbg(priv->adapter->dev,
+                       "Send TDLS Discovery Response to %pM\n", peer);
+               ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  extra_ies, extra_ies_len);
+               break;
+       default:
+               dev_warn(priv->adapter->dev,
+                        "Unknown TDLS mgmt/action frame %pM\n", peer);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
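All five data-frame cases above pass identical arguments to mwifiex_send_tdls_data_frame() and differ only in the debug message, so the switch could be collapsed with case fall-through; the sketch below is one possible simplification, not what this patch does.

        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_RESPONSE:
        case WLAN_TDLS_SETUP_CONFIRM:
        case WLAN_TDLS_TEARDOWN:
        case WLAN_TDLS_DISCOVERY_REQUEST:
                dev_dbg(priv->adapter->dev,
                        "Send TDLS action %d to %pM status_code=%d\n",
                        action_code, peer, status_code);
                ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
                                                   extra_ies, extra_ies_len);
                break;
        case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
                dev_dbg(priv->adapter->dev,
                        "Send TDLS Discovery Response to %pM\n", peer);
                ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
                                                     dialog_token, status_code,
                                                     extra_ies, extra_ies_len);
                break;
        default:
                dev_warn(priv->adapter->dev,
                         "Unknown TDLS mgmt/action frame %pM\n", peer);
                ret = -EINVAL;
                break;
        }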
+
+static int
+mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+                          u8 *peer, enum nl80211_tdls_operation action)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+           !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+               return -ENOTSUPP;
+
+       /* make sure we are in station mode and connected */
+       if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+               return -ENOTSUPP;
+
+       dev_dbg(priv->adapter->dev,
+               "TDLS peer=%pM, oper=%d\n", peer, action);
+
+       switch (action) {
+       case NL80211_TDLS_ENABLE_LINK:
+               action = MWIFIEX_TDLS_ENABLE_LINK;
+               break;
+       case NL80211_TDLS_DISABLE_LINK:
+               action = MWIFIEX_TDLS_DISABLE_LINK;
+               break;
+       case NL80211_TDLS_TEARDOWN:
+               /* shouldn't happen! */
+               dev_warn(priv->adapter->dev,
+                        "tdls_oper: teardown from driver not supported\n");
+               return -EINVAL;
+       case NL80211_TDLS_SETUP:
+               /* shouldn't happen! */
+               dev_warn(priv->adapter->dev,
+                        "tdls_oper: setup from driver not supported\n");
+               return -EINVAL;
+       case NL80211_TDLS_DISCOVERY_REQ:
+               /* shouldn't happen! */
+               dev_warn(priv->adapter->dev,
+                        "tdls_oper: discovery from driver not supported\n");
+               return -EINVAL;
+       default:
+               dev_err(priv->adapter->dev,
+                       "tdls_oper: operation not supported\n");
+               return -ENOTSUPP;
+       }
+
+       return mwifiex_tdls_oper(priv, peer, action);
+}
+
+static int
+mwifiex_cfg80211_add_station(struct wiphy *wiphy,
+                            struct net_device *dev,
+                            u8 *mac, struct station_parameters *params)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+               return -ENOTSUPP;
+
+       /* make sure we are in station mode and connected */
+       if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+               return -ENOTSUPP;
+
+       return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
+}
+
+static int
+mwifiex_cfg80211_change_station(struct wiphy *wiphy,
+                               struct net_device *dev,
+                               u8 *mac, struct station_parameters *params)
+{
+       int ret;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       /* we support change_station handler only for TDLS peers*/
+       if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+               return -ENOTSUPP;
+
+       /* make sure we are in station mode and connected */
+       if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+               return -ENOTSUPP;
+
+       priv->sta_params = params;
+
+       ret = mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CONFIG_LINK);
+       priv->sta_params = NULL;
+
+       return ret;
 }
 
 /* station cfg80211 operations */
@@ -2630,6 +2823,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .set_wakeup = mwifiex_cfg80211_set_wakeup,
 #endif
        .set_coalesce = mwifiex_cfg80211_set_coalesce,
+       .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
+       .tdls_oper = mwifiex_cfg80211_tdls_oper,
+       .add_station = mwifiex_cfg80211_add_station,
+       .change_station = mwifiex_cfg80211_change_station,
 };
 
 #ifdef CONFIG_PM
@@ -2715,6 +2912,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
                        WIPHY_FLAG_AP_UAPSD |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+       if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+               wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+                               WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
        wiphy->regulatory_flags |=
                        REGULATORY_CUSTOM_REG |
                        REGULATORY_STRICT_REG;
@@ -2736,7 +2938,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 
        wiphy->features |= NL80211_FEATURE_HT_IBSS |
                           NL80211_FEATURE_INACTIVITY_TIMER |
-                          NL80211_FEATURE_LOW_PRIORITY_SCAN;
+                          NL80211_FEATURE_LOW_PRIORITY_SCAN |
+                          NL80211_FEATURE_NEED_OBSS_SCAN;
 
        /* Reserve space for mwifiex specific private data for BSS */
        wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -2767,17 +2970,17 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                                   country_code);
        }
 
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                             HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr);
+       mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                        HostCmd_ACT_GEN_GET, FRAG_THRESH_I, &thr, true);
        wiphy->frag_threshold = thr;
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                             HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr);
+       mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                        HostCmd_ACT_GEN_GET, RTS_THRESH_I, &thr, true);
        wiphy->rts_threshold = thr;
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                             HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry);
+       mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                        HostCmd_ACT_GEN_GET, SHORT_RETRY_LIM_I, &retry, true);
        wiphy->retry_short = (u8) retry;
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                             HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry);
+       mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                        HostCmd_ACT_GEN_GET, LONG_RETRY_LIM_I, &retry, true);
        wiphy->retry_long = (u8) retry;
 
        adapter->wiphy = wiphy;
index 9eefacbc844bfab2eae14777c70719db377b3f7d..0ddec3d4b059cbd7d03221578035d5b1721db9b2 100644 (file)
@@ -71,6 +71,95 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
 
 static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
 
+/* For every mcs_rate line, the first 8 entries are for stream 1x1,
+ * and all 16 entries are for stream 2x2.
+ */
+static const u16 mcs_rate[4][16] = {
+       /* LGI 40M */
+       { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+         0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+       /* SGI 40M */
+       { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+         0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+       /* LGI 20M */
+       { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+         0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+       /* SGI 20M */
+       { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+         0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+};
+
+/* AC rates */
+static const u16 ac_mcs_rate_nss1[8][10] = {
+       /* LG 160M */
+       { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+         0x492, 0x57C, 0x618 },
+
+       /* SG 160M */
+       { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+         0x514, 0x618, 0x6C6 },
+
+       /* LG 80M */
+       { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
+         0x249, 0x2BE, 0x30C },
+
+       /* SG 80M */
+       { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
+         0x28A, 0x30C, 0x363 },
+
+       /* LG 40M */
+       { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
+         0x10E, 0x144, 0x168 },
+
+       /* SG 40M */
+       { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
+         0x12C, 0x168, 0x190 },
+
+       /* LG 20M */
+       { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
+
+       /* SG 20M */
+       { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
+};
+
+/* NSS2 note: the values in the table are twice the actual rate */
+static const u16 ac_mcs_rate_nss2[8][10] = {
+       /* LG 160M */
+       { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
+         0x924, 0xAF8, 0xC30 },
+
+       /* SG 160M */
+       { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
+         0xA28, 0xC30, 0xD8B },
+
+       /* LG 80M */
+       { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+         0x492, 0x57C, 0x618 },
+
+       /* SG 80M */
+       { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+         0x514, 0x618, 0x6C6 },
+
+       /* LG 40M */
+       { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
+         0x21C, 0x288, 0x2D0 },
+
+       /* SG 40M */
+       { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
+         0x258, 0x2D0, 0x320 },
+
+       /* LG 20M */
+       { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
+         0x138, 0x00 },
+
+       /* SG 20M */
+       { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
+         0x15B, 0x00 },
+};
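For orientation, the table entries appear to be line rates in units of 0.5 Mbps, an inference from the values rather than something stated in the source: mcs_rate LGI 20M MCS0 is 0x0d = 13 (6.5 Mbps) and SGI 40M MCS15 is 0x258 = 600 (300 Mbps). A standalone check of that reading:

        /* userspace sanity check of the 0.5 Mbps-per-unit assumption */
        #include <stdio.h>

        int main(void)
        {
                const unsigned short lgi20_mcs0 = 0x0d;   /* mcs_rate[2][0]  */
                const unsigned short sgi40_mcs15 = 0x258; /* mcs_rate[1][15] */

                printf("MCS0  LGI 20MHz: %.1f Mbps\n", lgi20_mcs0 * 0.5);  /* 6.5   */
                printf("MCS15 SGI 40MHz: %.1f Mbps\n", sgi40_mcs15 * 0.5); /* 300.0 */
                return 0;
        }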
+
 struct region_code_mapping {
        u8 code;
        u8 region[IEEE80211_COUNTRY_STRING_LEN];
@@ -109,95 +198,6 @@ u8 *mwifiex_11d_code_2_region(u8 code)
 u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
                                   u8 index, u8 ht_info)
 {
-       /*
-        * For every mcs_rate line, the first 8 bytes are for stream 1x1,
-        * and all 16 bytes are for stream 2x2.
-        */
-       u16  mcs_rate[4][16] = {
-               /* LGI 40M */
-               { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
-                 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
-               /* SGI 40M */
-               { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
-                 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
-               /* LGI 20M */
-               { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
-                 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
-               /* SGI 20M */
-               { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
-                 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
-       };
-       /* AC rates */
-       u16 ac_mcs_rate_nss1[8][10] = {
-               /* LG 160M */
-               { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
-                 0x492, 0x57C, 0x618 },
-
-               /* SG 160M */
-               { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
-                 0x514, 0x618, 0x6C6 },
-
-               /* LG 80M */
-               { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
-                 0x249, 0x2BE, 0x30C },
-
-               /* SG 80M */
-               { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
-                 0x28A, 0x30C, 0x363 },
-
-               /* LG 40M */
-               { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
-                 0x10E, 0x144, 0x168 },
-
-               /* SG 40M */
-               { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
-                 0x12C, 0x168, 0x190 },
-
-               /* LG 20M */
-               { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
-
-               /* SG 20M */
-               { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
-       };
-       /* NSS2 note: the value in the table is 2 multiplier of the actual
-        * rate
-        */
-       u16 ac_mcs_rate_nss2[8][10] = {
-               /* LG 160M */
-               { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
-                 0x924, 0xAF8, 0xC30 },
-
-               /* SG 160M */
-               { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
-                 0xA28, 0xC30, 0xD8B },
-
-               /* LG 80M */
-               { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
-                 0x492, 0x57C, 0x618 },
-
-               /* SG 80M */
-               { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
-                 0x514, 0x618, 0x6C6 },
-
-               /* LG 40M */
-               { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
-                 0x21C, 0x288, 0x2D0 },
-
-               /* SG 40M */
-               { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
-                 0x258, 0x2D0, 0x320 },
-
-               /* LG 20M */
-               { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
-                 0x138, 0x00 },
-
-               /* SG 20M */
-               { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
-                 0x15B, 0x00 },
-       };
        u32 rate = 0;
        u8 mcs_index = 0;
        u8 bw = 0;
@@ -252,28 +252,8 @@ u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
 u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
                               u8 index, u8 ht_info)
 {
-       /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
-        * and all 16 bytes are for stream 2x2.
-        */
-       u16  mcs_rate[4][16] = {
-               /* LGI 40M */
-               { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
-                 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
-               /* SGI 40M */
-               { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
-                 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
-               /* LGI 20M */
-               { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
-                 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
-               /* SGI 20M */
-               { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
-                 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
-       };
        u32 mcs_num_supp =
-               (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
+               (priv->adapter->user_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
        u32 rate;
 
        if (priv->adapter->is_hw_11ac_capable)
@@ -458,7 +438,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
                        break;
                case BAND_G:
                case BAND_G | BAND_GN:
-               case BAND_G | BAND_GN | BAND_GAC:
                        dev_dbg(adapter->dev, "info: infra band=%d "
                                "supported_rates_g\n", adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -469,10 +448,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
                case BAND_A | BAND_B:
                case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
                case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
-               case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
-                    BAND_AAC | BAND_GAC:
                case BAND_B | BAND_G | BAND_GN:
-               case BAND_B | BAND_G | BAND_GN | BAND_GAC:
                        dev_dbg(adapter->dev, "info: infra band=%d "
                                "supported_rates_bg\n", adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -496,7 +472,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
                                               sizeof(supported_rates_a));
                        break;
                case BAND_GN:
-               case BAND_GN | BAND_GAC:
                        dev_dbg(adapter->dev, "info: infra band=%d "
                                "supported_rates_n\n", adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_n,
index 1ddc8b2e3722d5d9a9e72fad2422296b1754829f..1062c918a7bffb19cf93c1aba0daa4490856ba65 100644 (file)
 static void
 mwifiex_init_cmd_node(struct mwifiex_private *priv,
                      struct cmd_ctrl_node *cmd_node,
-                     u32 cmd_oid, void *data_buf)
+                     u32 cmd_oid, void *data_buf, bool sync)
 {
        cmd_node->priv = priv;
        cmd_node->cmd_oid = cmd_oid;
-       if (priv->adapter->cmd_wait_q_required) {
-               cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
-               priv->adapter->cmd_wait_q_required = false;
+       if (sync) {
+               cmd_node->wait_q_enabled = true;
                cmd_node->cmd_wait_q_woken = false;
                cmd_node->condition = &cmd_node->cmd_wait_q_woken;
        }
@@ -166,8 +165,10 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                dev_err(adapter->dev,
                        "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
                        cmd_code);
-               mwifiex_complete_cmd(adapter, cmd_node);
+               if (cmd_node->wait_q_enabled)
+                       mwifiex_complete_cmd(adapter, cmd_node);
                mwifiex_recycle_cmd_node(adapter, cmd_node);
+               queue_work(adapter->workqueue, &adapter->main_work);
                return -1;
        }
 
@@ -276,11 +277,11 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
 
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
+       adapter->seq_num++;
        sleep_cfm_buf->seq_num =
                cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
                                        (adapter->seq_num, priv->bss_num,
                                         priv->bss_type)));
-       adapter->seq_num++;
 
        if (adapter->iface_type == MWIFIEX_USB) {
                sleep_cfm_tmp =
@@ -480,28 +481,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
 }
 
 /*
- * This function is used to send synchronous command to the firmware.
- *
- * it allocates a wait queue for the command and wait for the command
- * response.
- */
-int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
-                         u16 cmd_action, u32 cmd_oid, void *data_buf)
-{
-       int ret = 0;
-       struct mwifiex_adapter *adapter = priv->adapter;
-
-       adapter->cmd_wait_q_required = true;
-
-       ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
-                                    data_buf);
-
-       return ret;
-}
-
-
-/*
- * This function prepares a command and asynchronously send it to the firmware.
+ * This function prepares a command and send it to the firmware.
  *
  * Preparation includes -
  *      - Sanity tests to make sure the card is still present or the FW
@@ -511,8 +491,8 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
  *      - Fill up the non-default parameters and buffer pointers
  *      - Add the command to pending queue
  */
-int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
-                          u16 cmd_action, u32 cmd_oid, void *data_buf)
+int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
+                    u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync)
 {
        int ret;
        struct mwifiex_adapter *adapter = priv->adapter;
@@ -529,11 +509,21 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
                return -1;
        }
 
+       if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+               dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+               return -1;
+       }
+
        if (adapter->surprise_removed) {
                dev_err(adapter->dev, "PREP_CMD: card is removed\n");
                return -1;
        }
 
+       if (adapter->is_cmd_timedout) {
+               dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+               return -1;
+       }
+
        if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
                if (cmd_no != HostCmd_CMD_FUNC_INIT) {
                        dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
@@ -550,7 +540,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
        }
 
        /* Initialize the command node */
-       mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf);
+       mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
 
        if (!cmd_node->cmd_skb) {
                dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
@@ -595,7 +585,8 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
        }
 
        /* Send command */
-       if (cmd_no == HostCmd_CMD_802_11_SCAN) {
+       if (cmd_no == HostCmd_CMD_802_11_SCAN ||
+           cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
                mwifiex_queue_scan_cmd(priv, cmd_node);
        } else {
                mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
@@ -785,7 +776,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
        unsigned long flags;
 
        /* Now we got response from FW, cancel the command timer */
-       del_timer(&adapter->cmd_timer);
+       del_timer_sync(&adapter->cmd_timer);
 
        if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
                resp = (struct host_cmd_ds_command *) adapter->upld_buf;
@@ -794,7 +785,7 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
                return -1;
        }
 
-       adapter->num_cmd_timeout = 0;
+       adapter->is_cmd_timedout = 0;
 
        resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
        if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
@@ -905,8 +896,7 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
        struct cmd_ctrl_node *cmd_node;
        struct timeval tstamp;
 
-       adapter->num_cmd_timeout++;
-       adapter->dbg.num_cmd_timeout++;
+       adapter->is_cmd_timedout = 1;
        if (!adapter->curr_cmd) {
                dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
                return;
@@ -929,8 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
                        adapter->dbg.num_cmd_host_to_card_failure);
 
-               dev_err(adapter->dev, "num_cmd_timeout = %d\n",
-                       adapter->dbg.num_cmd_timeout);
+               dev_err(adapter->dev, "is_cmd_timedout = %d\n",
+                       adapter->is_cmd_timedout);
                dev_err(adapter->dev, "num_tx_timeout = %d\n",
                        adapter->dbg.num_tx_timeout);
 
@@ -987,13 +977,14 @@ void
 mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
 {
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
-       unsigned long flags;
+       unsigned long flags, cmd_flags;
+       struct mwifiex_private *priv;
+       int i;
 
+       spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
        /* Cancel current cmd */
        if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
-               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
                adapter->curr_cmd->wait_q_enabled = false;
-               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
                adapter->cmd_wait_q.status = -1;
                mwifiex_complete_cmd(adapter, adapter->curr_cmd);
        }
@@ -1013,6 +1004,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
                spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
        }
        spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+       spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
 
        /* Cancel all pending scan command */
        spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
@@ -1027,9 +1019,21 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
        }
        spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
 
-       spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-       adapter->scan_processing = false;
-       spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+       if (adapter->scan_processing) {
+               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+               adapter->scan_processing = false;
+               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+               for (i = 0; i < adapter->priv_num; i++) {
+                       priv = adapter->priv[i];
+                       if (!priv)
+                               continue;
+                       if (priv->scan_request) {
+                               dev_dbg(adapter->dev, "info: aborting scan\n");
+                               cfg80211_scan_done(priv->scan_request, 1);
+                               priv->scan_request = NULL;
+                       }
+               }
+       }
 }
 
 /*
@@ -1048,7 +1052,8 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
        unsigned long scan_pending_q_flags;
-       bool cancel_scan_cmd = false;
+       struct mwifiex_private *priv;
+       int i;
 
        if ((adapter->curr_cmd) &&
            (adapter->curr_cmd->wait_q_enabled)) {
@@ -1074,15 +1079,24 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
                spin_lock_irqsave(&adapter->scan_pending_q_lock,
                                  scan_pending_q_flags);
-               cancel_scan_cmd = true;
        }
        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                               scan_pending_q_flags);
 
-       if (cancel_scan_cmd) {
+       if (adapter->scan_processing) {
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+               for (i = 0; i < adapter->priv_num; i++) {
+                       priv = adapter->priv[i];
+                       if (!priv)
+                               continue;
+                       if (priv->scan_request) {
+                               dev_dbg(adapter->dev, "info: aborting scan\n");
+                               cfg80211_scan_done(priv->scan_request, 1);
+                               priv->scan_request = NULL;
+                       }
+               }
        }
        adapter->cmd_wait_q.status = -1;
 }
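The scan-abort loop added here is duplicated verbatim between mwifiex_cancel_all_pending_cmd() and mwifiex_cancel_pending_ioctl(); a follow-up could factor it into a small helper. A hedged sketch, with mwifiex_abort_scan_requests as a hypothetical name not present in the source:

        /* hypothetical helper; body copied from the two hunks above */
        static void mwifiex_abort_scan_requests(struct mwifiex_adapter *adapter)
        {
                struct mwifiex_private *priv;
                int i;

                for (i = 0; i < adapter->priv_num; i++) {
                        priv = adapter->priv[i];
                        if (!priv)
                                continue;
                        if (priv->scan_request) {
                                dev_dbg(adapter->dev, "info: aborting scan\n");
                                cfg80211_scan_done(priv->scan_request, 1);
                                priv->scan_request = NULL;
                        }
                }
        }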
@@ -1454,7 +1468,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
        struct mwifiex_adapter *adapter = priv->adapter;
-       int i;
+       struct mwifiex_ie_types_header *tlv;
+       struct hw_spec_fw_api_rev *api_rev;
+       u16 resp_size, api_id;
+       int i, left_len, parsed_len = 0;
 
        adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
 
@@ -1490,6 +1507,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
        }
 
        adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
+       adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff;
        adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
 
        if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
@@ -1498,8 +1516,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
                /* Copy 11AC cap */
                adapter->hw_dot_11ac_dev_cap =
                                        le32_to_cpu(hw_spec->dot_11ac_dev_cap);
-               adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
-               adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
+               adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
+                                       & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
+               adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
+                                       & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
 
                /* Copy 11AC mcs */
                adapter->hw_dot_11ac_mcs_support =
@@ -1510,6 +1530,46 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
                adapter->is_hw_11ac_capable = false;
        }
 
+       resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
+       if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
+               /* we have variable HW SPEC information */
+               left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
+               while (left_len > sizeof(struct mwifiex_ie_types_header)) {
+                       tlv = (void *)&hw_spec->tlvs + parsed_len;
+                       switch (le16_to_cpu(tlv->type)) {
+                       case TLV_TYPE_FW_API_REV:
+                               api_rev = (struct hw_spec_fw_api_rev *)tlv;
+                               api_id = le16_to_cpu(api_rev->api_id);
+                               switch (api_id) {
+                               case KEY_API_VER_ID:
+                                       adapter->fw_key_api_major_ver =
+                                                       api_rev->major_ver;
+                                       adapter->fw_key_api_minor_ver =
+                                                       api_rev->minor_ver;
+                                       dev_dbg(adapter->dev,
+                                               "fw_key_api v%d.%d\n",
+                                               adapter->fw_key_api_major_ver,
+                                               adapter->fw_key_api_minor_ver);
+                                       break;
+                               default:
+                                       dev_warn(adapter->dev,
+                                                "Unknown FW api_id: %d\n",
+                                                api_id);
+                                       break;
+                               }
+                               break;
+                       default:
+                               dev_warn(adapter->dev,
+                                        "Unknown GET_HW_SPEC TLV type: %#x\n",
+                                        le16_to_cpu(tlv->type));
+                               break;
+                       }
+                       parsed_len += le16_to_cpu(tlv->len) +
+                                     sizeof(struct mwifiex_ie_types_header);
+                       left_len -= le16_to_cpu(tlv->len) +
+                                   sizeof(struct mwifiex_ie_types_header);
+               }
+       }
+
        dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
                adapter->fw_release_number);
        dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
@@ -1538,6 +1598,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
 
        adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
        adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
+       adapter->user_dev_mcs_support = adapter->hw_dev_mcs_support;
 
        if (adapter->if_ops.update_mp_end_port)
                adapter->if_ops.update_mp_end_port(adapter,
index a5f9875cfd6e311f7e87b40890538ee312b37f4b..b8a49aad12fd662434ce2a29aaa8edecfb52ba0b 100644 (file)
@@ -85,8 +85,8 @@ static struct mwifiex_debug_data items[] = {
         item_addr(hs_activated), 1},
        {"num_tx_timeout", item_size(num_tx_timeout),
         item_addr(num_tx_timeout), 1},
-       {"num_cmd_timeout", item_size(num_cmd_timeout),
-        item_addr(num_cmd_timeout), 1},
+       {"is_cmd_timedout", item_size(is_cmd_timedout),
+        item_addr(is_cmd_timedout), 1},
        {"timeout_cmd_id", item_size(timeout_cmd_id),
         item_addr(timeout_cmd_id), 1},
        {"timeout_cmd_act", item_size(timeout_cmd_act),
@@ -493,7 +493,7 @@ mwifiex_regrdwr_write(struct file *file,
 {
        unsigned long addr = get_zeroed_page(GFP_KERNEL);
        char *buf = (char *) addr;
-       size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
+       size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
        int ret;
        u32 reg_type = 0, reg_offset = 0, reg_value = UINT_MAX;
 
@@ -594,7 +594,7 @@ mwifiex_rdeeprom_write(struct file *file,
 {
        unsigned long addr = get_zeroed_page(GFP_KERNEL);
        char *buf = (char *) addr;
-       size_t buf_size = min(count, (size_t) (PAGE_SIZE - 1));
+       size_t buf_size = min_t(size_t, count, PAGE_SIZE - 1);
        int ret = 0;
        int offset = -1, bytes = -1;
 
index 3a21bd03d6db89f387224428a3c334a30199634f..e7b3e16e5d34f1f8703ec2f6d4e21e9388c49edd 100644 (file)
 
 #define MWIFIEX_BUF_FLAG_REQUEUED_PKT      BIT(0)
 #define MWIFIEX_BUF_FLAG_BRIDGED_PKT      BIT(1)
+#define MWIFIEX_BUF_FLAG_TDLS_PKT         BIT(2)
 
 #define MWIFIEX_BRIDGED_PKTS_THR_HIGH      1024
 #define MWIFIEX_BRIDGED_PKTS_THR_LOW        128
 
+#define MWIFIEX_TDLS_DISABLE_LINK             0x00
+#define MWIFIEX_TDLS_ENABLE_LINK              0x01
+#define MWIFIEX_TDLS_CREATE_LINK              0x02
+#define MWIFIEX_TDLS_CONFIG_LINK              0x03
+
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
@@ -92,6 +98,23 @@ enum mwifiex_bss_role {
        MWIFIEX_BSS_ROLE_ANY = 0xff,
 };
 
+enum mwifiex_tdls_status {
+       TDLS_NOT_SETUP = 0,
+       TDLS_SETUP_INPROGRESS,
+       TDLS_SETUP_COMPLETE,
+       TDLS_SETUP_FAILURE,
+       TDLS_LINK_TEARDOWN,
+};
+
+enum mwifiex_tdls_error_code {
+       TDLS_ERR_NO_ERROR = 0,
+       TDLS_ERR_INTERNAL_ERROR,
+       TDLS_ERR_MAX_LINKS_EST,
+       TDLS_ERR_LINK_EXISTS,
+       TDLS_ERR_LINK_NONEXISTENT,
+       TDLS_ERR_PEER_STA_UNREACHABLE = 25,
+};
+
 #define BSS_ROLE_BIT_MASK    BIT(0)
 
 #define GET_BSS_ROLE(priv)   ((priv)->bss_role & BSS_ROLE_BIT_MASK)
index 5fa932d5f905531ba17c3e93375eecd0fbca6f70..b485dc1ae5ebc42c5606e37723189d20e7d98b16 100644 (file)
@@ -50,21 +50,23 @@ struct tx_packet_hdr {
 #define HOSTCMD_SUPPORTED_RATES         14
 #define N_SUPPORTED_RATES               3
 #define ALL_802_11_BANDS           (BAND_A | BAND_B | BAND_G | BAND_GN | \
-                                   BAND_AN | BAND_GAC | BAND_AAC)
+                                   BAND_AN | BAND_AAC)
 
 #define FW_MULTI_BANDS_SUPPORT  (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
-                                BIT(12) | BIT(13))
+                                BIT(13))
 #define IS_SUPPORT_MULTI_BANDS(adapter)        \
        (adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
 
-/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14
- * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit
- * bit 14 for AAC, in order to be compatible with the band capability
- * defined in the driver after right shift of 8 bits.
+/* bit 13: 11ac BAND_AAC
+ * bit 12: reserved for lab testing, will be reused for BAND_AN
+ * bit 11: 11n  BAND_GN
+ * bit 10: 11a  BAND_A
+ * bit 9: 11g   BAND_G
+ * bit 8: 11b   BAND_B
+ * Map these bits to band capability by right shifting 8 bits.
  */
 #define GET_FW_DEFAULT_BANDS(adapter)  \
-           (((((adapter->fw_cap_info & 0x3000) << 1) | \
-              (adapter->fw_cap_info & ~0xF000)) >> 8) & \
+           (((adapter->fw_cap_info & 0x2f00) >> 8) & \
             ALL_802_11_BANDS)
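A quick worked example of the new mask, with a made-up fw_cap_info value: (0x3f00 & 0x2f00) >> 8 = 0x2f, i.e. bit 12 (the reserved lab-test bit) is dropped before the result is intersected with ALL_802_11_BANDS.

        /* userspace illustration of the band-mask arithmetic */
        #include <stdio.h>

        int main(void)
        {
                unsigned int fw_cap_info = 0x3f00;      /* sample value, made up */
                unsigned int bands = (fw_cap_info & 0x2f00) >> 8;

                printf("band bits: 0x%02x\n", bands);   /* prints 0x2f */
                return 0;
        }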
 
 #define HostCmd_WEP_KEY_INDEX_MASK              0x3fff
@@ -77,12 +79,21 @@ enum KEY_TYPE_ID {
        KEY_TYPE_ID_WAPI,
        KEY_TYPE_ID_AES_CMAC,
 };
+
+#define WPA_PN_SIZE            8
+#define KEY_PARAMS_FIXED_LEN   10
+#define KEY_INDEX_MASK         0xf
+#define FW_KEY_API_VER_MAJOR_V2        2
+
 #define KEY_MCAST      BIT(0)
 #define KEY_UNICAST    BIT(1)
 #define KEY_ENABLED    BIT(2)
+#define KEY_DEFAULT    BIT(3)
+#define KEY_TX_KEY     BIT(4)
+#define KEY_RX_KEY     BIT(5)
 #define KEY_IGTK       BIT(10)
 
-#define WAPI_KEY_LEN                   50
+#define WAPI_KEY_LEN                   (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
 
 #define MAX_POLL_TRIES                 100
 #define MAX_FIRMWARE_POLL_TRIES                        100
@@ -130,6 +141,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_RSSI_HIGH          (PROPRIETARY_TLV_BASE_ID + 22)
 #define TLV_TYPE_AUTH_TYPE          (PROPRIETARY_TLV_BASE_ID + 31)
 #define TLV_TYPE_STA_MAC_ADDR       (PROPRIETARY_TLV_BASE_ID + 32)
+#define TLV_TYPE_BSSID              (PROPRIETARY_TLV_BASE_ID + 35)
 #define TLV_TYPE_CHANNELBANDLIST    (PROPRIETARY_TLV_BASE_ID + 42)
 #define TLV_TYPE_UAP_BEACON_PERIOD  (PROPRIETARY_TLV_BASE_ID + 44)
 #define TLV_TYPE_UAP_DTIM_PERIOD    (PROPRIETARY_TLV_BASE_ID + 45)
@@ -144,6 +156,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_RATE_DROP_CONTROL  (PROPRIETARY_TLV_BASE_ID + 82)
 #define TLV_TYPE_RATE_SCOPE         (PROPRIETARY_TLV_BASE_ID + 83)
 #define TLV_TYPE_POWER_GROUP        (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_BSS_SCAN_RSP       (PROPRIETARY_TLV_BASE_ID + 86)
+#define TLV_TYPE_BSS_SCAN_INFO      (PROPRIETARY_TLV_BASE_ID + 87)
 #define TLV_TYPE_UAP_RETRY_LIMIT    (PROPRIETARY_TLV_BASE_ID + 93)
 #define TLV_TYPE_WAPI_IE            (PROPRIETARY_TLV_BASE_ID + 94)
 #define TLV_TYPE_UAP_MGMT_FRAME     (PROPRIETARY_TLV_BASE_ID + 104)
@@ -154,6 +168,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
+#define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_FW_API_REV         (PROPRIETARY_TLV_BASE_ID + 199)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -176,13 +192,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define MWIFIEX_TX_DATA_BUF_SIZE_8K        8192
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
 
 #define MWIFIEX_DEF_HT_CAP     (IEEE80211_HT_CAP_DSSSCCK40 | \
                                 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
                                 IEEE80211_HT_CAP_SM_PS)
 
+#define MWIFIEX_DEF_11N_TX_BF_CAP      0x09E1E008
+
 #define MWIFIEX_DEF_AMPDU      IEEE80211_HT_AMPDU_PARM_FACTOR
 
+#define GET_RXSTBC(x) (x & IEEE80211_HT_CAP_RX_STBC)
+#define MWIFIEX_RX_STBC1       0x0100
+#define MWIFIEX_RX_STBC12      0x0200
+#define MWIFIEX_RX_STBC123     0x0300
+
 /* dev_cap bitmap
  * BIT
  * 0-16                reserved
@@ -204,6 +228,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
 #define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
 #define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
+#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
 
 /* httxcfg bitmap
  * 0           reserved
@@ -216,8 +241,21 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
  */
 #define MWIFIEX_FW_DEF_HTTXCFG (BIT(1) | BIT(4) | BIT(5) | BIT(6))
 
+/* 11AC Tx and Rx MCS map for 1x1 mode:
+ * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1
+ * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 7 streams
+ */
+#define MWIFIEX_11AC_MCS_MAP_1X1       0xfffefffe
+
+/* 11AC Tx and Rx MCS map for 2x2 mode:
+ * IEEE80211_VHT_MCS_SUPPORT_0_9 for stream 1 and 2
+ * IEEE80211_VHT_MCS_NOT_SUPPORTED for remaining 6 streams
+ */
+#define MWIFIEX_11AC_MCS_MAP_2X2       0xfffafffa
+
 #define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
 #define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_1X1  0x11
 #define HT_STREAM_2X2  0x22
 
 #define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
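
MWIFIEX_11AC_MCS_MAP_1X1 and _2X2 above pack one 2-bit field per spatial stream into a 16-bit map, repeated for Tx and Rx in the 32-bit define. A short sketch of the encoding, assuming the usual mac80211 values IEEE80211_VHT_MCS_SUPPORT_0_9 = 2 and IEEE80211_VHT_MCS_NOT_SUPPORTED = 3 (editorial illustration only):

	#include <stdio.h>

	#define VHT_MCS_SUPPORT_0_9    2	/* assumed ieee80211.h value */
	#define VHT_MCS_NOT_SUPPORTED  3	/* assumed ieee80211.h value */

	static unsigned short build_mcs_map(int nss)
	{
		unsigned short map = 0;
		int stream;

		/* 2 bits per stream; streams beyond nss are marked not supported */
		for (stream = 0; stream < 8; stream++)
			map |= ((stream < nss) ? VHT_MCS_SUPPORT_0_9 :
						 VHT_MCS_NOT_SUPPORTED) << (2 * stream);
		return map;
	}

	int main(void)
	{
		printf("1x1 map: 0x%04x\n", build_mcs_map(1));	/* 0xfffe */
		printf("2x2 map: 0x%04x\n", build_mcs_map(2));	/* 0xfffa */
		return 0;
	}
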
@@ -226,17 +264,24 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 /* HW_SPEC fw_cap_info */
 
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & BIT(13))
 
 #define GET_VHTCAP_CHWDSET(vht_cap_info)    ((vht_cap_info >> 2) & 0x3)
 #define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
 #define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
                                              (2 * (nss - 1)))
-#define NO_NSS_SUPPORT         0x3
-
 #define GET_DEVTXMCSMAP(dev_mcs_map)      (dev_mcs_map >> 16)
 #define GET_DEVRXMCSMAP(dev_mcs_map)      (dev_mcs_map & 0xFFFF)
 
+/* Clear SU beamformer, MU beamformer, MU beamformee and
+ * sounding dimensions bits
+ */
+#define MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK \
+                       (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | \
+                        IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | \
+                        IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | \
+                        IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
+
 #define MOD_CLASS_HR_DSSS       0x03
 #define MOD_CLASS_OFDM          0x07
 #define MOD_CLASS_HT            0x08
@@ -295,10 +340,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_CAU_REG_ACCESS                    0x00ed
 #define HostCmd_CMD_SET_BSS_MODE                      0x00f7
 #define HostCmd_CMD_PCIE_DESC_DETAILS                 0x00fa
+#define HostCmd_CMD_802_11_SCAN_EXT                   0x0107
 #define HostCmd_CMD_COALESCE_CFG                      0x010a
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
+#define HostCmd_CMD_TDLS_OPER                         0x0122
 
 #define PROTOCOL_NO_SECURITY        0x01
 #define PROTOCOL_STATIC_WEP         0x02
@@ -440,6 +487,7 @@ enum P2P_MODES {
 #define EVENT_UAP_MIC_COUNTERMEASURES   0x0000004c
 #define EVENT_HOSTWAKE_STAIE           0x0000004d
 #define EVENT_CHANNEL_SWITCH_ANN        0x00000050
+#define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 
 #define EVENT_ID_MASK                   0xffff
@@ -468,6 +516,12 @@ enum P2P_MODES {
 #define MWIFIEX_CRITERIA_UNICAST       BIT(1)
 #define MWIFIEX_CRITERIA_MULTICAST     BIT(3)
 
+#define ACT_TDLS_DELETE            0x00
+#define ACT_TDLS_CREATE            0x01
+#define ACT_TDLS_CONFIG            0x02
+
+#define MWIFIEX_FW_V15            15
+
 struct mwifiex_ie_types_header {
        __le16 type;
        __le16 len;
@@ -480,6 +534,7 @@ struct mwifiex_ie_types_data {
 
 #define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
 #define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
+#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET      0x10
 
 struct txpd {
        u8 bss_type;
@@ -676,6 +731,56 @@ struct mwifiex_cmac_param {
        u8 key[WLAN_KEY_LEN_AES_CMAC];
 } __packed;
 
+struct mwifiex_wep_param {
+       __le16 key_len;
+       u8 key[WLAN_KEY_LEN_WEP104];
+} __packed;
+
+struct mwifiex_tkip_param {
+       u8 pn[WPA_PN_SIZE];
+       __le16 key_len;
+       u8 key[WLAN_KEY_LEN_TKIP];
+} __packed;
+
+struct mwifiex_aes_param {
+       u8 pn[WPA_PN_SIZE];
+       __le16 key_len;
+       u8 key[WLAN_KEY_LEN_CCMP];
+} __packed;
+
+struct mwifiex_wapi_param {
+       u8 pn[PN_LEN];
+       __le16 key_len;
+       u8 key[WLAN_KEY_LEN_SMS4];
+} __packed;
+
+struct mwifiex_cmac_aes_param {
+       u8 ipn[IGTK_PN_LEN];
+       __le16 key_len;
+       u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
+struct mwifiex_ie_type_key_param_set_v2 {
+       __le16 type;
+       __le16 len;
+       u8 mac_addr[ETH_ALEN];
+       u8 key_idx;
+       u8 key_type;
+       __le16 key_info;
+       union {
+               struct mwifiex_wep_param wep;
+               struct mwifiex_tkip_param tkip;
+               struct mwifiex_aes_param aes;
+               struct mwifiex_wapi_param wapi;
+               struct mwifiex_cmac_aes_param cmac_aes;
+       } key_params;
+} __packed;
+
+struct host_cmd_ds_802_11_key_material_v2 {
+       __le16 action;
+       struct mwifiex_ie_type_key_param_set_v2 key_param_set;
+} __packed;
+
 struct host_cmd_ds_802_11_key_material {
        __le16 action;
        struct mwifiex_ie_type_key_param_set key_param_set;
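
mwifiex_ie_type_key_param_set_v2 above keeps a 10-byte fixed part (mac_addr, key_idx, key_type, key_info; cf. KEY_PARAMS_FIXED_LEN) ahead of a per-cipher union. A rough, illustrative length calculation for a CCMP pairwise key under that layout (sizes assumed from the structs above, not lifted from the driver's command-building code):

	#include <stdio.h>
	#include <stdint.h>

	#define KEY_PARAMS_FIXED_LEN	10	/* mac_addr + key_idx + key_type + key_info */
	#define WPA_PN_SIZE		 8
	#define WLAN_KEY_LEN_CCMP	16

	int main(void)
	{
		/* fixed part + PN + key_len field + the key itself */
		size_t tlv_len = KEY_PARAMS_FIXED_LEN + WPA_PN_SIZE +
				 sizeof(uint16_t) + WLAN_KEY_LEN_CCMP;

		printf("illustrative key_param_set_v2 TLV len (CCMP): %zu\n", tlv_len);
		return 0;
	}
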
@@ -727,6 +832,17 @@ struct host_cmd_ds_802_11_ps_mode_enh {
        } params;
 } __packed;
 
+enum FW_API_VER_ID {
+       KEY_API_VER_ID = 1,
+};
+
+struct hw_spec_fw_api_rev {
+       struct mwifiex_ie_types_header header;
+       __le16 api_id;
+       u8 major_ver;
+       u8 minor_ver;
+} __packed;
+
 struct host_cmd_ds_get_hw_spec {
        __le16 hw_if_version;
        __le16 version;
@@ -748,6 +864,7 @@ struct host_cmd_ds_get_hw_spec {
        __le32 reserved_6;
        __le32 dot_11ac_dev_cap;
        __le32 dot_11ac_mcs_support;
+       u8 tlvs[0];
 } __packed;
 
 struct host_cmd_ds_802_11_rssi_info {
@@ -993,6 +1110,7 @@ struct mwifiex_rate_scope {
        __le16 hr_dsss_rate_bitmap;
        __le16 ofdm_rate_bitmap;
        __le16 ht_mcs_rate_bitmap[8];
+       __le16 vht_mcs_rate_bitmap[8];
 } __packed;
 
 struct mwifiex_rate_drop_pattern {
@@ -1047,14 +1165,28 @@ struct host_cmd_ds_rf_ant_siso {
        __le16 ant_mode;
 };
 
-struct mwifiex_bcn_param {
-       u8 bssid[ETH_ALEN];
-       u8 rssi;
+struct host_cmd_ds_tdls_oper {
+       __le16 tdls_action;
+       __le16 reason;
+       u8 peer_mac[ETH_ALEN];
+} __packed;
+
+struct mwifiex_fixed_bcn_param {
        __le64 timestamp;
        __le16 beacon_period;
        __le16 cap_info_bitmap;
 } __packed;
 
+struct mwifiex_event_scan_result {
+       __le16 event_id;
+       u8 bss_index;
+       u8 bss_type;
+       u8 more_event;
+       u8 reserved[3];
+       __le16 buf_size;
+       u8 num_of_set;
+} __packed;
+
 #define MWIFIEX_USER_SCAN_CHAN_MAX             50
 
 #define MWIFIEX_MAX_SSID_LIST_LENGTH         10
@@ -1124,6 +1256,28 @@ struct host_cmd_ds_802_11_scan_rsp {
        u8 bss_desc_and_tlv_buffer[1];
 } __packed;
 
+struct host_cmd_ds_802_11_scan_ext {
+       u32   reserved;
+       u8    tlv_buffer[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_rsp {
+       struct mwifiex_ie_types_header header;
+       u8 bssid[ETH_ALEN];
+       u8 frame_body[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_info {
+       struct mwifiex_ie_types_header header;
+       __le16 rssi;
+       __le16 anpi;
+       u8 cca_busy_fraction;
+       u8 radio_type;
+       u8 channel;
+       u8 reserved;
+       __le64 tsf;
+} __packed;
+
 struct host_cmd_ds_802_11_bg_scan_query {
        u8 flush;
 } __packed;
@@ -1296,6 +1450,11 @@ struct mwifiex_ie_types_vhtcap {
        struct ieee80211_vht_cap vht_cap;
 } __packed;
 
+struct mwifiex_ie_types_aid {
+       struct mwifiex_ie_types_header header;
+       __le16 aid;
+} __packed;
+
 struct mwifiex_ie_types_oper_mode_ntf {
        struct mwifiex_ie_types_header header;
        u8 oper_mode;
@@ -1331,6 +1490,11 @@ struct mwifiex_ie_types_extcap {
        u8 ext_capab[0];
 } __packed;
 
+struct mwifiex_ie_types_qos_info {
+       struct mwifiex_ie_types_header header;
+       u8 qos_info;
+} __packed;
+
 struct host_cmd_ds_mac_reg_access {
        __le16 action;
        __le16 offset;
@@ -1441,6 +1605,11 @@ struct host_cmd_tlv_rates {
        u8 rates[0];
 } __packed;
 
+struct mwifiex_ie_types_bssid_list {
+       struct mwifiex_ie_types_header header;
+       u8 bssid[ETH_ALEN];
+} __packed;
+
 struct host_cmd_tlv_bcast_ssid {
        struct mwifiex_ie_types_header header;
        u8 bcast_ctl;
@@ -1634,6 +1803,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
                struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
                struct host_cmd_ds_802_11_scan scan;
+               struct host_cmd_ds_802_11_scan_ext ext_scan;
                struct host_cmd_ds_802_11_scan_rsp scan_resp;
                struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
                struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
@@ -1653,6 +1823,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_11n_cfg htcfg;
                struct host_cmd_ds_wmm_get_status get_wmm_status;
                struct host_cmd_ds_802_11_key_material key_material;
+               struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
                struct host_cmd_ds_version_ext verext;
                struct host_cmd_ds_mgmt_frame_reg reg_mask;
                struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1671,6 +1842,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_sta_deauth sta_deauth;
                struct host_cmd_11ac_vht_cfg vht_cfg;
                struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+               struct host_cmd_ds_tdls_oper tdls_oper;
        } params;
 } __packed;
 
index 81ac001ee74187d325f7a7d166666f9ec3c497fa..3bf3d58bbc029b0a48a937a0c65d15a83552b295 100644 (file)
@@ -138,9 +138,9 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
        }
 
        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
-               return mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
-                                             HostCmd_ACT_GEN_SET,
-                                             UAP_CUSTOM_IE_I, ie_list);
+               return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
+                                       HostCmd_ACT_GEN_SET,
+                                       UAP_CUSTOM_IE_I, ie_list, false);
 
        return 0;
 }
index 1d0a817f2bf05de1c5c7873fad2eacaa6cb1838f..4ecd0b208ac64f5be6ffa75b9f15a1be965250a2 100644 (file)
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
        priv->csa_expire_time = 0;
        priv->del_list_idx = 0;
        priv->hs2_enabled = false;
+       memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
 
        return mwifiex_add_bss_prio_tbl(priv);
 }
@@ -233,7 +234,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
 
        adapter->pm_wakeup_fw_try = false;
 
-       adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
        adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
 
        adapter->is_hs_configured = false;
@@ -281,6 +281,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        adapter->arp_filter_size = 0;
        adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
        adapter->empty_tx_q_cnt = 0;
+       adapter->ext_scan = true;
+       adapter->fw_key_api_major_ver = 0;
+       adapter->fw_key_api_minor_ver = 0;
 }
 
 /*
@@ -450,6 +453,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
                INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
                INIT_LIST_HEAD(&priv->sta_list);
+               skb_queue_head_init(&priv->tdls_txq);
 
                spin_lock_init(&priv->tx_ba_stream_tbl_lock);
                spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -615,7 +619,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
        /* cancel current command */
        if (adapter->curr_cmd) {
                dev_warn(adapter->dev, "curr_cmd is still in processing\n");
-               del_timer(&adapter->cmd_timer);
+               del_timer_sync(&adapter->cmd_timer);
                mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
                adapter->curr_cmd = NULL;
        }
index 00a95f4c6a6c1885eabab4652ff4cbe173d2b7ae..ee494db5406097c35a0f22b365e2ad2e1ac674f8 100644 (file)
@@ -60,8 +60,7 @@ enum {
        BAND_A = 4,
        BAND_GN = 8,
        BAND_AN = 16,
-       BAND_GAC = 32,
-       BAND_AAC = 64,
+       BAND_AAC = 32,
 };
 
 #define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -86,6 +85,10 @@ struct wep_key {
 #define BAND_CONFIG_A           0x01
 #define MWIFIEX_SUPPORTED_RATES                 14
 #define MWIFIEX_SUPPORTED_RATES_EXT             32
+#define MWIFIEX_TDLS_SUPPORTED_RATES           8
+#define MWIFIEX_TDLS_DEF_QOS_CAPAB             0xf
+#define MWIFIEX_PRIO_BK                                2
+#define MWIFIEX_PRIO_VI                                5
 
 struct mwifiex_uap_bss_param {
        u8 channel;
@@ -174,6 +177,7 @@ struct mwifiex_ds_rx_reorder_tbl {
 struct mwifiex_ds_tx_ba_stream_tbl {
        u16 tid;
        u8 ra[ETH_ALEN];
+       u8 amsdu;
 };
 
 #define DBG_CMD_NUM    5
@@ -206,7 +210,7 @@ struct mwifiex_debug_info {
        u32 num_cmd_assoc_success;
        u32 num_cmd_assoc_failure;
        u32 num_tx_timeout;
-       u32 num_cmd_timeout;
+       u8 is_cmd_timedout;
        u16 timeout_cmd_id;
        u16 timeout_cmd_act;
        u16 last_cmd_id[DBG_CMD_NUM];
@@ -233,7 +237,10 @@ struct mwifiex_ds_encrypt_key {
        u8 mac_addr[ETH_ALEN];
        u32 is_wapi_key;
        u8 pn[PN_LEN];          /* packet number */
+       u8 pn_len;
        u8 is_igtk_key;
+       u8 is_current_wep_key;
+       u8 is_rx_seq_valid;
 };
 
 struct mwifiex_power_cfg {
@@ -432,4 +439,16 @@ struct mwifiex_ds_coalesce_cfg {
        struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
 };
 
+struct mwifiex_ds_tdls_oper {
+       u16 tdls_action;
+       u8 peer_mac[ETH_ALEN];
+       u16 capability;
+       u8 qos_info;
+       u8 *ext_capab;
+       u8 ext_capab_len;
+       u8 *supp_rates;
+       u8 supp_rates_len;
+       u8 *ht_capab;
+};
+
 #endif /* !_MWIFIEX_IOCTL_H_ */
index 4e4686e6ac092b3ce23282c575eadc2b40071eff..89dc62a467f4d2ba8b7cc2fcf6b7d1d63b058f68 100644 (file)
@@ -515,8 +515,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
 
        if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
            !bss_desc->disable_11n && !bss_desc->disable_11ac &&
-           (priv->adapter->config_bands & BAND_GAC ||
-            priv->adapter->config_bands & BAND_AAC))
+           priv->adapter->config_bands & BAND_AAC)
                mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
 
        /* Append vendor specific IE TLV */
@@ -902,9 +901,9 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
        if ((adapter->adhoc_start_band & BAND_G) &&
            (priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
-               if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
-                                          HostCmd_ACT_GEN_SET, 0,
-                                          &priv->curr_pkt_filter)) {
+               if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+                                    HostCmd_ACT_GEN_SET, 0,
+                                    &priv->curr_pkt_filter, false)) {
                        dev_err(adapter->dev,
                                "ADHOC_S_CMD: G Protection config failed\n");
                        return -1;
@@ -983,7 +982,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
                       cpu_to_le16(sizeof(struct ieee80211_ht_cap));
                radio_type = mwifiex_band_to_radio_type(
                                        priv->adapter->config_bands);
-               mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+               mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
 
                if (adapter->sec_chan_offset ==
                                        IEEE80211_HT_PARAM_CHA_SEC_NONE) {
@@ -1074,9 +1073,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
                        priv->
                        curr_pkt_filter | HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON;
 
-               if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
-                                          HostCmd_ACT_GEN_SET, 0,
-                                          &curr_pkt_filter)) {
+               if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+                                    HostCmd_ACT_GEN_SET, 0,
+                                    &curr_pkt_filter, false)) {
                        dev_err(priv->adapter->dev,
                                "ADHOC_J_CMD: G Protection config failed\n");
                        return -1;
@@ -1300,8 +1299,7 @@ int mwifiex_associate(struct mwifiex_private *priv,
 
        if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
            !bss_desc->disable_11n && !bss_desc->disable_11ac &&
-           (priv->adapter->config_bands & BAND_GAC ||
-            priv->adapter->config_bands & BAND_AAC))
+           priv->adapter->config_bands & BAND_AAC)
                mwifiex_set_11ac_ba_params(priv);
        else
                mwifiex_set_ba_params(priv);
@@ -1314,8 +1312,8 @@ int mwifiex_associate(struct mwifiex_private *priv,
           retrieval */
        priv->assoc_rsp_size = 0;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_ASSOCIATE,
-                                   HostCmd_ACT_GEN_SET, 0, bss_desc);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_ASSOCIATE,
+                               HostCmd_ACT_GEN_SET, 0, bss_desc, true);
 }
 
 /*
@@ -1335,14 +1333,13 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
                priv->curr_bss_params.band);
 
        if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
-           (priv->adapter->config_bands & BAND_GAC ||
-            priv->adapter->config_bands & BAND_AAC))
+           priv->adapter->config_bands & BAND_AAC)
                mwifiex_set_11ac_ba_params(priv);
        else
                mwifiex_set_ba_params(priv);
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_START,
-                                   HostCmd_ACT_GEN_SET, 0, adhoc_ssid);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_START,
+                               HostCmd_ACT_GEN_SET, 0, adhoc_ssid, true);
 }
 
 /*
@@ -1376,8 +1373,7 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
 
        if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
            !bss_desc->disable_11n && !bss_desc->disable_11ac &&
-           (priv->adapter->config_bands & BAND_GAC ||
-            priv->adapter->config_bands & BAND_AAC))
+           priv->adapter->config_bands & BAND_AAC)
                mwifiex_set_11ac_ba_params(priv);
        else
                mwifiex_set_ba_params(priv);
@@ -1387,8 +1383,8 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
        dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
                priv->curr_bss_params.band);
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
-                                   HostCmd_ACT_GEN_SET, 0, bss_desc);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
+                               HostCmd_ACT_GEN_SET, 0, bss_desc, true);
 }
 
 /*
@@ -1407,8 +1403,8 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
        else
                memcpy(mac_address, mac, ETH_ALEN);
 
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
-                                   HostCmd_ACT_GEN_SET, 0, mac_address);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
+                              HostCmd_ACT_GEN_SET, 0, mac_address, true);
 
        return ret;
 }
@@ -1436,19 +1432,31 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
                                              GFP_KERNEL);
                break;
        case NL80211_IFTYPE_ADHOC:
-               return mwifiex_send_cmd_sync(priv,
-                                            HostCmd_CMD_802_11_AD_HOC_STOP,
-                                            HostCmd_ACT_GEN_SET, 0, NULL);
+               return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
+                                       HostCmd_ACT_GEN_SET, 0, NULL, true);
        case NL80211_IFTYPE_AP:
-               return mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
-                                            HostCmd_ACT_GEN_SET, 0, NULL);
+               return mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
+                                       HostCmd_ACT_GEN_SET, 0, NULL, true);
        default:
                break;
        }
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
+
+/* This function deauthenticates/disconnects from all BSS. */
+void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter)
+{
+       struct mwifiex_private *priv;
+       int i;
+
+       for (i = 0; i < adapter->priv_num; i++) {
+               priv = adapter->priv[i];
+               if (priv)
+                       mwifiex_deauthenticate(priv, NULL);
+       }
+}
+EXPORT_SYMBOL_GPL(mwifiex_deauthenticate_all);
 
 /*
  * This function converts band to radio type used in channel TLV.
index 9d3d2758ec355381ebaceea681e3fd2ebae5d753..77db0886c6e2e9fa764f2a0add8876b477972915 100644 (file)
@@ -38,7 +38,8 @@ static void scan_delay_timer_fn(unsigned long data)
        if (adapter->surprise_removed)
                return;
 
-       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+       if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT ||
+           !adapter->scan_processing) {
                /*
                 * Abort scan operation by cancelling all pending scan
                 * commands
@@ -194,7 +195,7 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
        if (adapter->if_ops.cleanup_if)
                adapter->if_ops.cleanup_if(adapter);
 
-       del_timer(&adapter->cmd_timer);
+       del_timer_sync(&adapter->cmd_timer);
 
        /* Free private structures */
        for (i = 0; i < adapter->priv_num; i++) {
@@ -678,8 +679,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
        memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN);
 
        /* Send request to firmware */
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
-                                   HostCmd_ACT_GEN_SET, 0, NULL);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS,
+                              HostCmd_ACT_GEN_SET, 0, NULL, true);
 
        if (!ret)
                memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
@@ -871,7 +872,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
        adapter->is_suspended = false;
        adapter->hs_activated = false;
        init_waitqueue_head(&adapter->hs_activate_wait_q);
-       adapter->cmd_wait_q_required = false;
        init_waitqueue_head(&adapter->cmd_wait_q.wait);
        adapter->cmd_wait_q.status = 0;
        adapter->scan_wait_q_woken = false;
index d8ad554ce39f566cbf95221317fca751482d7e5c..d53e1e8c9467a62663c4d28df86e623237cdc45f 100644 (file)
@@ -59,7 +59,7 @@ enum {
 
 #define MWIFIEX_UPLD_SIZE               (2312)
 
-#define MAX_EVENT_SIZE                  1024
+#define MAX_EVENT_SIZE                  2048
 
 #define ARP_FILTER_MAX_BUF_SIZE         68
 
@@ -116,7 +116,7 @@ enum {
 #define MWIFIEX_TYPE_DATA                              0
 #define MWIFIEX_TYPE_EVENT                             3
 
-#define MAX_BITMAP_RATES_SIZE                  10
+#define MAX_BITMAP_RATES_SIZE                  18
 
 #define MAX_CHANNEL_BAND_BG     14
 #define MAX_CHANNEL_BAND_A      165
@@ -145,7 +145,6 @@ struct mwifiex_dbg {
        u32 num_cmd_assoc_success;
        u32 num_cmd_assoc_failure;
        u32 num_tx_timeout;
-       u32 num_cmd_timeout;
        u16 timeout_cmd_id;
        u16 timeout_cmd_act;
        u16 last_cmd_id[DBG_CMD_NUM];
@@ -193,6 +192,8 @@ struct mwifiex_add_ba_param {
        u32 tx_win_size;
        u32 rx_win_size;
        u32 timeout;
+       u8 tx_amsdu;
+       u8 rx_amsdu;
 };
 
 struct mwifiex_tx_aggr {
@@ -210,6 +211,7 @@ struct mwifiex_ra_list_tbl {
        u16 ba_pkt_count;
        u8 ba_packet_thr;
        u16 total_pkt_count;
+       bool tdls_link;
 };
 
 struct mwifiex_tid_tbl {
@@ -262,6 +264,31 @@ struct ieee_types_generic {
        u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
 } __packed;
 
+struct ieee_types_bss_co_2040 {
+       struct ieee_types_header ieee_hdr;
+       u8 bss_2040co;
+} __packed;
+
+struct ieee_types_extcap {
+       struct ieee_types_header ieee_hdr;
+       u8 ext_capab[8];
+} __packed;
+
+struct ieee_types_vht_cap {
+       struct ieee_types_header ieee_hdr;
+       struct ieee80211_vht_cap vhtcap;
+} __packed;
+
+struct ieee_types_vht_oper {
+       struct ieee_types_header ieee_hdr;
+       struct ieee80211_vht_operation vhtoper;
+} __packed;
+
+struct ieee_types_aid {
+       struct ieee_types_header ieee_hdr;
+       u16 aid;
+} __packed;
+
 struct mwifiex_bssdescriptor {
        u8 mac_address[ETH_ALEN];
        struct cfg80211_ssid ssid;
@@ -443,6 +470,7 @@ struct mwifiex_private {
        u8 wpa_ie_len;
        u8 wpa_is_gtk_set;
        struct host_cmd_ds_802_11_key_material aes_key;
+       struct host_cmd_ds_802_11_key_material_v2 aes_key_v2;
        u8 wapi_ie[256];
        u8 wapi_ie_len;
        u8 *wps_ie;
@@ -461,6 +489,7 @@ struct mwifiex_private {
        struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
        struct mwifiex_add_ba_param add_ba_param;
        u16 rx_seq[MAX_NUM_TID];
+       u8 tos_to_tid_inv[MAX_NUM_TID];
        struct list_head rx_reorder_tbl_ptr;
        /* spin lock for rx_reorder_tbl_ptr queue */
        spinlock_t rx_reorder_tbl_lock;
@@ -518,6 +547,8 @@ struct mwifiex_private {
        unsigned long csa_expire_time;
        u8 del_list_idx;
        bool hs2_enabled;
+       struct station_parameters *sta_params;
+       struct sk_buff_head tdls_txq;
 };
 
 enum mwifiex_ba_status {
@@ -531,6 +562,7 @@ struct mwifiex_tx_ba_stream_tbl {
        int tid;
        u8 ra[ETH_ALEN];
        enum mwifiex_ba_status ba_status;
+       u8 amsdu;
 };
 
 struct mwifiex_rx_reorder_tbl;
@@ -545,10 +577,12 @@ struct mwifiex_rx_reorder_tbl {
        struct list_head list;
        int tid;
        u8 ta[ETH_ALEN];
+       int init_win;
        int start_win;
        int win_size;
        void **rx_reorder_ptr;
        struct reorder_tmr_cnxt timer_context;
+       u8 amsdu;
        u8 flags;
 };
 
@@ -583,17 +617,35 @@ struct mwifiex_bss_priv {
        u64 fw_tsf;
 };
 
-/* This is AP specific structure which stores information
- * about associated STA
+struct mwifiex_tdls_capab {
+       __le16 capab;
+       u8 rates[32];
+       u8 rates_len;
+       u8 qos_info;
+       u8 coex_2040;
+       u16 aid;
+       struct ieee80211_ht_cap ht_capb;
+       struct ieee80211_ht_operation ht_oper;
+       struct ieee_types_extcap extcap;
+       struct ieee_types_generic rsn_ie;
+       struct ieee80211_vht_cap vhtcap;
+       struct ieee80211_vht_operation vhtoper;
+};
+
+/* This is an AP/TDLS-specific structure which stores information
+ * about an associated/peer STA
  */
 struct mwifiex_sta_node {
        struct list_head list;
        u8 mac_addr[ETH_ALEN];
        u8 is_wmm_enabled;
        u8 is_11n_enabled;
+       u8 is_11ac_enabled;
        u8 ampdu_sta[MAX_NUM_TID];
        u16 rx_seq[MAX_NUM_TID];
        u16 max_amsdu;
+       u8 tdls_status;
+       struct mwifiex_tdls_capab tdls_cap;
 };
 
 struct mwifiex_if_ops {
@@ -671,7 +723,7 @@ struct mwifiex_adapter {
        struct cmd_ctrl_node *curr_cmd;
        /* spin lock for command */
        spinlock_t mwifiex_cmd_lock;
-       u32 num_cmd_timeout;
+       u8 is_cmd_timedout;
        u16 last_init_cmd;
        struct timer_list cmd_timer;
        struct list_head cmd_free_q;
@@ -722,15 +774,16 @@ struct mwifiex_adapter {
        u16 hs_activate_wait_q_woken;
        wait_queue_head_t hs_activate_wait_q;
        bool is_suspended;
+       bool hs_enabling;
        u8 event_body[MAX_EVENT_SIZE];
        u32 hw_dot_11n_dev_cap;
        u8 hw_dev_mcs_support;
+       u8 user_dev_mcs_support;
        u8 adhoc_11n_enabled;
        u8 sec_chan_offset;
        struct mwifiex_dbg dbg;
        u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
        u32 arp_filter_size;
-       u16 cmd_wait_q_required;
        struct mwifiex_wait_queue cmd_wait_q;
        u8 scan_wait_q_woken;
        spinlock_t queue_lock;          /* lock for tx queues */
@@ -753,6 +806,9 @@ struct mwifiex_adapter {
        atomic_t is_tx_received;
        atomic_t pending_bridged_pkts;
        struct semaphore *card_sem;
+       bool ext_scan;
+       u8 fw_api_ver;
+       u8 fw_key_api_major_ver, fw_key_api_minor_ver;
 };
 
 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -788,11 +844,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter);
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
                         struct cmd_ctrl_node *cmd_node);
 
-int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
-                          u16 cmd_action, u32 cmd_oid, void *data_buf);
-
-int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
-                         u16 cmd_action, u32 cmd_oid, void *data_buf);
+int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
+                    u16 cmd_action, u32 cmd_oid, void *data_buf, bool sync);
 
 void mwifiex_cmd_timeout_func(unsigned long function_context);
 
@@ -880,6 +933,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
 void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason);
 u8 mwifiex_band_to_radio_type(u8 band);
 int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter);
 int mwifiex_adhoc_start(struct mwifiex_private *priv,
                        struct cfg80211_ssid *adhoc_ssid);
 int mwifiex_adhoc_join(struct mwifiex_private *priv,
@@ -938,6 +992,12 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
                       struct cfg80211_ap_settings *params);
 void mwifiex_set_ba_params(struct mwifiex_private *priv);
 void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+                               struct host_cmd_ds_command *cmd,
+                               void *data_buf);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+                                        void *buf);
 
 /*
  * This function checks if the queuing is RA based or not.
@@ -1078,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
                       const u8 *key, int key_len, u8 key_index,
                       const u8 *mac_addr, int disable);
 
-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
+int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
 
 int mwifiex_get_ver_ext(struct mwifiex_private *priv);
 
@@ -1159,6 +1219,32 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
 
 extern const struct ethtool_ops mwifiex_ethtool_ops;
 
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+                      int ies_len, struct mwifiex_sta_node *node);
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+                                u8 action_code, u8 dialog_token,
+                                u16 status_code, const u8 *extra_ies,
+                                size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+                                u8 *peer, u8 action_code, u8 dialog_token,
+                                u16 status_code, const u8 *extra_ies,
+                                size_t extra_ies_len);
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+                                      u8 *buf, int len);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+                                u32 pri_chan, u8 chan_bw);
+
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
index 7fe7b53fb17a28d75cb7fa9a6fc315c9f0ddd937..a7e8b96b2d9024de8c34e5e04b317c66d2e22820 100644 (file)
@@ -39,20 +39,31 @@ static struct semaphore add_remove_card_sem;
 
 static int
 mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
-                      int size, int flags)
+                      size_t size, int flags)
 {
        struct pcie_service_card *card = adapter->card;
-       dma_addr_t buf_pa;
+       struct mwifiex_dma_mapping mapping;
 
-       buf_pa = pci_map_single(card->dev, skb->data, size, flags);
-       if (pci_dma_mapping_error(card->dev, buf_pa)) {
+       mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
+       if (pci_dma_mapping_error(card->dev, mapping.addr)) {
                dev_err(adapter->dev, "failed to map pci memory!\n");
                return -1;
        }
-       memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t));
+       mapping.len = size;
+       memcpy(skb->cb, &mapping, sizeof(mapping));
        return 0;
 }
 
+static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
+                                    struct sk_buff *skb, int flags)
+{
+       struct pcie_service_card *card = adapter->card;
+       struct mwifiex_dma_mapping mapping;
+
+       MWIFIEX_SKB_PACB(skb, &mapping);
+       pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+}
+
 /*
  * This function reads sleep cookie and checks if FW is ready
  */
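
The pcie.c hunk above replaces the bare dma_addr_t stashed in skb->cb with a struct mwifiex_dma_mapping carrying both the DMA address and the mapped length, so mwifiex_unmap_pci_memory() can unmap without consulting ring descriptors. A self-contained sketch of that pattern (userspace stand-ins for sk_buff and the DMA API; only the struct name follows the patch):

	#include <stdio.h>
	#include <string.h>

	typedef unsigned long long dma_addr_t;	/* stand-in for the kernel type */

	struct mwifiex_dma_mapping {
		dma_addr_t addr;
		size_t len;
	};

	struct fake_skb {
		char cb[48];	/* mirrors sk_buff's control block */
	};

	static void save_mapping(struct fake_skb *skb, dma_addr_t addr, size_t len)
	{
		struct mwifiex_dma_mapping mapping = { .addr = addr, .len = len };

		memcpy(skb->cb, &mapping, sizeof(mapping));	/* map side */
	}

	static void load_mapping(struct fake_skb *skb, struct mwifiex_dma_mapping *out)
	{
		memcpy(out, skb->cb, sizeof(*out));		/* unmap side */
	}

	int main(void)
	{
		struct fake_skb skb;
		struct mwifiex_dma_mapping m;

		save_mapping(&skb, 0x12345000ULL, 2048);
		load_mapping(&skb, &m);
		printf("unmap addr=%#llx len=%zu\n", m.addr, m.len);
		return 0;
	}
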
@@ -109,6 +120,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
 
        /* Indicate device suspended */
        adapter->is_suspended = true;
+       adapter->hs_enabling = false;
 
        return 0;
 }
@@ -179,6 +191,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.firmware = data->firmware;
                card->pcie.reg = data->reg;
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
+               card->pcie.tx_buf_size = data->tx_buf_size;
        }
 
        if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops,
@@ -199,7 +212,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        struct pcie_service_card *card;
        struct mwifiex_adapter *adapter;
        struct mwifiex_private *priv;
-       int i;
 
        card = pci_get_drvdata(pdev);
        if (!card)
@@ -218,11 +230,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
                        mwifiex_pcie_resume(&pdev->dev);
 #endif
 
-               for (i = 0; i < adapter->priv_num; i++)
-                       if ((GET_BSS_ROLE(adapter->priv[i]) ==
-                            MWIFIEX_BSS_ROLE_STA) &&
-                           adapter->priv[i]->media_connected)
-                               mwifiex_deauthenticate(adapter->priv[i], NULL);
+               mwifiex_deauthenticate_all(adapter);
 
                priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
@@ -320,6 +328,30 @@ static void mwifiex_pcie_dev_wakeup_delay(struct mwifiex_adapter *adapter)
        return;
 }
 
+static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
+                                          u32 max_delay_loop_cnt)
+{
+       struct pcie_service_card *card = adapter->card;
+       u8 *buffer;
+       u32 sleep_cookie, count;
+
+       for (count = 0; count < max_delay_loop_cnt; count++) {
+               buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN;
+               sleep_cookie = *(u32 *)buffer;
+
+               if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
+                       dev_dbg(adapter->dev,
+                               "sleep cookie found at count %d\n", count);
+                       break;
+               }
+               usleep_range(20, 30);
+       }
+
+       if (count >= max_delay_loop_cnt)
+               dev_dbg(adapter->dev,
+                       "max count reached while accessing sleep cookie\n");
+}
+
 /* This function wakes up the card by reading fw_status register. */
 static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
@@ -456,7 +488,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
                                           PCI_DMA_FROMDEVICE))
                        return -1;
 
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
+               buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
                dev_dbg(adapter->dev,
                        "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -513,7 +545,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
                                           PCI_DMA_FROMDEVICE))
                        return -1;
 
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
+               buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
                dev_dbg(adapter->dev,
                        "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -549,8 +581,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
                        desc2 = card->txbd_ring[i];
                        if (card->tx_buf_list[i]) {
                                skb = card->tx_buf_list[i];
-                               pci_unmap_single(card->dev, desc2->paddr,
-                                                skb->len, PCI_DMA_TODEVICE);
+                               mwifiex_unmap_pci_memory(adapter, skb,
+                                                        PCI_DMA_TODEVICE);
                                dev_kfree_skb_any(skb);
                        }
                        memset(desc2, 0, sizeof(*desc2));
@@ -558,8 +590,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
                        desc = card->txbd_ring[i];
                        if (card->tx_buf_list[i]) {
                                skb = card->tx_buf_list[i];
-                               pci_unmap_single(card->dev, desc->paddr,
-                                                skb->len, PCI_DMA_TODEVICE);
+                               mwifiex_unmap_pci_memory(adapter, skb,
+                                                        PCI_DMA_TODEVICE);
                                dev_kfree_skb_any(skb);
                        }
                        memset(desc, 0, sizeof(*desc));
@@ -587,8 +619,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
                        desc2 = card->rxbd_ring[i];
                        if (card->rx_buf_list[i]) {
                                skb = card->rx_buf_list[i];
-                               pci_unmap_single(card->dev, desc2->paddr,
-                                                skb->len, PCI_DMA_FROMDEVICE);
+                               mwifiex_unmap_pci_memory(adapter, skb,
+                                                        PCI_DMA_FROMDEVICE);
                                dev_kfree_skb_any(skb);
                        }
                        memset(desc2, 0, sizeof(*desc2));
@@ -596,8 +628,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
                        desc = card->rxbd_ring[i];
                        if (card->rx_buf_list[i]) {
                                skb = card->rx_buf_list[i];
-                               pci_unmap_single(card->dev, desc->paddr,
-                                                skb->len, PCI_DMA_FROMDEVICE);
+                               mwifiex_unmap_pci_memory(adapter, skb,
+                                                        PCI_DMA_FROMDEVICE);
                                dev_kfree_skb_any(skb);
                        }
                        memset(desc, 0, sizeof(*desc));
@@ -622,8 +654,8 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
                desc = card->evtbd_ring[i];
                if (card->evt_buf_list[i]) {
                        skb = card->evt_buf_list[i];
-                       pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
-                                        PCI_DMA_FROMDEVICE);
+                       mwifiex_unmap_pci_memory(adapter, skb,
+                                                PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(skb);
                }
                card->evt_buf_list[i] = NULL;
@@ -861,7 +893,6 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card;
-       dma_addr_t buf_pa;
 
        if (!adapter)
                return 0;
@@ -869,16 +900,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
        card = adapter->card;
 
        if (card && card->cmdrsp_buf) {
-               MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa);
-               pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
-                                PCI_DMA_FROMDEVICE);
+               mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
+                                        PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(card->cmdrsp_buf);
        }
 
        if (card && card->cmd_buf) {
-               MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
-               pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len,
-                                PCI_DMA_TODEVICE);
+               mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+                                        PCI_DMA_TODEVICE);
        }
        return 0;
 }
@@ -956,7 +985,6 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
 static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
 {
        struct sk_buff *skb;
-       dma_addr_t buf_pa;
        u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
        struct mwifiex_pcie_buf_desc *desc;
        struct mwifiex_pfu_buf_desc *desc2;
@@ -986,13 +1014,13 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
                            reg->tx_start_ptr;
 
                skb = card->tx_buf_list[wrdoneidx];
+
                if (skb) {
                        dev_dbg(adapter->dev,
                                "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
                                skb, wrdoneidx);
-                       MWIFIEX_SKB_PACB(skb, &buf_pa);
-                       pci_unmap_single(card->dev, buf_pa, skb->len,
-                                        PCI_DMA_TODEVICE);
+                       mwifiex_unmap_pci_memory(adapter, skb,
+                                                PCI_DMA_TODEVICE);
 
                        unmap_count++;
 
@@ -1006,7 +1034,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
                card->tx_buf_list[wrdoneidx] = NULL;
 
                if (reg->pfu_enabled) {
-                       desc2 = (void *)card->txbd_ring[wrdoneidx];
+                       desc2 = card->txbd_ring[wrdoneidx];
                        memset(desc2, 0, sizeof(*desc2));
                } else {
                        desc = card->txbd_ring[wrdoneidx];
@@ -1082,16 +1110,16 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                tmp = (__le16 *)&payload[2];
                *tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
 
-               if (mwifiex_map_pci_memory(adapter, skb, skb->len ,
+               if (mwifiex_map_pci_memory(adapter, skb, skb->len,
                                           PCI_DMA_TODEVICE))
                        return -1;
 
                wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
+               buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
                card->tx_buf_list[wrindx] = skb;
 
                if (reg->pfu_enabled) {
-                       desc2 = (void *)card->txbd_ring[wrindx];
+                       desc2 = card->txbd_ring[wrindx];
                        desc2->paddr = buf_pa;
                        desc2->len = (u16)skb->len;
                        desc2->frag_len = (u16)skb->len;
@@ -1162,8 +1190,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
 
        return -EINPROGRESS;
 done_unmap:
-       MWIFIEX_SKB_PACB(skb, &buf_pa);
-       pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
+       mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
        card->tx_buf_list[wrindx] = NULL;
        if (reg->pfu_enabled)
                memset(desc2, 0, sizeof(*desc2));
@@ -1217,9 +1244,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                if (!skb_data)
                        return -ENOMEM;
 
-               MWIFIEX_SKB_PACB(skb_data, &buf_pa);
-               pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
-                                PCI_DMA_FROMDEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
                card->rx_buf_list[rd_index] = NULL;
 
                /* Get data length from interface header -
@@ -1246,7 +1271,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                                           PCI_DMA_FROMDEVICE))
                        return -1;
 
-               MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
+               buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
 
                dev_dbg(adapter->dev,
                        "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
@@ -1254,7 +1279,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                card->rx_buf_list[rd_index] = skb_tmp;
 
                if (reg->pfu_enabled) {
-                       desc2 = (void *)card->rxbd_ring[rd_index];
+                       desc2 = card->rxbd_ring[rd_index];
                        desc2->paddr = buf_pa;
                        desc2->len = skb_tmp->len;
                        desc2->frag_len = skb_tmp->len;
@@ -1322,7 +1347,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
                return -1;
 
-       MWIFIEX_SKB_PACB(skb, &buf_pa);
+       buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
        /* Write the lower 32bits of the physical address to low command
         * address scratch register
@@ -1331,8 +1356,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                dev_err(adapter->dev,
                        "%s: failed to write download command to boot code.\n",
                        __func__);
-               pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
-                                PCI_DMA_TODEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
 
@@ -1344,8 +1368,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                dev_err(adapter->dev,
                        "%s: failed to write download command to boot code.\n",
                        __func__);
-               pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
-                                PCI_DMA_TODEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
 
@@ -1354,8 +1377,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                dev_err(adapter->dev,
                        "%s: failed to write command len to cmd_size scratch reg\n",
                        __func__);
-               pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
-                                PCI_DMA_TODEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
 
@@ -1364,8 +1386,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                              CPU_INTR_DOOR_BELL)) {
                dev_err(adapter->dev,
                        "%s: failed to assert door-bell intr\n", __func__);
-               pci_unmap_single(card->dev, buf_pa,
-                                MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
 
@@ -1439,7 +1460,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        */
 
        if (card->cmdrsp_buf) {
-               MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
+               cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
                /* Write the lower 32bits of the cmdrsp buffer physical
                   address */
                if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
@@ -1460,7 +1481,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                }
        }
 
-       MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
+       cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
        /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
        if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
                              (u32)cmd_buf_pa)) {
@@ -1514,13 +1535,17 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
        int count = 0;
        u16 rx_len;
        __le16 pkt_len;
-       dma_addr_t buf_pa;
 
        dev_dbg(adapter->dev, "info: Rx CMD Response\n");
 
-       MWIFIEX_SKB_PACB(skb, &buf_pa);
-       pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
-                        PCI_DMA_FROMDEVICE);
+       mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+
+       /* Unmap the command as a response has been received. */
+       if (card->cmd_buf) {
+               mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+                                        PCI_DMA_TODEVICE);
+               card->cmd_buf = NULL;
+       }
 
        pkt_len = *((__le16 *)skb->data);
        rx_len = le16_to_cpu(pkt_len);
@@ -1539,6 +1564,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                                         "Write register failed\n");
                                return -1;
                        }
+                       mwifiex_delay_for_sleep_cookie(adapter,
+                                                      MWIFIEX_MAX_DELAY_COUNT);
                        while (reg->sleep_cookie && (count++ < 10) &&
                               mwifiex_pcie_ok_to_access_hw(adapter))
                                usleep_range(50, 60);
@@ -1552,8 +1579,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
                                           PCI_DMA_FROMDEVICE))
                        return -1;
-
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
        } else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
                adapter->curr_cmd->resp_skb = skb;
                adapter->cmd_resp_received = true;
@@ -1588,8 +1613,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
                                        struct sk_buff *skb)
 {
        struct pcie_service_card *card = adapter->card;
-       dma_addr_t buf_pa;
-       struct sk_buff *skb_tmp;
 
        if (skb) {
                card->cmdrsp_buf = skb;
@@ -1599,14 +1622,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
                        return -1;
        }
 
-       skb_tmp = card->cmd_buf;
-       if (skb_tmp) {
-               MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
-               pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
-                                PCI_DMA_FROMDEVICE);
-               card->cmd_buf = NULL;
-       }
-
        return 0;
 }
 
@@ -1619,7 +1634,6 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
        u32 wrptr, event;
-       dma_addr_t buf_pa;
        struct mwifiex_evt_buf_desc *desc;
 
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1655,9 +1669,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
 
                dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
                skb_cmd = card->evt_buf_list[rdptr];
-               MWIFIEX_SKB_PACB(skb_cmd, &buf_pa);
-               pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
-                                PCI_DMA_FROMDEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
 
                /* Take the pointer and set it to event pointer in adapter
                   and will return back after event handling callback */
@@ -1703,7 +1715,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
        int ret = 0;
        u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
        u32 wrptr;
-       dma_addr_t buf_pa;
        struct mwifiex_evt_buf_desc *desc;
 
        if (!skb)
@@ -1728,11 +1739,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
                                           MAX_EVENT_SIZE,
                                           PCI_DMA_FROMDEVICE))
                        return -1;
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
                card->evt_buf_list[rdptr] = skb;
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
                desc = card->evtbd_ring[rdptr];
-               desc->paddr = buf_pa;
+               desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
                desc->len = (u16)skb->len;
                desc->flags = 0;
                skb = NULL;
@@ -1782,7 +1791,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        struct sk_buff *skb;
        u32 txlen, tx_blocks = 0, tries, len;
        u32 block_retry_cnt = 0;
-       dma_addr_t buf_pa;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
@@ -1880,8 +1888,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        goto done;
                }
 
-               MWIFIEX_SKB_PACB(skb, &buf_pa);
-
                /* Wait for the command done interrupt */
                do {
                        if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1889,16 +1895,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                dev_err(adapter->dev, "%s: Failed to read "
                                        "interrupt status during fw dnld.\n",
                                        __func__);
-                               pci_unmap_single(card->dev, buf_pa, skb->len,
-                                                PCI_DMA_TODEVICE);
+                               mwifiex_unmap_pci_memory(adapter, skb,
+                                                        PCI_DMA_TODEVICE);
                                ret = -1;
                                goto done;
                        }
                } while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
                         CPU_INTR_DOOR_BELL);
 
-               pci_unmap_single(card->dev, buf_pa, skb->len,
-                                PCI_DMA_TODEVICE);
+               mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
 
                offset += txlen;
        } while (true);
@@ -2338,6 +2343,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        }
 
        adapter->dev = &pdev->dev;
+       adapter->tx_buf_size = card->pcie.tx_buf_size;
        strcpy(adapter->fw_name, card->pcie.firmware);
 
        return 0;
index d322ab8604ea806e8216ab122d1ae30df6cc618d..e8ec561f8a642495e410793539a0f2b808435cdf 100644 (file)
@@ -97,6 +97,8 @@
 #define MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD                256
 /* FW awake cookie after FW ready */
 #define FW_AWAKE_COOKIE                                                (0xAA55AA55)
+#define MWIFIEX_DEF_SLEEP_COOKIE                       0xBEEFBEEF
+#define MWIFIEX_MAX_DELAY_COUNT                                5
 
 struct mwifiex_pcie_card_reg {
        u16 cmd_addr_lo;
@@ -195,18 +197,21 @@ struct mwifiex_pcie_device {
        const char *firmware;
        const struct mwifiex_pcie_card_reg *reg;
        u16 blksz_fw_dl;
+       u16 tx_buf_size;
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
        .firmware       = PCIE8766_DEFAULT_FW_NAME,
        .reg            = &mwifiex_reg_8766,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
 };
 
 static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .firmware       = PCIE8897_DEFAULT_FW_NAME,
        .reg            = &mwifiex_reg_8897,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
 };
 
 struct mwifiex_evt_buf_desc {
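
The new tx_buf_size field follows the driver's existing table-driven configuration: each chipset has a static descriptor (mwifiex_pcie_device here, mwifiex_sdio_device further down), and mwifiex_register_dev() copies the per-chip value into the shared adapter. A small stand-alone sketch of that pattern, with invented names and values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative per-chipset descriptor: one static table entry per chip,
 * copied into shared adapter state at registration time.
 */
struct example_device_cfg {
	const char *firmware;
	uint16_t blksz_fw_dl;
	uint16_t tx_buf_size;	/* 2048 or 4096, like the _2K/_4K macros */
};

static const struct example_device_cfg example_chip_a = {
	.firmware = "example/fw_a.bin", .blksz_fw_dl = 256, .tx_buf_size = 2048,
};

static const struct example_device_cfg example_chip_b = {
	.firmware = "example/fw_b.bin", .blksz_fw_dl = 256, .tx_buf_size = 4096,
};

struct example_adapter {
	uint16_t tx_buf_size;
};

static void example_register(struct example_adapter *adapter,
			     const struct example_device_cfg *cfg)
{
	adapter->tx_buf_size = cfg->tx_buf_size;
}

int main(void)
{
	struct example_adapter adapter = { 0 };

	example_register(&adapter, &example_chip_b);
	printf("tx_buf_size = %u\n", (unsigned)adapter.tx_buf_size);
	return 0;
}
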
index 668547c2de8464ed93fedc37066ef0fc3c50f320..7b3af3d29ded478ad658eed5a3836403d6dd7542 100644 (file)
@@ -591,11 +591,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                          *chan_tlv_out,
                          struct mwifiex_chan_scan_param_set *scan_chan_list)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
        int ret = 0;
        struct mwifiex_chan_scan_param_set *tmp_chan_list;
        struct mwifiex_chan_scan_param_set *start_chan;
-
-       u32 tlv_idx, rates_size;
+       struct cmd_ctrl_node *cmd_node, *tmp_node;
+       unsigned long flags;
+       u32 tlv_idx, rates_size, cmd_no;
        u32 total_scan_time;
        u32 done_early;
        u8 radio_type;
@@ -733,9 +735,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
 
                /* Send the scan command to the firmware with the specified
                   cfg */
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
-                                            HostCmd_ACT_GEN_SET, 0,
-                                            scan_cfg_out);
+               if (priv->adapter->ext_scan)
+                       cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
+               else
+                       cmd_no = HostCmd_CMD_802_11_SCAN;
+
+               ret = mwifiex_send_cmd(priv, cmd_no, HostCmd_ACT_GEN_SET,
+                                      0, scan_cfg_out, false);
 
                /* rate IE is updated per scan command but same starting
                 * pointer is used each time so that rate IE from earlier
@@ -744,8 +750,19 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                scan_cfg_out->tlv_buf_len -=
                            sizeof(struct mwifiex_ie_types_header) + rates_size;
 
-               if (ret)
+               if (ret) {
+                       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+                       list_for_each_entry_safe(cmd_node, tmp_node,
+                                                &adapter->scan_pending_q,
+                                                list) {
+                               list_del(&cmd_node->list);
+                               cmd_node->wait_q_enabled = false;
+                               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+                       }
+                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+                                              flags);
                        break;
+               }
        }
 
        if (ret)
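
The error path added above drains scan_pending_q under scan_pending_q_lock and returns every queued scan command to the free queue, so a failed scan command does not leave stale entries behind. A compact sketch of that flush pattern using the same list/spinlock primitives (the node type and free-queue helper are illustrative stand-ins, not the driver's own):

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_cmd_node {
	struct list_head list;
	bool wait_q_enabled;
};

/* Illustrative stand-in for mwifiex_insert_cmd_to_free_q(). */
static void example_return_to_free_q(struct example_cmd_node *node)
{
	/* would re-queue the node onto a free list here */
}

/*
 * Drain a pending queue safely on error: detach each node while the
 * queue lock is held, clear its wait flag, and hand it back to the
 * free pool (which takes its own lock, as in the driver).
 */
static void example_flush_pending(struct list_head *pending_q,
				  spinlock_t *lock)
{
	struct example_cmd_node *node, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(node, tmp, pending_q, list) {
		list_del(&node->list);
		node->wait_q_enabled = false;
		example_return_to_free_q(node);
	}
	spin_unlock_irqrestore(lock, flags);
}
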
@@ -786,6 +803,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
        struct mwifiex_ie_types_num_probes *num_probes_tlv;
        struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+       struct mwifiex_ie_types_bssid_list *bssid_tlv;
        u8 *tlv_pos;
        u32 num_probes;
        u32 ssid_len;
@@ -848,6 +866,17 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                       user_scan_in->specific_bssid,
                       sizeof(scan_cfg_out->specific_bssid));
 
+               if (adapter->ext_scan &&
+                   !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+                       bssid_tlv =
+                               (struct mwifiex_ie_types_bssid_list *)tlv_pos;
+                       bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
+                       bssid_tlv->header.len = cpu_to_le16(ETH_ALEN);
+                       memcpy(bssid_tlv->bssid, user_scan_in->specific_bssid,
+                              ETH_ALEN);
+                       tlv_pos += sizeof(struct mwifiex_ie_types_bssid_list);
+               }
+
                for (i = 0; i < user_scan_in->num_ssids; i++) {
                        ssid_len = user_scan_in->ssid_list[i].ssid_len;
 
@@ -941,7 +970,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                                cpu_to_le16(sizeof(struct ieee80211_ht_cap));
                radio_type =
                        mwifiex_band_to_radio_type(priv->adapter->config_bands);
-               mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+               mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
                tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
        }
 
@@ -1576,6 +1605,228 @@ done:
        return 0;
 }
 
+static int
+mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+                                 u32 *bytes_left, u64 fw_tsf, u8 *radio_type,
+                                 bool ext_scan, s32 rssi_val)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct mwifiex_chan_freq_power *cfp;
+       struct cfg80211_bss *bss;
+       u8 bssid[ETH_ALEN];
+       s32 rssi;
+       const u8 *ie_buf;
+       size_t ie_len;
+       u16 channel = 0;
+       u16 beacon_size = 0;
+       u32 curr_bcn_bytes;
+       u32 freq;
+       u16 beacon_period;
+       u16 cap_info_bitmap;
+       u8 *current_ptr;
+       u64 timestamp;
+       struct mwifiex_fixed_bcn_param *bcn_param;
+       struct mwifiex_bss_priv *bss_priv;
+
+       if (*bytes_left >= sizeof(beacon_size)) {
+               /* Extract & convert beacon size from command buffer */
+               memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+               *bytes_left -= sizeof(beacon_size);
+               *bss_info += sizeof(beacon_size);
+       }
+
+       if (!beacon_size || beacon_size > *bytes_left) {
+               *bss_info += *bytes_left;
+               *bytes_left = 0;
+               return -EFAULT;
+       }
+
+       /* Initialize the current working beacon pointer for this BSS
+        * iteration
+        */
+       current_ptr = *bss_info;
+
+       /* Advance the return beacon pointer past the current beacon */
+       *bss_info += beacon_size;
+       *bytes_left -= beacon_size;
+
+       curr_bcn_bytes = beacon_size;
+
+       /* First 5 fields are bssid, RSSI (for legacy scan only),
+        * time stamp, beacon interval, and capability information
+        */
+       if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
+           sizeof(struct mwifiex_fixed_bcn_param)) {
+               dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+               return -EFAULT;
+       }
+
+       memcpy(bssid, current_ptr, ETH_ALEN);
+       current_ptr += ETH_ALEN;
+       curr_bcn_bytes -= ETH_ALEN;
+
+       if (!ext_scan) {
+               rssi = (s32) *current_ptr;
+               rssi = (-rssi) * 100;           /* Convert dBm to mBm */
+               current_ptr += sizeof(u8);
+               curr_bcn_bytes -= sizeof(u8);
+               dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+       } else {
+               rssi = rssi_val;
+       }
+
+       bcn_param = (struct mwifiex_fixed_bcn_param *)current_ptr;
+       current_ptr += sizeof(*bcn_param);
+       curr_bcn_bytes -= sizeof(*bcn_param);
+
+       timestamp = le64_to_cpu(bcn_param->timestamp);
+       beacon_period = le16_to_cpu(bcn_param->beacon_period);
+
+       cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
+       dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
+               cap_info_bitmap);
+
+       /* Rest of the current buffer are IE's */
+       ie_buf = current_ptr;
+       ie_len = curr_bcn_bytes;
+       dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
+               curr_bcn_bytes);
+
+       while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
+               u8 element_id, element_len;
+
+               element_id = *current_ptr;
+               element_len = *(current_ptr + 1);
+               if (curr_bcn_bytes < element_len +
+                               sizeof(struct ieee_types_header)) {
+                       dev_err(adapter->dev,
+                               "%s: bytes left < IE length\n", __func__);
+                       return -EFAULT;
+               }
+               if (element_id == WLAN_EID_DS_PARAMS) {
+                       channel = *(current_ptr +
+                                   sizeof(struct ieee_types_header));
+                       break;
+               }
+
+               current_ptr += element_len + sizeof(struct ieee_types_header);
+               curr_bcn_bytes -= element_len +
+                                       sizeof(struct ieee_types_header);
+       }
+
+       if (channel) {
+               struct ieee80211_channel *chan;
+               u8 band;
+
+               /* Skip entry if on csa closed channel */
+               if (channel == priv->csa_chan) {
+                       dev_dbg(adapter->dev,
+                               "Dropping entry on csa closed channel\n");
+                       return 0;
+               }
+
+               band = BAND_G;
+               if (radio_type)
+                       band = mwifiex_radio_type_to_band(*radio_type &
+                                                         (BIT(0) | BIT(1)));
+
+               cfp = mwifiex_get_cfp(priv, band, channel, 0);
+
+               freq = cfp ? cfp->freq : 0;
+
+               chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
+
+               if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+                       bss = cfg80211_inform_bss(priv->wdev->wiphy,
+                                           chan, bssid, timestamp,
+                                           cap_info_bitmap, beacon_period,
+                                           ie_buf, ie_len, rssi, GFP_KERNEL);
+                       bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+                       bss_priv->band = band;
+                       bss_priv->fw_tsf = fw_tsf;
+                       if (priv->media_connected &&
+                           !memcmp(bssid, priv->curr_bss_params.bss_descriptor
+                                   .mac_address, ETH_ALEN))
+                               mwifiex_update_curr_bss_params(priv, bss);
+                       cfg80211_put_bss(priv->wdev->wiphy, bss);
+               }
+       } else {
+               dev_dbg(adapter->dev, "missing BSS channel IE\n");
+       }
+
+       return 0;
+}
+
+static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       struct cmd_ctrl_node *cmd_node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+       if (list_empty(&adapter->scan_pending_q)) {
+               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+               adapter->scan_processing = false;
+               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+               /* Need to indicate IOCTL complete */
+               if (adapter->curr_cmd->wait_q_enabled) {
+                       adapter->cmd_wait_q.status = 0;
+                       if (!priv->scan_request) {
+                               dev_dbg(adapter->dev,
+                                       "complete internal scan\n");
+                               mwifiex_complete_cmd(adapter,
+                                                    adapter->curr_cmd);
+                       }
+               }
+               if (priv->report_scan_result)
+                       priv->report_scan_result = false;
+
+               if (priv->scan_request) {
+                       dev_dbg(adapter->dev, "info: notifying scan done\n");
+                       cfg80211_scan_done(priv->scan_request, 0);
+                       priv->scan_request = NULL;
+               } else {
+                       priv->scan_aborting = false;
+                       dev_dbg(adapter->dev, "info: scan already aborted\n");
+               }
+       } else {
+               if ((priv->scan_aborting && !priv->scan_request) ||
+                   priv->scan_block) {
+                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+                                              flags);
+                       adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+                       mod_timer(&priv->scan_delay_timer, jiffies);
+                       dev_dbg(priv->adapter->dev,
+                               "info: %s: triggering scan abort\n", __func__);
+               } else if (!mwifiex_wmm_lists_empty(adapter) &&
+                          (priv->scan_request && (priv->scan_request->flags &
+                                           NL80211_SCAN_FLAG_LOW_PRIORITY))) {
+                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+                                              flags);
+                       adapter->scan_delay_cnt = 1;
+                       mod_timer(&priv->scan_delay_timer, jiffies +
+                                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+                       dev_dbg(priv->adapter->dev,
+                               "info: %s: deferring scan\n", __func__);
+               } else {
+                       /* Get scan command from scan_pending_q and put to
+                        * cmd_pending_q
+                        */
+                       cmd_node = list_first_entry(&adapter->scan_pending_q,
+                                                   struct cmd_ctrl_node, list);
+                       list_del(&cmd_node->list);
+                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+                                              flags);
+                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+                                                       true);
+               }
+       }
+
+       return;
+}
+
 /*
  * This function handles the command response of scan.
  *
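
The new mwifiex_parse_single_response_buf() consumes one length-prefixed entry at a time: a u16 beacon size, then the BSSID, a one-byte RSSI for legacy scans only, the fixed beacon parameters, and finally the IEs. A small host-side sketch of walking such a buffer; the cursor handling is the point, and the struct names, field widths, and little-endian assumption are illustrative:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define EX_ETH_ALEN 6

/* Fixed beacon fields that follow the BSSID (and, for legacy scans,
 * a one-byte RSSI).  Host-side illustration, not the firmware layout.
 */
struct ex_fixed_bcn_param {
	uint64_t timestamp;
	uint16_t beacon_period;
	uint16_t cap_info_bitmap;
} __attribute__((packed));

/*
 * Consume one length-prefixed scan entry, advancing *buf/*bytes_left
 * the same way the parser above does.  Returns -1 on a truncated
 * entry.  Assumes a little-endian host for the memcpy of the size.
 */
static int ex_parse_entry(const uint8_t **buf, uint32_t *bytes_left,
			  int legacy_rssi)
{
	uint32_t fixed = EX_ETH_ALEN + (legacy_rssi ? 1 : 0) +
			 sizeof(struct ex_fixed_bcn_param);
	uint16_t beacon_size;
	const uint8_t *entry;

	if (*bytes_left < sizeof(beacon_size))
		return -1;
	memcpy(&beacon_size, *buf, sizeof(beacon_size));
	*buf += sizeof(beacon_size);
	*bytes_left -= sizeof(beacon_size);

	if (!beacon_size || beacon_size > *bytes_left)
		return -1;

	entry = *buf;
	*buf += beacon_size;		/* next entry starts right after */
	*bytes_left -= beacon_size;

	if (beacon_size < fixed)
		return -1;

	printf("BSSID %02x:%02x:..., %u IE bytes\n",
	       entry[0], entry[1], beacon_size - fixed);
	return 0;
}
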
@@ -1600,7 +1851,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 {
        int ret = 0;
        struct mwifiex_adapter *adapter = priv->adapter;
-       struct cmd_ctrl_node *cmd_node;
        struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
        struct mwifiex_ie_types_data *tlv_data;
        struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
@@ -1609,12 +1859,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
        u32 bytes_left;
        u32 idx;
        u32 tlv_buf_size;
-       struct mwifiex_chan_freq_power *cfp;
        struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
        struct chan_band_param_set *chan_band;
        u8 is_bgscan_resp;
-       unsigned long flags;
-       struct cfg80211_bss *bss;
+       __le64 fw_tsf = 0;
+       u8 *radio_type;
 
        is_bgscan_resp = (le16_to_cpu(resp->command)
                          == HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -1676,220 +1925,194 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                             &chan_band_tlv);
 
        for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
-               u8 bssid[ETH_ALEN];
-               s32 rssi;
-               const u8 *ie_buf;
-               size_t ie_len;
-               u16 channel = 0;
-               __le64 fw_tsf = 0;
-               u16 beacon_size = 0;
-               u32 curr_bcn_bytes;
-               u32 freq;
-               u16 beacon_period;
-               u16 cap_info_bitmap;
-               u8 *current_ptr;
-               u64 timestamp;
-               struct mwifiex_bcn_param *bcn_param;
-               struct mwifiex_bss_priv *bss_priv;
-
-               if (bytes_left >= sizeof(beacon_size)) {
-                       /* Extract & convert beacon size from command buffer */
-                       memcpy(&beacon_size, bss_info, sizeof(beacon_size));
-                       bytes_left -= sizeof(beacon_size);
-                       bss_info += sizeof(beacon_size);
-               }
+               /*
+                * If the TSF TLV was appended to the scan results, save this
+                * entry's TSF value in the fw_tsf field. It is the firmware's
+                * TSF value at the time the beacon or probe response was
+                * received.
+                */
+               if (tsf_tlv)
+                       memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+                              sizeof(fw_tsf));
 
-               if (!beacon_size || beacon_size > bytes_left) {
-                       bss_info += bytes_left;
-                       bytes_left = 0;
-                       ret = -1;
-                       goto check_next_scan;
+               if (chan_band_tlv) {
+                       chan_band = &chan_band_tlv->chan_band_param[idx];
+                       radio_type = &chan_band->radio_type;
+               } else {
+                       radio_type = NULL;
                }
 
-               /* Initialize the current working beacon pointer for this BSS
-                * iteration */
-               current_ptr = bss_info;
+               ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+                                                       &bytes_left,
+                                                       le64_to_cpu(fw_tsf),
+                                                       radio_type, false, 0);
+               if (ret)
+                       goto check_next_scan;
+       }
 
-               /* Advance the return beacon pointer past the current beacon */
-               bss_info += beacon_size;
-               bytes_left -= beacon_size;
+check_next_scan:
+       mwifiex_check_next_scan_command(priv);
+       return ret;
+}
 
-               curr_bcn_bytes = beacon_size;
+/*
+ * This function prepares an extended scan command to be sent to the firmware.
+ *
+ * It uses the scan command configuration sent to the command processing
+ * module in the command preparation stage to configure an extended scan
+ * command structure to send to the firmware.
+ */
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+                               struct host_cmd_ds_command *cmd,
+                               void *data_buf)
+{
+       struct host_cmd_ds_802_11_scan_ext *ext_scan = &cmd->params.ext_scan;
+       struct mwifiex_scan_cmd_config *scan_cfg = data_buf;
 
-               /*
-                * First 5 fields are bssid, RSSI, time stamp, beacon interval,
-                *   and capability information
-                */
-               if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
-                       dev_err(adapter->dev,
-                               "InterpretIE: not enough bytes left\n");
-                       continue;
-               }
-               bcn_param = (struct mwifiex_bcn_param *)current_ptr;
-               current_ptr += sizeof(*bcn_param);
-               curr_bcn_bytes -= sizeof(*bcn_param);
+       memcpy(ext_scan->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
 
-               memcpy(bssid, bcn_param->bssid, ETH_ALEN);
+       cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
 
-               rssi = (s32) bcn_param->rssi;
-               rssi = (-rssi) * 100;           /* Convert dBm to mBm */
-               dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+       /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
+       cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved)
+                                     + scan_cfg->tlv_buf_len + S_DS_GEN));
 
-               timestamp = le64_to_cpu(bcn_param->timestamp);
-               beacon_period = le16_to_cpu(bcn_param->beacon_period);
+       return 0;
+}
 
-               cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
-               dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
-                       cap_info_bitmap);
+/* This function handles the command response of extended scan */
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+{
+       dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+       return 0;
+}
 
-               /* Rest of the current buffer are IE's */
-               ie_buf = current_ptr;
-               ie_len = curr_bcn_bytes;
-               dev_dbg(adapter->dev,
-                       "info: InterpretIE: IELength for this AP = %d\n",
-                       curr_bcn_bytes);
+/* This function handles the extended scan report event. It parses the
+ * extended scan results and informs the cfg80211 stack.
+ */
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+                                        void *buf)
+{
+       int ret = 0;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u8 *bss_info;
+       u32 bytes_left, bytes_left_for_tlv, idx;
+       u16 type, len;
+       struct mwifiex_ie_types_data *tlv;
+       struct mwifiex_ie_types_bss_scan_rsp *scan_rsp_tlv;
+       struct mwifiex_ie_types_bss_scan_info *scan_info_tlv;
+       u8 *radio_type;
+       u64 fw_tsf = 0;
+       s32 rssi = 0;
+       struct mwifiex_event_scan_result *event_scan = buf;
+       u8 num_of_set = event_scan->num_of_set;
+       u8 *scan_resp = buf + sizeof(struct mwifiex_event_scan_result);
+       u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
+
+       if (num_of_set > MWIFIEX_MAX_AP) {
+               dev_err(adapter->dev,
+                       "EXT_SCAN: Invalid number of APs returned (%d)!\n",
+                       num_of_set);
+               ret = -1;
+               goto check_next_scan;
+       }
 
-               while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
-                       u8 element_id, element_len;
+       bytes_left = scan_resp_size;
+       dev_dbg(adapter->dev,
+               "EXT_SCAN: size %d, returned %d APs...",
+               scan_resp_size, num_of_set);
 
-                       element_id = *current_ptr;
-                       element_len = *(current_ptr + 1);
-                       if (curr_bcn_bytes < element_len +
-                                       sizeof(struct ieee_types_header)) {
-                               dev_err(priv->adapter->dev,
-                                       "%s: bytes left < IE length\n",
-                                       __func__);
-                               goto check_next_scan;
-                       }
-                       if (element_id == WLAN_EID_DS_PARAMS) {
-                               channel = *(current_ptr + sizeof(struct ieee_types_header));
-                               break;
-                       }
+       tlv = (struct mwifiex_ie_types_data *)scan_resp;
 
-                       current_ptr += element_len +
-                                       sizeof(struct ieee_types_header);
-                       curr_bcn_bytes -= element_len +
-                                       sizeof(struct ieee_types_header);
+       for (idx = 0; idx < num_of_set && bytes_left; idx++) {
+               type = le16_to_cpu(tlv->header.type);
+               len = le16_to_cpu(tlv->header.len);
+               if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
+                       dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+                       break;
                }
+               scan_rsp_tlv = NULL;
+               scan_info_tlv = NULL;
+               bytes_left_for_tlv = bytes_left;
 
-               /*
-                * If the TSF TLV was appended to the scan results, save this
-                * entry's TSF value in the fw_tsf field. It is the firmware's
-                * TSF value at the time the beacon or probe response was
-                * received.
+               /* BSS response TLV with beacon or probe response buffer
+                * at the initial position of each descriptor
                 */
-               if (tsf_tlv)
-                       memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
-                              sizeof(fw_tsf));
-
-               if (channel) {
-                       struct ieee80211_channel *chan;
-                       u8 band;
+               if (type != TLV_TYPE_BSS_SCAN_RSP)
+                       break;
 
-                       /* Skip entry if on csa closed channel */
-                       if (channel == priv->csa_chan) {
-                               dev_dbg(adapter->dev,
-                                       "Dropping entry on csa closed channel\n");
+               bss_info = (u8 *)tlv;
+               scan_rsp_tlv = (struct mwifiex_ie_types_bss_scan_rsp *)tlv;
+               tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+               bytes_left_for_tlv -=
+                               (len + sizeof(struct mwifiex_ie_types_header));
+
+               while (bytes_left_for_tlv >=
+                      sizeof(struct mwifiex_ie_types_header) &&
+                      le16_to_cpu(tlv->header.type) != TLV_TYPE_BSS_SCAN_RSP) {
+                       type = le16_to_cpu(tlv->header.type);
+                       len = le16_to_cpu(tlv->header.len);
+                       if (bytes_left_for_tlv <
+                           sizeof(struct mwifiex_ie_types_header) + len) {
+                               dev_err(adapter->dev,
+                                       "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+                               scan_rsp_tlv = NULL;
+                               bytes_left_for_tlv = 0;
                                continue;
                        }
-
-                       band = BAND_G;
-                       if (chan_band_tlv) {
-                               chan_band =
-                                       &chan_band_tlv->chan_band_param[idx];
-                               band = mwifiex_radio_type_to_band(
-                                               chan_band->radio_type
-                                               & (BIT(0) | BIT(1)));
-                       }
-
-                       cfp = mwifiex_get_cfp(priv, band, channel, 0);
-
-                       freq = cfp ? cfp->freq : 0;
-
-                       chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
-
-                       if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
-                               bss = cfg80211_inform_bss(priv->wdev->wiphy,
-                                             chan, bssid, timestamp,
-                                             cap_info_bitmap, beacon_period,
-                                             ie_buf, ie_len, rssi, GFP_KERNEL);
-                               bss_priv = (struct mwifiex_bss_priv *)bss->priv;
-                               bss_priv->band = band;
-                               bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
-                               if (priv->media_connected &&
-                                   !memcmp(bssid,
-                                           priv->curr_bss_params.bss_descriptor
-                                           .mac_address, ETH_ALEN))
-                                       mwifiex_update_curr_bss_params(priv,
-                                                                      bss);
-                               cfg80211_put_bss(priv->wdev->wiphy, bss);
+                       switch (type) {
+                       case TLV_TYPE_BSS_SCAN_INFO:
+                               scan_info_tlv =
+                                 (struct mwifiex_ie_types_bss_scan_info *)tlv;
+                               if (len !=
+                                sizeof(struct mwifiex_ie_types_bss_scan_info) -
+                                sizeof(struct mwifiex_ie_types_header)) {
+                                       bytes_left_for_tlv = 0;
+                                       continue;
+                               }
+                               break;
+                       default:
+                               break;
                        }
-               } else {
-                       dev_dbg(adapter->dev, "missing BSS channel IE\n");
+                       tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+                       bytes_left -=
+                               (len + sizeof(struct mwifiex_ie_types_header));
+                       bytes_left_for_tlv -=
+                               (len + sizeof(struct mwifiex_ie_types_header));
                }
-       }
 
-check_next_scan:
-       spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
-       if (list_empty(&adapter->scan_pending_q)) {
-               spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-               adapter->scan_processing = false;
-               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+               if (!scan_rsp_tlv)
+                       break;
 
-               /* Need to indicate IOCTL complete */
-               if (adapter->curr_cmd->wait_q_enabled) {
-                       adapter->cmd_wait_q.status = 0;
-                       if (!priv->scan_request) {
-                               dev_dbg(adapter->dev,
-                                       "complete internal scan\n");
-                               mwifiex_complete_cmd(adapter,
-                                                    adapter->curr_cmd);
-                       }
-               }
-               if (priv->report_scan_result)
-                       priv->report_scan_result = false;
+               /* Advance pointer to the beacon buffer length and
+                * update the bytes count so that the function
+                * wlan_interpret_bss_desc_with_ie() can handle the
+                * scan buffer without any change
+                */
+               bss_info += sizeof(u16);
+               bytes_left -= sizeof(u16);
 
-               if (priv->scan_request) {
-                       dev_dbg(adapter->dev, "info: notifying scan done\n");
-                       cfg80211_scan_done(priv->scan_request, 0);
-                       priv->scan_request = NULL;
-               } else {
-                       priv->scan_aborting = false;
-                       dev_dbg(adapter->dev, "info: scan already aborted\n");
-               }
-       } else {
-               if ((priv->scan_aborting && !priv->scan_request) ||
-                   priv->scan_block) {
-                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                                              flags);
-                       adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
-                       mod_timer(&priv->scan_delay_timer, jiffies);
-                       dev_dbg(priv->adapter->dev,
-                               "info: %s: triggerring scan abort\n", __func__);
-               } else if (!mwifiex_wmm_lists_empty(adapter) &&
-                          (priv->scan_request && (priv->scan_request->flags &
-                                           NL80211_SCAN_FLAG_LOW_PRIORITY))) {
-                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                                              flags);
-                       adapter->scan_delay_cnt = 1;
-                       mod_timer(&priv->scan_delay_timer, jiffies +
-                                 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
-                       dev_dbg(priv->adapter->dev,
-                               "info: %s: deferring scan\n", __func__);
+               if (scan_info_tlv) {
+                       rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
+                       rssi *= 100;           /* Convert dBm to mBm */
+                       dev_dbg(adapter->dev,
+                               "info: InterpretIE: RSSI=%d\n", rssi);
+                       fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
+                       radio_type = &scan_info_tlv->radio_type;
                } else {
-                       /* Get scan command from scan_pending_q and put to
-                          cmd_pending_q */
-                       cmd_node = list_first_entry(&adapter->scan_pending_q,
-                                                   struct cmd_ctrl_node, list);
-                       list_del(&cmd_node->list);
-                       spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
-                                              flags);
-                       mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
-                                                       true);
+                       radio_type = NULL;
                }
+               ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+                                                       &bytes_left, fw_tsf,
+                                                       radio_type, true, rssi);
+               if (ret)
+                       goto check_next_scan;
        }
 
+check_next_scan:
+       if (!event_scan->more_event)
+               mwifiex_check_next_scan_command(priv);
+
        return ret;
 }
 
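
The extended scan report handler above is essentially a nested TLV walk: the outer loop expects one BSS_SCAN_RSP TLV per AP, and the inner loop scans the TLVs that follow it (such as BSS_SCAN_INFO) until the next BSS_SCAN_RSP or the buffer runs out. A minimal sketch of the underlying type/length walk it relies on (names are placeholders):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Generic type/length header, as used by the TLVs above (assumes a
 * little-endian host for brevity).
 */
struct ex_tlv_header {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

/* Walk a flat TLV buffer, stopping on the first truncated element. */
static void ex_walk_tlvs(const uint8_t *buf, uint32_t bytes_left)
{
	while (bytes_left >= sizeof(struct ex_tlv_header)) {
		struct ex_tlv_header hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		if (bytes_left < sizeof(hdr) + hdr.len) {
			fprintf(stderr, "truncated TLV (type 0x%04x)\n",
				(unsigned)hdr.type);
			return;
		}
		printf("TLV type 0x%04x, %u value bytes\n",
		       (unsigned)hdr.type, (unsigned)hdr.len);

		buf += sizeof(hdr) + hdr.len;
		bytes_left -= sizeof(hdr) + hdr.len;
	}
}
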
index b44a31523461e2c5ba7d768c941fc0934abaacb6..d206f04d499498d6d9c7ba92a80685588ae7bc96 100644 (file)
@@ -84,6 +84,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->mp_agg_pkt_limit = data->mp_agg_pkt_limit;
                card->supports_sdio_new_mode = data->supports_sdio_new_mode;
                card->has_control_mask = data->has_control_mask;
+               card->tx_buf_size = data->tx_buf_size;
        }
 
        sdio_claim_host(func);
@@ -165,7 +166,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        struct sdio_mmc_card *card;
        struct mwifiex_adapter *adapter;
        struct mwifiex_private *priv;
-       int i;
 
        pr_debug("info: SDIO func num=%d\n", func->num);
 
@@ -184,11 +184,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
 
-               for (i = 0; i < adapter->priv_num; i++)
-                       if ((GET_BSS_ROLE(adapter->priv[i]) ==
-                                               MWIFIEX_BSS_ROLE_STA) &&
-                           adapter->priv[i]->media_connected)
-                               mwifiex_deauthenticate(adapter->priv[i], NULL);
+               mwifiex_deauthenticate_all(adapter);
 
                priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
                mwifiex_disable_auto_ds(priv);
@@ -241,6 +237,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
        /* Enable the Host Sleep */
        if (!mwifiex_enable_hs(adapter)) {
                dev_err(adapter->dev, "cmd: failed to suspend\n");
+               adapter->hs_enabling = false;
                return -EFAULT;
        }
 
@@ -249,6 +246,7 @@ static int mwifiex_sdio_suspend(struct device *dev)
 
        /* Indicate device suspended */
        adapter->is_suspended = true;
+       adapter->hs_enabling = false;
 
        return ret;
 }
@@ -1760,6 +1758,7 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
 
        /* save adapter pointer in card */
        card->adapter = adapter;
+       adapter->tx_buf_size = card->tx_buf_size;
 
        sdio_claim_host(func);
 
index 532ae0ac4dfb3e40bb12592db32ec94e702a4c2b..c71201b2e2a333c20f926bacb4842395230c6926 100644 (file)
@@ -233,6 +233,7 @@ struct sdio_mmc_card {
        u8 mp_agg_pkt_limit;
        bool supports_sdio_new_mode;
        bool has_control_mask;
+       u16 tx_buf_size;
 
        u32 mp_rd_bitmap;
        u32 mp_wr_bitmap;
@@ -256,6 +257,7 @@ struct mwifiex_sdio_device {
        u8 mp_agg_pkt_limit;
        bool supports_sdio_new_mode;
        bool has_control_mask;
+       u16 tx_buf_size;
 };
 
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
@@ -312,6 +314,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
        .mp_agg_pkt_limit = 8,
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
@@ -321,6 +324,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
        .mp_agg_pkt_limit = 8,
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
@@ -330,6 +334,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
        .mp_agg_pkt_limit = 8,
        .supports_sdio_new_mode = false,
        .has_control_mask = true,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
 };
 
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
@@ -339,6 +344,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .mp_agg_pkt_limit = 16,
        .supports_sdio_new_mode = true,
        .has_control_mask = false,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
 };
 
 /*
index 9208a8816b800f619426eb41403d4177ad2d0ad3..e3cac1495cc705bcf3c05b479a16981578deff98 100644 (file)
@@ -185,6 +185,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
                     i++)
                        rate_scope->ht_mcs_rate_bitmap[i] =
                                cpu_to_le16(pbitmap_rates[2 + i]);
+               if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+                       for (i = 0;
+                            i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
+                            i++)
+                               rate_scope->vht_mcs_rate_bitmap[i] =
+                                       cpu_to_le16(pbitmap_rates[10 + i]);
+               }
        } else {
                rate_scope->hr_dsss_rate_bitmap =
                        cpu_to_le16(priv->bitmap_rates[0]);
@@ -195,6 +202,13 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv,
                     i++)
                        rate_scope->ht_mcs_rate_bitmap[i] =
                                cpu_to_le16(priv->bitmap_rates[2 + i]);
+               if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+                       for (i = 0;
+                            i < ARRAY_SIZE(rate_scope->vht_mcs_rate_bitmap);
+                            i++)
+                               rate_scope->vht_mcs_rate_bitmap[i] =
+                                       cpu_to_le16(priv->bitmap_rates[10 + i]);
+               }
        }
 
        rate_drop = (struct mwifiex_rate_drop_pattern *) ((u8 *) rate_scope +
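
The rate-scope hunks above extend the existing bitmap copy: bitmap_rates[0] carries the HR/DSSS mask, the HT MCS masks start at index 2, and on firmware API v15 the VHT MCS masks follow from index 10 (the OFDM mask, conventionally at index 1, is not visible in this hunk). A small sketch of that index layout; the enum and array bounds are assumptions for illustration, not taken from fw.h:

#include <stddef.h>
#include <stdint.h>

/* Assumed layout of the bitmap_rates[] array copied above:
 *   [0]     HR/DSSS rate bitmap
 *   [1]     OFDM rate bitmap
 *   [2..9]  HT MCS rate bitmaps
 *   [10..]  VHT MCS rate bitmaps (only when the firmware API is v15+)
 */
enum {
	EX_IDX_HR_DSSS	= 0,
	EX_IDX_OFDM	= 1,
	EX_IDX_HT_BASE	= 2,
	EX_IDX_VHT_BASE	= 10,
};

static void ex_fill_rate_scope(uint16_t *ht_out, size_t ht_cnt,
			       uint16_t *vht_out, size_t vht_cnt,
			       const uint16_t *bitmap_rates, int fw_v15)
{
	size_t i;

	for (i = 0; i < ht_cnt; i++)
		ht_out[i] = bitmap_rates[EX_IDX_HT_BASE + i];

	/* VHT masks only exist on the newer firmware API */
	if (fw_v15)
		for (i = 0; i < vht_cnt; i++)
			vht_out[i] = bitmap_rates[EX_IDX_VHT_BASE + i];
}
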
@@ -532,8 +546,228 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function populates the key material v2 command
+ * to set the network key for AES & CMAC AES.
+ */
+static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
+                                 struct host_cmd_ds_command *cmd,
+                                 struct mwifiex_ds_encrypt_key *enc_key,
+                                 struct host_cmd_ds_802_11_key_material_v2 *km)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u16 size, len = KEY_PARAMS_FIXED_LEN;
+
+       if (enc_key->is_igtk_key) {
+               dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+               if (enc_key->is_rx_seq_valid)
+                       memcpy(km->key_param_set.key_params.cmac_aes.ipn,
+                              enc_key->pn, enc_key->pn_len);
+               km->key_param_set.key_info &= cpu_to_le16(~KEY_MCAST);
+               km->key_param_set.key_info |= cpu_to_le16(KEY_IGTK);
+               km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC;
+               km->key_param_set.key_params.cmac_aes.key_len =
+                                         cpu_to_le16(enc_key->key_len);
+               memcpy(km->key_param_set.key_params.cmac_aes.key,
+                      enc_key->key_material, enc_key->key_len);
+               len += sizeof(struct mwifiex_cmac_aes_param);
+       } else {
+               dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+               if (enc_key->is_rx_seq_valid)
+                       memcpy(km->key_param_set.key_params.aes.pn,
+                              enc_key->pn, enc_key->pn_len);
+               km->key_param_set.key_type = KEY_TYPE_ID_AES;
+               km->key_param_set.key_params.aes.key_len =
+                                         cpu_to_le16(enc_key->key_len);
+               memcpy(km->key_param_set.key_params.aes.key,
+                      enc_key->key_material, enc_key->key_len);
+               len += sizeof(struct mwifiex_aes_param);
+       }
+
+       km->key_param_set.len = cpu_to_le16(len);
+       size = len + sizeof(struct mwifiex_ie_types_header) +
+              sizeof(km->action) + S_DS_GEN;
+       cmd->size = cpu_to_le16(size);
+
+       return 0;
+}
+
+/* This function prepares a command to set/get/reset network key(s)
+ * using the V2 key material command format.
+ * Preparation includes -
+ *      - Setting command ID, action and proper size
+ *      - Setting WEP keys, WAPI keys or WPA keys along with the
+ *        required encryption (TKIP, AES)
+ *      - Ensuring correct endian-ness
+ */
+static int
+mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
+                                  struct host_cmd_ds_command *cmd,
+                                  u16 cmd_action, u32 cmd_oid,
+                                  struct mwifiex_ds_encrypt_key *enc_key)
+{
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u8 *mac = enc_key->mac_addr;
+       u16 key_info, len = KEY_PARAMS_FIXED_LEN;
+       struct host_cmd_ds_802_11_key_material_v2 *km =
+                                               &cmd->params.key_material_v2;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
+       km->action = cpu_to_le16(cmd_action);
+
+       if (cmd_action == HostCmd_ACT_GEN_GET) {
+               dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+               km->key_param_set.key_idx =
+                                       enc_key->key_index & KEY_INDEX_MASK;
+               km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+               km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+               memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+               if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+                       key_info = KEY_UNICAST;
+               else
+                       key_info = KEY_MCAST;
+
+               if (enc_key->is_igtk_key)
+                       key_info |= KEY_IGTK;
+
+               km->key_param_set.key_info = cpu_to_le16(key_info);
+
+               cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+                                       S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+                                       sizeof(km->action));
+               return 0;
+       }
+
+       memset(&km->key_param_set, 0,
+              sizeof(struct mwifiex_ie_type_key_param_set_v2));
+
+       if (enc_key->key_disable) {
+               dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+               km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
+               km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+               km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+               km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+               key_info = KEY_MCAST | KEY_UNICAST;
+               km->key_param_set.key_info = cpu_to_le16(key_info);
+               memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+               cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+                                       S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+                                       sizeof(km->action));
+               return 0;
+       }
+
+       km->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
+       km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+       km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+       key_info = KEY_ENABLED;
+       memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+       if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
+               dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+               len += sizeof(struct mwifiex_wep_param);
+               km->key_param_set.len = cpu_to_le16(len);
+               km->key_param_set.key_type = KEY_TYPE_ID_WEP;
+
+               if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+                       key_info |= KEY_MCAST | KEY_UNICAST;
+               } else {
+                       if (enc_key->is_current_wep_key) {
+                               key_info |= KEY_MCAST | KEY_UNICAST;
+                               if (km->key_param_set.key_idx ==
+                                   (priv->wep_key_curr_index & KEY_INDEX_MASK))
+                                       key_info |= KEY_DEFAULT;
+                       } else {
+                               if (mac) {
+                                       if (is_broadcast_ether_addr(mac))
+                                               key_info |= KEY_MCAST;
+                                       else
+                                               key_info |= KEY_UNICAST |
+                                                           KEY_DEFAULT;
+                               } else {
+                                       key_info |= KEY_MCAST;
+                               }
+                       }
+               }
+               km->key_param_set.key_info = cpu_to_le16(key_info);
+
+               km->key_param_set.key_params.wep.key_len =
+                                                 cpu_to_le16(enc_key->key_len);
+               memcpy(km->key_param_set.key_params.wep.key,
+                      enc_key->key_material, enc_key->key_len);
+
+               cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+                                       len + sizeof(km->action) + S_DS_GEN);
+               return 0;
+       }
+
+       if (is_broadcast_ether_addr(mac))
+               key_info |= KEY_MCAST | KEY_RX_KEY;
+       else
+               key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
+
+       if (enc_key->is_wapi_key) {
+               dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+               km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
+               memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
+                      PN_LEN);
+               km->key_param_set.key_params.wapi.key_len =
+                                               cpu_to_le16(enc_key->key_len);
+               memcpy(km->key_param_set.key_params.wapi.key,
+                      enc_key->key_material, enc_key->key_len);
+               if (is_broadcast_ether_addr(mac))
+                       priv->sec_info.wapi_key_on = true;
+
+               if (!priv->sec_info.wapi_key_on)
+                       key_info |= KEY_DEFAULT;
+               km->key_param_set.key_info = cpu_to_le16(key_info);
+
+               len += sizeof(struct mwifiex_wapi_param);
+               km->key_param_set.len = cpu_to_le16(len);
+               cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+                                       len + sizeof(km->action) + S_DS_GEN);
+               return 0;
+       }
+
+       if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+               key_info |= KEY_DEFAULT;
+               /* Enable unicast bit for WPA-NONE/ADHOC_AES */
+               if (!priv->sec_info.wpa2_enabled &&
+                   !is_broadcast_ether_addr(mac))
+                       key_info |= KEY_UNICAST;
+       } else {
+               /* Enable default key for WPA/WPA2 */
+               if (!priv->wpa_is_gtk_set)
+                       key_info |= KEY_DEFAULT;
+       }
+
+       km->key_param_set.key_info = cpu_to_le16(key_info);
+
+       if (enc_key->key_len == WLAN_KEY_LEN_CCMP)
+               return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
+
+       if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
+               dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+               if (enc_key->is_rx_seq_valid)
+                       memcpy(km->key_param_set.key_params.tkip.pn,
+                              enc_key->pn, enc_key->pn_len);
+               km->key_param_set.key_type = KEY_TYPE_ID_TKIP;
+               km->key_param_set.key_params.tkip.key_len =
+                                               cpu_to_le16(enc_key->key_len);
+               memcpy(km->key_param_set.key_params.tkip.key,
+                      enc_key->key_material, enc_key->key_len);
+
+               len += sizeof(struct mwifiex_tkip_param);
+               km->key_param_set.len = cpu_to_le16(len);
+               cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+                                       len + sizeof(km->action) + S_DS_GEN);
+       }
+
+       return 0;
+}
+
 /*
  * This function prepares command to set/get/reset network key(s).
+ * It prepares the key material command in the V1 format.
  *
  * Preparation includes -
  *      - Setting command ID, action and proper size
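
mwifiex_cmd_802_11_key_material_v2() above fills a single key parameter set whose trailing key_params member acts as a union selected by key_type (WEP, TKIP, AES, WAPI or CMAC AES), and it computes the TLV length as the fixed fields plus the size of whichever payload was used. The real structures live in fw.h and are not part of this diff; the following is a simplified sketch of that tagged-payload pattern with made-up field widths:

#include <stddef.h>
#include <stdint.h>

/* Simplified, illustrative shape of a v2-style key parameter set:
 * fixed header fields followed by a payload chosen by key_type.
 */
struct ex_wep_param {
	uint16_t key_len;
	uint8_t  key[13];
} __attribute__((packed));

struct ex_aes_param {
	uint8_t  pn[16];
	uint16_t key_len;
	uint8_t  key[32];
} __attribute__((packed));

struct ex_key_param_set_v2 {
	uint16_t type;		/* TLV type id                            */
	uint16_t len;		/* bytes after this header                */
	uint8_t  mac_addr[6];
	uint8_t  key_idx;
	uint8_t  key_type;	/* selects which union member is valid    */
	uint16_t key_info;	/* MCAST/UNICAST/DEFAULT/... flag bits    */
	union {
		struct ex_wep_param wep;
		struct ex_aes_param aes;
	} key_params;
} __attribute__((packed));

/* Length bookkeeping mirrors the code above: start from the fixed part
 * and add only the payload that was actually filled in.
 */
static uint16_t ex_key_param_len(int is_aes)
{
	/* TLV lengths conventionally count the bytes after the header */
	size_t fixed = offsetof(struct ex_key_param_set_v2, key_params) -
		       2 * sizeof(uint16_t);

	return (uint16_t)(fixed + (is_aes ? sizeof(struct ex_aes_param)
					  : sizeof(struct ex_wep_param)));
}
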
@@ -542,10 +776,10 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
  *      - Ensuring correct endian-ness
  */
 static int
-mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
-                               struct host_cmd_ds_command *cmd,
-                               u16 cmd_action, u32 cmd_oid,
-                               struct mwifiex_ds_encrypt_key *enc_key)
+mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
+                                  struct host_cmd_ds_command *cmd,
+                                  u16 cmd_action, u32 cmd_oid,
+                                  struct mwifiex_ds_encrypt_key *enc_key)
 {
        struct host_cmd_ds_802_11_key_material *key_material =
                &cmd->params.key_material;
@@ -724,6 +958,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
        return ret;
 }
 
+/* Wrapper function for setting network key depending upon FW KEY API version */
+static int
+mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
+                               struct host_cmd_ds_command *cmd,
+                               u16 cmd_action, u32 cmd_oid,
+                               struct mwifiex_ds_encrypt_key *enc_key)
+{
+       if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+               return mwifiex_cmd_802_11_key_material_v2(priv, cmd,
+                                                         cmd_action, cmd_oid,
+                                                         enc_key);
+
+       else
+               return mwifiex_cmd_802_11_key_material_v1(priv, cmd,
+                                                         cmd_action, cmd_oid,
+                                                         enc_key);
+}
+
 /*
  * This function prepares command to set/get 11d domain information.
  *
@@ -1173,9 +1425,9 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
                /* property header is 6 bytes, data must fit in cmd buffer */
                if (prop && prop->value && prop->length > 6 &&
                    prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
-                       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
-                                                   HostCmd_ACT_GEN_SET, 0,
-                                                   prop);
+                       ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              prop, true);
                        if (ret)
                                return ret;
                }
@@ -1280,6 +1532,127 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
        return 0;
 }
 
+static int
+mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
+                     struct host_cmd_ds_command *cmd,
+                     void *data_buf)
+{
+       struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
+       struct mwifiex_ds_tdls_oper *oper = data_buf;
+       struct mwifiex_sta_node *sta_ptr;
+       struct host_cmd_tlv_rates *tlv_rates;
+       struct mwifiex_ie_types_htcap *ht_capab;
+       struct mwifiex_ie_types_qos_info *wmm_qos_info;
+       struct mwifiex_ie_types_extcap *extcap;
+       struct mwifiex_ie_types_vhtcap *vht_capab;
+       struct mwifiex_ie_types_aid *aid;
+       u8 *pos, qos_info;
+       u16 config_len = 0;
+       struct station_parameters *params = priv->sta_params;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
+       cmd->size = cpu_to_le16(S_DS_GEN);
+       le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+
+       tdls_oper->reason = 0;
+       memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
+       sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
+
+       pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
+
+       switch (oper->tdls_action) {
+       case MWIFIEX_TDLS_DISABLE_LINK:
+               tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_DELETE);
+               break;
+       case MWIFIEX_TDLS_CREATE_LINK:
+               tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CREATE);
+               break;
+       case MWIFIEX_TDLS_CONFIG_LINK:
+               tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
+
+               if (!params) {
+                       dev_err(priv->adapter->dev,
+                               "TDLS config params not available for %pM\n",
+                               oper->peer_mac);
+                       return -ENODATA;
+               }
+
+               *(__le16 *)pos = cpu_to_le16(params->capability);
+               config_len += sizeof(params->capability);
+
+               qos_info = params->uapsd_queues | (params->max_sp << 5);
+               wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
+                                                                   config_len);
+               wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+               wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
+               wmm_qos_info->qos_info = qos_info;
+               config_len += sizeof(struct mwifiex_ie_types_qos_info);
+
+               if (params->ht_capa) {
+                       ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
+                                                                   config_len);
+                       ht_capab->header.type =
+                                           cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+                       ht_capab->header.len =
+                                  cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+                       memcpy(&ht_capab->ht_cap, params->ht_capa,
+                              sizeof(struct ieee80211_ht_cap));
+                       config_len += sizeof(struct mwifiex_ie_types_htcap);
+               }
+
+               if (params->supported_rates && params->supported_rates_len) {
+                       tlv_rates = (struct host_cmd_tlv_rates *)(pos +
+                                                                 config_len);
+                       tlv_rates->header.type =
+                                              cpu_to_le16(WLAN_EID_SUPP_RATES);
+                       tlv_rates->header.len =
+                                      cpu_to_le16(params->supported_rates_len);
+                       memcpy(tlv_rates->rates, params->supported_rates,
+                              params->supported_rates_len);
+                       config_len += sizeof(struct host_cmd_tlv_rates) +
+                                     params->supported_rates_len;
+               }
+
+               if (params->ext_capab && params->ext_capab_len) {
+                       extcap = (struct mwifiex_ie_types_extcap *)(pos +
+                                                                   config_len);
+                       extcap->header.type =
+                                          cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+                       extcap->header.len = cpu_to_le16(params->ext_capab_len);
+                       memcpy(extcap->ext_capab, params->ext_capab,
+                              params->ext_capab_len);
+                       config_len += sizeof(struct mwifiex_ie_types_extcap) +
+                                     params->ext_capab_len;
+               }
+               if (params->vht_capa) {
+                       vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
+                                                                   config_len);
+                       vht_capab->header.type =
+                                          cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+                       vht_capab->header.len =
+                                 cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+                       memcpy(&vht_capab->vht_cap, params->vht_capa,
+                              sizeof(struct ieee80211_vht_cap));
+                       config_len += sizeof(struct mwifiex_ie_types_vhtcap);
+               }
+               if (params->aid) {
+                       aid = (struct mwifiex_ie_types_aid *)(pos + config_len);
+                       aid->header.type = cpu_to_le16(WLAN_EID_AID);
+                       aid->header.len = cpu_to_le16(sizeof(params->aid));
+                       aid->aid = cpu_to_le16(params->aid);
+                       config_len += sizeof(struct mwifiex_ie_types_aid);
+               }
+
+               break;
+       default:
+               dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+               return -ENOTSUPP;
+       }
+
+       le16_add_cpu(&cmd->size, config_len);
+
+       return 0;
+}
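
In the MWIFIEX_TDLS_CONFIG_LINK branch above, each optional capability (QoS info, HT/VHT caps, rates, extended capabilities, AID) is appended after the fixed TDLS_OPER body as a 16-bit type / 16-bit length / value block at pos + config_len, and the accumulated length is finally folded into cmd->size via le16_add_cpu(). A self-contained sketch of that append-at-offset pattern, using a local put_le16() helper in place of the kernel's cpu_to_le16(); the element IDs are just examples:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for cpu_to_le16(): store a 16-bit value little-endian. */
static void put_le16(uint8_t *p, uint16_t v)
{
    p[0] = v & 0xff;
    p[1] = v >> 8;
}

/* Append one type/length/value block at buf + *off and advance *off. */
static void append_tlv(uint8_t *buf, size_t *off, uint16_t type,
                       const void *val, uint16_t len)
{
    put_le16(buf + *off, type);
    put_le16(buf + *off + 2, len);
    memcpy(buf + *off + 4, val, len);
    *off += 4 + len;
}

int main(void)
{
    uint8_t cmd[256];
    size_t config_len = 0;                        /* grows as blocks are appended */
    uint8_t qos_info = 0x0f;
    uint8_t rates[] = { 0x02, 0x04, 0x0b, 0x16 };

    append_tlv(cmd, &config_len, 46, &qos_info, sizeof(qos_info)); /* QoS capability */
    append_tlv(cmd, &config_len, 1, rates, sizeof(rates));         /* supported rates */
    printf("appended %zu bytes of TLVs\n", config_len);
    return 0;
}
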
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1472,6 +1845,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
                                                         data_buf);
                break;
+       case HostCmd_CMD_802_11_SCAN_EXT:
+               ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
+               break;
        case HostCmd_CMD_MAC_REG_ACCESS:
        case HostCmd_CMD_BBP_REG_ACCESS:
        case HostCmd_CMD_RF_REG_ACCESS:
@@ -1507,6 +1883,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
                                               data_buf);
                break;
+       case HostCmd_CMD_TDLS_OPER:
+               ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
+               break;
        default:
                dev_err(priv->adapter->dev,
                        "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1547,15 +1926,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
 
        if (first_sta) {
                if (priv->adapter->iface_type == MWIFIEX_PCIE) {
-                       ret = mwifiex_send_cmd_sync(priv,
-                                               HostCmd_CMD_PCIE_DESC_DETAILS,
-                                               HostCmd_ACT_GEN_SET, 0, NULL);
+                       ret = mwifiex_send_cmd(priv,
+                                              HostCmd_CMD_PCIE_DESC_DETAILS,
+                                              HostCmd_ACT_GEN_SET, 0, NULL,
+                                              true);
                        if (ret)
                                return -1;
                }
 
-               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_FUNC_INIT,
-                                           HostCmd_ACT_GEN_SET, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_FUNC_INIT,
+                                      HostCmd_ACT_GEN_SET, 0, NULL, true);
                if (ret)
                        return -1;
 
@@ -1573,55 +1953,57 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
                }
 
                if (adapter->cal_data) {
-                       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_CFG_DATA,
-                                               HostCmd_ACT_GEN_SET, 0, NULL);
+                       ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
+                                              HostCmd_ACT_GEN_SET, 0, NULL,
+                                              true);
                        if (ret)
                                return -1;
                }
 
                /* Read MAC address from HW */
-               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC,
-                                           HostCmd_ACT_GEN_GET, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC,
+                                      HostCmd_ACT_GEN_GET, 0, NULL, true);
                if (ret)
                        return -1;
 
                /* Reconfigure tx buf size */
-               ret = mwifiex_send_cmd_sync(priv,
-                                           HostCmd_CMD_RECONFIGURE_TX_BUFF,
-                                           HostCmd_ACT_GEN_SET, 0,
-                                           &priv->adapter->tx_buf_size);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
+                                      HostCmd_ACT_GEN_SET, 0,
+                                      &priv->adapter->tx_buf_size, true);
                if (ret)
                        return -1;
 
                if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                        /* Enable IEEE PS by default */
                        priv->adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
-                       ret = mwifiex_send_cmd_sync(
-                                       priv, HostCmd_CMD_802_11_PS_MODE_ENH,
-                                       EN_AUTO_PS, BITMAP_STA_PS, NULL);
+                       ret = mwifiex_send_cmd(priv,
+                                              HostCmd_CMD_802_11_PS_MODE_ENH,
+                                              EN_AUTO_PS, BITMAP_STA_PS, NULL,
+                                              true);
                        if (ret)
                                return -1;
                }
        }
 
        /* get tx rate */
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
-                                   HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_TX_RATE_CFG,
+                              HostCmd_ACT_GEN_GET, 0, NULL, true);
        if (ret)
                return -1;
        priv->data_rate = 0;
 
        /* get tx power */
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_TX_PWR,
-                                   HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR,
+                              HostCmd_ACT_GEN_GET, 0, NULL, true);
        if (ret)
                return -1;
 
        if (priv->bss_type == MWIFIEX_BSS_TYPE_STA) {
                /* set ibss coalescing_status */
-               ret = mwifiex_send_cmd_sync(
-                               priv, HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
-                               HostCmd_ACT_GEN_SET, 0, &enable);
+               ret = mwifiex_send_cmd(
+                               priv,
+                               HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
+                               HostCmd_ACT_GEN_SET, 0, &enable, true);
                if (ret)
                        return -1;
        }
@@ -1629,16 +2011,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        memset(&amsdu_aggr_ctrl, 0, sizeof(amsdu_aggr_ctrl));
        amsdu_aggr_ctrl.enable = true;
        /* Send request to firmware */
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
-                                   HostCmd_ACT_GEN_SET, 0,
-                                   &amsdu_aggr_ctrl);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_AMSDU_AGGR_CTRL,
+                              HostCmd_ACT_GEN_SET, 0,
+                              &amsdu_aggr_ctrl, true);
        if (ret)
                return -1;
        /* MAC Control must be the last command in init_fw */
        /* set MAC Control */
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
-                                   HostCmd_ACT_GEN_SET, 0,
-                                   &priv->curr_pkt_filter);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+                              HostCmd_ACT_GEN_SET, 0,
+                              &priv->curr_pkt_filter, true);
        if (ret)
                return -1;
 
@@ -1647,10 +2029,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
                /* Enable auto deep sleep */
                auto_ds.auto_ds = DEEP_SLEEP_ON;
                auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
-               ret = mwifiex_send_cmd_sync(priv,
-                                           HostCmd_CMD_802_11_PS_MODE_ENH,
-                                           EN_AUTO_PS, BITMAP_AUTO_DS,
-                                           &auto_ds);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+                                      EN_AUTO_PS, BITMAP_AUTO_DS,
+                                      &auto_ds, true);
                if (ret)
                        return -1;
        }
@@ -1658,9 +2039,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
        if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
                /* Send cmd to FW to enable/disable 11D function */
                state_11d = ENABLE_11D;
-               ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB,
-                                           HostCmd_ACT_GEN_SET, DOT11D_I,
-                                           &state_11d);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
+                                      HostCmd_ACT_GEN_SET, DOT11D_I,
+                                      &state_11d, true);
                if (ret)
                        dev_err(priv->adapter->dev,
                                "11D: failed to enable 11D\n");
@@ -1673,8 +2054,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
         * (Short GI, Channel BW, Green field support etc.) for transmit
         */
        tx_cfg.tx_htcap = MWIFIEX_FW_DEF_HTTXCFG;
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_11N_CFG,
-                                   HostCmd_ACT_GEN_SET, 0, &tx_cfg);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_11N_CFG,
+                              HostCmd_ACT_GEN_SET, 0, &tx_cfg, true);
 
        ret = -EINPROGRESS;
 
index 24523e4015cba11a9fe02ee449ad462f557e1642..bfebb0144df5ac5ae88bd78dd909888d4bb0a0ea 100644 (file)
@@ -69,6 +69,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
 
                break;
        case HostCmd_CMD_802_11_SCAN:
+       case HostCmd_CMD_802_11_SCAN_EXT:
                /* Cancel all pending scan command */
                spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
                list_for_each_entry_safe(cmd_node, tmp_node,
@@ -157,8 +158,8 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
 
        priv->subsc_evt_rssi_state = EVENT_HANDLED;
 
-       mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
-                              0, 0, subsc_evt);
+       mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
+                        0, 0, subsc_evt, false);
 
        return 0;
 }
@@ -303,6 +304,15 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
                                priv->bitmap_rates[2 + i] =
                                        le16_to_cpu(rate_scope->
                                                    ht_mcs_rate_bitmap[i]);
+
+                       if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) {
+                               for (i = 0; i < ARRAY_SIZE(rate_scope->
+                                                          vht_mcs_rate_bitmap);
+                                    i++)
+                                       priv->bitmap_rates[10 + i] =
+                                           le16_to_cpu(rate_scope->
+                                                       vht_mcs_rate_bitmap[i]);
+                       }
                        break;
                        /* Add RATE_DROP tlv here */
                }
@@ -316,9 +326,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
        if (priv->is_data_rate_auto)
                priv->data_rate = 0;
        else
-               return mwifiex_send_cmd_async(priv,
-                                             HostCmd_CMD_802_11_TX_RATE_QUERY,
-                                             HostCmd_ACT_GEN_GET, 0, NULL);
+               return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+                                       HostCmd_ACT_GEN_GET, 0, NULL, false);
 
        return 0;
 }
@@ -561,13 +570,13 @@ static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
 }
 
 /*
- * This function handles the command response of set/get key material.
+ * This function handles the command response of set/get v1 key material.
  *
  * Handling includes updating the driver parameters to reflect the
  * changes.
  */
-static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
-                                          struct host_cmd_ds_command *resp)
+static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+                                             struct host_cmd_ds_command *resp)
 {
        struct host_cmd_ds_802_11_key_material *key =
                                                &resp->params.key_material;
@@ -589,6 +598,51 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
        return 0;
 }
 
+/*
+ * This function handles the command response of set/get v2 key material.
+ *
+ * Handling includes updating the driver parameters to reflect the
+ * changes.
+ */
+static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+                                             struct host_cmd_ds_command *resp)
+{
+       struct host_cmd_ds_802_11_key_material_v2 *key_v2;
+       __le16 len;
+
+       key_v2 = &resp->params.key_material_v2;
+       if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+               if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
+                       dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+                       priv->wpa_is_gtk_set = true;
+                       priv->scan_block = false;
+               }
+       }
+
+       if (key_v2->key_param_set.key_type != KEY_TYPE_ID_AES)
+               return 0;
+
+       memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+              WLAN_KEY_LEN_CCMP);
+       priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+                               key_v2->key_param_set.key_params.aes.key_len;
+       len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+       memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
+              key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+
+       return 0;
+}
+
+/* Wrapper function for processing response of key material command */
+static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
+                                          struct host_cmd_ds_command *resp)
+{
+       if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+               return mwifiex_ret_802_11_key_material_v2(priv, resp);
+       else
+               return mwifiex_ret_802_11_key_material_v1(priv, resp);
+}
+
 /*
  * This function handles the command response of get 11d domain information.
  */
@@ -800,7 +854,60 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
 
        return 0;
 }
+static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
+                                struct host_cmd_ds_command *resp)
+{
+       struct host_cmd_ds_tdls_oper *cmd_tdls_oper = &resp->params.tdls_oper;
+       u16 reason = le16_to_cpu(cmd_tdls_oper->reason);
+       u16 action = le16_to_cpu(cmd_tdls_oper->tdls_action);
+       struct mwifiex_sta_node *node =
+                          mwifiex_get_sta_entry(priv, cmd_tdls_oper->peer_mac);
 
+       switch (action) {
+       case ACT_TDLS_DELETE:
+               if (reason)
+                       dev_err(priv->adapter->dev,
+                               "TDLS link delete for %pM failed: reason %d\n",
+                               cmd_tdls_oper->peer_mac, reason);
+               else
+                       dev_dbg(priv->adapter->dev,
+                               "TDLS link delete for %pM successful\n",
+                               cmd_tdls_oper->peer_mac);
+               break;
+       case ACT_TDLS_CREATE:
+               if (reason) {
+                       dev_err(priv->adapter->dev,
+                               "TDLS link creation for %pM failed: reason %d\n",
+                               cmd_tdls_oper->peer_mac, reason);
+                       if (node && reason != TDLS_ERR_LINK_EXISTS)
+                               node->tdls_status = TDLS_SETUP_FAILURE;
+               } else {
+                       dev_dbg(priv->adapter->dev,
+                               "TDLS link creation for %pM successful\n",
+                               cmd_tdls_oper->peer_mac);
+               }
+               break;
+       case ACT_TDLS_CONFIG:
+               if (reason) {
+                       dev_err(priv->adapter->dev,
+                               "TDLS link config for %pM failed, reason %d\n",
+                               cmd_tdls_oper->peer_mac, reason);
+                       if (node)
+                               node->tdls_status = TDLS_SETUP_FAILURE;
+               } else {
+                       dev_dbg(priv->adapter->dev,
+                               "TDLS link config for %pM successful\n",
+                               cmd_tdls_oper->peer_mac);
+               }
+               break;
+       default:
+               dev_err(priv->adapter->dev,
+                       "Unknown TDLS command action response %d\n", action);
+               return -1;
+       }
+
+       return 0;
+}
 /*
  * This function handles the command response for subscribe event command.
  */
@@ -871,6 +978,10 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                ret = mwifiex_ret_802_11_scan(priv, resp);
                adapter->curr_cmd->wait_q_enabled = false;
                break;
+       case HostCmd_CMD_802_11_SCAN_EXT:
+               ret = mwifiex_ret_802_11_scan_ext(priv);
+               adapter->curr_cmd->wait_q_enabled = false;
+               break;
        case HostCmd_CMD_802_11_BG_SCAN_QUERY:
                ret = mwifiex_ret_802_11_scan(priv, resp);
                dev_dbg(adapter->dev,
@@ -999,6 +1110,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_COALESCE_CFG:
                break;
+       case HostCmd_CMD_TDLS_OPER:
+               ret = mwifiex_ret_tdls_oper(priv, resp);
+               break;
        default:
                dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
                        resp->command);
index 8c351f71f72f9f6f6a17650198ba805bf7c1f5cd..368450cc56c7d9e19b8c2a74c47d43a9c5c91b1f 100644 (file)
@@ -54,6 +54,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
 
        priv->scan_block = false;
 
+       if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+           ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               mwifiex_disable_all_tdls_links(priv);
+
        /* Free Tx and Rx packets, report disconnect to upper layer */
        mwifiex_clean_txrx(priv);
 
@@ -112,7 +116,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        adapter->tx_lock_flag = false;
        adapter->pps_uapsd_mode = false;
 
-       if (adapter->num_cmd_timeout && adapter->curr_cmd)
+       if (adapter->is_cmd_timedout && adapter->curr_cmd)
                return;
        priv->media_connected = false;
        dev_dbg(adapter->dev,
@@ -289,9 +293,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_HS_ACT_REQ:
                dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_HS_CFG_ENH,
-                                            0, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
+                                      0, 0, NULL, false);
                break;
 
        case EVENT_MIC_ERR_UNICAST:
@@ -322,27 +325,34 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_BG_SCAN_REPORT:
                dev_dbg(adapter->dev, "event: BGS_REPORT\n");
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_BG_SCAN_QUERY,
-                                            HostCmd_ACT_GEN_GET, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
+                                      HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
 
        case EVENT_PORT_RELEASE:
                dev_dbg(adapter->dev, "event: PORT RELEASE\n");
                break;
 
+       case EVENT_EXT_SCAN_REPORT:
+               dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+               if (adapter->ext_scan)
+                       ret = mwifiex_handle_event_ext_scan_report(priv,
+                                               adapter->event_skb->data);
+
+               break;
+
        case EVENT_WMM_STATUS_CHANGE:
                dev_dbg(adapter->dev, "event: WMM status changed\n");
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS,
-                                            0, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
+                                      0, 0, NULL, false);
                break;
 
        case EVENT_RSSI_LOW:
                cfg80211_cqm_rssi_notify(priv->netdev,
                                         NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
                                         GFP_KERNEL);
-               mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
-                                      HostCmd_ACT_GEN_GET, 0, NULL);
+               mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+                                HostCmd_ACT_GEN_GET, 0, NULL, false);
                priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
                dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
                break;
@@ -356,8 +366,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                cfg80211_cqm_rssi_notify(priv->netdev,
                                         NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
                                         GFP_KERNEL);
-               mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
-                                      HostCmd_ACT_GEN_GET, 0, NULL);
+               mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
+                                HostCmd_ACT_GEN_GET, 0, NULL, false);
                priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
                dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
                break;
@@ -384,15 +394,15 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
        case EVENT_IBSS_COALESCED:
                dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
-               ret = mwifiex_send_cmd_async(priv,
+               ret = mwifiex_send_cmd(priv,
                                HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
-                               HostCmd_ACT_GEN_GET, 0, NULL);
+                               HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
        case EVENT_ADDBA:
                dev_dbg(adapter->dev, "event: ADDBA Request\n");
-               mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
-                                      HostCmd_ACT_GEN_SET, 0,
-                                      adapter->event_body);
+               mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
+                                HostCmd_ACT_GEN_SET, 0,
+                                adapter->event_body, false);
                break;
        case EVENT_DELBA:
                dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -443,10 +453,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                priv->csa_expire_time =
                                jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
                priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
-               ret = mwifiex_send_cmd_async(priv,
-                       HostCmd_CMD_802_11_DEAUTHENTICATE,
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
                        HostCmd_ACT_GEN_SET, 0,
-                       priv->curr_bss_params.bss_descriptor.mac_address);
+                       priv->curr_bss_params.bss_descriptor.mac_address,
+                       false);
                break;
 
        default:
index c5cb2ed19ec2e240d6a65a7bb9ee4165ad181b68..894270611f2cb6e074669f424fe66978e80e1992 100644 (file)
@@ -64,6 +64,7 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
                                          *(cmd_queued->condition));
        if (status) {
                dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+               mwifiex_cancel_all_pending_cmd(adapter);
                return status;
        }
 
@@ -108,19 +109,19 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
                                "info: Set multicast list=%d\n",
                                mcast_list->num_multicast_addr);
                        /* Send multicast addresses to firmware */
-                       ret = mwifiex_send_cmd_async(priv,
-                               HostCmd_CMD_MAC_MULTICAST_ADR,
-                               HostCmd_ACT_GEN_SET, 0,
-                               mcast_list);
+                       ret = mwifiex_send_cmd(priv,
+                                              HostCmd_CMD_MAC_MULTICAST_ADR,
+                                              HostCmd_ACT_GEN_SET, 0,
+                                              mcast_list, false);
                }
        }
        dev_dbg(priv->adapter->dev,
                "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
               old_pkt_filter, priv->curr_pkt_filter);
        if (old_pkt_filter != priv->curr_pkt_filter) {
-               ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
-                                            HostCmd_ACT_GEN_SET,
-                                            0, &priv->curr_pkt_filter);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+                                      HostCmd_ACT_GEN_SET,
+                                      0, &priv->curr_pkt_filter, false);
        }
 
        return ret;
@@ -237,8 +238,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 
        rcu_read_unlock();
 
-       if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
-                                  HostCmd_ACT_GEN_SET, 0, NULL)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
+                            HostCmd_ACT_GEN_SET, 0, NULL, false)) {
                wiphy_err(priv->adapter->wiphy,
                          "11D: setting domain info in FW\n");
                return -1;
@@ -290,7 +291,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
 
                if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
                                                HostCmd_SCAN_RADIO_TYPE_BG)
-                       config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+                       config_bands = BAND_B | BAND_G | BAND_GN;
                else
                        config_bands = BAND_A | BAND_AN | BAND_AAC;
 
@@ -429,16 +430,13 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
                                status = -1;
                                break;
                        }
-                       if (cmd_type == MWIFIEX_SYNC_CMD)
-                               status = mwifiex_send_cmd_sync(priv,
-                                               HostCmd_CMD_802_11_HS_CFG_ENH,
-                                               HostCmd_ACT_GEN_SET, 0,
-                                               &adapter->hs_cfg);
-                       else
-                               status = mwifiex_send_cmd_async(priv,
-                                               HostCmd_CMD_802_11_HS_CFG_ENH,
-                                               HostCmd_ACT_GEN_SET, 0,
-                                               &adapter->hs_cfg);
+
+                       status = mwifiex_send_cmd(priv,
+                                                 HostCmd_CMD_802_11_HS_CFG_ENH,
+                                                 HostCmd_ACT_GEN_SET, 0,
+                                                 &adapter->hs_cfg,
+                                                 cmd_type == MWIFIEX_SYNC_CMD);
+
                        if (hs_cfg->conditions == HS_CFG_CANCEL)
                                /* Restore previous condition */
                                adapter->hs_cfg.conditions =
@@ -511,6 +509,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
        memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
        hscfg.is_invoke_hostcmd = true;
 
+       adapter->hs_enabling = true;
+       mwifiex_cancel_all_pending_cmd(adapter);
+
        if (mwifiex_set_hs_params(mwifiex_get_priv(adapter,
                                                   MWIFIEX_BSS_ROLE_STA),
                                  HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
@@ -519,8 +520,9 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
                return false;
        }
 
-       if (wait_event_interruptible(adapter->hs_activate_wait_q,
-                                    adapter->hs_activate_wait_q_woken)) {
+       if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
+                                            adapter->hs_activate_wait_q_woken,
+                                            (10 * HZ)) <= 0) {
                dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
                return false;
        }
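
The host-sleep activation wait above now uses wait_event_interruptible_timeout() with a 10*HZ bound; that macro returns the remaining jiffies on success, 0 if the timeout expired with the condition still false, and a negative value when a signal interrupts the sleep, so the "<= 0" test treats both timeout and interruption as failure. A rough userspace analogue of such a bounded condition wait, built on POSIX condition variables rather than the kernel API (build with -pthread); names here are illustrative only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool hs_activated;                 /* the awaited condition */

/* Wait up to 'secs' seconds for hs_activated; 0 on success, an errno on failure. */
static int wait_hs_activated(int secs)
{
    struct timespec deadline;
    int err = 0;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += secs;

    pthread_mutex_lock(&lock);
    while (!hs_activated && !err)
        err = pthread_cond_timedwait(&cond, &lock, &deadline);
    pthread_mutex_unlock(&lock);

    return hs_activated ? 0 : err;        /* ETIMEDOUT when the wait expires */
}

int main(void)
{
    printf("wait result: %d\n", wait_hs_activated(1));
    return 0;
}
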
@@ -586,8 +588,8 @@ int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
 
        auto_ds.auto_ds = DEEP_SLEEP_OFF;
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
-                                    DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+                               DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true);
 }
 EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
 
@@ -601,8 +603,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate)
 {
        int ret;
 
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
-                                   HostCmd_ACT_GEN_GET, 0, NULL);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+                              HostCmd_ACT_GEN_GET, 0, NULL, true);
 
        if (!ret) {
                if (priv->is_data_rate_auto)
@@ -698,8 +700,8 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
                pg->power_max = (s8) dbm;
                pg->ht_bandwidth = HT_BW_40;
        }
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG,
-                                   HostCmd_ACT_GEN_SET, 0, buf);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_TXPWR_CFG,
+                              HostCmd_ACT_GEN_SET, 0, buf, true);
 
        kfree(buf);
        return ret;
@@ -722,12 +724,11 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
        else
                adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
        sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS;
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
-                                   sub_cmd, BITMAP_STA_PS, NULL);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+                              sub_cmd, BITMAP_STA_PS, NULL, true);
        if ((!ret) && (sub_cmd == DIS_AUTO_PS))
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_PS_MODE_ENH,
-                                            GET_PS, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH,
+                                      GET_PS, 0, NULL, false);
 
        return ret;
 }
@@ -851,9 +852,9 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
                               struct mwifiex_ds_encrypt_key *encrypt_key)
 {
 
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
-                                    HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
-                                    encrypt_key);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+                               HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED,
+                               encrypt_key, true);
 }
 
 /*
@@ -865,6 +866,7 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
 static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
                              struct mwifiex_ds_encrypt_key *encrypt_key)
 {
+       struct mwifiex_adapter *adapter = priv->adapter;
        int ret;
        struct mwifiex_wep_key *wep_key;
        int index;
@@ -879,10 +881,17 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
                /* Copy the required key as the current key */
                wep_key = &priv->wep_key[index];
                if (!wep_key->key_length) {
-                       dev_err(priv->adapter->dev,
+                       dev_err(adapter->dev,
                                "key not set, so cannot enable it\n");
                        return -1;
                }
+
+               if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) {
+                       memcpy(encrypt_key->key_material,
+                              wep_key->key_material, wep_key->key_length);
+                       encrypt_key->key_len = wep_key->key_length;
+               }
+
                priv->wep_key_curr_index = (u16) index;
                priv->sec_info.wep_enabled = 1;
        } else {
@@ -897,21 +906,32 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
                priv->sec_info.wep_enabled = 1;
        }
        if (wep_key->key_length) {
+               void *enc_key;
+
+               if (encrypt_key->key_disable)
+                       memset(&priv->wep_key[index], 0,
+                              sizeof(struct mwifiex_wep_key));
+
+               if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+                       enc_key = encrypt_key;
+               else
+                       enc_key = NULL;
+
                /* Send request to firmware */
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_KEY_MATERIAL,
-                                            HostCmd_ACT_GEN_SET, 0, NULL);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+                                      HostCmd_ACT_GEN_SET, 0, enc_key, false);
                if (ret)
                        return ret;
        }
+
        if (priv->sec_info.wep_enabled)
                priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
        else
                priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
 
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
-                                   HostCmd_ACT_GEN_SET, 0,
-                                   &priv->curr_pkt_filter);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
+                              HostCmd_ACT_GEN_SET, 0,
+                              &priv->curr_pkt_filter, true);
 
        return ret;
 }
@@ -946,10 +966,9 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
                 */
                /* Send the key as PTK to firmware */
                encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
-               ret = mwifiex_send_cmd_async(priv,
-                                            HostCmd_CMD_802_11_KEY_MATERIAL,
-                                            HostCmd_ACT_GEN_SET,
-                                            KEY_INFO_ENABLED, encrypt_key);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+                                      HostCmd_ACT_GEN_SET,
+                                      KEY_INFO_ENABLED, encrypt_key, false);
                if (ret)
                        return ret;
 
@@ -973,15 +992,13 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
                encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST;
 
        if (remove_key)
-               ret = mwifiex_send_cmd_sync(priv,
-                                           HostCmd_CMD_802_11_KEY_MATERIAL,
-                                           HostCmd_ACT_GEN_SET,
-                                           !KEY_INFO_ENABLED, encrypt_key);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+                                      HostCmd_ACT_GEN_SET,
+                                      !KEY_INFO_ENABLED, encrypt_key, true);
        else
-               ret = mwifiex_send_cmd_sync(priv,
-                                           HostCmd_CMD_802_11_KEY_MATERIAL,
-                                           HostCmd_ACT_GEN_SET,
-                                           KEY_INFO_ENABLED, encrypt_key);
+               ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+                                      HostCmd_ACT_GEN_SET,
+                                      KEY_INFO_ENABLED, encrypt_key, true);
 
        return ret;
 }
@@ -1044,19 +1061,27 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
 
        memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
        encrypt_key.key_len = key_len;
+       encrypt_key.key_index = key_index;
 
        if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
                encrypt_key.is_igtk_key = true;
 
        if (!disable) {
-               encrypt_key.key_index = key_index;
                if (key_len)
                        memcpy(encrypt_key.key_material, key, key_len);
+               else
+                       encrypt_key.is_current_wep_key = true;
+
                if (mac_addr)
                        memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
-               if (kp && kp->seq && kp->seq_len)
+               if (kp && kp->seq && kp->seq_len) {
                        memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
+                       encrypt_key.pn_len = kp->seq_len;
+                       encrypt_key.is_rx_seq_valid = true;
+               }
        } else {
+               if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+                       return 0;
                encrypt_key.key_disable = true;
                if (mac_addr)
                        memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
@@ -1077,8 +1102,8 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
        struct mwifiex_ver_ext ver_ext;
 
        memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT,
-                                 HostCmd_ACT_GEN_GET, 0, &ver_ext))
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
+                            HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
                return -1;
 
        return 0;
@@ -1103,8 +1128,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
                        ieee80211_frequency_to_channel(chan->center_freq);
                roc_cfg.duration = cpu_to_le32(duration);
        }
-       if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN,
-                                 action, 0, &roc_cfg)) {
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
+                            action, 0, &roc_cfg, true)) {
                dev_err(priv->adapter->dev, "failed to remain on channel\n");
                return -1;
        }
@@ -1136,8 +1161,8 @@ mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role)
                break;
        }
 
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE,
-                             HostCmd_ACT_GEN_SET, 0, NULL);
+       mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
+                        HostCmd_ACT_GEN_SET, 0, NULL, true);
 
        return mwifiex_sta_init_cmd(priv, false);
 }
@@ -1152,8 +1177,8 @@ int
 mwifiex_get_stats_info(struct mwifiex_private *priv,
                       struct mwifiex_ds_get_stats *log)
 {
-       return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG,
-                                    HostCmd_ACT_GEN_GET, 0, log);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_GET_LOG,
+                               HostCmd_ACT_GEN_GET, 0, log, true);
 }
 
 /*
@@ -1195,8 +1220,7 @@ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv,
                return -1;
        }
 
-       return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw);
-
+       return mwifiex_send_cmd(priv, cmd_no, action, 0, reg_rw, true);
 }
 
 /*
@@ -1261,8 +1285,8 @@ mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes,
        rd_eeprom.byte_count = cpu_to_le16((u16) bytes);
 
        /* Send request to firmware */
-       ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
-                                   HostCmd_ACT_GEN_GET, 0, &rd_eeprom);
+       ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS,
+                              HostCmd_ACT_GEN_GET, 0, &rd_eeprom, true);
 
        if (!ret)
                memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA);
@@ -1391,7 +1415,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
  * with requisite parameters and calls the IOCTL handler.
  */
 int
-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
+mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
 {
        struct mwifiex_ds_misc_gen_ie gen_ie;
 
index 4651d676df380c16f9ec8577a1f97e4ab72a4b8c..ed26387eccf56db59bca98f7ed6fd7e6a4065aae 100644 (file)
@@ -88,11 +88,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
        struct rxpd *local_rx_pd;
        int hdr_chop;
        struct ethhdr *eth;
+       u16 rx_pkt_off, rx_pkt_len;
+       u8 *offset;
 
        local_rx_pd = (struct rxpd *) (skb->data);
 
-       rx_pkt_hdr = (void *)local_rx_pd +
-                    le16_to_cpu(local_rx_pd->rx_pkt_offset);
+       rx_pkt_off = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+       rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+       rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
 
        if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
                     sizeof(bridge_tunnel_header))) ||
@@ -142,6 +145,12 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
                return 0;
        }
 
+       if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+           ntohs(rx_pkt_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
+               offset = (u8 *)local_rx_pd + rx_pkt_off;
+               mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
+       }
+
        priv->rxpd_rate = local_rx_pd->rx_rate;
 
        priv->rxpd_htinfo = local_rx_pd->ht_info;
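
The receive path above now inspects the frame's protocol field and, when the firmware advertises TDLS support, hands frames carrying ETH_P_TDLS (0x890d) to the TDLS action-frame parser before normal processing continues. A minimal sketch of that ethertype check, assuming a plain Ethernet II header rather than the driver's rxpd layout:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_P_TDLS 0x890d   /* TDLS payload ethertype */

/* Return 1 if the frame's ethertype marks a TDLS payload. */
static int is_tdls_frame(const uint8_t *frame, size_t len)
{
    uint16_t proto;

    if (len < 14)
        return 0;
    memcpy(&proto, frame + 12, sizeof(proto));   /* bytes 12-13: ethertype */
    return ntohs(proto) == ETH_P_TDLS;
}

int main(void)
{
    uint8_t frame[64] = { 0 };

    frame[12] = 0x89;
    frame[13] = 0x0d;
    printf("TDLS frame: %d\n", is_tdls_frame(frame, sizeof(frame)));
    return 0;
}
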
@@ -192,26 +201,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
                return ret;
        }
 
-       if (rx_pkt_type == PKT_TYPE_AMSDU) {
-               struct sk_buff_head list;
-               struct sk_buff *rx_skb;
-
-               __skb_queue_head_init(&list);
-
-               skb_pull(skb, rx_pkt_offset);
-               skb_trim(skb, rx_pkt_length);
-
-               ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
-                                        priv->wdev->iftype, 0, false);
-
-               while (!skb_queue_empty(&list)) {
-                       rx_skb = __skb_dequeue(&list);
-                       ret = mwifiex_recv_packet(priv, rx_skb);
-                       if (ret == -1)
-                               dev_err(adapter->dev, "Rx of A-MSDU failed");
-               }
-               return 0;
-       } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+       if (rx_pkt_type == PKT_TYPE_MGMT) {
                ret = mwifiex_process_mgmt_packet(priv, skb);
                if (ret)
                        dev_err(adapter->dev, "Rx of mgmt packet failed");
index 354d64c9606ff2ab7788206aeee464265a127225..1236a5de7bca833adfd0eab1ed6cce865047479b 100644 (file)
@@ -95,6 +95,9 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
                }
        }
 
+       if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+               local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
        /* Offset of actual data */
        pkt_offset = sizeof(struct txpd) + pad;
        if (pkt_type == PKT_TYPE_MGMT) {
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
new file mode 100644 (file)
index 0000000..97662a1
--- /dev/null
@@ -0,0 +1,1044 @@
+/* Marvell Wireless LAN device driver: TDLS handling
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License").  You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "wmm.h"
+#include "11n.h"
+#include "11n_rxreorder.h"
+#include "11ac.h"
+
+#define TDLS_REQ_FIX_LEN      6
+#define TDLS_RESP_FIX_LEN     8
+#define TDLS_CONFIRM_FIX_LEN  6
+
+static void
+mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       struct list_head *tid_list;
+       struct sk_buff *skb, *tmp;
+       struct mwifiex_txinfo *tx_info;
+       unsigned long flags;
+       u32 tid;
+       u8 tid_down;
+
+       dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
+               if (!ether_addr_equal(mac, skb->data))
+                       continue;
+
+               __skb_unlink(skb, &priv->tdls_txq);
+               tx_info = MWIFIEX_SKB_TXCB(skb);
+               tid = skb->priority;
+               tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+
+               if (status == TDLS_SETUP_COMPLETE) {
+                       ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
+                       ra_list->tdls_link = true;
+                       tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+               } else {
+                       tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+                       if (!list_empty(tid_list))
+                               ra_list = list_first_entry(tid_list,
+                                             struct mwifiex_ra_list_tbl, list);
+                       else
+                               ra_list = NULL;
+                       tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
+               }
+
+               if (!ra_list) {
+                       mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+                       continue;
+               }
+
+               skb_queue_tail(&ra_list->skb_head, skb);
+
+               ra_list->ba_pkt_count++;
+               ra_list->total_pkt_count++;
+
+               if (atomic_read(&priv->wmm.highest_queued_prio) <
+                                                      tos_to_tid_inv[tid_down])
+                       atomic_set(&priv->wmm.highest_queued_prio,
+                                  tos_to_tid_inv[tid_down]);
+
+               atomic_inc(&priv->wmm.tx_pkts_queued);
+       }
+
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+       return;
+}
+
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       struct list_head *ra_list_head;
+       struct sk_buff *skb, *tmp;
+       unsigned long flags;
+       int i;
+
+       dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       for (i = 0; i < MAX_NUM_TID; i++) {
+               if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
+                       ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
+                       list_for_each_entry(ra_list, ra_list_head, list) {
+                               skb_queue_walk_safe(&ra_list->skb_head, skb,
+                                                   tmp) {
+                                       if (!ether_addr_equal(mac, skb->data))
+                                               continue;
+                                       __skb_unlink(skb, &ra_list->skb_head);
+                                       atomic_dec(&priv->wmm.tx_pkts_queued);
+                                       ra_list->total_pkt_count--;
+                                       skb_queue_tail(&priv->tdls_txq, skb);
+                               }
+                       }
+               }
+       }
+
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+       return;
+}
+
+/* This function appends the supported-rates elements to a TDLS frame. */
+static int
+mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
+                            struct sk_buff *skb)
+{
+       u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
+       u16 rates_size, supp_rates_size, ext_rates_size;
+
+       memset(rates, 0, sizeof(rates));
+       rates_size = mwifiex_get_supported_rates(priv, rates);
+
+       supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
+
+       if (skb_tailroom(skb) < rates_size + 4) {
+               dev_err(priv->adapter->dev,
+                       "Insufficient space while adding rates\n");
+               return -ENOMEM;
+       }
+
+       pos = skb_put(skb, supp_rates_size + 2);
+       *pos++ = WLAN_EID_SUPP_RATES;
+       *pos++ = supp_rates_size;
+       memcpy(pos, rates, supp_rates_size);
+
+       if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
+               ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
+               pos = skb_put(skb, ext_rates_size + 2);
+               *pos++ = WLAN_EID_EXT_SUPP_RATES;
+               *pos++ = ext_rates_size;
+               memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
+                      ext_rates_size);
+       }
+
+       return 0;
+}
+
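
mwifiex_tdls_append_rates_ie() above splits the station's rate set between a Supported Rates element (capped at MWIFIEX_TDLS_SUPPORTED_RATES entries) and an Extended Supported Rates element for whatever is left over. A self-contained sketch of that split; the cap of eight is an assumption, since the driver's constant is not visible in this hunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EID_SUPP_RATES      1
#define EID_EXT_SUPP_RATES  50
#define MAX_SUPP_RATES_IE   8     /* assumed cap for the first element */

/* Write Supported Rates, plus Extended Supported Rates if needed; return bytes used. */
static size_t append_rates(uint8_t *buf, const uint8_t *rates, uint8_t n)
{
    uint8_t first = n > MAX_SUPP_RATES_IE ? MAX_SUPP_RATES_IE : n;
    size_t off = 0;

    buf[off++] = EID_SUPP_RATES;
    buf[off++] = first;
    memcpy(buf + off, rates, first);
    off += first;

    if (n > first) {
        buf[off++] = EID_EXT_SUPP_RATES;
        buf[off++] = n - first;
        memcpy(buf + off, rates + first, n - first);
        off += n - first;
    }
    return off;
}

int main(void)
{
    uint8_t rates[12] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12,
                          0x18, 0x24, 0x30, 0x48, 0x60, 0x6c };
    uint8_t ies[64];

    printf("wrote %zu bytes of rate IEs\n", append_rates(ies, rates, 12));
    return 0;
}
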
+static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
+                               struct sk_buff *skb)
+{
+       struct ieee_types_assoc_rsp *assoc_rsp;
+       u8 *pos;
+
+       assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
+       pos = (void *)skb_put(skb, 4);
+       *pos++ = WLAN_EID_AID;
+       *pos++ = 2;
+       memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
+
+       return;
+}
+
+static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
+                                     struct sk_buff *skb)
+{
+       struct ieee80211_vht_cap vht_cap;
+       u8 *pos;
+
+       pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+       *pos++ = WLAN_EID_VHT_CAPABILITY;
+       *pos++ = sizeof(struct ieee80211_vht_cap);
+
+       memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
+
+       mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
+       memcpy(pos, &vht_cap, sizeof(vht_cap));
+
+       return 0;
+}
+
+static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
+                                    u8 *mac, struct sk_buff *skb)
+{
+       struct mwifiex_bssdescriptor *bss_desc;
+       struct ieee80211_vht_operation *vht_oper;
+       struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u8 supp_chwd_set, peer_supp_chwd_set;
+       u8 *pos, ap_supp_chwd_set, chan_bw;
+       u16 mcs_map_user, mcs_map_resp, mcs_map_result;
+       u16 mcs_user, mcs_resp, nss;
+       u32 usr_vht_cap_info;
+
+       bss_desc = &priv->curr_bss_params.bss_descriptor;
+
+       sta_ptr = mwifiex_get_sta_entry(priv, mac);
+       if (unlikely(!sta_ptr)) {
+               dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+               return -1;
+       }
+
+       if (!mwifiex_is_bss_in_11ac_mode(priv)) {
+               if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
+                  WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+                       dev_dbg(adapter->dev,
+                               "TDLS peer doesn't support wider bandwidth\n");
+                       return 0;
+               }
+       } else {
+               ap_vht_cap = bss_desc->bcn_vht_cap;
+       }
+
+       pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
+       *pos++ = WLAN_EID_VHT_OPERATION;
+       *pos++ = sizeof(struct ieee80211_vht_operation);
+       vht_oper = (struct ieee80211_vht_operation *)pos;
+
+       if (bss_desc->bss_band & BAND_A)
+               usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+       else
+               usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+       /* find the minimum bandwidth between the AP and TDLS peers */
+       vht_cap = &sta_ptr->tdls_cap.vhtcap;
+       supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
+       peer_supp_chwd_set =
+                        GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
+       supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);
+
+       /* We need to check the AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */
+
+       if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
+           WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+               ap_supp_chwd_set =
+                     GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
+               supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
+       }
+
+       switch (supp_chwd_set) {
+       case IEEE80211_VHT_CHANWIDTH_80MHZ:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_160MHZ:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+               break;
+       default:
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+               break;
+       }
+
+       mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+       mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
+       mcs_map_result = 0;
+
+       for (nss = 1; nss <= 8; nss++) {
+               mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+               mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+
+               if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+                   (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+                       SET_VHTNSSMCS(mcs_map_result, nss,
+                                     IEEE80211_VHT_MCS_NOT_SUPPORTED);
+               else
+                       SET_VHTNSSMCS(mcs_map_result, nss,
+                                     min_t(u16, mcs_user, mcs_resp));
+       }
+
+       vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);
+
+       switch (vht_oper->chan_width) {
+       case IEEE80211_VHT_CHANWIDTH_80MHZ:
+               chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_160MHZ:
+               chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+               break;
+       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+               chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+               break;
+       default:
+               chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
+               break;
+       }
+       vht_oper->center_freq_seg1_idx =
+                       mwifiex_get_center_freq_index(priv, BAND_AAC,
+                                                     bss_desc->channel,
+                                                     chan_bw);
+
+       return 0;
+}
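
The per-stream minimum taken in the MCS map loop above can be shown in isolation. The sketch below assumes the standard VHT MCS map layout (two bits per spatial stream, value 3 meaning "not supported"); the helper names are illustrative stand-ins for the driver's GET_VHTNSSMCS/SET_VHTNSSMCS macros.

#include <stdio.h>
#include <stdint.h>

#define MCS_NOT_SUPPORTED 3

static unsigned get_nss_mcs(uint16_t map, unsigned nss)
{
        return (map >> (2 * (nss - 1))) & 0x3;
}

static uint16_t set_nss_mcs(uint16_t map, unsigned nss, unsigned mcs)
{
        map &= ~(0x3 << (2 * (nss - 1)));
        return map | (mcs << (2 * (nss - 1)));
}

static uint16_t intersect_mcs_maps(uint16_t a, uint16_t b)
{
        uint16_t out = 0;
        unsigned nss;

        for (nss = 1; nss <= 8; nss++) {
                unsigned ma = get_nss_mcs(a, nss);
                unsigned mb = get_nss_mcs(b, nss);

                if (ma == MCS_NOT_SUPPORTED || mb == MCS_NOT_SUPPORTED)
                        out = set_nss_mcs(out, nss, MCS_NOT_SUPPORTED);
                else
                        out = set_nss_mcs(out, nss, ma < mb ? ma : mb);
        }
        return out;
}

int main(void)
{
        /* 0xfffa: NSS 1-2 support MCS 0-9; 0xfffe: only NSS 1 does */
        printf("0x%04x\n", (unsigned)intersect_mcs_maps(0xfffa, 0xfffe));
        return 0;
}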
+
+static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
+                                      struct sk_buff *skb)
+{
+       struct ieee_types_extcap *extcap;
+
+       extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
+       extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
+       extcap->ieee_hdr.len = 8;
+       memset(extcap->ext_capab, 0, 8);
+       extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+
+       if (priv->adapter->is_hw_11ac_capable)
+               extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
+}
+
+static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
+{
+       u8 *pos = (void *)skb_put(skb, 3);
+
+       *pos++ = WLAN_EID_QOS_CAPA;
+       *pos++ = 1;
+       *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
+}
+
+static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
+                            u8 *peer, u8 action_code, u8 dialog_token,
+                            u16 status_code, struct sk_buff *skb)
+{
+       struct ieee80211_tdls_data *tf;
+       int ret;
+       u16 capab;
+       struct ieee80211_ht_cap *ht_cap;
+       u8 radio, *pos;
+
+       capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+       tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+       memcpy(tf->da, peer, ETH_ALEN);
+       memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
+       tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+       tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+               skb_put(skb, sizeof(tf->u.setup_req));
+               tf->u.setup_req.dialog_token = dialog_token;
+               tf->u.setup_req.capability = cpu_to_le16(capab);
+               ret = mwifiex_tdls_append_rates_ie(priv, skb);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+
+               pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+               *pos++ = WLAN_EID_HT_CAPABILITY;
+               *pos++ = sizeof(struct ieee80211_ht_cap);
+               ht_cap = (void *)pos;
+               radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+               ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+
+               if (priv->adapter->is_hw_11ac_capable) {
+                       ret = mwifiex_tdls_add_vht_capab(priv, skb);
+                       if (ret) {
+                               dev_kfree_skb_any(skb);
+                               return ret;
+                       }
+                       mwifiex_tdls_add_aid(priv, skb);
+               }
+
+               mwifiex_tdls_add_ext_capab(priv, skb);
+               mwifiex_tdls_add_qos_capab(skb);
+               break;
+
+       case WLAN_TDLS_SETUP_RESPONSE:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+               skb_put(skb, sizeof(tf->u.setup_resp));
+               tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+               tf->u.setup_resp.dialog_token = dialog_token;
+               tf->u.setup_resp.capability = cpu_to_le16(capab);
+               ret = mwifiex_tdls_append_rates_ie(priv, skb);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+
+               pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+               *pos++ = WLAN_EID_HT_CAPABILITY;
+               *pos++ = sizeof(struct ieee80211_ht_cap);
+               ht_cap = (void *)pos;
+               radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+               ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+
+               if (priv->adapter->is_hw_11ac_capable) {
+                       ret = mwifiex_tdls_add_vht_capab(priv, skb);
+                       if (ret) {
+                               dev_kfree_skb_any(skb);
+                               return ret;
+                       }
+                       mwifiex_tdls_add_aid(priv, skb);
+               }
+
+               mwifiex_tdls_add_ext_capab(priv, skb);
+               mwifiex_tdls_add_qos_capab(skb);
+               break;
+
+       case WLAN_TDLS_SETUP_CONFIRM:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+               skb_put(skb, sizeof(tf->u.setup_cfm));
+               tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+               tf->u.setup_cfm.dialog_token = dialog_token;
+               if (priv->adapter->is_hw_11ac_capable) {
+                       ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
+                       if (ret) {
+                               dev_kfree_skb_any(skb);
+                               return ret;
+                       }
+               }
+               break;
+
+       case WLAN_TDLS_TEARDOWN:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_TEARDOWN;
+               skb_put(skb, sizeof(tf->u.teardown));
+               tf->u.teardown.reason_code = cpu_to_le16(status_code);
+               break;
+
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               tf->category = WLAN_CATEGORY_TDLS;
+               tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+               skb_put(skb, sizeof(tf->u.discover_req));
+               tf->u.discover_req.dialog_token = dialog_token;
+               break;
+       default:
+               dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+{
+       struct ieee80211_tdls_lnkie *lnkid;
+
+       lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+       lnkid->ie_type = WLAN_EID_LINK_ID;
+       lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
+                       sizeof(struct ieee_types_header);
+
+       memcpy(lnkid->bssid, bssid, ETH_ALEN);
+       memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+       memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
+                                u8 *peer, u8 action_code, u8 dialog_token,
+                                u16 status_code, const u8 *extra_ies,
+                                size_t extra_ies_len)
+{
+       struct sk_buff *skb;
+       struct mwifiex_txinfo *tx_info;
+       struct timeval tv;
+       int ret;
+       u16 skb_len;
+
+       skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+                 max(sizeof(struct ieee80211_mgmt),
+                     sizeof(struct ieee80211_tdls_data)) +
+                 MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+                 MWIFIEX_SUPPORTED_RATES +
+                 3 + /* Qos Info */
+                 sizeof(struct ieee_types_extcap) +
+                 sizeof(struct ieee80211_ht_cap) +
+                 sizeof(struct ieee_types_bss_co_2040) +
+                 sizeof(struct ieee80211_ht_operation) +
+                 sizeof(struct ieee80211_tdls_lnkie) +
+                 extra_ies_len;
+
+       if (priv->adapter->is_hw_11ac_capable)
+               skb_len += sizeof(struct ieee_types_vht_cap) +
+                          sizeof(struct ieee_types_vht_oper) +
+                          sizeof(struct ieee_types_aid);
+
+       skb = dev_alloc_skb(skb_len);
+       if (!skb) {
+               dev_err(priv->adapter->dev,
+                       "failed to allocate skb for management frame\n");
+               return -ENOMEM;
+       }
+       skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+       case WLAN_TDLS_SETUP_CONFIRM:
+       case WLAN_TDLS_TEARDOWN:
+       case WLAN_TDLS_DISCOVERY_REQUEST:
+               ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  skb);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+               if (extra_ies_len)
+                       memcpy(skb_put(skb, extra_ies_len), extra_ies,
+                              extra_ies_len);
+               mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
+                                        priv->cfg_bssid);
+               break;
+       case WLAN_TDLS_SETUP_RESPONSE:
+               ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+                                                  dialog_token, status_code,
+                                                  skb);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+               if (extra_ies_len)
+                       memcpy(skb_put(skb, extra_ies_len), extra_ies,
+                              extra_ies_len);
+               mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+                                        priv->cfg_bssid);
+               break;
+       }
+
+       switch (action_code) {
+       case WLAN_TDLS_SETUP_REQUEST:
+       case WLAN_TDLS_SETUP_RESPONSE:
+               skb->priority = MWIFIEX_PRIO_BK;
+               break;
+       default:
+               skb->priority = MWIFIEX_PRIO_VI;
+               break;
+       }
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       tx_info->bss_num = priv->bss_num;
+       tx_info->bss_type = priv->bss_type;
+
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+       mwifiex_queue_tx_pkt(priv, skb);
+
+       return 0;
+}
+
+static int
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+                                   u8 action_code, u8 dialog_token,
+                                   u16 status_code, struct sk_buff *skb)
+{
+       struct ieee80211_mgmt *mgmt;
+       u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+       int ret;
+       u16 capab;
+       struct ieee80211_ht_cap *ht_cap;
+       u8 radio, *pos;
+
+       capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+       mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
+
+       memset(mgmt, 0, 24);
+       memcpy(mgmt->da, peer, ETH_ALEN);
+       memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+       memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                         IEEE80211_STYPE_ACTION);
+
+       /* add address 4 */
+       pos = skb_put(skb, ETH_ALEN);
+
+       switch (action_code) {
+       case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+               skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
+               mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+               mgmt->u.action.u.tdls_discover_resp.action_code =
+                                             WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+               mgmt->u.action.u.tdls_discover_resp.dialog_token =
+                                                                  dialog_token;
+               mgmt->u.action.u.tdls_discover_resp.capability =
+                                                            cpu_to_le16(capab);
+               /* move the action fields forward to make room for address 4 */
+               memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+                       sizeof(mgmt->u.action.u.tdls_discover_resp));
+               /* init address 4 */
+               memcpy(pos, bc_addr, ETH_ALEN);
+
+               ret = mwifiex_tdls_append_rates_ie(priv, skb);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+
+               pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+               *pos++ = WLAN_EID_HT_CAPABILITY;
+               *pos++ = sizeof(struct ieee80211_ht_cap);
+               ht_cap = (void *)pos;
+               radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+               ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+               if (ret) {
+                       dev_kfree_skb_any(skb);
+                       return ret;
+               }
+
+               if (priv->adapter->is_hw_11ac_capable) {
+                       ret = mwifiex_tdls_add_vht_capab(priv, skb);
+                       if (ret) {
+                               dev_kfree_skb_any(skb);
+                               return ret;
+                       }
+                       mwifiex_tdls_add_aid(priv, skb);
+               }
+
+               mwifiex_tdls_add_ext_capab(priv, skb);
+               mwifiex_tdls_add_qos_capab(skb);
+               break;
+       default:
+               dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
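
The address 4 handling above writes the action fields immediately after the fixed header, then shifts them forward with memmove and drops the broadcast address into the gap. A standalone sketch of that pattern (not driver code), with illustrative sizes and fill bytes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ADDR_LEN 6

int main(void)
{
        uint8_t frame[64];
        const uint8_t bcast[ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        size_t hdr_len = 24;    /* fixed header already written */
        size_t body_len = 5;    /* action fields written right after it */

        memset(frame, 0xaa, hdr_len);               /* pretend header */
        memset(frame + hdr_len, 0xbb, body_len);    /* pretend action body */

        /* shift the body up by ADDR_LEN, then drop address 4 into the gap */
        memmove(frame + hdr_len + ADDR_LEN, frame + hdr_len, body_len);
        memcpy(frame + hdr_len, bcast, ADDR_LEN);

        printf("total length now %zu bytes\n", hdr_len + ADDR_LEN + body_len);
        return 0;
}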
+
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+                                u8 *peer, u8 action_code, u8 dialog_token,
+                                u16 status_code, const u8 *extra_ies,
+                                size_t extra_ies_len)
+{
+       struct sk_buff *skb;
+       struct mwifiex_txinfo *tx_info;
+       struct timeval tv;
+       u8 *pos;
+       u32 pkt_type, tx_control;
+       u16 pkt_len, skb_len;
+
+       skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+                 max(sizeof(struct ieee80211_mgmt),
+                     sizeof(struct ieee80211_tdls_data)) +
+                 MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+                 MWIFIEX_SUPPORTED_RATES +
+                 sizeof(struct ieee_types_extcap) +
+                 sizeof(struct ieee80211_ht_cap) +
+                 sizeof(struct ieee_types_bss_co_2040) +
+                 sizeof(struct ieee80211_ht_operation) +
+                 sizeof(struct ieee80211_tdls_lnkie) +
+                 extra_ies_len +
+                 3 + /* Qos Info */
+                 ETH_ALEN; /* Address4 */
+
+       if (priv->adapter->is_hw_11ac_capable)
+               skb_len += sizeof(struct ieee_types_vht_cap) +
+                          sizeof(struct ieee_types_vht_oper) +
+                          sizeof(struct ieee_types_aid);
+
+       skb = dev_alloc_skb(skb_len);
+       if (!skb) {
+               dev_err(priv->adapter->dev,
+                       "failed to allocate skb for management frame\n");
+               return -ENOMEM;
+       }
+
+       skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+       pkt_type = PKT_TYPE_MGMT;
+       tx_control = 0;
+       pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+       memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+       memcpy(pos, &pkt_type, sizeof(pkt_type));
+       memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
+
+       if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
+                                               dialog_token, status_code,
+                                               skb)) {
+               dev_kfree_skb_any(skb);
+               return -EINVAL;
+       }
+
+       if (extra_ies_len)
+               memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+       /* the TDLS link IE is always added last; we are the responder */
+
+       mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+                                priv->cfg_bssid);
+
+       skb->priority = MWIFIEX_PRIO_VI;
+
+       tx_info = MWIFIEX_SKB_TXCB(skb);
+       tx_info->bss_num = priv->bss_num;
+       tx_info->bss_type = priv->bss_type;
+       tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+
+       pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
+       memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
+              sizeof(pkt_len));
+       do_gettimeofday(&tv);
+       skb->tstamp = timeval_to_ktime(tv);
+       mwifiex_queue_tx_pkt(priv, skb);
+
+       return 0;
+}
+
+/* This function processes a TDLS action frame received from the peer.
+ * Peer capabilities are stored in the station node structure.
+ */
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+                                      u8 *buf, int len)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       u8 *peer, *pos, *end;
+       u8 i, action, basic;
+       int ie_len = 0;
+
+       if (len < (sizeof(struct ethhdr) + 3))
+               return;
+       if (*(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
+               return;
+       if (*(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
+               return;
+
+       peer = buf + ETH_ALEN;
+       action = *(buf + sizeof(struct ethhdr) + 2);
+
+       /* just handle TDLS setup request/response/confirm */
+       if (action > WLAN_TDLS_SETUP_CONFIRM)
+               return;
+
+       dev_dbg(priv->adapter->dev,
+               "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+
+       sta_ptr = mwifiex_add_sta_entry(priv, peer);
+       if (!sta_ptr)
+               return;
+
+       switch (action) {
+       case WLAN_TDLS_SETUP_REQUEST:
+               if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
+                       return;
+
+               pos = buf + sizeof(struct ethhdr) + 4;
+               /* payload type 1 + category 1 + action 1 + dialog token 1 */
+               sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+               ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+               pos += 2;
+               break;
+
+       case WLAN_TDLS_SETUP_RESPONSE:
+               if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
+                       return;
+               /* payload type 1 + category 1 + action 1 + dialog token 1 + status code 2 */
+               pos = buf + sizeof(struct ethhdr) + 6;
+               sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+               ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+               pos += 2;
+               break;
+
+       case WLAN_TDLS_SETUP_CONFIRM:
+               if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+                       return;
+               pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+               ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+               break;
+       default:
+               dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
+               return;
+       }
+
+       for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+               if (pos + 2 + pos[1] > end)
+                       break;
+
+               switch (*pos) {
+               case WLAN_EID_SUPP_RATES:
+                       sta_ptr->tdls_cap.rates_len = pos[1];
+                       for (i = 0; i < pos[1]; i++)
+                               sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+                       break;
+
+               case WLAN_EID_EXT_SUPP_RATES:
+                       basic = sta_ptr->tdls_cap.rates_len;
+                       for (i = 0; i < pos[1]; i++)
+                               sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+                       sta_ptr->tdls_cap.rates_len += pos[1];
+                       break;
+               case WLAN_EID_HT_CAPABILITY:
+                       memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+                              sizeof(struct ieee80211_ht_cap));
+                       sta_ptr->is_11n_enabled = 1;
+                       break;
+               case WLAN_EID_HT_OPERATION:
+                       memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+                              sizeof(struct ieee80211_ht_operation));
+                       break;
+               case WLAN_EID_BSS_COEX_2040:
+                       sta_ptr->tdls_cap.coex_2040 = pos[2];
+                       break;
+               case WLAN_EID_EXT_CAPABILITY:
+                       memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+                              sizeof(struct ieee_types_header) +
+                              min_t(u8, pos[1], 8));
+                       break;
+               case WLAN_EID_RSN:
+                       memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+                              sizeof(struct ieee_types_header) + pos[1]);
+                       break;
+               case WLAN_EID_QOS_CAPA:
+                       sta_ptr->tdls_cap.qos_info = pos[2];
+                       break;
+               case WLAN_EID_VHT_OPERATION:
+                       if (priv->adapter->is_hw_11ac_capable)
+                               memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+                                      sizeof(struct ieee80211_vht_operation));
+                       break;
+               case WLAN_EID_VHT_CAPABILITY:
+                       if (priv->adapter->is_hw_11ac_capable) {
+                               memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+                                      sizeof(struct ieee80211_vht_cap));
+                               sta_ptr->is_11ac_enabled = 1;
+                       }
+                       break;
+               case WLAN_EID_AID:
+                       if (priv->adapter->is_hw_11ac_capable)
+                               sta_ptr->tdls_cap.aid =
+                                             le16_to_cpu(*(__le16 *)(pos + 2));
+               default:
+                       break;
+               }
+       }
+
+       return;
+}
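
The parsing loop above is a standard bounded IE walk: at least two bytes (ID and length) must remain, and an element whose declared length runs past the buffer stops the walk. A standalone sketch (not driver code), with illustrative element contents:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void walk_ies(const uint8_t *pos, size_t len)
{
        const uint8_t *end = pos + len;

        while (pos + 1 < end) {                 /* need at least ID + length */
                uint8_t id = pos[0];
                uint8_t elen = pos[1];

                if (pos + 2 + elen > end)       /* truncated element: stop */
                        break;

                printf("IE %u, %u byte(s) of data\n", id, elen);
                pos += 2 + elen;
        }
}

int main(void)
{
        /* Supported Rates (ID 1) with 4 rates, then QoS Capability (ID 46) */
        const uint8_t ies[] = { 1, 4, 0x82, 0x84, 0x8b, 0x96, 46, 1, 0x0f };

        walk_ies(ies, sizeof(ies));
        return 0;
}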
+
+static int
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_ds_tdls_oper tdls_oper;
+
+       memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+       sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+       if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
+               dev_err(priv->adapter->dev,
+                       "link absent for peer %pM; cannot config\n", peer);
+               return -EINVAL;
+       }
+
+       memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+       tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+                               HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_ds_tdls_oper tdls_oper;
+
+       memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+       sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+       if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
+               dev_dbg(priv->adapter->dev,
+                       "Setup already in progress for peer %pM\n", peer);
+               return 0;
+       }
+
+       sta_ptr = mwifiex_add_sta_entry(priv, peer);
+       if (!sta_ptr)
+               return -ENOMEM;
+
+       sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
+       mwifiex_hold_tdls_packets(priv, peer);
+       memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+       tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+                               HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_ds_tdls_oper tdls_oper;
+       unsigned long flags;
+
+       memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+       sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+       if (sta_ptr) {
+               if (sta_ptr->is_11n_enabled) {
+                       mwifiex_11n_cleanup_reorder_tbl(priv);
+                       spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+                                         flags);
+                       mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+                       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+                                              flags);
+               }
+               mwifiex_del_sta_entry(priv, peer);
+       }
+
+       mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+       memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+       tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+                               HostCmd_ACT_GEN_SET, 0, &tdls_oper, true);
+}
+
+static int
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       struct ieee80211_mcs_info mcs;
+       unsigned long flags;
+       int i;
+
+       sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+       if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
+               dev_dbg(priv->adapter->dev,
+                       "tdls: enable link %pM success\n", peer);
+
+               sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
+
+               mcs = sta_ptr->tdls_cap.ht_capb.mcs;
+               if (mcs.rx_mask[0] != 0xff)
+                       sta_ptr->is_11n_enabled = true;
+               if (sta_ptr->is_11n_enabled) {
+                       if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
+                           IEEE80211_HT_CAP_MAX_AMSDU)
+                               sta_ptr->max_amsdu =
+                                       MWIFIEX_TX_DATA_BUF_SIZE_8K;
+                       else
+                               sta_ptr->max_amsdu =
+                                       MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+                       for (i = 0; i < MAX_NUM_TID; i++)
+                               sta_ptr->ampdu_sta[i] =
+                                             priv->aggr_prio_tbl[i].ampdu_user;
+               } else {
+                       for (i = 0; i < MAX_NUM_TID; i++)
+                               sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+               }
+
+               memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+               mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+       } else {
+               dev_dbg(priv->adapter->dev,
+                       "tdls: enable link %pM failed\n", peer);
+               if (sta_ptr) {
+                       mwifiex_11n_cleanup_reorder_tbl(priv);
+                       spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+                                         flags);
+                       mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+                       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+                                              flags);
+                       mwifiex_del_sta_entry(priv, peer);
+               }
+               mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+
+               return -1;
+       }
+
+       return 0;
+}
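
The A-MSDU size chosen above when a link comes up depends on a single HT Capabilities Info bit. A standalone sketch (not driver code); the bit value follows the kernel's IEEE80211_HT_CAP_MAX_AMSDU definition, and the 4K/8K byte counts stand in for the driver's buffer-size constants:

#include <stdio.h>
#include <stdint.h>

#define HT_CAP_MAX_AMSDU  0x0800
#define AMSDU_SIZE_4K     4096
#define AMSDU_SIZE_8K     8192

static unsigned max_amsdu_size(uint16_t ht_cap_info)
{
        return (ht_cap_info & HT_CAP_MAX_AMSDU) ? AMSDU_SIZE_8K : AMSDU_SIZE_4K;
}

int main(void)
{
        printf("%u\n", max_amsdu_size(0x0800));   /* 8192 */
        printf("%u\n", max_amsdu_size(0x0000));   /* 4096 */
        return 0;
}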
+
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+{
+       switch (action) {
+       case MWIFIEX_TDLS_ENABLE_LINK:
+               return mwifiex_tdls_process_enable_link(priv, peer);
+       case MWIFIEX_TDLS_DISABLE_LINK:
+               return mwifiex_tdls_process_disable_link(priv, peer);
+       case MWIFIEX_TDLS_CREATE_LINK:
+               return mwifiex_tdls_process_create_link(priv, peer);
+       case MWIFIEX_TDLS_CONFIG_LINK:
+               return mwifiex_tdls_process_config_link(priv, peer);
+       }
+       return 0;
+}
+
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *sta_ptr;
+
+       sta_ptr = mwifiex_get_sta_entry(priv, mac);
+       if (sta_ptr)
+               return sta_ptr->tdls_status;
+
+       return TDLS_NOT_SETUP;
+}
+
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       struct mwifiex_ds_tdls_oper tdls_oper;
+       unsigned long flags;
+
+       if (list_empty(&priv->sta_list))
+               return;
+
+       list_for_each_entry(sta_ptr, &priv->sta_list, list) {
+               memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+
+               if (sta_ptr->is_11n_enabled) {
+                       mwifiex_11n_cleanup_reorder_tbl(priv);
+                       spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+                                         flags);
+                       mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+                       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+                                              flags);
+               }
+
+               mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
+                                            TDLS_LINK_TEARDOWN);
+               memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
+               tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+               if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
+                                    HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
+                       dev_warn(priv->adapter->dev,
+                                "Disable link failed for TDLS peer %pM",
+                                sta_ptr->mac_addr);
+       }
+
+       mwifiex_del_all_sta_list(priv);
+}
index 64424c81b44f5bf55601e75a8a4d47e51b6bc9b9..9be6544bddedf9371e79fd468ebb0aa2dbcabe54 100644 (file)
@@ -159,6 +159,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
                      struct cfg80211_ap_settings *params)
 {
        const u8 *ht_ie;
+       u16 cap_info;
 
        if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
                return;
@@ -168,6 +169,25 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        if (ht_ie) {
                memcpy(&bss_cfg->ht_cap, ht_ie + 2,
                       sizeof(struct ieee80211_ht_cap));
+               cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
+               memset(&bss_cfg->ht_cap.mcs, 0,
+                      priv->adapter->number_of_antenna);
+               switch (GET_RXSTBC(cap_info)) {
+               case MWIFIEX_RX_STBC1:
+                       /* HT_CAP 1X1 mode */
+                       memset(&bss_cfg->ht_cap.mcs, 0xff, 1);
+                       break;
+               case MWIFIEX_RX_STBC12: /* fall through */
+               case MWIFIEX_RX_STBC123:
+                       /* HT_CAP 2X2 mode */
+                       memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+                       break;
+               default:
+                       dev_warn(priv->adapter->dev,
+                                "Unsupported RX-STBC, default to 2x2\n");
+                       memset(&bss_cfg->ht_cap.mcs, 0xff, 2);
+                       break;
+               }
                priv->ap_11n_enabled = 1;
        } else {
                memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
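
The RX-STBC switch added above boils down to marking MCS 0-7 and, for two or more receive streams, MCS 8-15 as supported in the HT MCS rx_mask. A standalone sketch (not driver code; array size per the HT MCS info definition, names illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void fill_rx_mask(uint8_t rx_mask[10], unsigned rx_streams)
{
        memset(rx_mask, 0, 10);
        if (rx_streams >= 1)
                rx_mask[0] = 0xff;      /* MCS 0-7  (1 spatial stream)  */
        if (rx_streams >= 2)
                rx_mask[1] = 0xff;      /* MCS 8-15 (2 spatial streams) */
}

int main(void)
{
        uint8_t mask[10];

        fill_rx_mask(mask, 2);
        printf("%02x %02x\n", mask[0], mask[1]);   /* ff ff */
        return 0;
}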
@@ -226,8 +246,8 @@ void mwifiex_set_vht_width(struct mwifiex_private *priv,
        if (ap_11ac_enable && width >= NL80211_CHAN_WIDTH_80)
                vht_cfg.misc_config |= VHT_BW_80_160_80P80;
 
-       mwifiex_send_cmd_sync(priv, HostCmd_CMD_11AC_CFG,
-                             HostCmd_ACT_GEN_SET, 0, &vht_cfg);
+       mwifiex_send_cmd(priv, HostCmd_CMD_11AC_CFG,
+                        HostCmd_ACT_GEN_SET, 0, &vht_cfg, true);
 
        return;
 }
index 718066577c6c531f22c9f4387639237875f04862..92e77a398ecfcaa0f7bf3493864f4823eb30394d 100644 (file)
 #include "main.h"
 #include "11n.h"
 
-/*
- * This function will return the pointer to station entry in station list
- * table which matches specified mac address.
- * This function should be called after acquiring RA list spinlock.
- * NULL is returned if station entry is not found in associated STA list.
- */
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
-       struct mwifiex_sta_node *node;
-
-       if (!mac)
-               return NULL;
-
-       list_for_each_entry(node, &priv->sta_list, list) {
-               if (!memcmp(node->mac_addr, mac, ETH_ALEN))
-                       return node;
-       }
-
-       return NULL;
-}
-
-/*
- * This function will add a sta_node entry to associated station list
- * table with the given mac address.
- * If entry exist already, existing entry is returned.
- * If received mac address is NULL, NULL is returned.
- */
-static struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
-       struct mwifiex_sta_node *node;
-       unsigned long flags;
-
-       if (!mac)
-               return NULL;
-
-       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-       node = mwifiex_get_sta_entry(priv, mac);
-       if (node)
-               goto done;
-
-       node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
-       if (!node)
-               goto done;
-
-       memcpy(node->mac_addr, mac, ETH_ALEN);
-       list_add_tail(&node->list, &priv->sta_list);
-
-done:
-       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-       return node;
-}
-
-/*
- * This function will search for HT IE in association request IEs
- * and set station HT parameters accordingly.
- */
-static void
-mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
-                      int ies_len, struct mwifiex_sta_node *node)
-{
-       const struct ieee80211_ht_cap *ht_cap;
-
-       if (!ies)
-               return;
 
-       ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
-       if (ht_cap) {
-               node->is_11n_enabled = 1;
-               node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
-                                 IEEE80211_HT_CAP_MAX_AMSDU ?
-                                 MWIFIEX_TX_DATA_BUF_SIZE_8K :
-                                 MWIFIEX_TX_DATA_BUF_SIZE_4K;
-       } else {
-               node->is_11n_enabled = 0;
-       }
 
-       return;
-}
-
-/*
- * This function will delete a station entry from station list
- */
-static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
-       struct mwifiex_sta_node *node;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
-       node = mwifiex_get_sta_entry(priv, mac);
-       if (node) {
-               list_del(&node->list);
-               kfree(node);
-       }
-
-       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-       return;
-}
-
-/*
- * This function will delete all stations from associated station list.
- */
-static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
-{
-       struct mwifiex_sta_node *node, *tmp;
-       unsigned long flags;
-
-       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
-       list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
-               list_del(&node->list);
-               kfree(node);
-       }
-
-       INIT_LIST_HEAD(&priv->sta_list);
-       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-       return;
-}
 
 /*
  * This function handles AP interface specific events generated by firmware.
@@ -268,9 +150,9 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
        case EVENT_ADDBA:
                dev_dbg(adapter->dev, "event: ADDBA Request\n");
                if (priv->media_connected)
-                       mwifiex_send_cmd_async(priv, HostCmd_CMD_11N_ADDBA_RSP,
-                                              HostCmd_ACT_GEN_SET, 0,
-                                              adapter->event_body);
+                       mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
+                                        HostCmd_ACT_GEN_SET, 0,
+                                        adapter->event_body, false);
                break;
        case EVENT_DELBA:
                dev_dbg(adapter->dev, "event: DELBA Request\n");
@@ -284,6 +166,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                        mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
                }
                break;
+       case EVENT_EXT_SCAN_REPORT:
+               dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+               if (adapter->ext_scan)
+                       return mwifiex_handle_event_ext_scan_report(priv,
+                                               adapter->event_skb->data);
+               break;
        default:
                dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
                        eventcause);
index 3c74eb25492790410b9627dbb9a6ca0a0555f010..9a56bc61cb1d29993ebcc057fb4cf058bd1100f2 100644 (file)
@@ -284,27 +284,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
                return 0;
        }
 
-       if (le16_to_cpu(uap_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
-               struct sk_buff_head list;
-               struct sk_buff *rx_skb;
-
-               __skb_queue_head_init(&list);
-               skb_pull(skb, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
-               skb_trim(skb, le16_to_cpu(uap_rx_pd->rx_pkt_length));
-
-               ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
-                                        priv->wdev->iftype, 0, false);
-
-               while (!skb_queue_empty(&list)) {
-                       rx_skb = __skb_dequeue(&list);
-                       ret = mwifiex_recv_packet(priv, rx_skb);
-                       if (ret)
-                               dev_err(adapter->dev,
-                                       "AP:Rx A-MSDU failed");
-               }
-
-               return 0;
-       } else if (rx_pkt_type == PKT_TYPE_MGMT) {
+       if (rx_pkt_type == PKT_TYPE_MGMT) {
                ret = mwifiex_process_mgmt_packet(priv, skb);
                if (ret)
                        dev_err(adapter->dev, "Rx of mgmt packet failed");
index 208748804a55ee56dd1d8d7cbb445e7600525db5..edbe4aff00d85b569534372ea34e7e017552b234 100644 (file)
@@ -459,6 +459,7 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
         * 'suspended' state and a 'disconnect' one.
         */
        adapter->is_suspended = true;
+       adapter->hs_enabling = false;
 
        if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
                usb_kill_urb(card->rx_cmd.urb);
@@ -766,11 +767,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
        case USB8897_PID_1:
        case USB8897_PID_2:
+               adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
                strcpy(adapter->fw_name, USB8897_DEFAULT_FW_NAME);
                break;
        case USB8797_PID_1:
        case USB8797_PID_2:
        default:
+               adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
                strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
                break;
        }
@@ -1024,7 +1027,6 @@ static void mwifiex_usb_cleanup_module(void)
 
        if (usb_card && usb_card->adapter) {
                struct mwifiex_adapter *adapter = usb_card->adapter;
-               int i;
 
                /* In case driver is removed when asynchronous FW downloading is
                 * in progress
@@ -1035,11 +1037,8 @@ static void mwifiex_usb_cleanup_module(void)
                if (adapter->is_suspended)
                        mwifiex_usb_resume(usb_card->intf);
 #endif
-               for (i = 0; i < adapter->priv_num; i++)
-                       if ((GET_BSS_ROLE(adapter->priv[i]) ==
-                            MWIFIEX_BSS_ROLE_STA) &&
-                           adapter->priv[i]->media_connected)
-                               mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+               mwifiex_deauthenticate_all(adapter);
 
                mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
                                                          MWIFIEX_BSS_ROLE_ANY),
index 9b82e225880cbc41d1e7b65060407ff2ce8c5cdc..c3824e37f3f24a30d951510a75cd4640346620d8 100644 (file)
@@ -72,7 +72,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
                return -1;
        }
 
-       return mwifiex_send_cmd_sync(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL);
+       return mwifiex_send_cmd(priv, cmd, HostCmd_ACT_GEN_SET, 0, NULL, true);
 }
 EXPORT_SYMBOL_GPL(mwifiex_init_shutdown_fw);
 
@@ -104,6 +104,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
                info->pm_wakeup_fw_try = adapter->pm_wakeup_fw_try;
                info->is_hs_configured = adapter->is_hs_configured;
                info->hs_activated = adapter->hs_activated;
+               info->is_cmd_timedout = adapter->is_cmd_timedout;
                info->num_cmd_host_to_card_failure
                                = adapter->dbg.num_cmd_host_to_card_failure;
                info->num_cmd_sleep_cfm_host_to_card_failure
@@ -119,7 +120,6 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
                info->num_cmd_assoc_failure =
                                        adapter->dbg.num_cmd_assoc_failure;
                info->num_tx_timeout = adapter->dbg.num_tx_timeout;
-               info->num_cmd_timeout = adapter->dbg.num_cmd_timeout;
                info->timeout_cmd_id = adapter->dbg.timeout_cmd_id;
                info->timeout_cmd_act = adapter->dbg.timeout_cmd_act;
                memcpy(info->last_cmd_id, adapter->dbg.last_cmd_id,
@@ -252,3 +252,117 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
 
        return 0;
 }
+
+/* This function will return a pointer to the station entry in the station
+ * list that matches the specified MAC address.
+ * This function should be called after acquiring the RA list spinlock.
+ * NULL is returned if the station entry is not found in the associated STA
+ * list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+
+       if (!mac)
+               return NULL;
+
+       list_for_each_entry(node, &priv->sta_list, list) {
+               if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+                       return node;
+       }
+
+       return NULL;
+}
+
+/* This function will add a sta_node entry to the associated station list
+ * with the given MAC address.
+ * If the entry already exists, the existing entry is returned.
+ * If the received MAC address is NULL, NULL is returned.
+ */
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       if (!mac)
+               return NULL;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node)
+               goto done;
+
+       node = kzalloc(sizeof(*node), GFP_ATOMIC);
+       if (!node)
+               goto done;
+
+       memcpy(node->mac_addr, mac, ETH_ALEN);
+       list_add_tail(&node->list, &priv->sta_list);
+
+done:
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return node;
+}
+
+/* This function will search for the HT IE in the association request IEs
+ * and set the station's HT parameters accordingly.
+ */
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+                      int ies_len, struct mwifiex_sta_node *node)
+{
+       const struct ieee80211_ht_cap *ht_cap;
+
+       if (!ies)
+               return;
+
+       ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+       if (ht_cap) {
+               node->is_11n_enabled = 1;
+               node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+                                 IEEE80211_HT_CAP_MAX_AMSDU ?
+                                 MWIFIEX_TX_DATA_BUF_SIZE_8K :
+                                 MWIFIEX_TX_DATA_BUF_SIZE_4K;
+       } else {
+               node->is_11n_enabled = 0;
+       }
+
+       return;
+}
+
+/* This function will delete a station entry from the station list */
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+       struct mwifiex_sta_node *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       node = mwifiex_get_sta_entry(priv, mac);
+       if (node) {
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
+
+/* This function will delete all stations from the associated station list. */
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *node, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+       list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+               list_del(&node->list);
+               kfree(node);
+       }
+
+       INIT_LIST_HEAD(&priv->sta_list);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+       return;
+}
index cb2d0582bd363a42a55c23281eab163fdb05856e..ddae570213977c3585268c88c57cf26061acc48e 100644 (file)
@@ -30,8 +30,24 @@ static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
        return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
 }
 
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa)
+struct mwifiex_dma_mapping {
+       dma_addr_t addr;
+       size_t len;
+};
+
+static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
+                                       struct mwifiex_dma_mapping *mapping)
 {
-       memcpy(buf_pa, skb->cb, sizeof(dma_addr_t));
+       memcpy(mapping, skb->cb, sizeof(*mapping));
 }
+
+static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
+{
+       struct mwifiex_dma_mapping mapping;
+
+       MWIFIEX_SKB_PACB(skb, &mapping);
+
+       return mapping.addr;
+}
+
 #endif /* !_MWIFIEX_UTIL_H_ */
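
The helpers above stash a small fixed-size mapping record at the start of the per-packet control block and read it back by value. The same pattern in isolation (not driver code), with illustrative types in place of sk_buff and dma_addr_t:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct dma_mapping {
        uint64_t addr;
        size_t len;
};

struct packet {
        unsigned char cb[48];   /* per-packet control block, like skb->cb */
};

static void set_mapping(struct packet *p, const struct dma_mapping *m)
{
        memcpy(p->cb, m, sizeof(*m));
}

static uint64_t get_dma_addr(const struct packet *p)
{
        struct dma_mapping m;

        memcpy(&m, p->cb, sizeof(m));
        return m.addr;
}

int main(void)
{
        struct packet pkt;
        struct dma_mapping map = { .addr = 0x1000, .len = 256 };

        set_mapping(&pkt, &map);
        printf("0x%llx\n", (unsigned long long)get_dma_addr(&pkt));
        return 0;
}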
index 981cf6e7c73be5b65f4a90bb05ef0e9023c592f4..0a7cc742aed71e0fd31267305be7a26ec71b8b52 100644 (file)
@@ -37,8 +37,8 @@
 /* Offset for TOS field in the IP header */
 #define IPTOS_OFFSET 5
 
-static bool enable_tx_amsdu;
-module_param(enable_tx_amsdu, bool, 0644);
+static bool disable_tx_amsdu;
+module_param(disable_tx_amsdu, bool, 0644);
 
 /* WMM information IE */
 static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
@@ -64,21 +64,6 @@ static u8 tos_to_tid[] = {
        0x07                    /* 1 1 1 AC_VO */
 };
 
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static u8 tos_to_tid_inv[] = {
-       0x02,  /* from tos_to_tid[2] = 0 */
-       0x00,  /* from tos_to_tid[0] = 1 */
-       0x01,  /* from tos_to_tid[1] = 2 */
-       0x03,
-       0x04,
-       0x05,
-       0x06,
-       0x07};
-
 static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
 
 /*
@@ -175,8 +160,15 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
                        break;
 
                ra_list->is_11n_enabled = 0;
+               ra_list->tdls_link = false;
                if (!mwifiex_queuing_ra_based(priv)) {
-                       ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+                       if (mwifiex_get_tdls_link_status(priv, ra) ==
+                           TDLS_SETUP_COMPLETE) {
+                               ra_list->is_11n_enabled =
+                                       mwifiex_tdls_peer_11n_enabled(priv, ra);
+                       } else {
+                               ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+                       }
                } else {
                        ra_list->is_11n_enabled =
                                      mwifiex_is_sta_11n_enabled(priv, node);
@@ -213,8 +205,9 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
  * This function map ACs to TIDs.
  */
 static void
-mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
+mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
 {
+       struct mwifiex_wmm_desc *wmm = &priv->wmm;
        u8 *queue_priority = wmm->queue_priority;
        int i;
 
@@ -224,7 +217,7 @@ mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
        }
 
        for (i = 0; i < MAX_NUM_TID; ++i)
-               tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
+               priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
 
        atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
 }
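
The per-interface tos_to_tid_inv[] filled in above is simply the inverse permutation of tos_to_tid. A standalone sketch (not driver code), using the first entries documented by the removed static table (the remaining entries are identity):

#include <stdio.h>
#include <stdint.h>

#define MAX_NUM_TID 8

int main(void)
{
        const uint8_t tos_to_tid[MAX_NUM_TID] = { 1, 2, 0, 3, 4, 5, 6, 7 };
        uint8_t tos_to_tid_inv[MAX_NUM_TID];
        int i;

        /* invert: if tos_to_tid[i] == t, then tos_to_tid_inv[t] == i */
        for (i = 0; i < MAX_NUM_TID; i++)
                tos_to_tid_inv[tos_to_tid[i]] = (uint8_t)i;

        for (i = 0; i < MAX_NUM_TID; i++)
                printf("tid %d -> comparable priority %u\n", i, tos_to_tid_inv[i]);
        return 0;
}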
@@ -285,7 +278,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
                }
        }
 
-       mwifiex_wmm_queue_priorities_tid(&priv->wmm);
+       mwifiex_wmm_queue_priorities_tid(priv);
 }
 
 /*
@@ -388,8 +381,7 @@ mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
  * AP is disabled (due to call admission control (ACM bit). Mapping
  * of TID to AC is taken care of internally.
  */
-static u8
-mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
 {
        enum mwifiex_wmm_ac_e ac, ac_down;
        u8 new_tid;
@@ -421,9 +413,17 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                        continue;
 
                for (i = 0; i < MAX_NUM_TID; ++i) {
-                       priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
-                       priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
-                       priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
+                       if (!disable_tx_amsdu &&
+                           adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
+                               priv->aggr_prio_tbl[i].amsdu =
+                                                       priv->tos_to_tid_inv[i];
+                       else
+                               priv->aggr_prio_tbl[i].amsdu =
+                                                       BA_STREAM_NOT_ALLOWED;
+                       priv->aggr_prio_tbl[i].ampdu_ap =
+                                                       priv->tos_to_tid_inv[i];
+                       priv->aggr_prio_tbl[i].ampdu_user =
+                                                       priv->tos_to_tid_inv[i];
                }
 
                priv->aggr_prio_tbl[6].amsdu
@@ -546,6 +546,7 @@ void
 mwifiex_clean_txrx(struct mwifiex_private *priv)
 {
        unsigned long flags;
+       struct sk_buff *skb, *tmp;
 
        mwifiex_11n_cleanup_reorder_tbl(priv);
        spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
@@ -563,6 +564,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
            !priv->adapter->surprise_removed)
                priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+
+       skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
+               mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 }
 
 /*
@@ -591,7 +595,7 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
  * If no such node is found, a new node is added first and then
  * retrieved.
  */
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
 {
        struct mwifiex_ra_list_tbl *ra_list;
@@ -641,6 +645,21 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        struct mwifiex_ra_list_tbl *ra_list;
        u8 ra[ETH_ALEN], tid_down;
        unsigned long flags;
+       struct list_head *list_head;
+       int tdls_status = TDLS_NOT_SETUP;
+       struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+       struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+
+       memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
+
+       if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+           ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
+               if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
+                       dev_dbg(adapter->dev,
+                               "TDLS setup packet for %pM. Don't block\n", ra);
+               else
+                       tdls_status = mwifiex_get_tdls_link_status(priv, ra);
+       }
 
        if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
                dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
@@ -659,12 +678,27 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
           have only 1 raptr for a tid in case of infra */
        if (!mwifiex_queuing_ra_based(priv) &&
            !mwifiex_is_skb_mgmt_frame(skb)) {
-               if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
-                       ra_list = list_first_entry(
-                               &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
-                               struct mwifiex_ra_list_tbl, list);
-               else
-                       ra_list = NULL;
+               switch (tdls_status) {
+               case TDLS_SETUP_COMPLETE:
+                       ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
+                                                             ra);
+                       tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+                       break;
+               case TDLS_SETUP_INPROGRESS:
+                       skb_queue_tail(&priv->tdls_txq, skb);
+                       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+                                              flags);
+                       return;
+               default:
+                       list_head = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+                       if (!list_empty(list_head))
+                               ra_list = list_first_entry(
+                                       list_head, struct mwifiex_ra_list_tbl,
+                                       list);
+                       else
+                               ra_list = NULL;
+                       break;
+               }
        } else {
                memcpy(ra, skb->data, ETH_ALEN);
                if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
@@ -684,9 +718,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        ra_list->total_pkt_count++;
 
        if (atomic_read(&priv->wmm.highest_queued_prio) <
-                                               tos_to_tid_inv[tid_down])
+                                               priv->tos_to_tid_inv[tid_down])
                atomic_set(&priv->wmm.highest_queued_prio,
-                          tos_to_tid_inv[tid_down]);
+                          priv->tos_to_tid_inv[tid_down]);
 
        atomic_inc(&priv->wmm.tx_pkts_queued);
 
@@ -1219,15 +1253,24 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        if (!ptr->is_11n_enabled ||
            mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
-           priv->wps.session_enable ||
-           ((priv->sec_info.wpa_enabled ||
-             priv->sec_info.wpa2_enabled) &&
-            !priv->wpa_is_gtk_set)) {
-               mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
-               /* ra_list_spinlock has been freed in
-                  mwifiex_send_single_packet() */
+           priv->wps.session_enable) {
+               if (ptr->is_11n_enabled &&
+                   mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
+                   mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
+                   mwifiex_is_amsdu_allowed(priv, tid) &&
+                   mwifiex_is_11n_aggragation_possible(priv, ptr,
+                                                       adapter->tx_buf_size))
+                       mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+                       /* ra_list_spinlock has been freed in
+                        * mwifiex_11n_aggregate_pkt()
+                        */
+               else
+                       mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+                       /* ra_list_spinlock has been freed in
+                        * mwifiex_send_single_packet()
+                        */
        } else {
-               if (mwifiex_is_ampdu_allowed(priv, tid) &&
+               if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
                    ptr->ba_pkt_count > ptr->ba_packet_thr) {
                        if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
                                mwifiex_create_ba_tbl(priv, ptr->ra, tid,
@@ -1240,7 +1283,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
                                mwifiex_send_delba(priv, tid_del, ra, 1);
                        }
                }
-               if (enable_tx_amsdu && mwifiex_is_amsdu_allowed(priv, tid) &&
+               if (mwifiex_is_amsdu_allowed(priv, tid) &&
                    mwifiex_is_11n_aggragation_possible(priv, ptr,
                                                        adapter->tx_buf_size))
                        mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
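
A compact model of the TX path decision rewritten above may help when reading the nested condition: A-MSDU aggregation is attempted only inside an established block-ack (A-MPDU) session and only when all the per-TID checks pass, otherwise the packet goes out singly; the final else branch is where BA setup and plain A-MSDU aggregation are attempted. The names below are shortened stand-ins, not the mwifiex API:

#include <stdbool.h>
#include <stdio.h>

/* Model of the control flow in the hunk above, not the driver API. */
struct txctx {
        bool is_11n_enabled;
        bool ba_stream_setup;           /* block-ack (A-MPDU) session exists */
        bool amsdu_in_ampdu_allowed;
        bool amsdu_allowed;
        bool aggregation_possible;      /* enough queued data / buffer space */
        bool wps_session;
};

static const char *tx_path(const struct txctx *c)
{
        if (!c->is_11n_enabled || c->ba_stream_setup || c->wps_session) {
                if (c->is_11n_enabled && c->ba_stream_setup &&
                    c->amsdu_in_ampdu_allowed && c->amsdu_allowed &&
                    c->aggregation_possible)
                        return "aggregate A-MSDUs inside the A-MPDU session";
                return "send as a single packet";
        }
        return "no BA session yet: try BA setup / plain A-MSDU aggregation";
}

int main(void)
{
        struct txctx c = { true, true, true, true, true, false };

        printf("%s\n", tx_path(&c));
        return 0;
}
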
index 0f129d498fb1057d898db2c6e92868d8ff1f3baf..83e42083ebff8cbc3b248f333111691e8a21fc1d 100644 (file)
@@ -33,6 +33,21 @@ enum ieee_types_wmm_ecw_bitmasks {
 
 static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
 
+/*
+ * This table inverts the tos_to_tid operation to get a priority
+ * which is in sequential order and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+static const u8 tos_to_tid_inv[] = {
+       0x02,  /* from tos_to_tid[2] = 0 */
+       0x00,  /* from tos_to_tid[0] = 1 */
+       0x01,  /* from tos_to_tid[1] = 2 */
+       0x03,
+       0x04,
+       0x05,
+       0x06,
+       0x07};
+
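
In other words, the table is built so that tos_to_tid_inv[tos_to_tid[i]] == i. A small self-contained check, with the forward tos_to_tid map reconstructed from the comments above (the real array lives in wmm.c):

#include <assert.h>
#include <stdio.h>

/*
 * Forward map reconstructed from the comments above: tos_to_tid[0] = 1,
 * tos_to_tid[1] = 2, tos_to_tid[2] = 0, remaining entries map to themselves.
 */
static const unsigned char tos_to_tid[8]     = { 1, 2, 0, 3, 4, 5, 6, 7 };
static const unsigned char tos_to_tid_inv[8] = { 2, 0, 1, 3, 4, 5, 6, 7 };

int main(void)
{
        int i;

        for (i = 0; i < 8; i++) {
                /* The inverse undoes the forward map, yielding an index that
                 * grows with priority and can be compared directly. */
                assert(tos_to_tid_inv[tos_to_tid[i]] == i);
        }
        puts("tos_to_tid_inv is consistent with tos_to_tid");
        return 0;
}
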
 /*
  * This function retrieves the TID of the given RA list.
  */
@@ -107,5 +122,8 @@ void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
 void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
 int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
                               const struct host_cmd_ds_command *resp);
+struct mwifiex_ra_list_tbl *
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
 
 #endif /* !_MWIFIEX_WMM_H_ */
index 4987c3f942ce7dff2bb599feab7bb1aa26d40f50..3c0a0a86ba12038056dee4bbc37d12cb50ae9868 100644 (file)
@@ -81,6 +81,9 @@ MODULE_PARM_DESC(ap_mode_default,
  */
 
 #define        MWL8K_HW_TIMER_REGISTER                 0x0000a600
+#define BBU_RXRDY_CNT_REG                      0x0000a860
+#define NOK_CCA_CNT_REG                                0x0000a6a0
+#define BBU_AVG_NOISE_VAL                      0x67
 
 #define MWL8K_A2H_EVENTS       (MWL8K_A2H_INT_DUMMY | \
                                 MWL8K_A2H_INT_CHNL_SWITCHED | \
@@ -112,6 +115,8 @@ MODULE_PARM_DESC(ap_mode_default,
  */
 #define MWL8K_NUM_AMPDU_STREAMS        (TOTAL_HW_TX_QUEUES - 1)
 
+#define MWL8K_NUM_CHANS 18
+
 struct rxd_ops {
        int rxd_size;
        void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
@@ -289,6 +294,12 @@ struct mwl8k_priv {
 
        /* bitmap of running BSSes */
        u32 running_bsses;
+
+       /* ACS related */
+       bool sw_scan_start;
+       struct ieee80211_channel *acs_chan;
+       unsigned long channel_time;
+       struct survey_info survey[MWL8K_NUM_CHANS];
 };
 
 #define MAX_WEP_KEY_LEN         13
@@ -396,6 +407,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
 #define MWL8K_CMD_SET_HW_SPEC          0x0004
 #define MWL8K_CMD_MAC_MULTICAST_ADR    0x0010
 #define MWL8K_CMD_GET_STAT             0x0014
+#define MWL8K_CMD_BBP_REG_ACCESS       0x001a
 #define MWL8K_CMD_RADIO_CONTROL                0x001c
 #define MWL8K_CMD_RF_TX_POWER          0x001e
 #define MWL8K_CMD_TX_POWER             0x001f
@@ -2986,6 +2998,47 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
        return rc;
 }
 
+/*
+ * CMD_BBP_REG_ACCESS.
+ */
+struct mwl8k_cmd_bbp_reg_access {
+       struct mwl8k_cmd_pkt header;
+       __le16 action;
+       __le16 offset;
+       u8 value;
+       u8 rsrv[3];
+} __packed;
+
+static int
+mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw,
+                        u16 action,
+                        u16 offset,
+                        u8 *value)
+{
+       struct mwl8k_cmd_bbp_reg_access *cmd;
+       int rc;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (cmd == NULL)
+               return -ENOMEM;
+
+       cmd->header.code = cpu_to_le16(MWL8K_CMD_BBP_REG_ACCESS);
+       cmd->header.length = cpu_to_le16(sizeof(*cmd));
+       cmd->action = cpu_to_le16(action);
+       cmd->offset = cpu_to_le16(offset);
+
+       rc = mwl8k_post_cmd(hw, &cmd->header);
+
+       if (!rc)
+               *value = cmd->value;
+       else
+               *value = 0;
+
+       kfree(cmd);
+
+       return rc;
+}
+
 /*
  * CMD_SET_POST_SCAN.
  */
@@ -3016,6 +3069,64 @@ mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
        return rc;
 }
 
+static int freq_to_idx(struct mwl8k_priv *priv, int freq)
+{
+       struct ieee80211_supported_band *sband;
+       int band, ch, idx = 0;
+
+       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+               sband = priv->hw->wiphy->bands[band];
+               if (!sband)
+                       continue;
+
+               for (ch = 0; ch < sband->n_channels; ch++, idx++)
+                       if (sband->channels[ch].center_freq == freq)
+                               goto exit;
+       }
+
+exit:
+       return idx;
+}
+
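
The index returned here counts channels across all registered bands in order, and callers guard against the not-found case by comparing against MWL8K_NUM_CHANS. A toy standalone version of the same walk, with stand-in channel lists rather than the driver's wiphy bands:

#include <stdio.h>

/* Stand-in channel lists; the real code walks priv->hw->wiphy->bands[]. */
static const int chan_2ghz[] = { 2412, 2417, 2422 };
static const int chan_5ghz[] = { 5180, 5200 };

static int model_freq_to_idx(int freq)
{
        int idx = 0;
        unsigned int i;

        for (i = 0; i < sizeof(chan_2ghz) / sizeof(*chan_2ghz); i++, idx++)
                if (chan_2ghz[i] == freq)
                        return idx;
        for (i = 0; i < sizeof(chan_5ghz) / sizeof(*chan_5ghz); i++, idx++)
                if (chan_5ghz[i] == freq)
                        return idx;
        return idx;     /* not found: one past the last channel, as above */
}

int main(void)
{
        printf("2412 MHz -> idx %d, 5180 MHz -> idx %d, 9999 MHz -> idx %d\n",
               model_freq_to_idx(2412), model_freq_to_idx(5180),
               model_freq_to_idx(9999));
        return 0;
}
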
+static void mwl8k_update_survey(struct mwl8k_priv *priv,
+                               struct ieee80211_channel *channel)
+{
+       u32 cca_cnt, rx_rdy;
+       s8 nf = 0, idx;
+       struct survey_info *survey;
+
+       idx = freq_to_idx(priv, priv->acs_chan->center_freq);
+       if (idx >= MWL8K_NUM_CHANS) {
+               wiphy_err(priv->hw->wiphy, "Failed to update survey\n");
+               return;
+       }
+
+       survey = &priv->survey[idx];
+
+       cca_cnt = ioread32(priv->regs + NOK_CCA_CNT_REG);
+       cca_cnt /= 1000; /* uSecs to mSecs */
+       survey->channel_time_busy = (u64) cca_cnt;
+
+       rx_rdy = ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+       rx_rdy /= 1000; /* uSecs to mSecs */
+       survey->channel_time_rx = (u64) rx_rdy;
+
+       priv->channel_time = jiffies - priv->channel_time;
+       survey->channel_time = jiffies_to_msecs(priv->channel_time);
+
+       survey->channel = channel;
+
+       mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &nf);
+
+       /* Make sure the sign is negative, else ACS at hostapd fails */
+       survey->noise = nf * -1;
+
+       survey->filled = SURVEY_INFO_NOISE_DBM |
+                        SURVEY_INFO_CHANNEL_TIME |
+                        SURVEY_INFO_CHANNEL_TIME_BUSY |
+                        SURVEY_INFO_CHANNEL_TIME_RX;
+}
+
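
These counters are what a survey consumer (for example hostapd's ACS) turns into a per-channel load estimate. As a rough, hedged illustration only (the real ACS computation is more involved), a busy fraction can be derived from the fields filled in above like this:

#include <stdint.h>
#include <stdio.h>

/*
 * Hedged sketch: derive a 0..100 busy percentage from the fields that
 * mwl8k_update_survey() fills in (names mirror struct survey_info).
 */
static unsigned int channel_busy_percent(uint64_t channel_time,
                                         uint64_t channel_time_busy)
{
        if (!channel_time)
                return 0;
        return (unsigned int)((channel_time_busy * 100) / channel_time);
}

int main(void)
{
        /* e.g. 120 ms of a 500 ms dwell flagged busy by the CCA counter */
        printf("busy: %u%%\n", channel_busy_percent(500, 120));
        return 0;
}
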
 /*
  * CMD_SET_RF_CHANNEL.
  */
@@ -3033,6 +3144,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
        enum nl80211_channel_type channel_type =
                cfg80211_get_chandef_type(&conf->chandef);
        struct mwl8k_cmd_set_rf_channel *cmd;
+       struct mwl8k_priv *priv = hw->priv;
        int rc;
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -3049,13 +3161,29 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
        else if (channel->band == IEEE80211_BAND_5GHZ)
                cmd->channel_flags |= cpu_to_le32(0x00000004);
 
-       if (channel_type == NL80211_CHAN_NO_HT ||
-           channel_type == NL80211_CHAN_HT20)
+       if (!priv->sw_scan_start) {
+               if (channel_type == NL80211_CHAN_NO_HT ||
+                   channel_type == NL80211_CHAN_HT20)
+                       cmd->channel_flags |= cpu_to_le32(0x00000080);
+               else if (channel_type == NL80211_CHAN_HT40MINUS)
+                       cmd->channel_flags |= cpu_to_le32(0x000001900);
+               else if (channel_type == NL80211_CHAN_HT40PLUS)
+                       cmd->channel_flags |= cpu_to_le32(0x000000900);
+       } else {
                cmd->channel_flags |= cpu_to_le32(0x00000080);
-       else if (channel_type == NL80211_CHAN_HT40MINUS)
-               cmd->channel_flags |= cpu_to_le32(0x000001900);
-       else if (channel_type == NL80211_CHAN_HT40PLUS)
-               cmd->channel_flags |= cpu_to_le32(0x000000900);
+       }
+
+       if (priv->sw_scan_start) {
+               /* Store the current channel's stats
+                * before switching to the new one.
+                * This is processed only for AP firmware.
+                */
+               if (priv->channel_time != 0)
+                       mwl8k_update_survey(priv, priv->acs_chan);
+
+               priv->channel_time = jiffies;
+               priv->acs_chan = channel;
+       }
 
        rc = mwl8k_post_cmd(hw, &cmd->header);
        kfree(cmd);
@@ -5263,6 +5391,27 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
 {
        struct mwl8k_priv *priv = hw->priv;
        struct ieee80211_conf *conf = &hw->conf;
+       struct ieee80211_supported_band *sband;
+
+       if (priv->ap_fw) {
+               sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+
+               if (sband && idx >= sband->n_channels) {
+                       idx -= sband->n_channels;
+                       sband = NULL;
+               }
+
+               if (!sband)
+                       sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+               if (!sband || idx >= sband->n_channels)
+                       return -ENOENT;
+
+               memcpy(survey, &priv->survey[idx], sizeof(*survey));
+               survey->channel = &sband->channels[idx];
+
+               return 0;
+       }
 
        if (idx != 0)
                return -ENOENT;
@@ -5406,6 +5555,40 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        return rc;
 }
 
+static void mwl8k_sw_scan_start(struct ieee80211_hw *hw)
+{
+       struct mwl8k_priv *priv = hw->priv;
+       u8 tmp;
+
+       if (!priv->ap_fw)
+               return;
+
+       /* clear all stats */
+       priv->channel_time = 0;
+       ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+       ioread32(priv->regs + NOK_CCA_CNT_REG);
+       mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
+
+       priv->sw_scan_start = true;
+}
+
+static void mwl8k_sw_scan_complete(struct ieee80211_hw *hw)
+{
+       struct mwl8k_priv *priv = hw->priv;
+       u8 tmp;
+
+       if (!priv->ap_fw)
+               return;
+
+       priv->sw_scan_start = false;
+
+       /* clear all stats */
+       priv->channel_time = 0;
+       ioread32(priv->regs + BBU_RXRDY_CNT_REG);
+       ioread32(priv->regs + NOK_CCA_CNT_REG);
+       mwl8k_cmd_bbp_reg_access(priv->hw, 0, BBU_AVG_NOISE_VAL, &tmp);
+}
+
 static const struct ieee80211_ops mwl8k_ops = {
        .tx                     = mwl8k_tx,
        .start                  = mwl8k_start,
@@ -5424,6 +5607,8 @@ static const struct ieee80211_ops mwl8k_ops = {
        .get_stats              = mwl8k_get_stats,
        .get_survey             = mwl8k_get_survey,
        .ampdu_action           = mwl8k_ampdu_action,
+       .sw_scan_start          = mwl8k_sw_scan_start,
+       .sw_scan_complete       = mwl8k_sw_scan_complete,
 };
 
 static void mwl8k_finalize_join_worker(struct work_struct *work)
index d01edd2c50c505866cb554826659e24b4489c68f..a9e94b6db5b7e961d25ec2c58a2eb50d46bc621d 100644 (file)
@@ -59,7 +59,8 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
        for (i = 0; i < NUM_CHANNELS; i++) {
                if (priv->channel_mask & (1 << i)) {
                        priv->channels[i].center_freq =
-                               ieee80211_dsss_chan_to_freq(i + 1);
+                               ieee80211_channel_to_frequency(i + 1,
+                                                          IEEE80211_BAND_2GHZ);
                        channels++;
                }
        }
@@ -177,7 +178,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
        if (chandef->chan->band != IEEE80211_BAND_2GHZ)
                return -EINVAL;
 
-       channel = ieee80211_freq_to_dsss_chan(chandef->chan->center_freq);
+       channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
 
        if ((channel < 1) || (channel > NUM_CHANNELS) ||
             !(priv->channel_mask & (1 << (channel - 1))))
index c09c8437c0b821072633c1022b348722d3913170..49300d04efdf0e7221a2b0f64f0475ad6f73ee59 100644 (file)
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
                goto out;
 
        }
-       freq = ieee80211_dsss_chan_to_freq(channel);
+       freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
 
  out:
        orinoco_unlock(priv, &flags);
index e8c5714bfd11c24642933b824d299c165a768b27..e175b9b8561b594299d69d237624e3604932d22f 100644 (file)
@@ -110,7 +110,8 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
                break;
        }
 
-       freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel));
+       freq = ieee80211_channel_to_frequency(
+               le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
        channel = ieee80211_get_channel(wiphy, freq);
        if (!channel) {
                printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -146,7 +147,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
        ie_len = len - sizeof(*bss);
        ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
        chan = ie ? ie[2] : 0;
-       freq = ieee80211_dsss_chan_to_freq(chan);
+       freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
        channel = ieee80211_get_channel(wiphy, freq);
 
        timestamp = le64_to_cpu(bss->timestamp);
index 3b5508f982e80b8376d6bcb20c63dae6fd5586e0..b7a867b50b9476fb2c54ad9ccbb28815e6fa70e6 100644 (file)
@@ -444,7 +444,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
                for (i = 0; i < (6 - frq->e); i++)
                        denom *= 10;
 
-               chan = ieee80211_freq_to_dsss_chan(frq->m / denom);
+               chan = ieee80211_frequency_to_channel(frq->m / denom);
        }
 
        if ((chan < 1) || (chan > NUM_CHANNELS) ||
index 6e635cfa24c8dea70eb439f3724ee52adc01d27d..043bd1c23c19efb6997c33865e41059011052a46 100644 (file)
@@ -513,7 +513,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
        if (!buf)
                return -ENOMEM;
 
-       left = block_size = min((size_t)P54U_FW_BLOCK, priv->fw->size);
+       left = block_size = min_t(size_t, P54U_FW_BLOCK, priv->fw->size);
        strcpy(buf, p54u_firmware_upload_3887);
        left -= strlen(p54u_firmware_upload_3887);
        tmp += strlen(p54u_firmware_upload_3887);
@@ -1053,6 +1053,10 @@ static int p54u_probe(struct usb_interface *intf,
                priv->upload_fw = p54u_upload_firmware_net2280;
        }
        err = p54u_load_firmware(dev, intf);
+       if (err) {
+               usb_put_dev(udev);
+               p54_free_common(dev);
+       }
        return err;
 }
 
index 78fa64d3f2232a85a15a8b88f698765f80c9dfc8..ecbb0546cf3eed4b14f86c64a6d649d808c8eed3 100644 (file)
@@ -644,7 +644,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
        wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
        if (wpa_ie_len > 0) {
                iwe.cmd = IWEVGENIE;
-               iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
+               iwe.u.data.length = min_t(size_t, wpa_ie_len, MAX_WPA_IE_LEN);
                current_ev = iwe_stream_add_point(info, current_ev, end_buf,
                                                  &iwe, wpa_ie);
        }
index 5028557aa18adb1a22c74c7a59123f1bb52b0d5a..39d22a154341019fe8038bdfa1f7e1b4cb560cf2 100644 (file)
@@ -1290,7 +1290,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
        if (is_associated(usbdev))
                return 0;
 
-       dsconfig = ieee80211_dsss_chan_to_freq(channel) * 1000;
+       dsconfig = 1000 *
+               ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
 
        len = sizeof(config);
        ret = rndis_query_oid(usbdev,
@@ -2835,7 +2836,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
                                        bssid, req_ie, req_ie_len,
                                        resp_ie, resp_ie_len, GFP_KERNEL);
        } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
-               cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
+               cfg80211_ibss_joined(usbdev->net, bssid,
+                                    get_current_channel(usbdev, NULL),
+                                    GFP_KERNEL);
 
        kfree(info);
 
diff --git a/drivers/net/wireless/rsi/Kconfig b/drivers/net/wireless/rsi/Kconfig
new file mode 100644 (file)
index 0000000..35245f9
--- /dev/null
@@ -0,0 +1,30 @@
+config RSI_91X
+       tristate "Redpine Signals Inc 91x WLAN driver support"
+       depends on MAC80211
+       ---help---
+         This option enables support for RSI 1x1 devices.
+         Select M (recommended), if you have a RSI 1x1 wireless module.
+
+config RSI_DEBUGFS
+       bool "Redpine Signals Inc debug support"
+       depends on RSI_91X
+       default y
+       ---help---
+        Say Y, if you would like to enable debug support. This option
+        creates debugfs entries.
+
+config RSI_SDIO
+       tristate "Redpine Signals SDIO bus support"
+       depends on MMC && RSI_91X
+       default m
+       ---help---
+         This option enables the SDIO bus support in rsi drivers.
+         Select M (recommended), if you have a RSI 1x1 wireless module.
+
+config RSI_USB
+       tristate "Redpine Signals USB bus support"
+       depends on USB && RSI_91X
+       default m
+       ---help---
+         This option enables the USB bus support in rsi drivers.
+         Select M (recommended), if you have a RSI 1x1 wireless module.
diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile
new file mode 100644 (file)
index 0000000..25828b6
--- /dev/null
@@ -0,0 +1,12 @@
+rsi_91x-y                      += rsi_91x_main.o
+rsi_91x-y                      += rsi_91x_core.o
+rsi_91x-y                      += rsi_91x_mac80211.o
+rsi_91x-y                      += rsi_91x_mgmt.o
+rsi_91x-y                      += rsi_91x_pkt.o
+rsi_91x-$(CONFIG_RSI_DEBUGFS)  += rsi_91x_debugfs.o
+
+rsi_usb-y                      += rsi_91x_usb.o rsi_91x_usb_ops.o
+rsi_sdio-y                     += rsi_91x_sdio.o rsi_91x_sdio_ops.o
+obj-$(CONFIG_RSI_91X)          += rsi_91x.o
+obj-$(CONFIG_RSI_SDIO)         += rsi_sdio.o
+obj-$(CONFIG_RSI_USB)          += rsi_usb.o
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
new file mode 100644 (file)
index 0000000..e89535e
--- /dev/null
@@ -0,0 +1,342 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_determine_min_weight_queue() - This function determines the queue with
+ *                                   the min weight.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: q_num: Corresponding queue number.
+ */
+static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
+{
+       struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
+       u32 q_len = 0;
+       u8 ii = 0;
+
+       for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
+               q_len = skb_queue_len(&common->tx_queue[ii]);
+               if ((tx_qinfo[ii].pkt_contended) && q_len) {
+                       common->min_weight = tx_qinfo[ii].weight;
+                       break;
+               }
+       }
+       return ii;
+}
+
+/**
+ * rsi_recalculate_weights() - This function recalculates the weights
+ *                            corresponding to each queue.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: recontend_queue bool variable
+ */
+static bool rsi_recalculate_weights(struct rsi_common *common)
+{
+       struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
+       bool recontend_queue = false;
+       u8 ii = 0;
+       u32 q_len = 0;
+
+       for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
+               q_len = skb_queue_len(&common->tx_queue[ii]);
+               /* Check for the need of contention */
+               if (q_len) {
+                       if (tx_qinfo[ii].pkt_contended) {
+                               tx_qinfo[ii].weight =
+                               ((tx_qinfo[ii].weight > common->min_weight) ?
+                                tx_qinfo[ii].weight - common->min_weight : 0);
+                       } else {
+                               tx_qinfo[ii].pkt_contended = 1;
+                               tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
+                               recontend_queue = true;
+                       }
+               } else { /* No packets so no contention */
+                       tx_qinfo[ii].weight = 0;
+                       tx_qinfo[ii].pkt_contended = 0;
+               }
+       }
+
+       return recontend_queue;
+}
+
+/**
+ *                                 which a packet has to be dequeued.
+ *                                 which packet has to be dequeued.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: q_num: Corresponding queue number on success.
+ */
+static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
+{
+       bool recontend_queue = false;
+       u32 q_len = 0;
+       u8 q_num = INVALID_QUEUE;
+       u8 ii, min = 0;
+
+       if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
+               if (!common->mgmt_q_block)
+                       q_num = MGMT_SOFT_Q;
+               return q_num;
+       }
+
+       if (common->pkt_cnt != 0) {
+               --common->pkt_cnt;
+               return common->selected_qnum;
+       }
+
+get_queue_num:
+       q_num = 0;
+       recontend_queue = false;
+
+       q_num = rsi_determine_min_weight_queue(common);
+       ii = q_num;
+       q_len = skb_queue_len(&common->tx_queue[ii]);
+
+       /* Selecting the queue with least back off */
+       for (; ii < NUM_EDCA_QUEUES; ii++) {
+               if (((common->tx_qinfo[ii].pkt_contended) &&
+                    (common->tx_qinfo[ii].weight < min)) && q_len) {
+                       min = common->tx_qinfo[ii].weight;
+                       q_num = ii;
+               }
+       }
+
+       common->tx_qinfo[q_num].pkt_contended = 0;
+       /* Adjust the back off values for all queues again */
+       recontend_queue = rsi_recalculate_weights(common);
+
+       q_len = skb_queue_len(&common->tx_queue[q_num]);
+       if (!q_len) {
+               /* If any queues are freshly contended and the selected queue
+                * doesn't have any packets
+                * then get the queue number again with fresh values
+                */
+               if (recontend_queue)
+                       goto get_queue_num;
+
+               q_num = INVALID_QUEUE;
+               return q_num;
+       }
+
+       common->selected_qnum = q_num;
+       q_len = skb_queue_len(&common->tx_queue[q_num]);
+
+       switch (common->selected_qnum) {
+       case VO_Q:
+               if (q_len > MAX_CONTINUOUS_VO_PKTS)
+                       common->pkt_cnt = (MAX_CONTINUOUS_VO_PKTS - 1);
+               else
+                       common->pkt_cnt = --q_len;
+               break;
+
+       case VI_Q:
+               if (q_len > MAX_CONTINUOUS_VI_PKTS)
+                       common->pkt_cnt = (MAX_CONTINUOUS_VI_PKTS - 1);
+               else
+                       common->pkt_cnt = --q_len;
+
+               break;
+
+       default:
+               common->pkt_cnt = 0;
+               break;
+       }
+
+       return q_num;
+}
+
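
Taken together, rsi_determine_min_weight_queue(), rsi_recalculate_weights() and the selection loop above implement a small software contention scheme: each backlogged queue carries a weight seeded from its WMM parameters, the queue with the smallest weight is served, and the remaining contenders have that minimum subtracted so higher-priority queues win more often without starving the rest. A self-contained model of one such round (queue names, weights and the exact re-arm policy are illustrative, not the driver's):

#include <stdio.h>

#define NQ 4

struct swq {
        const char *name;
        unsigned int wme_params;        /* weight seed from WMM parameters */
        unsigned int weight;            /* residual contention weight */
        unsigned int backlog;           /* queued packets */
        int contended;
};

/*
 * One contention round: backlogged queues that were not contending are
 * (re)armed with their seed weight, the smallest weight wins, and that
 * minimum is subtracted from the losers so they catch up eventually.
 */
static int pick_queue(struct swq *q)
{
        unsigned int min = ~0u;
        int winner = -1;
        int i;

        for (i = 0; i < NQ; i++) {
                if (!q[i].backlog) {
                        q[i].weight = 0;
                        q[i].contended = 0;
                        continue;
                }
                if (!q[i].contended) {
                        q[i].contended = 1;
                        q[i].weight = q[i].wme_params;
                }
                if (q[i].weight < min) {
                        min = q[i].weight;
                        winner = i;
                }
        }
        if (winner < 0)
                return -1;
        for (i = 0; i < NQ; i++)
                if (i != winner && q[i].contended)
                        q[i].weight -= (q[i].weight > min) ? min : q[i].weight;
        q[winner].contended = 0;        /* winner re-contends next round */
        return winner;
}

int main(void)
{
        struct swq q[NQ] = {
                { "BK", 7, 0, 2, 0 }, { "BE", 3, 0, 5, 0 },
                { "VI", 2, 0, 1, 0 }, { "VO", 1, 0, 1, 0 },
        };
        int round, w;

        for (round = 0; round < 6; round++) {
                w = pick_queue(q);
                if (w < 0)
                        break;
                q[w].backlog--;
                printf("round %d: dequeue from %s\n", round, q[w].name);
        }
        return 0;
}
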
+/**
+ * rsi_core_queue_pkt() - This function enqueues the packet to the queue
+ *                       specified by the queue number.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+static void rsi_core_queue_pkt(struct rsi_common *common,
+                              struct sk_buff *skb)
+{
+       u8 q_num = skb->priority;
+       if (q_num >= NUM_SOFT_QUEUES) {
+               rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
+                       __func__, q_num);
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       skb_queue_tail(&common->tx_queue[q_num], skb);
+}
+
+/**
+ * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
+ *                         specified by the queue number.
+ * @common: Pointer to the driver private structure.
+ * @q_num: Queue number.
+ *
+ * Return: Pointer to sk_buff structure.
+ */
+static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
+                                           u8 q_num)
+{
+       if (q_num >= NUM_SOFT_QUEUES) {
+               rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
+                       __func__, q_num);
+               return NULL;
+       }
+
+       return skb_dequeue(&common->tx_queue[q_num]);
+}
+
+/**
+ * rsi_core_qos_processor() - This function is used to determine the wmm queue
+ *                           based on the backoff procedure. Data packets are
+ *                           dequeued from the selected hal queue and sent to
+ *                           the layers below.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+void rsi_core_qos_processor(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct sk_buff *skb;
+       unsigned long tstamp_1, tstamp_2;
+       u8 q_num;
+       int status;
+
+       tstamp_1 = jiffies;
+       while (1) {
+               q_num = rsi_core_determine_hal_queue(common);
+               rsi_dbg(DATA_TX_ZONE,
+                       "%s: Queue number = %d\n", __func__, q_num);
+
+               if (q_num == INVALID_QUEUE) {
+                       rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
+                       break;
+               }
+
+               mutex_lock(&common->tx_rxlock);
+
+               status = adapter->check_hw_queue_status(adapter, q_num);
+               if ((status <= 0)) {
+                       mutex_unlock(&common->tx_rxlock);
+                       break;
+               }
+
+               if ((q_num < MGMT_SOFT_Q) &&
+                   ((skb_queue_len(&common->tx_queue[q_num])) <=
+                     MIN_DATA_QUEUE_WATER_MARK)) {
+                       if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
+                               ieee80211_wake_queue(adapter->hw,
+                                                    WME_AC(q_num));
+               }
+
+               skb = rsi_core_dequeue_pkt(common, q_num);
+               if (skb == NULL) {
+                       mutex_unlock(&common->tx_rxlock);
+                       break;
+               }
+
+               if (q_num == MGMT_SOFT_Q)
+                       status = rsi_send_mgmt_pkt(common, skb);
+               else
+                       status = rsi_send_data_pkt(common, skb);
+
+               if (status) {
+                       mutex_unlock(&common->tx_rxlock);
+                       break;
+               }
+
+               common->tx_stats.total_tx_pkt_send[q_num]++;
+
+               tstamp_2 = jiffies;
+               mutex_unlock(&common->tx_rxlock);
+
+               if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
+                       schedule();
+       }
+}
+
+/**
+ * rsi_core_xmit() - This function transmits the packets received from mac80211
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_tx_info *info;
+       struct skb_info *tx_params;
+       struct ieee80211_hdr *tmp_hdr = NULL;
+       u8 q_num, tid = 0;
+
+       if ((!skb) || (!skb->len)) {
+               rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
+                       __func__);
+               goto xmit_fail;
+       }
+       info = IEEE80211_SKB_CB(skb);
+       tx_params = (struct skb_info *)info->driver_data;
+       tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+
+       if (common->fsm_state != FSM_MAC_INIT_DONE) {
+               rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
+               goto xmit_fail;
+       }
+
+       if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
+           (ieee80211_is_ctl(tmp_hdr->frame_control))) {
+               q_num = MGMT_SOFT_Q;
+               skb->priority = q_num;
+       } else {
+               if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+                       tid = (skb->data[24] & IEEE80211_QOS_TID);
+                       skb->priority = TID_TO_WME_AC(tid);
+               } else {
+                       tid = IEEE80211_NONQOS_TID;
+                       skb->priority = BE_Q;
+               }
+               q_num = skb->priority;
+               tx_params->tid = tid;
+               tx_params->sta_id = 0;
+       }
+
+       if ((q_num != MGMT_SOFT_Q) &&
+           ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
+            DATA_QUEUE_WATER_MARK)) {
+               if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
+                       ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
+               rsi_set_event(&common->tx_thread.event);
+               goto xmit_fail;
+       }
+
+       rsi_core_queue_pkt(common, skb);
+       rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
+       rsi_set_event(&common->tx_thread.event);
+
+       return;
+
+xmit_fail:
+       rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
+       /* Dropping pkt here */
+       ieee80211_free_txskb(common->priv->hw, skb);
+}
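
The priority assignment above relies on mac80211's TID_TO_WME_AC() for QoS data and falls back to the best-effort queue otherwise. For reference, the standard 802.11 user-priority to access-category grouping it builds on is reproduced below; the AC numbering follows mac80211's enum ieee80211_ac_numbers, while the driver's own queue numbering (BE_Q and friends) may differ:

#include <stdio.h>

/*
 * Standard IEEE 802.11 user-priority (TID 0-7) to access-category mapping.
 * AC numbering here matches mac80211 (VO=0, VI=1, BE=2, BK=3).
 */
enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

static const int tid_to_ac[8] = {
        AC_BE,  /* TID 0 */
        AC_BK,  /* TID 1 */
        AC_BK,  /* TID 2 */
        AC_BE,  /* TID 3 */
        AC_VI,  /* TID 4 */
        AC_VI,  /* TID 5 */
        AC_VO,  /* TID 6 */
        AC_VO,  /* TID 7 */
};

int main(void)
{
        int tid;

        for (tid = 0; tid < 8; tid++)
                printf("TID %d -> AC %d\n", tid, tid_to_ac[tid]);
        return 0;
}
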
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
new file mode 100644 (file)
index 0000000..7e4ef45
--- /dev/null
@@ -0,0 +1,339 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_debugfs.h"
+#include "rsi_sdio.h"
+
+/**
+ * rsi_sdio_stats_read() - This function returns the sdio status of the driver.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_stats_read(struct seq_file *seq, void *data)
+{
+       struct rsi_common *common = seq->private;
+       struct rsi_hw *adapter = common->priv;
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+       seq_printf(seq, "total_sdio_interrupts: %d\n",
+                  dev->rx_info.sdio_int_counter);
+       seq_printf(seq, "sdio_msdu_pending_intr_count: %d\n",
+                  dev->rx_info.total_sdio_msdu_pending_intr);
+       seq_printf(seq, "sdio_buff_full_count : %d\n",
+                  dev->rx_info.buf_full_counter);
+       seq_printf(seq, "sdio_buf_semi_full_count %d\n",
+                  dev->rx_info.buf_semi_full_counter);
+       seq_printf(seq, "sdio_unknown_intr_count: %d\n",
+                  dev->rx_info.total_sdio_unknown_intr);
+       /* RX Path Stats */
+       seq_printf(seq, "BUFFER FULL STATUS  : %d\n",
+                  dev->rx_info.buffer_full);
+       seq_printf(seq, "SEMI BUFFER FULL STATUS  : %d\n",
+                  dev->rx_info.semi_buffer_full);
+       seq_printf(seq, "MGMT BUFFER FULL STATUS  : %d\n",
+                  dev->rx_info.mgmt_buffer_full);
+       seq_printf(seq, "BUFFER FULL COUNTER  : %d\n",
+                  dev->rx_info.buf_full_counter);
+       seq_printf(seq, "BUFFER SEMI FULL COUNTER  : %d\n",
+                  dev->rx_info.buf_semi_full_counter);
+       seq_printf(seq, "MGMT BUFFER FULL COUNTER  : %d\n",
+                  dev->rx_info.mgmt_buf_full_counter);
+
+       return 0;
+}
+
+/**
+ * rsi_sdio_stats_open() - This function calls the single_open function of
+ *                         seq_file to open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_sdio_stats_open(struct inode *inode,
+                              struct file *file)
+{
+       return single_open(file, rsi_sdio_stats_read, inode->i_private);
+}
+
+/**
+ * rsi_version_read() - This function gives the driver and firmware version
+ *                      numbers.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_version_read(struct seq_file *seq, void *data)
+{
+       struct rsi_common *common = seq->private;
+
+       common->driver_ver.major = 0;
+       common->driver_ver.minor = 1;
+       common->driver_ver.release_num = 0;
+       common->driver_ver.patch_num = 0;
+       seq_printf(seq, "Driver : %x.%d.%d.%d\nLMAC   : %d.%d.%d.%d\n",
+                  common->driver_ver.major,
+                  common->driver_ver.minor,
+                  common->driver_ver.release_num,
+                  common->driver_ver.patch_num,
+                  common->fw_ver.major,
+                  common->fw_ver.minor,
+                  common->fw_ver.release_num,
+                  common->fw_ver.patch_num);
+       return 0;
+}
+
+/**
+ * rsi_version_open() - This function calls the single_open function of
+ *                      seq_file to open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_version_open(struct inode *inode,
+                                struct file *file)
+{
+       return single_open(file, rsi_version_read, inode->i_private);
+}
+
+/**
+ * rsi_stats_read() - This function returns the status of the driver.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_stats_read(struct seq_file *seq, void *data)
+{
+       struct rsi_common *common = seq->private;
+
+       unsigned char fsm_state[][32] = {
+               "FSM_CARD_NOT_READY",
+               "FSM_BOOT_PARAMS_SENT",
+               "FSM_EEPROM_READ_MAC_ADDR",
+               "FSM_RESET_MAC_SENT",
+               "FSM_RADIO_CAPS_SENT",
+               "FSM_BB_RF_PROG_SENT",
+               "FSM_MAC_INIT_DONE"
+       };
+       seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n");
+       seq_puts(seq, "DRIVER_FSM_STATE: ");
+
+       if (common->fsm_state <= FSM_MAC_INIT_DONE)
+               seq_printf(seq, "%s", fsm_state[common->fsm_state]);
+
+       seq_printf(seq, "(%d)\n\n", common->fsm_state);
+
+       /* Mgmt TX Path Stats */
+       seq_printf(seq, "total_mgmt_pkt_send : %d\n",
+                  common->tx_stats.total_tx_pkt_send[MGMT_SOFT_Q]);
+       seq_printf(seq, "total_mgmt_pkt_queued : %d\n",
+                  skb_queue_len(&common->tx_queue[4]));
+       seq_printf(seq, "total_mgmt_pkt_freed  : %d\n",
+                  common->tx_stats.total_tx_pkt_freed[MGMT_SOFT_Q]);
+
+       /* Data TX Path Stats */
+       seq_printf(seq, "total_data_vo_pkt_send: %8d\t",
+                  common->tx_stats.total_tx_pkt_send[VO_Q]);
+       seq_printf(seq, "total_data_vo_pkt_queued:  %8d\t",
+                  skb_queue_len(&common->tx_queue[0]));
+       seq_printf(seq, "total_vo_pkt_freed: %8d\n",
+                  common->tx_stats.total_tx_pkt_freed[VO_Q]);
+       seq_printf(seq, "total_data_vi_pkt_send: %8d\t",
+                  common->tx_stats.total_tx_pkt_send[VI_Q]);
+       seq_printf(seq, "total_data_vi_pkt_queued:  %8d\t",
+                  skb_queue_len(&common->tx_queue[1]));
+       seq_printf(seq, "total_vi_pkt_freed: %8d\n",
+                  common->tx_stats.total_tx_pkt_freed[VI_Q]);
+       seq_printf(seq,  "total_data_be_pkt_send: %8d\t",
+                  common->tx_stats.total_tx_pkt_send[BE_Q]);
+       seq_printf(seq, "total_data_be_pkt_queued:  %8d\t",
+                  skb_queue_len(&common->tx_queue[2]));
+       seq_printf(seq, "total_be_pkt_freed: %8d\n",
+                  common->tx_stats.total_tx_pkt_freed[BE_Q]);
+       seq_printf(seq, "total_data_bk_pkt_send: %8d\t",
+                  common->tx_stats.total_tx_pkt_send[BK_Q]);
+       seq_printf(seq, "total_data_bk_pkt_queued:  %8d\t",
+                  skb_queue_len(&common->tx_queue[3]));
+       seq_printf(seq, "total_bk_pkt_freed: %8d\n",
+                  common->tx_stats.total_tx_pkt_freed[BK_Q]);
+
+       seq_puts(seq, "\n");
+       return 0;
+}
+
+/**
+ * rsi_stats_open() - This function calls the single_open function of
+ *                    seq_file to open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_stats_open(struct inode *inode,
+                         struct file *file)
+{
+       return single_open(file, rsi_stats_read, inode->i_private);
+}
+
+/**
+ * rsi_debug_zone_read() - This function displays the currently enabled
+ *                         debug zones.
+ * @seq: Pointer to the sequence file structure.
+ * @data: Pointer to the data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_debug_zone_read(struct seq_file *seq, void *data)
+{
+       rsi_dbg(FSM_ZONE, "%x: rsi_enabled zone", rsi_zone_enabled);
+       seq_printf(seq, "The zones available are %#x\n",
+                  rsi_zone_enabled);
+       return 0;
+}
+
+/**
+ * rsi_debug_read() - This function calls the single_open function of
+ *                    seq_file to open the file and read its contents.
+ * @inode: Pointer to the inode structure.
+ * @file: Pointer to the file structure.
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int rsi_debug_read(struct inode *inode,
+                         struct file *file)
+{
+       return single_open(file, rsi_debug_zone_read, inode->i_private);
+}
+
+/**
+ * rsi_debug_zone_write() - This function sets the enabled debug zones as
+ *                          per the user's requirement.
+ * @filp: Pointer to the file structure.
+ * @buff: Pointer to the character buffer.
+ * @len: Length of the data to be written into buffer.
+ * @data: Pointer to the data.
+ *
+ * Return: Number of bytes consumed on success, negative error code on failure.
+ */
+static ssize_t rsi_debug_zone_write(struct file *filp,
+                                   const char __user *buff,
+                                   size_t len,
+                                   loff_t *data)
+{
+       unsigned long dbg_zone;
+       int ret;
+
+       if (!len)
+               return 0;
+
+       ret = kstrtoul_from_user(buff, len, 16, &dbg_zone);
+
+       if (ret)
+               return ret;
+
+       rsi_zone_enabled = dbg_zone;
+       return len;
+}
+
+#define FOPS(fopen) { \
+       .owner = THIS_MODULE, \
+       .open = (fopen), \
+       .read = seq_read, \
+       .llseek = seq_lseek, \
+}
+
+#define FOPS_RW(fopen, fwrite) { \
+       .owner = THIS_MODULE, \
+       .open = (fopen), \
+       .read = seq_read, \
+       .llseek = seq_lseek, \
+       .write = (fwrite), \
+}
+
+static const struct rsi_dbg_files dev_debugfs_files[] = {
+       {"version", 0644, FOPS(rsi_version_open),},
+       {"stats", 0644, FOPS(rsi_stats_open),},
+       {"debug_zone", 0666, FOPS_RW(rsi_debug_read, rsi_debug_zone_write),},
+       {"sdio_stats", 0644, FOPS(rsi_sdio_stats_open),},
+};
+
+/**
+ * rsi_init_dbgfs() - This function initializes the dbgfs entry.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_init_dbgfs(struct rsi_hw *adapter)
+{
+       struct rsi_common *common = adapter->priv;
+       struct rsi_debugfs *dev_dbgfs;
+       char devdir[6];
+       int ii;
+       const struct rsi_dbg_files *files;
+
+       dev_dbgfs = kzalloc(sizeof(*dev_dbgfs), GFP_KERNEL);
+       if (!dev_dbgfs)
+               return -ENOMEM;
+       adapter->dfsentry = dev_dbgfs;
+
+       snprintf(devdir, sizeof(devdir), "%s",
+                wiphy_name(adapter->hw->wiphy));
+       dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL);
+
+       if (IS_ERR(dev_dbgfs->subdir)) {
+               if (dev_dbgfs->subdir == ERR_PTR(-ENODEV))
+                       rsi_dbg(ERR_ZONE,
+                               "%s:Debugfs has not been mounted\n", __func__);
+               else
+                       rsi_dbg(ERR_ZONE, "debugfs:%s not created\n", devdir);
+
+               adapter->dfsentry = NULL;
+               kfree(dev_dbgfs);
+               return (int)PTR_ERR(dev_dbgfs->subdir);
+       } else {
+               for (ii = 0; ii < adapter->num_debugfs_entries; ii++) {
+                       files = &dev_debugfs_files[ii];
+                       dev_dbgfs->rsi_files[ii] =
+                       debugfs_create_file(files->name,
+                                           files->perms,
+                                           dev_dbgfs->subdir,
+                                           common,
+                                           &files->fops);
+               }
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rsi_init_dbgfs);
+
+/**
+ * rsi_remove_dbgfs() - Removes the previously created dbgfs file entries
+ *                     in the reverse order of creation.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_remove_dbgfs(struct rsi_hw *adapter)
+{
+       struct rsi_debugfs *dev_dbgfs = adapter->dfsentry;
+
+       if (!dev_dbgfs)
+               return;
+
+       debugfs_remove_recursive(dev_dbgfs->subdir);
+}
+EXPORT_SYMBOL_GPL(rsi_remove_dbgfs);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
new file mode 100644 (file)
index 0000000..8416474
--- /dev/null
@@ -0,0 +1,1008 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "rsi_debugfs.h"
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+static const struct ieee80211_channel rsi_2ghz_channels[] = {
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+         .hw_value = 1 }, /* Channel 1 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+         .hw_value = 2 }, /* Channel 2 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+         .hw_value = 3 }, /* Channel 3 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+         .hw_value = 4 }, /* Channel 4 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+         .hw_value = 5 }, /* Channel 5 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+         .hw_value = 6 }, /* Channel 6 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+         .hw_value = 7 }, /* Channel 7 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+         .hw_value = 8 }, /* Channel 8 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+         .hw_value = 9 }, /* Channel 9 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+         .hw_value = 10 }, /* Channel 10 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+         .hw_value = 11 }, /* Channel 11 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+         .hw_value = 12 }, /* Channel 12 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+         .hw_value = 13 }, /* Channel 13 */
+       { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+         .hw_value = 14 }, /* Channel 14 */
+};
+
+static const struct ieee80211_channel rsi_5ghz_channels[] = {
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+         .hw_value = 36,  }, /* Channel 36 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+         .hw_value = 40, }, /* Channel 40 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+         .hw_value = 44, }, /* Channel 44 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+         .hw_value = 48, }, /* Channel 48 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+         .hw_value = 52, }, /* Channel 52 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+         .hw_value = 56, }, /* Channel 56 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+         .hw_value = 60, }, /* Channel 60 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+         .hw_value = 64, }, /* Channel 64 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+         .hw_value = 100, }, /* Channel 100 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+         .hw_value = 104, }, /* Channel 104 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+         .hw_value = 108, }, /* Channel 108 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+         .hw_value = 112, }, /* Channel 112 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+         .hw_value = 116, }, /* Channel 116 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+         .hw_value = 120, }, /* Channel 120 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+         .hw_value = 124, }, /* Channel 124 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+         .hw_value = 128, }, /* Channel 128 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+         .hw_value = 132, }, /* Channel 132 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+         .hw_value = 136, }, /* Channel 136 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+         .hw_value = 140, }, /* Channel 140 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+         .hw_value = 149, }, /* Channel 149 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+         .hw_value = 153, }, /* Channel 153 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+         .hw_value = 157, }, /* Channel 157 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+         .hw_value = 161, }, /* Channel 161 */
+       { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+         .hw_value = 165, }, /* Channel 165 */
+};
+
+struct ieee80211_rate rsi_rates[12] = {
+       { .bitrate = STD_RATE_01  * 5, .hw_value = RSI_RATE_1 },
+       { .bitrate = STD_RATE_02  * 5, .hw_value = RSI_RATE_2 },
+       { .bitrate = STD_RATE_5_5 * 5, .hw_value = RSI_RATE_5_5 },
+       { .bitrate = STD_RATE_11  * 5, .hw_value = RSI_RATE_11 },
+       { .bitrate = STD_RATE_06  * 5, .hw_value = RSI_RATE_6 },
+       { .bitrate = STD_RATE_09  * 5, .hw_value = RSI_RATE_9 },
+       { .bitrate = STD_RATE_12  * 5, .hw_value = RSI_RATE_12 },
+       { .bitrate = STD_RATE_18  * 5, .hw_value = RSI_RATE_18 },
+       { .bitrate = STD_RATE_24  * 5, .hw_value = RSI_RATE_24 },
+       { .bitrate = STD_RATE_36  * 5, .hw_value = RSI_RATE_36 },
+       { .bitrate = STD_RATE_48  * 5, .hw_value = RSI_RATE_48 },
+       { .bitrate = STD_RATE_54  * 5, .hw_value = RSI_RATE_54 },
+};
+
+const u16 rsi_mcsrates[8] = {
+       RSI_RATE_MCS0, RSI_RATE_MCS1, RSI_RATE_MCS2, RSI_RATE_MCS3,
+       RSI_RATE_MCS4, RSI_RATE_MCS5, RSI_RATE_MCS6, RSI_RATE_MCS7
+};
+
+/**
+ * rsi_is_cipher_wep() -  This function determines if the cipher is WEP or not.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: If cipher type is WEP, a value of 1 is returned, else 0.
+ */
+
+bool rsi_is_cipher_wep(struct rsi_common *common)
+{
+       if (((common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP104) ||
+            (common->secinfo.gtk_cipher == WLAN_CIPHER_SUITE_WEP40)) &&
+           (!common->secinfo.ptk_cipher))
+               return true;
+       else
+               return false;
+}
+
+/**
+ * rsi_register_rates_channels() - This function registers channels and rates.
+ * @adapter: Pointer to the adapter structure.
+ * @band: Operating band to be set.
+ *
+ * Return: None.
+ */
+static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+{
+       struct ieee80211_supported_band *sbands = &adapter->sbands[band];
+       void *channels = NULL;
+
+       if (band == IEEE80211_BAND_2GHZ) {
+               channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
+               memcpy(channels,
+                      rsi_2ghz_channels,
+                      sizeof(rsi_2ghz_channels));
+               sbands->band = IEEE80211_BAND_2GHZ;
+               sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
+               sbands->bitrates = rsi_rates;
+               sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
+       } else {
+               channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
+               memcpy(channels,
+                      rsi_5ghz_channels,
+                      sizeof(rsi_5ghz_channels));
+               sbands->band = IEEE80211_BAND_5GHZ;
+               sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
+               sbands->bitrates = &rsi_rates[4];
+               sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
+       }
+
+       sbands->channels = channels;
+
+       memset(&sbands->ht_cap, 0, sizeof(struct ieee80211_sta_ht_cap));
+       sbands->ht_cap.ht_supported = true;
+       sbands->ht_cap.cap = (IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                             IEEE80211_HT_CAP_SGI_20 |
+                             IEEE80211_HT_CAP_SGI_40);
+       sbands->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
+       sbands->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+       sbands->ht_cap.mcs.rx_mask[0] = 0xff;
+       sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       /* sbands->ht_cap.mcs.rx_highest = 0x82; */
+}
+
+/**
+ * rsi_mac80211_detach() - This function is used to de-initialize the
+ *                        mac80211 stack.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_mac80211_detach(struct rsi_hw *adapter)
+{
+       struct ieee80211_hw *hw = adapter->hw;
+
+       if (hw) {
+               ieee80211_stop_queues(hw);
+               ieee80211_unregister_hw(hw);
+               ieee80211_free_hw(hw);
+       }
+
+       rsi_remove_dbgfs(adapter);
+}
+EXPORT_SYMBOL_GPL(rsi_mac80211_detach);
+
+/**
+ * rsi_indicate_tx_status() - This function indicates the transmit status.
+ * @adapter: Pointer to the adapter structure.
+ * @skb: Pointer to the socket buffer structure.
+ * @status: Status of the transmitted frame (0 indicates success).
+ *
+ * Return: None.
+ */
+void rsi_indicate_tx_status(struct rsi_hw *adapter,
+                           struct sk_buff *skb,
+                           int status)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+       if (!status)
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
+       ieee80211_tx_status_irqsafe(adapter->hw, skb);
+}
+
+/**
+ * rsi_mac80211_tx() - This is the handler that the 802.11 module calls for
+ *                    each transmitted frame. The SKB contains the buffer
+ *                    starting from the IEEE 802.11 header.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @control: Pointer to the ieee80211_tx_control structure
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None
+ */
+static void rsi_mac80211_tx(struct ieee80211_hw *hw,
+                           struct ieee80211_tx_control *control,
+                           struct sk_buff *skb)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       rsi_core_xmit(common, skb);
+}
+
+/**
+ * rsi_mac80211_start() - This is the first handler that the 802.11 module
+ *                       calls. Since driver init is complete by then, it
+ *                       simply marks the interface as up.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: 0 as success.
+ */
+static int rsi_mac80211_start(struct ieee80211_hw *hw)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+       common->iface_down = false;
+       mutex_unlock(&common->mutex);
+
+       return 0;
+}
+
+/**
+ * rsi_mac80211_stop() - This is the last handler that 802.11 module calls.
+ * @hw: Pointer to the ieee80211_hw structure.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_stop(struct ieee80211_hw *hw)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+       common->iface_down = true;
+       mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_add_interface() - This function is called when a netdevice
+ *                               attached to the hardware is enabled.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+       int ret = -EOPNOTSUPP;
+
+       mutex_lock(&common->mutex);
+       switch (vif->type) {
+       case NL80211_IFTYPE_STATION:
+               if (!adapter->sc_nvifs) {
+                       ++adapter->sc_nvifs;
+                       adapter->vifs[0] = vif;
+                       ret = rsi_set_vap_capabilities(common, STA_OPMODE);
+               }
+               break;
+       default:
+               rsi_dbg(ERR_ZONE,
+                       "%s: Interface type %d not supported\n", __func__,
+                       vif->type);
+       }
+       mutex_unlock(&common->mutex);
+
+       return ret;
+}
+
+/**
+ * rsi_mac80211_remove_interface() - This function notifies driver that an
+ *                                  interface is going down.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+       if (vif->type == NL80211_IFTYPE_STATION)
+               adapter->sc_nvifs--;
+
+       if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif)))
+               adapter->vifs[0] = NULL;
+       mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_config() - This function is a handler for configuration
+ *                        requests. The stack calls this function to
+ *                        change hardware configuration, e.g., channel.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @changed: Changed flags set.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_config(struct ieee80211_hw *hw,
+                              u32 changed)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+       int status = -EOPNOTSUPP;
+
+       mutex_lock(&common->mutex);
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+               u16 channel = curchan->hw_value;
+
+               rsi_dbg(INFO_ZONE,
+                       "%s: Set channel: %d MHz type: %d channel_no %d\n",
+                       __func__, curchan->center_freq,
+                       curchan->flags, channel);
+               common->band = curchan->band;
+               status = rsi_set_channel(adapter->priv, channel);
+       }
+       mutex_unlock(&common->mutex);
+
+       return status;
+}
+
+/**
+ * rsi_get_connected_channel() - This function is used to get the current
+ *                              connected channel number.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: Current connected AP's channel number is returned.
+ */
+u16 rsi_get_connected_channel(struct rsi_hw *adapter)
+{
+       struct ieee80211_vif *vif = adapter->vifs[0];
+       if (vif) {
+               struct ieee80211_bss_conf *bss = &vif->bss_conf;
+               struct ieee80211_channel *channel = bss->chandef.chan;
+               return channel->hw_value;
+       }
+
+       return 0;
+}
+
+/**
+ * rsi_mac80211_bss_info_changed() - This function is a handler for config
+ *                                  requests related to BSS parameters that
+ *                                  may vary during BSS's lifespan.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @bss_conf: Pointer to the ieee80211_bss_conf structure.
+ * @changed: Changed flags set.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif,
+                                         struct ieee80211_bss_conf *bss_conf,
+                                         u32 changed)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+       if (changed & BSS_CHANGED_ASSOC) {
+               rsi_dbg(INFO_ZONE, "%s: Changed Association status: %d\n",
+                       __func__, bss_conf->assoc);
+               rsi_inform_bss_status(common,
+                                     bss_conf->assoc,
+                                     bss_conf->bssid,
+                                     bss_conf->qos,
+                                     bss_conf->aid);
+       }
+       mutex_unlock(&common->mutex);
+}
+
+/**
+ * rsi_mac80211_conf_filter() - This function configures the device's RX
+ *                             filter.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @changed_flags: RX filter flags that changed.
+ * @total_flags: Total initial flags set.
+ * @multicast: Multicast filter setting.
+ *
+ * Return: None.
+ */
+static void rsi_mac80211_conf_filter(struct ieee80211_hw *hw,
+                                    u32 changed_flags,
+                                    u32 *total_flags,
+                                    u64 multicast)
+{
+       /* Not doing much here as of now */
+       *total_flags &= RSI_SUPP_FILTERS;
+}
+
+/**
+ * rsi_mac80211_conf_tx() - This function configures TX queue parameters
+ *                         (EDCF (aifs, cw_min, cw_max), bursting)
+ *                         for a hardware TX queue.
+ * @hw: Pointer to the ieee80211_hw structure
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @queue: Queue number.
+ * @params: Pointer to ieee80211_tx_queue_params structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif, u16 queue,
+                               const struct ieee80211_tx_queue_params *params)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+       u8 idx = 0;
+
+       if (queue >= IEEE80211_NUM_ACS)
+               return 0;
+
+       rsi_dbg(INFO_ZONE,
+               "%s: Conf queue %d, aifs: %d, cwmin: %d cwmax: %d, txop: %d\n",
+               __func__, queue, params->aifs,
+               params->cw_min, params->cw_max, params->txop);
+
+       mutex_lock(&common->mutex);
+       /* Map into the way the f/w expects */
+       switch (queue) {
+       case IEEE80211_AC_VO:
+               idx = VO_Q;
+               break;
+       case IEEE80211_AC_VI:
+               idx = VI_Q;
+               break;
+       case IEEE80211_AC_BE:
+               idx = BE_Q;
+               break;
+       case IEEE80211_AC_BK:
+               idx = BK_Q;
+               break;
+       default:
+               idx = BE_Q;
+               break;
+       }
+
+       memcpy(&common->edca_params[idx],
+              params,
+              sizeof(struct ieee80211_tx_queue_params));
+       mutex_unlock(&common->mutex);
+
+       return 0;
+}
+
+/**
+ * rsi_hal_key_config() - This function loads the keys into the firmware.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @key: Pointer to the ieee80211_key_conf structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_hal_key_config(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_key_conf *key)
+{
+       struct rsi_hw *adapter = hw->priv;
+       int status;
+       u8 key_type;
+
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+               key_type = RSI_PAIRWISE_KEY;
+       else
+               key_type = RSI_GROUP_KEY;
+
+       rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n",
+               __func__, key->cipher, key_type, key->keylen);
+
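+       /* For WEP ciphers the key is first programmed as a pairwise key and
+        * then loaded again as the requested key type below.
+        */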
+       if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) ||
+           (key->cipher == WLAN_CIPHER_SUITE_WEP40)) {
+               status = rsi_hal_load_key(adapter->priv,
+                                         key->key,
+                                         key->keylen,
+                                         RSI_PAIRWISE_KEY,
+                                         key->keyidx,
+                                         key->cipher);
+               if (status)
+                       return status;
+       }
+       return rsi_hal_load_key(adapter->priv,
+                               key->key,
+                               key->keylen,
+                               key_type,
+                               key->keyidx,
+                               key->cipher);
+}
+
+/**
+ * rsi_mac80211_set_key() - This function sets type of key to be loaded.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @cmd: enum set_key_cmd.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @key: Pointer to the ieee80211_key_conf structure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
+                               enum set_key_cmd cmd,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta,
+                               struct ieee80211_key_conf *key)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+       struct security_info *secinfo = &common->secinfo;
+       int status;
+
+       mutex_lock(&common->mutex);
+       switch (cmd) {
+       case SET_KEY:
+               secinfo->security_enable = true;
+               status = rsi_hal_key_config(hw, vif, key);
+               if (status) {
+                       mutex_unlock(&common->mutex);
+                       return status;
+               }
+
+               if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+                       secinfo->ptk_cipher = key->cipher;
+               else
+                       secinfo->gtk_cipher = key->cipher;
+
+               key->hw_key_idx = key->keyidx;
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+               rsi_dbg(ERR_ZONE, "%s: RSI set_key\n", __func__);
+               break;
+
+       case DISABLE_KEY:
+               secinfo->security_enable = false;
+               rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
+               memset(key, 0, sizeof(struct ieee80211_key_conf));
+               status = rsi_hal_key_config(hw, vif, key);
+               break;
+
+       default:
+               status = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&common->mutex);
+       return status;
+}
+
+/**
+ * rsi_mac80211_ampdu_action() - This function selects the AMPDU action for
+ *                              the corresponding mlme_action flag and
+ *                              informs the f/w regarding this.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @action: ieee80211_ampdu_mlme_action enum.
+ * @sta: Pointer to the ieee80211_sta structure.
+ * @tid: Traffic identifier.
+ * @ssn: Pointer to ssn value.
+ * @buf_size: Buffer size (for kernel version > 2.6.38).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    enum ieee80211_ampdu_mlme_action action,
+                                    struct ieee80211_sta *sta,
+                                    unsigned short tid,
+                                    unsigned short *ssn,
+                                    unsigned char buf_size)
+{
+       int status = -EOPNOTSUPP;
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+       u16 seq_no = 0;
+       u8 ii = 0;
+
+       for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
+               if (vif == adapter->vifs[ii])
+                       break;
+       }
+
+       mutex_lock(&common->mutex);
+       rsi_dbg(INFO_ZONE, "%s: AMPDU action %d called\n", __func__, action);
+       if (ssn != NULL)
+               seq_no = *ssn;
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               status = rsi_send_aggregation_params_frame(common,
+                                                          tid,
+                                                          seq_no,
+                                                          buf_size,
+                                                          STA_RX_ADDBA_DONE);
+               break;
+
+       case IEEE80211_AMPDU_RX_STOP:
+               status = rsi_send_aggregation_params_frame(common,
+                                                          tid,
+                                                          0,
+                                                          buf_size,
+                                                          STA_RX_DELBA);
+               break;
+
+       case IEEE80211_AMPDU_TX_START:
+               common->vif_info[ii].seq_start = seq_no;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+               status = rsi_send_aggregation_params_frame(common,
+                                                          tid,
+                                                          seq_no,
+                                                          buf_size,
+                                                          STA_TX_DELBA);
+               if (!status)
+                       ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               status = rsi_send_aggregation_params_frame(common,
+                                                          tid,
+                                                          common->vif_info[ii]
+                                                               .seq_start,
+                                                          buf_size,
+                                                          STA_TX_ADDBA_DONE);
+               break;
+
+       default:
+               rsi_dbg(ERR_ZONE, "%s: Unknown AMPDU action\n", __func__);
+               break;
+       }
+
+       mutex_unlock(&common->mutex);
+       return status;
+}
+
+/**
+ * rsi_mac80211_set_rts_threshold() - This function sets the RTS threshold.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @value: RTS threshold value.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_mac80211_set_rts_threshold(struct ieee80211_hw *hw,
+                                         u32 value)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+       common->rts_threshold = value;
+       mutex_unlock(&common->mutex);
+
+       return 0;
+}
+
+/**
+ * rsi_mac80211_set_rate_mask() - This function sets bitrate_mask to be used.
+ * @hw: Pointer to the ieee80211_hw structure
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @mask: Pointer to the cfg80211_bitrate_mask structure.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
+                                     struct ieee80211_vif *vif,
+                                     const struct cfg80211_bitrate_mask *mask)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+
+       common->fixedrate_mask[IEEE80211_BAND_2GHZ] = 0;
+
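+       /* A legacy mask of 0xfff selects all legacy rates, so treat the
+        * request as HT-only and keep the MCS mask in the upper bits.
+        */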
+       if (mask->control[IEEE80211_BAND_2GHZ].legacy == 0xfff) {
+               common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
+                       (mask->control[IEEE80211_BAND_2GHZ].ht_mcs[0] << 12);
+       } else {
+               common->fixedrate_mask[IEEE80211_BAND_2GHZ] =
+                       mask->control[IEEE80211_BAND_2GHZ].legacy;
+       }
+       mutex_unlock(&common->mutex);
+
+       return 0;
+}
+
+/**
+ * rsi_fill_rx_status() - This function fills the RX status in the
+ *                       ieee80211_rx_status structure.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @skb: Pointer to the socket buffer structure.
+ * @common: Pointer to the driver private structure.
+ * @rxs: Pointer to the ieee80211_rx_status structure.
+ *
+ * Return: None.
+ */
+static void rsi_fill_rx_status(struct ieee80211_hw *hw,
+                              struct sk_buff *skb,
+                              struct rsi_common *common,
+                              struct ieee80211_rx_status *rxs)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct skb_info *rx_params = (struct skb_info *)info->driver_data;
+       struct ieee80211_hdr *hdr;
+       char rssi = rx_params->rssi;
+       u8 hdrlen = 0;
+       u8 channel = rx_params->channel;
+       s32 freq;
+
+       hdr = ((struct ieee80211_hdr *)(skb->data));
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+       memset(info, 0, sizeof(struct ieee80211_tx_info));
+
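+       /* Firmware reports RSSI as a positive magnitude; mac80211 expects a
+        * signal level in dBm, hence the negation.
+        */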
+       rxs->signal = -(rssi);
+
+       if (channel <= 14)
+               rxs->band = IEEE80211_BAND_2GHZ;
+       else
+               rxs->band = IEEE80211_BAND_5GHZ;
+
+       freq = ieee80211_channel_to_frequency(channel, rxs->band);
+
+       if (freq)
+               rxs->freq = freq;
+
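+       /* Strip the security header before handing the frame to mac80211:
+        * a 4-byte IV for WEP, or 8 bytes (IV + extended IV) otherwise.
+        */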
+       if (ieee80211_has_protected(hdr->frame_control)) {
+               if (rsi_is_cipher_wep(common)) {
+                       memmove(skb->data + 4, skb->data, hdrlen);
+                       skb_pull(skb, 4);
+               } else {
+                       memmove(skb->data + 8, skb->data, hdrlen);
+                       skb_pull(skb, 8);
+                       rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+               }
+               rxs->flag |= RX_FLAG_DECRYPTED;
+               rxs->flag |= RX_FLAG_IV_STRIPPED;
+       }
+}
+
+/**
+ * rsi_indicate_pkt_to_os() - This function sends a received packet to mac80211.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: None.
+ */
+void rsi_indicate_pkt_to_os(struct rsi_common *common,
+                           struct sk_buff *skb)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_hw *hw = adapter->hw;
+       struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+       if ((common->iface_down) || (!adapter->sc_nvifs)) {
+               dev_kfree_skb(skb);
+               return;
+       }
+
+       /* filling in the ieee80211_rx_status flags */
+       rsi_fill_rx_status(hw, skb, common, rx_status);
+
+       ieee80211_rx_irqsafe(hw, skb);
+}
+
+static void rsi_set_min_rate(struct ieee80211_hw *hw,
+                            struct ieee80211_sta *sta,
+                            struct rsi_common *common)
+{
+       u8 band = hw->conf.chandef.chan->band;
+       u8 ii;
+       u32 rate_bitmap;
+       bool matched = false;
+
+       common->bitrate_mask[band] = sta->supp_rates[band];
+
+       rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
+
+       if (rate_bitmap & 0xfff) {
+               /* Find out the min rate */
+               for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+                       if (rate_bitmap & BIT(ii)) {
+                               common->min_rate = rsi_rates[ii].hw_value;
+                               matched = true;
+                               break;
+                       }
+               }
+       }
+
+       common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
+
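+       /* Bits 12 and above of the fixed rate mask carry the HT MCS rates. */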
+       if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
+               for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
+                       if ((rate_bitmap >> 12) & BIT(ii)) {
+                               common->min_rate = rsi_mcsrates[ii];
+                               matched = true;
+                               break;
+                       }
+               }
+       }
+
+       if (!matched)
+               common->min_rate = 0xffff;
+}
+
+/**
+ * rsi_mac80211_sta_add() - This function notifies driver about a peer getting
+ *                         connected.
+ * @hw: pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
+                               struct ieee80211_vif *vif,
+                               struct ieee80211_sta *sta)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+
+       rsi_set_min_rate(hw, sta, common);
+
+       if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
+           (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) {
+               common->vif_info[0].sgi = true;
+       }
+
+       if (sta->ht_cap.ht_supported)
+               ieee80211_start_tx_ba_session(sta, 0, 0);
+
+       mutex_unlock(&common->mutex);
+
+       return 0;
+}
+
+/**
+ * rsi_mac80211_sta_remove() - This function notifies driver about a peer
+ *                            getting disconnected.
+ * @hw: Pointer to the ieee80211_hw structure.
+ * @vif: Pointer to the ieee80211_vif structure.
+ * @sta: Pointer to the ieee80211_sta structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_sta *sta)
+{
+       struct rsi_hw *adapter = hw->priv;
+       struct rsi_common *common = adapter->priv;
+
+       mutex_lock(&common->mutex);
+       /* Resetting all the fields to default values */
+       common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
+       common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+       common->min_rate = 0xffff;
+       common->vif_info[0].is_ht = false;
+       common->vif_info[0].sgi = false;
+       common->vif_info[0].seq_start = 0;
+       common->secinfo.ptk_cipher = 0;
+       common->secinfo.gtk_cipher = 0;
+       mutex_unlock(&common->mutex);
+
+       return 0;
+}
+
+static struct ieee80211_ops mac80211_ops = {
+       .tx = rsi_mac80211_tx,
+       .start = rsi_mac80211_start,
+       .stop = rsi_mac80211_stop,
+       .add_interface = rsi_mac80211_add_interface,
+       .remove_interface = rsi_mac80211_remove_interface,
+       .config = rsi_mac80211_config,
+       .bss_info_changed = rsi_mac80211_bss_info_changed,
+       .conf_tx = rsi_mac80211_conf_tx,
+       .configure_filter = rsi_mac80211_conf_filter,
+       .set_key = rsi_mac80211_set_key,
+       .set_rts_threshold = rsi_mac80211_set_rts_threshold,
+       .set_bitrate_mask = rsi_mac80211_set_rate_mask,
+       .ampdu_action = rsi_mac80211_ampdu_action,
+       .sta_add = rsi_mac80211_sta_add,
+       .sta_remove = rsi_mac80211_sta_remove,
+};
+
+/**
+ * rsi_mac80211_attach() - This function is used to initialize Mac80211 stack.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_mac80211_attach(struct rsi_common *common)
+{
+       int status = 0;
+       struct ieee80211_hw *hw = NULL;
+       struct wiphy *wiphy = NULL;
+       struct rsi_hw *adapter = common->priv;
+       u8 addr_mask[ETH_ALEN] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x3};
+
+       rsi_dbg(INIT_ZONE, "%s: Performing mac80211 attach\n", __func__);
+
+       hw = ieee80211_alloc_hw(sizeof(struct rsi_hw), &mac80211_ops);
+       if (!hw) {
+               rsi_dbg(ERR_ZONE, "%s: ieee80211 hw alloc failed\n", __func__);
+               return -ENOMEM;
+       }
+
+       wiphy = hw->wiphy;
+
+       SET_IEEE80211_DEV(hw, adapter->device);
+
+       hw->priv = adapter;
+       adapter->hw = hw;
+
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_HAS_RATE_CONTROL |
+                   IEEE80211_HW_AMPDU_AGGREGATION |
+                   0;
+
+       hw->queues = MAX_HW_QUEUES;
+       hw->extra_tx_headroom = RSI_NEEDED_HEADROOM;
+
+       hw->max_rates = 1;
+       hw->max_rate_tries = MAX_RETRIES;
+
+       hw->max_tx_aggregation_subframes = 6;
+       rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
+       hw->rate_control_algorithm = "AARF";
+
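+       /* addr_mask marks which MAC address bits may vary across virtual
+        * interfaces; here only the low two bits of the last octet.
+        */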
+       SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
+       ether_addr_copy(hw->wiphy->addr_mask, addr_mask);
+
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+       wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+       wiphy->retry_short = RETRY_SHORT;
+       wiphy->retry_long  = RETRY_LONG;
+       wiphy->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
+       wiphy->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+       wiphy->flags = 0;
+
+       wiphy->available_antennas_rx = 1;
+       wiphy->available_antennas_tx = 1;
+       wiphy->bands[IEEE80211_BAND_2GHZ] =
+               &adapter->sbands[IEEE80211_BAND_2GHZ];
+
+       status = ieee80211_register_hw(hw);
+       if (status)
+               return status;
+
+       return rsi_init_dbgfs(adapter);
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
new file mode 100644 (file)
index 0000000..8810862
--- /dev/null
@@ -0,0 +1,295 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+u32 rsi_zone_enabled = /* INFO_ZONE |
+                       INIT_ZONE |
+                       MGMT_TX_ZONE |
+                       MGMT_RX_ZONE |
+                       DATA_TX_ZONE |
+                       DATA_RX_ZONE |
+                       FSM_ZONE |
+                       ISR_ZONE | */
+                       ERR_ZONE |
+                       0;
+EXPORT_SYMBOL_GPL(rsi_zone_enabled);
+
+/**
+ * rsi_dbg() - This function outputs informational messages.
+ * @zone: Zone of interest for output message.
+ * @fmt: printf-style format for output message.
+ *
+ * Return: none
+ */
+void rsi_dbg(u32 zone, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       if (zone & rsi_zone_enabled)
+               pr_info("%pV", &vaf);
+       va_end(args);
+}
+EXPORT_SYMBOL_GPL(rsi_dbg);
+
+/**
+ * rsi_prepare_skb() - This function prepares the skb.
+ * @common: Pointer to the driver private structure.
+ * @buffer: Pointer to the packet data.
+ * @pkt_len: Length of the packet.
+ * @extended_desc: Extended descriptor.
+ *
+ * Return: Prepared skb on success, NULL on failure.
+ */
+static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
+                                      u8 *buffer,
+                                      u32 pkt_len,
+                                      u8 extended_desc)
+{
+       struct ieee80211_tx_info *info;
+       struct skb_info *rx_params;
+       struct sk_buff *skb = NULL;
+       u8 payload_offset;
+
+       if (WARN(!pkt_len, "%s: Dummy pkt received", __func__))
+               return NULL;
+
+       if (pkt_len > (RSI_RCV_BUFFER_LEN * 4)) {
+               rsi_dbg(ERR_ZONE, "%s: Pkt size > max rx buf size %d\n",
+                       __func__, pkt_len);
+               pkt_len = RSI_RCV_BUFFER_LEN * 4;
+       }
+
+       pkt_len -= extended_desc;
+       skb = dev_alloc_skb(pkt_len + FRAME_DESC_SZ);
+       if (skb == NULL)
+               return NULL;
+
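+       /* The payload follows the frame descriptor and any extended
+        * descriptor in the receive buffer.
+        */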
+       payload_offset = (extended_desc + FRAME_DESC_SZ);
+       skb_put(skb, pkt_len);
+       memcpy((skb->data), (buffer + payload_offset), skb->len);
+
+       info = IEEE80211_SKB_CB(skb);
+       rx_params = (struct skb_info *)info->driver_data;
+       rx_params->rssi = rsi_get_rssi(buffer);
+       rx_params->channel = rsi_get_connected_channel(common->priv);
+
+       return skb;
+}
+
+/**
+ * rsi_read_pkt() - This function reads frames from the card.
+ * @common: Pointer to the driver private structure.
+ * @rcv_pkt_len: Received pkt length. In case of USB it is 0.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len)
+{
+       u8 *frame_desc = NULL, extended_desc = 0;
+       u32 index, length = 0, queueno = 0;
+       u16 actual_length = 0, offset;
+       struct sk_buff *skb = NULL;
+
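+       /* A receive buffer may contain several frames back to back; walk
+        * them using the per-frame length from each descriptor.
+        */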
+       index = 0;
+       do {
+               frame_desc = &common->rx_data_pkt[index];
+               actual_length = *(u16 *)&frame_desc[0];
+               offset = *(u16 *)&frame_desc[2];
+
+               queueno = rsi_get_queueno(frame_desc, offset);
+               length = rsi_get_length(frame_desc, offset);
+               extended_desc = rsi_get_extended_desc(frame_desc, offset);
+
+               switch (queueno) {
+               case RSI_WIFI_DATA_Q:
+                       skb = rsi_prepare_skb(common,
+                                             (frame_desc + offset),
+                                             length,
+                                             extended_desc);
+                       if (skb == NULL)
+                               goto fail;
+
+                       rsi_indicate_pkt_to_os(common, skb);
+                       break;
+
+               case RSI_WIFI_MGMT_Q:
+                       rsi_mgmt_pkt_recv(common, (frame_desc + offset));
+                       break;
+
+               default:
+                       rsi_dbg(ERR_ZONE, "%s: pkt from invalid queue: %d\n",
+                               __func__,   queueno);
+                       goto fail;
+               }
+
+               index  += actual_length;
+               rcv_pkt_len -= actual_length;
+       } while (rcv_pkt_len > 0);
+
+       return 0;
+fail:
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(rsi_read_pkt);
+
+/**
+ * rsi_tx_scheduler_thread() - This function is a kernel thread to send the
+ *                            packets to the device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+static void rsi_tx_scheduler_thread(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       u32 timeout = EVENT_WAIT_FOREVER;
+
+       do {
+               if (adapter->determine_event_timeout)
+                       timeout = adapter->determine_event_timeout(adapter);
+               rsi_wait_event(&common->tx_thread.event, timeout);
+               rsi_reset_event(&common->tx_thread.event);
+
+               if (common->init_done)
+                       rsi_core_qos_processor(common);
+       } while (atomic_read(&common->tx_thread.thread_done) == 0);
+       complete_and_exit(&common->tx_thread.completion, 0);
+}
+
+/**
+ * rsi_91x_init() - This function initializes OS interface operations.
+ * @void: Void.
+ *
+ * Return: Pointer to the adapter structure on success, NULL on failure.
+ */
+struct rsi_hw *rsi_91x_init(void)
+{
+       struct rsi_hw *adapter = NULL;
+       struct rsi_common *common = NULL;
+       u8 ii = 0;
+
+       adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+       if (!adapter)
+               return NULL;
+
+       adapter->priv = kzalloc(sizeof(*common), GFP_KERNEL);
+       if (adapter->priv == NULL) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of memory\n",
+                       __func__);
+               kfree(adapter);
+               return NULL;
+       } else {
+               common = adapter->priv;
+               common->priv = adapter;
+       }
+
+       for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+               skb_queue_head_init(&common->tx_queue[ii]);
+
+       rsi_init_event(&common->tx_thread.event);
+       mutex_init(&common->mutex);
+       mutex_init(&common->tx_rxlock);
+
+       if (rsi_create_kthread(common,
+                              &common->tx_thread,
+                              rsi_tx_scheduler_thread,
+                              "Tx-Thread")) {
+               rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
+               goto err;
+       }
+
+       common->init_done = true;
+       return adapter;
+
+err:
+       kfree(common);
+       kfree(adapter);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(rsi_91x_init);
+
+/**
+ * rsi_91x_deinit() - This function de-initializes OS interface operations.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_91x_deinit(struct rsi_hw *adapter)
+{
+       struct rsi_common *common = adapter->priv;
+       u8 ii;
+
+       rsi_dbg(INFO_ZONE, "%s: Performing deinit os ops\n", __func__);
+
+       rsi_kill_thread(&common->tx_thread);
+
+       for (ii = 0; ii < NUM_SOFT_QUEUES; ii++)
+               skb_queue_purge(&common->tx_queue[ii]);
+
+       common->init_done = false;
+
+       kfree(common);
+       kfree(adapter->rsi_dev);
+       kfree(adapter);
+}
+EXPORT_SYMBOL_GPL(rsi_91x_deinit);
+
+/**
+ * rsi_91x_hal_module_init() - This function is invoked when the module is
+ *                            loaded into the kernel.
+ *                            It registers the client driver.
+ * @void: Void.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_91x_hal_module_init(void)
+{
+       rsi_dbg(INIT_ZONE, "%s: Module init called\n", __func__);
+       return 0;
+}
+
+/**
+ * rsi_91x_hal_module_exit() - This function is called at the time of
+ *                            removing/unloading the module.
+ *                            It unregisters the client driver.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_91x_hal_module_exit(void)
+{
+       rsi_dbg(INIT_ZONE, "%s: Module exit called\n", __func__);
+}
+
+module_init(rsi_91x_hal_module_init);
+module_exit(rsi_91x_hal_module_exit);
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Station driver for RSI 91x devices");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
new file mode 100644 (file)
index 0000000..2361a68
--- /dev/null
@@ -0,0 +1,1304 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "rsi_mgmt.h"
+#include "rsi_common.h"
+
+static struct bootup_params boot_params_20 = {
+       .magic_number = cpu_to_le16(0x5aa5),
+       .crystal_good_time = 0x0,
+       .valid = cpu_to_le32(VALID_20),
+       .reserved_for_valids = 0x0,
+       .bootup_mode_info = 0x0,
+       .digital_loop_back_params = 0x0,
+       .rtls_timestamp_en = 0x0,
+       .host_spi_intr_cfg = 0x0,
+       .device_clk_info = {{
+               .pll_config_g = {
+                       .tapll_info_g = {
+                               .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+                                             (TA_PLL_M_VAL_20)),
+                               .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+                       },
+                       .pll960_info_g = {
+                               .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+                                                        (PLL960_N_VAL_20)),
+                               .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+                               .pll_reg_3 = 0x0,
+                       },
+                       .afepll_info_g = {
+                               .pll_reg = cpu_to_le16(0x9f0),
+                       }
+               },
+               .switch_clk_g = {
+                       .switch_clk_info = cpu_to_le16(BIT(3)),
+                       .bbp_lmac_clk_reg_val = cpu_to_le16(0x121),
+                       .umac_clock_reg_config = 0x0,
+                       .qspi_uart_clock_reg_config = 0x0
+               }
+       },
+       {
+               .pll_config_g = {
+                       .tapll_info_g = {
+                               .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+                                                        (TA_PLL_M_VAL_20)),
+                               .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+                       },
+                       .pll960_info_g = {
+                               .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+                                                        (PLL960_N_VAL_20)),
+                               .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+                               .pll_reg_3 = 0x0,
+                       },
+                       .afepll_info_g = {
+                               .pll_reg = cpu_to_le16(0x9f0),
+                       }
+               },
+               .switch_clk_g = {
+                       .switch_clk_info = 0x0,
+                       .bbp_lmac_clk_reg_val = 0x0,
+                       .umac_clock_reg_config = 0x0,
+                       .qspi_uart_clock_reg_config = 0x0
+               }
+       },
+       {
+               .pll_config_g = {
+                       .tapll_info_g = {
+                               .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_20 << 8)|
+                                                        (TA_PLL_M_VAL_20)),
+                               .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_20),
+                       },
+                       .pll960_info_g = {
+                               .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_20 << 8)|
+                                                        (PLL960_N_VAL_20)),
+                               .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_20),
+                               .pll_reg_3 = 0x0,
+                       },
+                       .afepll_info_g = {
+                               .pll_reg = cpu_to_le16(0x9f0),
+                       }
+               },
+               .switch_clk_g = {
+                       .switch_clk_info = 0x0,
+                       .bbp_lmac_clk_reg_val = 0x0,
+                       .umac_clock_reg_config = 0x0,
+                       .qspi_uart_clock_reg_config = 0x0
+               }
+       } },
+       .buckboost_wakeup_cnt = 0x0,
+       .pmu_wakeup_wait = 0x0,
+       .shutdown_wait_time = 0x0,
+       .pmu_slp_clkout_sel = 0x0,
+       .wdt_prog_value = 0x0,
+       .wdt_soc_rst_delay = 0x0,
+       .dcdc_operation_mode = 0x0,
+       .soc_reset_wait_cnt = 0x0
+};
+
+static struct bootup_params boot_params_40 = {
+       .magic_number = cpu_to_le16(0x5aa5),
+       .crystal_good_time = 0x0,
+       .valid = cpu_to_le32(VALID_40),
+       .reserved_for_valids = 0x0,
+       .bootup_mode_info = 0x0,
+       .digital_loop_back_params = 0x0,
+       .rtls_timestamp_en = 0x0,
+       .host_spi_intr_cfg = 0x0,
+       .device_clk_info = {{
+               .pll_config_g = {
+                       .tapll_info_g = {
+                               .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+                                                        (TA_PLL_M_VAL_40)),
+                               .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+                       },
+                       .pll960_info_g = {
+                               .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+                                                        (PLL960_N_VAL_40)),
+                               .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+                               .pll_reg_3 = 0x0,
+                       },
+                       .afepll_info_g = {
+                               .pll_reg = cpu_to_le16(0x9f0),
+                       }
+               },
+               .switch_clk_g = {
+                       .switch_clk_info = cpu_to_le16(0x09),
+                       .bbp_lmac_clk_reg_val = cpu_to_le16(0x1121),
+                       .umac_clock_reg_config = cpu_to_le16(0x48),
+                       .qspi_uart_clock_reg_config = 0x0
+               }
+       },
+       {
+               .pll_config_g = {
+                       .tapll_info_g = {
+                               .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+                                                        (TA_PLL_M_VAL_40)),
+                               .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+                       },
+                       .pll960_info_g = {
+                               .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+                                                        (PLL960_N_VAL_40)),
+                               .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+                               .pll_reg_3 = 0x0,
+                       },
+                       .afepll_info_g = {
+                               .pll_reg = cpu_to_le16(0x9f0),
+                       }
+               },
+               .switch_clk_g = {
+                       .switch_clk_info = 0x0,
+                       .bbp_lmac_clk_reg_val = 0x0,
+                       .umac_clock_reg_config = 0x0,
+                       .qspi_uart_clock_reg_config = 0x0
+               }
+       },
+       {
+               .pll_config_g = {
+                       .tapll_info_g = {
+                               .pll_reg_1 = cpu_to_le16((TA_PLL_N_VAL_40 << 8)|
+                                                        (TA_PLL_M_VAL_40)),
+                               .pll_reg_2 = cpu_to_le16(TA_PLL_P_VAL_40),
+                       },
+                       .pll960_info_g = {
+                               .pll_reg_1 = cpu_to_le16((PLL960_P_VAL_40 << 8)|
+                                                        (PLL960_N_VAL_40)),
+                               .pll_reg_2 = cpu_to_le16(PLL960_M_VAL_40),
+                               .pll_reg_3 = 0x0,
+                       },
+                       .afepll_info_g = {
+                               .pll_reg = cpu_to_le16(0x9f0),
+                       }
+               },
+               .switch_clk_g = {
+                       .switch_clk_info = 0x0,
+                       .bbp_lmac_clk_reg_val = 0x0,
+                       .umac_clock_reg_config = 0x0,
+                       .qspi_uart_clock_reg_config = 0x0
+               }
+       } },
+       .buckboost_wakeup_cnt = 0x0,
+       .pmu_wakeup_wait = 0x0,
+       .shutdown_wait_time = 0x0,
+       .pmu_slp_clkout_sel = 0x0,
+       .wdt_prog_value = 0x0,
+       .wdt_soc_rst_delay = 0x0,
+       .dcdc_operation_mode = 0x0,
+       .soc_reset_wait_cnt = 0x0
+};
+
+static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
+
+/**
+ * rsi_set_default_parameters() - This function sets default parameters.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: none
+ */
+static void rsi_set_default_parameters(struct rsi_common *common)
+{
+       common->band = IEEE80211_BAND_2GHZ;
+       common->channel_width = BW_20MHZ;
+       common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+       common->channel = 1;
+       common->min_rate = 0xffff;
+       common->fsm_state = FSM_CARD_NOT_READY;
+       common->iface_down = true;
+}
+
+/**
+ * rsi_set_contention_vals() - This function sets the contention values for the
+ *                            backoff procedure.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+static void rsi_set_contention_vals(struct rsi_common *common)
+{
+       u8 ii = 0;
+
+       for (; ii < NUM_EDCA_QUEUES; ii++) {
+               common->tx_qinfo[ii].wme_params =
+                       (((common->edca_params[ii].cw_min / 2) +
+                         (common->edca_params[ii].aifs)) *
+                         WMM_SHORT_SLOT_TIME + SIFS_DURATION);
+               common->tx_qinfo[ii].weight = common->tx_qinfo[ii].wme_params;
+               common->tx_qinfo[ii].pkt_contended = 0;
+       }
+}
+
+/**
+ * rsi_send_internal_mgmt_frame() - This function sends management frames to
+ *                                 firmware. It also schedules the packet to
+ *                                 a queue for transmission.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_send_internal_mgmt_frame(struct rsi_common *common,
+                                       struct sk_buff *skb)
+{
+       struct skb_info *tx_params;
+
+       if (skb == NULL) {
+               rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__);
+               return -ENOMEM;
+       }
+       tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data;
+       tx_params->flags |= INTERNAL_MGMT_PKT;
+       skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb);
+       rsi_set_event(&common->tx_thread.event);
+       return 0;
+}
+
+/**
+ * rsi_load_radio_caps() - This function is used to send radio capabilities
+ *                        values to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_load_radio_caps(struct rsi_common *common)
+{
+       struct rsi_radio_caps *radio_caps;
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_hw *hw = adapter->hw;
+       u16 inx = 0;
+       u8 ii;
+       u8 radio_id = 0;
+       u16 gc[20] = {0xf0, 0xf0, 0xf0, 0xf0,
+                     0xf0, 0xf0, 0xf0, 0xf0,
+                     0xf0, 0xf0, 0xf0, 0xf0,
+                     0xf0, 0xf0, 0xf0, 0xf0,
+                     0xf0, 0xf0, 0xf0, 0xf0};
+       struct ieee80211_conf *conf = &hw->conf;
+       struct sk_buff *skb;
+
+       rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__);
+
+       skb = dev_alloc_skb(sizeof(struct rsi_radio_caps));
+
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, sizeof(struct rsi_radio_caps));
+       radio_caps = (struct rsi_radio_caps *)skb->data;
+
+       radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES);
+       radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8);
+
+       if (common->channel_width == BW_40MHZ) {
+               radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ);
+               radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ);
+               if (common->channel_width) {
+                       radio_caps->desc_word[5] =
+                               cpu_to_le16(common->channel_width << 12);
+                       radio_caps->desc_word[5] |= cpu_to_le16(FULL40M_ENABLE);
+               }
+
+               if (conf_is_ht40_minus(conf)) {
+                       radio_caps->desc_word[5] = 0;
+                       radio_caps->desc_word[5] |=
+                               cpu_to_le16(LOWER_20_ENABLE);
+                       radio_caps->desc_word[5] |=
+                               cpu_to_le16(LOWER_20_ENABLE >> 12);
+               }
+
+               if (conf_is_ht40_plus(conf)) {
+                       radio_caps->desc_word[5] = 0;
+                       radio_caps->desc_word[5] |=
+                               cpu_to_le16(UPPER_20_ENABLE);
+                       radio_caps->desc_word[5] |=
+                               cpu_to_le16(UPPER_20_ENABLE >> 12);
+               }
+       }
+
+       radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8);
+
+       for (ii = 0; ii < MAX_HW_QUEUES; ii++) {
+               radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3);
+               radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f);
+               radio_caps->qos_params[ii].aifsn_val_q = cpu_to_le16(2);
+               radio_caps->qos_params[ii].txop_q = 0;
+       }
+
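+       /* Overwrite the defaults for the data queues with the EDCA
+        * parameters supplied by mac80211.
+        */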
+       for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) {
+               radio_caps->qos_params[ii].cont_win_min_q =
+                       cpu_to_le16(common->edca_params[ii].cw_min);
+               radio_caps->qos_params[ii].cont_win_max_q =
+                       cpu_to_le16(common->edca_params[ii].cw_max);
+               radio_caps->qos_params[ii].aifsn_val_q =
+                       cpu_to_le16((common->edca_params[ii].aifs) << 8);
+               radio_caps->qos_params[ii].txop_q =
+                       cpu_to_le16(common->edca_params[ii].txop);
+       }
+
+       memcpy(&common->rate_pwr[0], &gc[0], 40);
+       for (ii = 0; ii < 20; ii++)
+               radio_caps->gcpd_per_rate[inx++] =
+                       cpu_to_le16(common->rate_pwr[ii]  & 0x00FF);
+
+       radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) -
+                                               FRAME_DESC_SZ) |
+                                              (RSI_WIFI_MGMT_Q << 12));
+
+
+       skb_put(skb, (sizeof(struct rsi_radio_caps)));
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to received packet.
+ * @msg_len: Length of the received packet.
+ * @type: Type of the received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
+                               u8 *msg,
+                               s32 msg_len,
+                               u8 type)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_tx_info *info;
+       struct skb_info *rx_params;
+       u8 pad_bytes = msg[4];
+       u8 pkt_recv;
+       struct sk_buff *skb;
+       char *buffer;
+
+       if (type == RX_DOT11_MGMT) {
+               if (!adapter->sc_nvifs)
+                       return -ENOLINK;
+
+               msg_len -= pad_bytes;
+               if ((msg_len <= 0) || (!msg)) {
+                       rsi_dbg(MGMT_RX_ZONE,
+                               "%s: Invalid rx msg of len = %d\n",
+                               __func__, msg_len);
+                       return -EINVAL;
+               }
+
+               skb = dev_alloc_skb(msg_len);
+               if (!skb) {
+                       rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n",
+                               __func__);
+                       return -ENOMEM;
+               }
+
+               buffer = skb_put(skb, msg_len);
+
+               memcpy(buffer,
+                      (u8 *)(msg +  FRAME_DESC_SZ + pad_bytes),
+                      msg_len);
+
+               pkt_recv = buffer[0];
+
+               info = IEEE80211_SKB_CB(skb);
+               rx_params = (struct skb_info *)info->driver_data;
+               rx_params->rssi = rsi_get_rssi(msg);
+               rx_params->channel = rsi_get_channel(msg);
+               rsi_indicate_pkt_to_os(common, skb);
+       } else {
+               rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__);
+       }
+
+       return 0;
+}
+
+/**
+ * rsi_hal_send_sta_notify_frame() - This function sends the station notify
+ *                                  frame to firmware.
+ * @common: Pointer to the driver private structure.
+ * @opmode: Operating mode of the device.
+ * @notify_event: Notification about station connection.
+ * @bssid: BSSID of the peer.
+ * @qos_enable: Whether QoS is enabled for the peer.
+ * @aid: Association ID (unique per station).
+ *
+ * Return: status: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_hal_send_sta_notify_frame(struct rsi_common *common,
+                                        u8 opmode,
+                                        u8 notify_event,
+                                        const unsigned char *bssid,
+                                        u8 qos_enable,
+                                        u16 aid)
+{
+       struct sk_buff *skb = NULL;
+       struct rsi_peer_notify *peer_notify;
+       u16 vap_id = 0;
+       int status;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__);
+
+       skb = dev_alloc_skb(sizeof(struct rsi_peer_notify));
+
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, sizeof(struct rsi_peer_notify));
+       peer_notify = (struct rsi_peer_notify *)skb->data;
+
+       peer_notify->command = cpu_to_le16(opmode << 1);
+
+       switch (notify_event) {
+       case STA_CONNECTED:
+               peer_notify->command |= cpu_to_le16(RSI_ADD_PEER);
+               break;
+       case STA_DISCONNECTED:
+               peer_notify->command |= cpu_to_le16(RSI_DELETE_PEER);
+               break;
+       default:
+               break;
+       }
+
+       peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4);
+       ether_addr_copy(peer_notify->mac_addr, bssid);
+
+       peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0);
+
+       peer_notify->desc_word[0] =
+               cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) |
+                           (RSI_WIFI_MGMT_Q << 12));
+       peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY);
+       peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8);
+
+       skb_put(skb, sizeof(struct rsi_peer_notify));
+
+       status = rsi_send_internal_mgmt_frame(common, skb);
+
+       if (!status && qos_enable) {
+               rsi_set_contention_vals(common);
+               status = rsi_load_radio_caps(common);
+       }
+       return status;
+}
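+
+/*
+ * A note on the layout of peer_notify->command as assembled above (read
+ * from the shifts in this function, not from a firmware spec): the opmode
+ * is placed at bit 1, the add/delete peer request (RSI_ADD_PEER or
+ * RSI_DELETE_PEER) is OR'd into the low bits, and the association ID
+ * occupies bits 4-15. On a successful STA_CONNECTED notification with QoS
+ * enabled, the EDCA contention values are refreshed and the radio
+ * capabilities frame is resent so the firmware picks up the new queue
+ * parameters.
+ */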
+
+/**
+ * rsi_send_aggregation_params_frame() - This function sends the ampdu
+ *                                      indication frame to firmware.
+ * @common: Pointer to the driver private structure.
+ * @tid: Traffic identifier.
+ * @ssn: Block-ack starting sequence number.
+ * @buf_size: Block-ack buffer size.
+ * @event: Aggregation event (ADDBA done/DELBA for TX or RX).
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+int rsi_send_aggregation_params_frame(struct rsi_common *common,
+                                     u16 tid,
+                                     u16 ssn,
+                                     u8 buf_size,
+                                     u8 event)
+{
+       struct sk_buff *skb = NULL;
+       struct rsi_mac_frame *mgmt_frame;
+       u8 peer_id = 0;
+
+       skb = dev_alloc_skb(FRAME_DESC_SZ);
+
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__);
+
+       mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+       mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND);
+
+       if (event == STA_TX_ADDBA_DONE) {
+               mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
+               mgmt_frame->desc_word[5] = cpu_to_le16(buf_size);
+               mgmt_frame->desc_word[7] =
+               cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8)));
+       } else if (event == STA_RX_ADDBA_DONE) {
+               mgmt_frame->desc_word[4] = cpu_to_le16(ssn);
+               mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+                                                      (START_AMPDU_AGGR << 4) |
+                                                      (RX_BA_INDICATION << 5) |
+                                                      (peer_id << 8));
+       } else if (event == STA_TX_DELBA) {
+               mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+                                                      (STOP_AMPDU_AGGR << 4) |
+                                                      (peer_id << 8));
+       } else if (event == STA_RX_DELBA) {
+               mgmt_frame->desc_word[7] = cpu_to_le16(tid |
+                                                      (STOP_AMPDU_AGGR << 4) |
+                                                      (RX_BA_INDICATION << 5) |
+                                                      (peer_id << 8));
+       }
+
+       skb_put(skb, FRAME_DESC_SZ);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_program_bb_rf() - This function starts base band and RF programming.
+ *                      This is called after initial configurations are done.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+static int rsi_program_bb_rf(struct rsi_common *common)
+{
+       struct sk_buff *skb;
+       struct rsi_mac_frame *mgmt_frame;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__);
+
+       skb = dev_alloc_skb(FRAME_DESC_SZ);
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+       mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+       mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA);
+       mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint << 8);
+
+       if (common->rf_reset) {
+               mgmt_frame->desc_word[7] =  cpu_to_le16(RF_RESET_ENABLE);
+               rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n",
+                       __func__);
+               common->rf_reset = 0;
+       }
+       common->bb_rf_prog_count = 1;
+       mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET |
+                                    BBP_REG_WRITE | (RSI_RF_TYPE << 4));
+       skb_put(skb, FRAME_DESC_SZ);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_vap_capabilities() - This function sends the VAP capabilities to
+ *                             the firmware.
+ * @common: Pointer to the driver private structure.
+ * @mode: Operating mode of the device.
+ *
+ * Return: 0 on success, corresponding negative error code on failure.
+ */
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
+{
+       struct sk_buff *skb = NULL;
+       struct rsi_vap_caps *vap_caps;
+       u16 vap_id = 0;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__);
+
+       skb = dev_alloc_skb(sizeof(struct rsi_vap_caps));
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, sizeof(struct rsi_vap_caps));
+       vap_caps = (struct rsi_vap_caps *)skb->data;
+
+       vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) -
+                                            FRAME_DESC_SZ) |
+                                            (RSI_WIFI_MGMT_Q << 12));
+       vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES);
+       vap_caps->desc_word[4] = cpu_to_le16(mode |
+                                            (common->channel_width << 8));
+       vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) |
+                                            (common->mac_id << 4) |
+                                            common->radio_id);
+
+       memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN);
+       vap_caps->keep_alive_period = cpu_to_le16(90);
+       vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD);
+
+       vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
+       vap_caps->default_mgmt_rate = 0;
+       if (conf_is_ht40(&common->priv->hw->conf)) {
+               vap_caps->default_ctrl_rate =
+                               cpu_to_le32(RSI_RATE_6 | FULL40M_ENABLE << 16);
+       } else {
+               vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
+       }
+       vap_caps->default_data_rate = 0;
+       vap_caps->beacon_interval = cpu_to_le16(200);
+       vap_caps->dtim_period = cpu_to_le16(4);
+
+       skb_put(skb, sizeof(*vap_caps));
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_hal_load_key() - This function is used to load keys within the firmware.
+ * @common: Pointer to the driver private structure.
+ * @data: Pointer to the key data.
+ * @key_len: Key length to be loaded.
+ * @key_type: Type of key: GROUP/PAIRWISE.
+ * @key_id: Key index.
+ * @cipher: Type of cipher used.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_hal_load_key(struct rsi_common *common,
+                    u8 *data,
+                    u16 key_len,
+                    u8 key_type,
+                    u8 key_id,
+                    u32 cipher)
+{
+       struct sk_buff *skb = NULL;
+       struct rsi_set_key *set_key;
+       u16 key_descriptor = 0;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__);
+
+       skb = dev_alloc_skb(sizeof(struct rsi_set_key));
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, sizeof(struct rsi_set_key));
+       set_key = (struct rsi_set_key *)skb->data;
+
+       if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
+           (cipher == WLAN_CIPHER_SUITE_WEP104)) {
+               key_len += 1;
+               key_descriptor |= BIT(2);
+               if (key_len >= 13)
+                       key_descriptor |= BIT(3);
+       } else if (cipher != KEY_TYPE_CLEAR) {
+               key_descriptor |= BIT(4);
+               if (key_type == RSI_PAIRWISE_KEY)
+                       key_id = 0;
+               if (cipher == WLAN_CIPHER_SUITE_TKIP)
+                       key_descriptor |= BIT(5);
+       }
+       key_descriptor |= (key_type | BIT(13) | (key_id << 14));
+
+       set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) -
+                                           FRAME_DESC_SZ) |
+                                           (RSI_WIFI_MGMT_Q << 12));
+       set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ);
+       set_key->desc_word[4] = cpu_to_le16(key_descriptor);
+
+       if ((cipher == WLAN_CIPHER_SUITE_WEP40) ||
+           (cipher == WLAN_CIPHER_SUITE_WEP104)) {
+               memcpy(&set_key->key[key_id][1],
+                      data,
+                      key_len * 2);
+       } else {
+               memcpy(&set_key->key[0][0], data, key_len);
+       }
+
+       memcpy(set_key->tx_mic_key, &data[16], 8);
+       memcpy(set_key->rx_mic_key, &data[24], 8);
+
+       skb_put(skb, sizeof(struct rsi_set_key));
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
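+
+/*
+ * key_descriptor bits as set above (derived from this function rather than
+ * firmware documentation): BIT(2) marks a WEP cipher, BIT(3) a long WEP
+ * key (effective length >= 13, i.e. WEP-104), BIT(4) a non-WEP cipher and
+ * BIT(5) TKIP; BIT(13) is always set, the key index sits in bits 14-15 and
+ * the GROUP/PAIRWISE key type is OR'd into the low bits. For pairwise keys
+ * the key index is forced to 0.
+ */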
+
+/**
+ * rsi_load_bootup_params() - This function sends the bootup parameters to the
+ *                           firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_load_bootup_params(struct rsi_common *common)
+{
+       struct sk_buff *skb;
+       struct rsi_boot_params *boot_params;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
+       skb = dev_alloc_skb(sizeof(struct rsi_boot_params));
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, sizeof(struct rsi_boot_params));
+       boot_params = (struct rsi_boot_params *)skb->data;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s:\n", __func__);
+
+       if (common->channel_width == BW_40MHZ) {
+               memcpy(&boot_params->bootup_params,
+                      &boot_params_40,
+                      sizeof(struct bootup_params));
+               rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
+                       UMAC_CLK_40BW);
+               boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40BW);
+       } else {
+               memcpy(&boot_params->bootup_params,
+                      &boot_params_20,
+                      sizeof(struct bootup_params));
+               if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
+                       boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_20BW);
+                       rsi_dbg(MGMT_TX_ZONE,
+                               "%s: Packet 20MHZ <=== %d\n", __func__,
+                               UMAC_CLK_20BW);
+               } else {
+                       boot_params->desc_word[7] = cpu_to_le16(UMAC_CLK_40MHZ);
+                       rsi_dbg(MGMT_TX_ZONE,
+                               "%s: Packet 20MHZ <=== %d\n", __func__,
+                               UMAC_CLK_40MHZ);
+               }
+       }
+
+       /* Bits {0:11} indicate the length of the packet
+        * Bits {12:15} indicate the host queue number
+        */
+       boot_params->desc_word[0] = cpu_to_le16(sizeof(struct bootup_params) |
+                                   (RSI_WIFI_MGMT_Q << 12));
+       boot_params->desc_word[1] = cpu_to_le16(BOOTUP_PARAMS_REQUEST);
+
+       skb_put(skb, sizeof(struct rsi_boot_params));
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_send_reset_mac() - This function prepares reset MAC request and sends an
+ *                       internal management frame to indicate it to firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_send_reset_mac(struct rsi_common *common)
+{
+       struct sk_buff *skb;
+       struct rsi_mac_frame *mgmt_frame;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending reset MAC frame\n", __func__);
+
+       skb = dev_alloc_skb(FRAME_DESC_SZ);
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+       mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+       mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ);
+       mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8);
+
+       skb_put(skb, FRAME_DESC_SZ);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_set_channel() - This function programs the channel.
+ * @common: Pointer to the driver private structure.
+ * @channel: Channel value to be set.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+int rsi_set_channel(struct rsi_common *common, u16 channel)
+{
+       struct sk_buff *skb = NULL;
+       struct rsi_mac_frame *mgmt_frame;
+
+       rsi_dbg(MGMT_TX_ZONE,
+               "%s: Sending scan req frame\n", __func__);
+
+       skb = dev_alloc_skb(FRAME_DESC_SZ);
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+       if (common->band == IEEE80211_BAND_5GHZ) {
+               if ((channel >= 36) && (channel <= 64))
+                       channel = ((channel - 32) / 4);
+               else if ((channel > 64) && (channel <= 140))
+                       channel = ((channel - 102) / 4) + 8;
+               else if (channel >= 149)
+                       channel = ((channel - 151) / 4) + 18;
+               else
+                       return -EINVAL;
+       } else {
+               if (channel > 14) {
+                       rsi_dbg(ERR_ZONE, "%s: Invalid chno %d, band = %d\n",
+                               __func__, channel, common->band);
+                       return -EINVAL;
+               }
+       }
+
+       mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+       mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
+       mgmt_frame->desc_word[4] = cpu_to_le16(channel);
+
+       mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET |
+                                              BBP_REG_WRITE |
+                                              (RSI_RF_TYPE << 4));
+
+       mgmt_frame->desc_word[5] = cpu_to_le16(0x01);
+
+       if (common->channel_width == BW_40MHZ)
+               mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8);
+
+       common->channel = channel;
+
+       skb_put(skb, FRAME_DESC_SZ);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
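+
+/*
+ * Example of the 5GHz mapping above: channel 36 is sent as (36 - 32) / 4 = 1
+ * and channel 64 as (64 - 32) / 4 = 8, so the firmware receives a compact
+ * channel index rather than the IEEE channel number; 2.4GHz channels (1-14)
+ * are passed through unchanged.
+ */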
+
+/**
+ * rsi_compare() - This function is used to compare two integers
+ * @a: pointer to the first integer
+ * @b: pointer to the second integer
+ *
+ * Return: 0 if both are equal, -1 if the first is larger, else 1
+ */
+static int rsi_compare(const void *a, const void *b)
+{
+       u16 _a = *(const u16 *)(a);
+       u16 _b = *(const u16 *)(b);
+
+       if (_a > _b)
+               return -1;
+
+       if (_a < _b)
+               return 1;
+
+       return 0;
+}
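+
+/*
+ * Note: with the inverted return values above, sort() arranges the rate
+ * table in descending order, so the highest selected rate ends up at
+ * index 0 of selected_rates[].
+ */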
+
+/**
+ * rsi_map_rates() - This function is used to map selected rates to hw rates.
+ * @rate: The standard rate to be mapped.
+ * @offset: Offset that will be returned.
+ *
+ * Return: false if it is an MCS rate, else true.
+ */
+static bool rsi_map_rates(u16 rate, int *offset)
+{
+       int kk;
+       for (kk = 0; kk < ARRAY_SIZE(rsi_mcsrates); kk++) {
+               if (rate == mcs[kk]) {
+                       *offset = kk;
+                       return false;
+               }
+       }
+
+       for (kk = 0; kk < ARRAY_SIZE(rsi_rates); kk++) {
+               if (rate == rsi_rates[kk].bitrate / 5) {
+                       *offset = kk;
+                       break;
+               }
+       }
+       return true;
+}
+
+/**
+ * rsi_send_auto_rate_request() - This function sets the rates for the
+ *                               connection and sends the auto-rate request
+ *                               to the firmware.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, corresponding error code on failure.
+ */
+static int rsi_send_auto_rate_request(struct rsi_common *common)
+{
+       struct sk_buff *skb;
+       struct rsi_auto_rate *auto_rate;
+       int ii = 0, jj = 0, kk = 0;
+       struct ieee80211_hw *hw = common->priv->hw;
+       u8 band = hw->conf.chandef.chan->band;
+       u8 num_supported_rates = 0;
+       u8 rate_offset = 0;
+       u32 rate_bitmap = common->bitrate_mask[band];
+
+       u16 *selected_rates, min_rate;
+
+       skb = dev_alloc_skb(sizeof(struct rsi_auto_rate));
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       selected_rates = kmalloc(2 * RSI_TBL_SZ, GFP_KERNEL);
+       if (!selected_rates) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
+                       __func__);
+               dev_kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, sizeof(struct rsi_auto_rate));
+       memset(selected_rates, 0, 2 * RSI_TBL_SZ);
+
+       auto_rate = (struct rsi_auto_rate *)skb->data;
+
+       auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f));
+       auto_rate->collision_tolerance = cpu_to_le16(3);
+       auto_rate->failure_limit = cpu_to_le16(3);
+       auto_rate->initial_boundary = cpu_to_le16(3);
+       auto_rate->max_threshold_limt = cpu_to_le16(27);
+
+       auto_rate->desc_word[1] = cpu_to_le16(AUTO_RATE_IND);
+
+       if (common->channel_width == BW_40MHZ)
+               auto_rate->desc_word[7] |= cpu_to_le16(1);
+
+       if (band == IEEE80211_BAND_2GHZ)
+               min_rate = STD_RATE_01;
+       else
+               min_rate = STD_RATE_06;
+
+       for (ii = 0, jj = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
+               if (rate_bitmap & BIT(ii)) {
+                       selected_rates[jj++] = (rsi_rates[ii].bitrate / 5);
+                       rate_offset++;
+               }
+       }
+       num_supported_rates = jj;
+
+       if (common->vif_info[0].is_ht) {
+               for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
+                       selected_rates[jj++] = mcs[ii];
+               num_supported_rates += ARRAY_SIZE(mcs);
+               rate_offset += ARRAY_SIZE(mcs);
+       }
+
+       if (rate_offset < (RSI_TBL_SZ / 2) - 1) {
+               for (ii = jj; ii < (RSI_TBL_SZ / 2); ii++) {
+                       selected_rates[jj++] = min_rate;
+                       rate_offset++;
+               }
+       }
+
+       sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
+
+       /* mapping the rates to RSI rates */
+       for (ii = 0; ii < jj; ii++) {
+               if (rsi_map_rates(selected_rates[ii], &kk)) {
+                       auto_rate->supported_rates[ii] =
+                               cpu_to_le16(rsi_rates[kk].hw_value);
+               } else {
+                       auto_rate->supported_rates[ii] =
+                               cpu_to_le16(rsi_mcsrates[kk]);
+               }
+       }
+
+       /* loading HT rates in the bottom half of the auto rate table */
+       if (common->vif_info[0].is_ht) {
+               if (common->vif_info[0].sgi)
+                       auto_rate->supported_rates[rate_offset++] =
+                               cpu_to_le16(RSI_RATE_MCS7_SG);
+
+               for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1;
+                    ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) {
+                       if (common->vif_info[0].sgi)
+                               auto_rate->supported_rates[ii++] =
+                                       cpu_to_le16(rsi_mcsrates[kk] | BIT(9));
+                       auto_rate->supported_rates[ii] =
+                               cpu_to_le16(rsi_mcsrates[kk--]);
+               }
+
+               for (; ii < RSI_TBL_SZ; ii++) {
+                       auto_rate->supported_rates[ii] =
+                               cpu_to_le16(rsi_mcsrates[0]);
+               }
+       }
+
+       auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2);
+       auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2);
+       auto_rate->desc_word[7] |= cpu_to_le16(0 << 8);
+       num_supported_rates *= 2;
+
+       auto_rate->desc_word[0] = cpu_to_le16((sizeof(*auto_rate) -
+                                              FRAME_DESC_SZ) |
+                                              (RSI_WIFI_MGMT_Q << 12));
+
+       skb_put(skb, sizeof(struct rsi_auto_rate));
+       kfree(selected_rates);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
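+
+/*
+ * Rough shape of the auto-rate table built above: legacy rates enabled in
+ * the bitrate mask come first, MCS rates are appended when the vif is HT,
+ * the table is padded with the band's minimum rate up to half of
+ * RSI_TBL_SZ, sorted in descending order and mapped to hardware rate
+ * values. With short GI enabled, the bottom half additionally carries each
+ * MCS rate with BIT(9) set (presumably the short-GI flag).
+ */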
+
+/**
+ * rsi_inform_bss_status() - This function informs the firmware about the BSS
+ *                          status by sending a station notify internal
+ *                          management frame.
+ * @common: Pointer to the driver private structure.
+ * @status: BSS status (non-zero when associated).
+ * @bssid: BSSID of the peer.
+ * @qos_enable: Whether QoS is enabled for the peer.
+ * @aid: Association ID (unique per station).
+ *
+ * Return: None.
+ */
+void rsi_inform_bss_status(struct rsi_common *common,
+                          u8 status,
+                          const unsigned char *bssid,
+                          u8 qos_enable,
+                          u16 aid)
+{
+       if (status) {
+               rsi_hal_send_sta_notify_frame(common,
+                                             NL80211_IFTYPE_STATION,
+                                             STA_CONNECTED,
+                                             bssid,
+                                             qos_enable,
+                                             aid);
+               if (common->min_rate == 0xffff)
+                       rsi_send_auto_rate_request(common);
+       } else {
+               rsi_hal_send_sta_notify_frame(common,
+                                             NL80211_IFTYPE_STATION,
+                                             STA_DISCONNECTED,
+                                             bssid,
+                                             qos_enable,
+                                             aid);
+       }
+}
+
+/**
+ * rsi_eeprom_read() - This function sends a frame to read the mac address
+ *                    from the eeprom.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_eeprom_read(struct rsi_common *common)
+{
+       struct rsi_mac_frame *mgmt_frame;
+       struct sk_buff *skb;
+
+       rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__);
+
+       skb = dev_alloc_skb(FRAME_DESC_SZ);
+       if (!skb) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       mgmt_frame = (struct rsi_mac_frame *)skb->data;
+
+       /* FrameType */
+       mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE);
+       mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
+       /* Number of bytes to read */
+       mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN +
+                                              WLAN_MAC_MAGIC_WORD_LEN +
+                                              WLAN_HOST_MODE_LEN +
+                                              WLAN_FW_VERSION_LEN);
+       /* Address to read */
+       mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR);
+
+       skb_put(skb, FRAME_DESC_SZ);
+
+       return rsi_send_internal_mgmt_frame(common, skb);
+}
+
+/**
+ * rsi_handle_ta_confirm_type() - This function handles the confirm frames.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_handle_ta_confirm_type(struct rsi_common *common,
+                                     u8 *msg)
+{
+       u8 sub_type = (msg[15] & 0xff);
+
+       switch (sub_type) {
+       case BOOTUP_PARAMS_REQUEST:
+               rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n",
+                       __func__);
+               if (common->fsm_state == FSM_BOOT_PARAMS_SENT) {
+                       if (rsi_eeprom_read(common)) {
+                               common->fsm_state = FSM_CARD_NOT_READY;
+                               goto out;
+                       } else {
+                               common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
+                       }
+               } else {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Received bootup params cfm in %d state\n",
+                                __func__, common->fsm_state);
+                       return 0;
+               }
+               break;
+
+       case EEPROM_READ_TYPE:
+               if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) {
+                       if (msg[16] == MAGIC_WORD) {
+                               u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN
+                                            + WLAN_MAC_MAGIC_WORD_LEN);
+                               memcpy(common->mac_addr,
+                                      &msg[offset],
+                                      ETH_ALEN);
+                               memcpy(&common->fw_ver,
+                                      &msg[offset + ETH_ALEN],
+                                      sizeof(struct version_info));
+
+                       } else {
+                               common->fsm_state = FSM_CARD_NOT_READY;
+                               break;
+                       }
+                       if (rsi_send_reset_mac(common))
+                               goto out;
+                       else
+                               common->fsm_state = FSM_RESET_MAC_SENT;
+               } else {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Received eeprom mac addr in %d state\n",
+                               __func__, common->fsm_state);
+                       return 0;
+               }
+               break;
+
+       case RESET_MAC_REQ:
+               if (common->fsm_state == FSM_RESET_MAC_SENT) {
+                       rsi_dbg(FSM_ZONE, "%s: Reset MAC cfm received\n",
+                               __func__);
+
+                       if (rsi_load_radio_caps(common))
+                               goto out;
+                       else
+                               common->fsm_state = FSM_RADIO_CAPS_SENT;
+               } else {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Received reset mac cfm in %d state\n",
+                                __func__, common->fsm_state);
+                       return 0;
+               }
+               break;
+
+       case RADIO_CAPABILITIES:
+               if (common->fsm_state == FSM_RADIO_CAPS_SENT) {
+                       common->rf_reset = 1;
+                       if (rsi_program_bb_rf(common)) {
+                               goto out;
+                       } else {
+                               common->fsm_state = FSM_BB_RF_PROG_SENT;
+                               rsi_dbg(FSM_ZONE, "%s: Radio cap cfm received\n",
+                                       __func__);
+                       }
+               } else {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Received radio caps cfm in %d state\n",
+                                __func__, common->fsm_state);
+                       return 0;
+               }
+               break;
+
+       case BB_PROG_VALUES_REQUEST:
+       case RF_PROG_VALUES_REQUEST:
+       case BBP_PROG_IN_TA:
+               rsi_dbg(FSM_ZONE, "%s: BB/RF cfm received\n", __func__);
+               if (common->fsm_state == FSM_BB_RF_PROG_SENT) {
+                       common->bb_rf_prog_count--;
+                       if (!common->bb_rf_prog_count) {
+                               common->fsm_state = FSM_MAC_INIT_DONE;
+                               return rsi_mac80211_attach(common);
+                       }
+               } else {
+                       goto out;
+               }
+               break;
+
+       default:
+               rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n",
+                       __func__);
+               break;
+       }
+       return 0;
+out:
+       rsi_dbg(ERR_ZONE, "%s: Unable to send pkt/Invalid frame received\n",
+               __func__);
+       return -EINVAL;
+}
+
+/**
+ * rsi_mgmt_pkt_recv() - This function processes the management packets
+ *                      received from the hardware.
+ * @common: Pointer to the driver private structure.
+ * @msg: Pointer to the received packet.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg)
+{
+       s32 msg_len = (le16_to_cpu(*(__le16 *)&msg[0]) & 0x0fff);
+       u16 msg_type = (msg[2]);
+
+       rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n",
+               __func__, msg_len, msg_type);
+
+       if (msg_type == TA_CONFIRM_TYPE) {
+               return rsi_handle_ta_confirm_type(common, msg);
+       } else if (msg_type == CARD_READY_IND) {
+               rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n",
+                       __func__);
+               if (common->fsm_state == FSM_CARD_NOT_READY) {
+                       rsi_set_default_parameters(common);
+
+                       if (rsi_load_bootup_params(common))
+                               return -ENOMEM;
+                       else
+                               common->fsm_state = FSM_BOOT_PARAMS_SENT;
+               } else {
+                       return -EINVAL;
+               }
+       } else if (msg_type == TX_STATUS_IND) {
+               if (msg[15] == PROBEREQ_CONFIRM) {
+                       common->mgmt_q_block = false;
+                       rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n",
+                               __func__);
+               }
+       } else {
+               return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type);
+       }
+       return 0;
+}
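+
+/*
+ * Bring-up sequence as driven by the confirms handled above: card ready ->
+ * boot params -> EEPROM read (MAC address and firmware version) -> reset
+ * MAC -> radio capabilities -> BB/RF programming -> mac80211 attach
+ * (FSM_MAC_INIT_DONE). A failure at any step either drops the FSM back to
+ * FSM_CARD_NOT_READY or returns an error to the caller.
+ */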
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
new file mode 100644 (file)
index 0000000..8e48e72
--- /dev/null
@@ -0,0 +1,196 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "rsi_mgmt.h"
+
+/**
+ * rsi_send_data_pkt() - This function sends the received data packet from
+ *                      driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_hdr *tmp_hdr = NULL;
+       struct ieee80211_tx_info *info;
+       struct skb_info *tx_params;
+       struct ieee80211_bss_conf *bss = NULL;
+       int status = -EINVAL;
+       u8 ieee80211_size = MIN_802_11_HDR_LEN;
+       u8 extnd_size = 0;
+       __le16 *frame_desc;
+       u16 seq_num = 0;
+
+       info = IEEE80211_SKB_CB(skb);
+       bss = &info->control.vif->bss_conf;
+       tx_params = (struct skb_info *)info->driver_data;
+
+       if (!bss->assoc)
+               goto err;
+
+       tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
+       seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
+
+       extnd_size = ((uintptr_t)skb->data & 0x3);
+
+       if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) {
+               rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+               status = -ENOSPC;
+               goto err;
+       }
+
+       skb_push(skb, (FRAME_DESC_SZ + extnd_size));
+       frame_desc = (__le16 *)&skb->data[0];
+       memset((u8 *)frame_desc, 0, FRAME_DESC_SZ);
+
+       if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
+               ieee80211_size += 2;
+               frame_desc[6] |= cpu_to_le16(BIT(12));
+       }
+
+       if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
+           (common->secinfo.security_enable)) {
+               if (rsi_is_cipher_wep(common))
+                       ieee80211_size += 4;
+               else
+                       ieee80211_size += 8;
+               frame_desc[6] |= cpu_to_le16(BIT(15));
+       }
+
+       frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+                                   (RSI_WIFI_DATA_Q << 12));
+       frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8);
+
+       if (common->min_rate != 0xffff) {
+               /* Send fixed rate */
+               frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE);
+               frame_desc[4] = cpu_to_le16(common->min_rate);
+       }
+
+       frame_desc[6] |= cpu_to_le16(seq_num & 0xfff);
+       frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) |
+                                   (skb->priority & 0xf) |
+                                   (tx_params->sta_id << 8));
+
+       status = adapter->host_intf_write_pkt(common->priv,
+                                             skb->data,
+                                             skb->len);
+       if (status)
+               rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n",
+                       __func__);
+
+err:
+       ++common->tx_stats.total_tx_pkt_freed[skb->priority];
+       rsi_indicate_tx_status(common->priv, skb, status);
+       return status;
+}
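+
+/*
+ * Data frame descriptor words as filled in above (one reading of this
+ * function, not a hardware manual): word 0 carries the payload length and
+ * RSI_WIFI_DATA_Q in the top nibble, word 2 the extension size and 802.11
+ * header length, words 3-4 the fixed rate when min_rate is set, word 6 the
+ * sequence number plus BIT(12) for QoS data and BIT(15) when the frame is
+ * encrypted, and word 7 the TID, skb priority and station id.
+ */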
+
+/**
+ * rsi_send_mgmt_pkt() - This function sends the received management packet
+ *                      from driver to device.
+ * @common: Pointer to the driver private structure.
+ * @skb: Pointer to the socket buffer structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_send_mgmt_pkt(struct rsi_common *common,
+                     struct sk_buff *skb)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct ieee80211_hdr *wh = NULL;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_bss_conf *bss = NULL;
+       struct skb_info *tx_params;
+       int status = -E2BIG;
+       __le16 *msg = NULL;
+       u8 extnd_size = 0;
+       u8 vap_id = 0;
+
+       info = IEEE80211_SKB_CB(skb);
+       tx_params = (struct skb_info *)info->driver_data;
+       extnd_size = ((uintptr_t)skb->data & 0x3);
+
+       if (tx_params->flags & INTERNAL_MGMT_PKT) {
+               if ((extnd_size) > skb_headroom(skb)) {
+                       rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__);
+                       dev_kfree_skb(skb);
+                       return -ENOSPC;
+               }
+               skb_push(skb, extnd_size);
+               skb->data[extnd_size + 4] = extnd_size;
+               status = adapter->host_intf_write_pkt(common->priv,
+                                                     (u8 *)skb->data,
+                                                     skb->len);
+               if (status) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Failed to write the packet\n", __func__);
+               }
+               dev_kfree_skb(skb);
+               return status;
+       }
+
+       bss = &info->control.vif->bss_conf;
+       wh = (struct ieee80211_hdr *)&skb->data[0];
+
+       if (FRAME_DESC_SZ > skb_headroom(skb))
+               goto err;
+
+       skb_push(skb, FRAME_DESC_SZ);
+       memset(skb->data, 0, FRAME_DESC_SZ);
+       msg = (__le16 *)skb->data;
+
+       if (skb->len > MAX_MGMT_PKT_SIZE) {
+               rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__);
+               goto err;
+       }
+
+       msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) |
+                           (RSI_WIFI_MGMT_Q << 12));
+       msg[1] = cpu_to_le16(TX_DOT11_MGMT);
+       msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8);
+       msg[3] = cpu_to_le16(RATE_INFO_ENABLE);
+       msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4);
+
+       if (wh->addr1[0] & BIT(0))
+               msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
+
+       if (common->band == IEEE80211_BAND_2GHZ)
+               msg[4] = cpu_to_le16(RSI_11B_MODE);
+       else
+               msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
+
+       /* Indicate to firmware to give cfm */
+       if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) {
+               msg[1] |= cpu_to_le16(BIT(10));
+               msg[7] = cpu_to_le16(PROBEREQ_CONFIRM);
+               common->mgmt_q_block = true;
+       }
+
+       msg[7] |= cpu_to_le16(vap_id << 8);
+
+       status = adapter->host_intf_write_pkt(common->priv,
+                                             (u8 *)msg,
+                                             skb->len);
+       if (status)
+               rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__);
+
+err:
+       rsi_indicate_tx_status(common->priv, skb, status);
+       return status;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
new file mode 100644 (file)
index 0000000..852453f
--- /dev/null
@@ -0,0 +1,850 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include "rsi_sdio.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
+ * @rw: Read/write
+ * @func: function number
+ * @raw: indicates whether to perform read after write
+ * @address: address to which to read/write
+ * @writedata: data to write
+ *
+ * Return: argument
+ */
+static u32 rsi_sdio_set_cmd52_arg(bool rw,
+                                 u8 func,
+                                 u8 raw,
+                                 u32 address,
+                                 u8 writedata)
+{
+       return ((rw & 1) << 31) | ((func & 0x7) << 28) |
+               ((raw & 1) << 27) | (1 << 26) |
+               ((address & 0x1FFFF) << 9) | (1 << 8) |
+               (writedata & 0xFF);
+}
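+
+/*
+ * The argument built above follows the SDIO IO_RW_DIRECT (CMD52) format:
+ * bit 31 is the R/W flag, bits 28-30 the function number, bit 27 the RAW
+ * (read-after-write) flag, bits 9-25 the register address and bits 0-7 the
+ * write data; bits 26 and 8 are simply set to 1 by this helper.
+ */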
+
+/**
+ * rsi_cmd52writebyte() - This function issues cmd52 byte write onto the card.
+ * @card: Pointer to the mmc_card.
+ * @address: Address to write.
+ * @byte: Data to write.
+ *
+ * Return: Write status.
+ */
+static int rsi_cmd52writebyte(struct mmc_card *card,
+                             u32 address,
+                             u8 byte)
+{
+       struct mmc_command io_cmd;
+       u32 arg;
+
+       memset(&io_cmd, 0, sizeof(io_cmd));
+       arg = rsi_sdio_set_cmd52_arg(1, 0, 0, address, byte);
+       io_cmd.opcode = SD_IO_RW_DIRECT;
+       io_cmd.arg = arg;
+       io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+       return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+/**
+ * rsi_cmd52readbyte() - This function issues cmd52 byte read onto the card.
+ * @card: Pointer to the mmc_card.
+ * @address: Address to read from.
+ * @byte: Variable to store read value.
+ *
+ * Return: Read status.
+ */
+static int rsi_cmd52readbyte(struct mmc_card *card,
+                            u32 address,
+                            u8 *byte)
+{
+       struct mmc_command io_cmd;
+       u32 arg;
+       int err;
+
+       memset(&io_cmd, 0, sizeof(io_cmd));
+       arg = rsi_sdio_set_cmd52_arg(0, 0, 0, address, 0);
+       io_cmd.opcode = SD_IO_RW_DIRECT;
+       io_cmd.arg = arg;
+       io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+       err = mmc_wait_for_cmd(card->host, &io_cmd, 0);
+       if ((!err) && (byte))
+               *byte =  io_cmd.resp[0] & 0xFF;
+       return err;
+}
+
+/**
+ * rsi_issue_sdiocommand() - This function issues sdio commands.
+ * @func: Pointer to the sdio_func structure.
+ * @opcode: Opcode value.
+ * @arg: Arguments to pass.
+ * @flags: Flags which are set.
+ * @resp: Pointer to store response.
+ *
+ * Return: err: command status as 0 or -1.
+ */
+static int rsi_issue_sdiocommand(struct sdio_func *func,
+                                u32 opcode,
+                                u32 arg,
+                                u32 flags,
+                                u32 *resp)
+{
+       struct mmc_command cmd;
+       struct mmc_host *host;
+       int err;
+
+       host = func->card->host;
+
+       memset(&cmd, 0, sizeof(struct mmc_command));
+       cmd.opcode = opcode;
+       cmd.arg = arg;
+       cmd.flags = flags;
+       err = mmc_wait_for_cmd(host, &cmd, 3);
+
+       if ((!err) && (resp))
+               *resp = cmd.resp[0];
+
+       return err;
+}
+
+/**
+ * rsi_handle_interrupt() - This function is called upon the occurrence
+ *                         of an interrupt.
+ * @function: Pointer to the sdio_func structure.
+ *
+ * Return: None.
+ */
+static void rsi_handle_interrupt(struct sdio_func *function)
+{
+       struct rsi_hw *adapter = sdio_get_drvdata(function);
+
+       sdio_release_host(function);
+       rsi_interrupt_handler(adapter);
+       sdio_claim_host(function);
+}
+
+/**
+ * rsi_reset_card() - This function resets and re-initializes the card.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: None.
+ */
+static void rsi_reset_card(struct sdio_func *pfunction)
+{
+       int ret = 0;
+       int err;
+       struct mmc_card *card = pfunction->card;
+       struct mmc_host *host = card->host;
+       s32 bit = (fls(host->ocr_avail) - 1);
+       u8 cmd52_resp;
+       u32 clock, resp, i;
+       u16 rca;
+
+       /* Reset 9110 chip */
+       ret = rsi_cmd52writebyte(pfunction->card,
+                                SDIO_CCCR_ABORT,
+                                (1 << 3));
+
+       /* The card will not send a response as it is being reset immediately,
+        * so expect a timeout status from the host controller.
+        */
+       if (ret != -ETIMEDOUT)
+               rsi_dbg(ERR_ZONE, "%s: Reset failed : %d\n", __func__, ret);
+
+       /* Wait a few milliseconds for any residual charge to dissipate */
+       msleep(20);
+
+       /* Initialize the SDIO card */
+       host->ios.vdd = bit;
+       host->ios.chip_select = MMC_CS_DONTCARE;
+       host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+       host->ios.power_mode = MMC_POWER_UP;
+       host->ios.bus_width = MMC_BUS_WIDTH_1;
+       host->ios.timing = MMC_TIMING_LEGACY;
+       host->ops->set_ios(host, &host->ios);
+
+       /*
+        * This delay should be sufficient to allow the power supply
+        * to reach the minimum voltage.
+        */
+       msleep(20);
+
+       host->ios.clock = host->f_min;
+       host->ios.power_mode = MMC_POWER_ON;
+       host->ops->set_ios(host, &host->ios);
+
+       /*
+        * This delay must be at least 74 clock cycles, or 1 ms, or the
+        * time required to reach a stable voltage.
+        */
+       msleep(20);
+
+       /* Issue CMD0. Goto idle state */
+       host->ios.chip_select = MMC_CS_HIGH;
+       host->ops->set_ios(host, &host->ios);
+       msleep(20);
+       err = rsi_issue_sdiocommand(pfunction,
+                                   MMC_GO_IDLE_STATE,
+                                   0,
+                                   (MMC_RSP_NONE | MMC_CMD_BC),
+                                   NULL);
+       host->ios.chip_select = MMC_CS_DONTCARE;
+       host->ops->set_ios(host, &host->ios);
+       msleep(20);
+       host->use_spi_crc = 0;
+
+       if (err)
+               rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
+
+       if (!host->ocr_avail) {
+               /* Issue CMD5, arg = 0 */
+               err = rsi_issue_sdiocommand(pfunction,
+                                           SD_IO_SEND_OP_COND,
+                                           0,
+                                           (MMC_RSP_R4 | MMC_CMD_BCR),
+                                           &resp);
+               if (err)
+                       rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+                               __func__, err);
+               host->ocr_avail = resp;
+       }
+
+       /* Issue CMD5, arg = ocr. Wait till card is ready  */
+       for (i = 0; i < 100; i++) {
+               err = rsi_issue_sdiocommand(pfunction,
+                                           SD_IO_SEND_OP_COND,
+                                           host->ocr_avail,
+                                           (MMC_RSP_R4 | MMC_CMD_BCR),
+                                           &resp);
+               if (err) {
+                       rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+                               __func__, err);
+                       break;
+               }
+
+               if (resp & MMC_CARD_BUSY)
+                       break;
+               msleep(20);
+       }
+
+       if ((i == 100) || (err)) {
+               rsi_dbg(ERR_ZONE, "%s: card is not ready : %d %d\n",
+                       __func__, i, err);
+               return;
+       }
+
+       /* Issue CMD3, get RCA */
+       err = rsi_issue_sdiocommand(pfunction,
+                                   SD_SEND_RELATIVE_ADDR,
+                                   0,
+                                   (MMC_RSP_R6 | MMC_CMD_BCR),
+                                   &resp);
+       if (err) {
+               rsi_dbg(ERR_ZONE, "%s: CMD3 failed : %d\n", __func__, err);
+               return;
+       }
+       rca = resp >> 16;
+       host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+       host->ops->set_ios(host, &host->ios);
+
+       /* Issue CMD7, select card  */
+       err = rsi_issue_sdiocommand(pfunction,
+                                   MMC_SELECT_CARD,
+                                   (rca << 16),
+                                   (MMC_RSP_R1 | MMC_CMD_AC),
+                                   NULL);
+       if (err) {
+               rsi_dbg(ERR_ZONE, "%s: CMD7 failed : %d\n", __func__, err);
+               return;
+       }
+
+       /* Enable high speed */
+       if (card->host->caps & MMC_CAP_SD_HIGHSPEED) {
+               rsi_dbg(ERR_ZONE, "%s: Set high speed mode\n", __func__);
+               err = rsi_cmd52readbyte(card, SDIO_CCCR_SPEED, &cmd52_resp);
+               if (err) {
+                       rsi_dbg(ERR_ZONE, "%s: CCCR speed reg read failed: %d\n",
+                               __func__, err);
+                       card->state &= ~MMC_STATE_HIGHSPEED;
+               } else {
+                       err = rsi_cmd52writebyte(card,
+                                                SDIO_CCCR_SPEED,
+                                                (cmd52_resp | SDIO_SPEED_EHS));
+                       if (err) {
+                               rsi_dbg(ERR_ZONE,
+                                       "%s: CCR speed regwrite failed %d\n",
+                                       __func__, err);
+                               return;
+                       }
+                       mmc_card_set_highspeed(card);
+                       host->ios.timing = MMC_TIMING_SD_HS;
+                       host->ops->set_ios(host, &host->ios);
+               }
+       }
+
+       /* Set clock */
+       if (mmc_card_highspeed(card))
+               clock = 50000000;
+       else
+               clock = card->cis.max_dtr;
+
+       if (clock > host->f_max)
+               clock = host->f_max;
+
+       host->ios.clock = clock;
+       host->ops->set_ios(host, &host->ios);
+
+       if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+               /* CMD52: Set bus width & disable card detect resistor */
+               err = rsi_cmd52writebyte(card,
+                                        SDIO_CCCR_IF,
+                                        (SDIO_BUS_CD_DISABLE |
+                                         SDIO_BUS_WIDTH_4BIT));
+               if (err) {
+                       rsi_dbg(ERR_ZONE, "%s: Set bus mode failed : %d\n",
+                               __func__, err);
+                       return;
+               }
+               host->ios.bus_width = MMC_BUS_WIDTH_4;
+               host->ops->set_ios(host, &host->ios);
+       }
+}
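+
+/*
+ * The re-initialization above mirrors a minimal SDIO enumeration: an I/O
+ * reset via CMD52, CMD0 (go idle), CMD5 to negotiate the OCR, CMD3 to
+ * fetch the RCA, CMD7 to select the card, then optional high-speed and
+ * 4-bit bus configuration. The msleep() calls give the supply and clock
+ * time to settle between steps.
+ */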
+
+/**
+ * rsi_setclock() - This function sets the clock frequency.
+ * @adapter: Pointer to the adapter structure.
+ * @freq: Clock frequency.
+ *
+ * Return: None.
+ */
+static void rsi_setclock(struct rsi_hw *adapter, u32 freq)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       struct mmc_host *host = dev->pfunction->card->host;
+       u32 clock;
+
+       clock = freq * 1000;
+       if (clock > host->f_max)
+               clock = host->f_max;
+       host->ios.clock = clock;
+       host->ops->set_ios(host, &host->ios);
+}
+
+/**
+ * rsi_setblocklength() - This function sets the host block length.
+ * @adapter: Pointer to the adapter structure.
+ * @length: Block length to be set.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_setblocklength(struct rsi_hw *adapter, u32 length)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       int status;
+
+       rsi_dbg(INIT_ZONE, "%s: Setting the block length\n", __func__);
+
+       status = sdio_set_block_size(dev->pfunction, length);
+       dev->pfunction->max_blksize = 256;
+
+       rsi_dbg(INFO_ZONE,
+               "%s: Operational blk length is %d\n", __func__, length);
+       return status;
+}
+
+/**
+ * rsi_setupcard() - This function queries and sets the card's features.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_setupcard(struct rsi_hw *adapter)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       int status = 0;
+
+       rsi_setclock(adapter, 50000);
+
+       dev->tx_blk_size = 256;
+       status = rsi_setblocklength(adapter, dev->tx_blk_size);
+       if (status)
+               rsi_dbg(ERR_ZONE,
+                       "%s: Unable to set block length\n", __func__);
+       return status;
+}
+
+/**
+ * rsi_sdio_read_register() - This function reads one byte of information
+ *                           from a register.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that stores the data read.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_read_register(struct rsi_hw *adapter,
+                          u32 addr,
+                          u8 *data)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u8 fun_num = 0;
+       int status;
+
+       sdio_claim_host(dev->pfunction);
+
+       if (fun_num == 0)
+               *data = sdio_f0_readb(dev->pfunction, addr, &status);
+       else
+               *data = sdio_readb(dev->pfunction, addr, &status);
+
+       sdio_release_host(dev->pfunction);
+
+       return status;
+}
+
+/**
+ * rsi_sdio_write_register() - This function writes one byte of information
+ *                            into a register.
+ * @adapter: Pointer to the adapter structure.
+ * @function: Function Number.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_write_register(struct rsi_hw *adapter,
+                           u8 function,
+                           u32 addr,
+                           u8 *data)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       int status = 0;
+
+       sdio_claim_host(dev->pfunction);
+
+       if (function == 0)
+               sdio_f0_writeb(dev->pfunction, *data, addr, &status);
+       else
+               sdio_writeb(dev->pfunction, *data, addr, &status);
+
+       sdio_release_host(dev->pfunction);
+
+       return status;
+}
+
+/**
+ * rsi_sdio_ack_intr() - This function acks the interrupt received.
+ * @adapter: Pointer to the adapter structure.
+ * @int_bit: Interrupt bit to write into register.
+ *
+ * Return: None.
+ */
+void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit)
+{
+       int status;
+
+       status = rsi_sdio_write_register(adapter,
+                                        1,
+                                        (SDIO_FUN1_INTR_CLR_REG |
+                                         RSI_SD_REQUEST_MASTER),
+                                        &int_bit);
+       if (status)
+               rsi_dbg(ERR_ZONE, "%s: unable to send ack\n", __func__);
+}
+
+/**
+ * rsi_sdio_read_register_multiple() - This function reads multiple bytes of
+ *                                    information from the SD card.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @count: Number of multiple bytes to be read.
+ * @data: Pointer to the read data.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter,
+                                          u32 addr,
+                                          u32 count,
+                                          u8 *data)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u32 status;
+
+       sdio_claim_host(dev->pfunction);
+
+       status =  sdio_readsb(dev->pfunction, data, addr, count);
+
+       sdio_release_host(dev->pfunction);
+
+       if (status != 0)
+               rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__);
+       return status;
+}
+
+/**
+ * rsi_sdio_write_register_multiple() - This function writes multiple bytes of
+ *                                     information to the SD card.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of multiple bytes to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_write_register_multiple(struct rsi_hw *adapter,
+                                    u32 addr,
+                                    u8 *data,
+                                    u32 count)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       int status;
+
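+       /* write_fail tracks the card write state: a value above one stops
+        * further writes, while a value of one allows a single retry on the
+        * assumption that the previous failure was a CRC error.
+        */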
+       if (dev->write_fail > 1) {
+               rsi_dbg(ERR_ZONE, "%s: Stopping card writes\n", __func__);
+               return 0;
+       } else if (dev->write_fail == 1) {
+               /*
+                * Assuming it is a CRC failure, we want to allow another
+                * card write
+                */
+               rsi_dbg(ERR_ZONE, "%s: Continue card writes\n", __func__);
+               dev->write_fail++;
+       }
+
+       sdio_claim_host(dev->pfunction);
+
+       status = sdio_writesb(dev->pfunction, addr, data, count);
+
+       sdio_release_host(dev->pfunction);
+
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n",
+                       __func__, status);
+               dev->write_fail = 2;
+       } else {
+               memcpy(dev->prev_desc, data, FRAME_DESC_SZ);
+       }
+       return status;
+}
+
+/**
+ * rsi_sdio_host_intf_write_pkt() - This function writes the packet to device.
+ * @adapter: Pointer to the adapter structure.
+ * @pkt: Pointer to the data to be written on to the device.
+ * @len: length of the data to be written on to the device.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
+                                       u8 *pkt,
+                                       u32 len)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u32 block_size = dev->tx_blk_size;
+       u32 num_blocks, address, length;
+       u32 queueno;
+       int status;
+
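+       /* The target queue number sits in the upper nibble of the second
+        * frame descriptor byte.
+        */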
+       queueno = ((pkt[1] >> 4) & 0xf);
+
+       num_blocks = len / block_size;
+
+       if (len % block_size)
+               num_blocks++;
+
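+       /* The CMD53 address encodes the block-aligned transfer length in its
+        * low bits and the target queue number in bits [15:12].
+        */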
+       address = (num_blocks * block_size | (queueno << 12));
+       length  = num_blocks * block_size;
+
+       status = rsi_sdio_write_register_multiple(adapter,
+                                                 address,
+                                                 (u8 *)pkt,
+                                                 length);
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Unable to write onto the card: %d\n",
+                       __func__, status);
+               return status;
+       }
+       rsi_dbg(DATA_TX_ZONE, "%s: Successfully written onto card\n", __func__);
+       return status;
+}
+
+/**
+ * rsi_sdio_host_intf_read_pkt() - This function reads the packet
+ *                                from the device.
+ * @adapter: Pointer to the adapter data structure.
+ * @pkt: Pointer to the packet data to be read from the device.
+ * @length: Length of the data to be read from the device.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter,
+                               u8 *pkt,
+                               u32 length)
+{
+       int status = -EINVAL;
+
+       if (!length) {
+               rsi_dbg(ERR_ZONE, "%s: Pkt size is zero\n", __func__);
+               return status;
+       }
+
+       status = rsi_sdio_read_register_multiple(adapter,
+                                                length,
+                                                length, /*num of bytes*/
+                                                (u8 *)pkt);
+
+       if (status)
+               rsi_dbg(ERR_ZONE, "%s: Failed to read frame: %d\n", __func__,
+                       status);
+       return status;
+}
+
+/**
+ * rsi_init_sdio_interface() - This function does init specific to SDIO.
+ * @adapter: Pointer to the adapter data structure.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_init_sdio_interface(struct rsi_hw *adapter,
+                                  struct sdio_func *pfunction)
+{
+       struct rsi_91x_sdiodev *rsi_91x_dev;
+       int status = -ENOMEM;
+
+       rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
+       if (!rsi_91x_dev)
+               return status;
+
+       adapter->rsi_dev = rsi_91x_dev;
+
+       sdio_claim_host(pfunction);
+
+       pfunction->enable_timeout = 100;
+       status = sdio_enable_func(pfunction);
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to enable interface\n", __func__);
+               sdio_release_host(pfunction);
+               return status;
+       }
+
+       rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
+
+       rsi_91x_dev->pfunction = pfunction;
+       adapter->device = &pfunction->dev;
+
+       sdio_set_drvdata(pfunction, adapter);
+
+       status = rsi_setupcard(adapter);
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to setup card\n", __func__);
+               goto fail;
+       }
+
+       rsi_dbg(INIT_ZONE, "%s: Setup card succesfully\n", __func__);
+
+       status = rsi_init_sdio_slave_regs(adapter);
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to init slave regs\n", __func__);
+               goto fail;
+       }
+       sdio_release_host(pfunction);
+
+       adapter->host_intf_write_pkt = rsi_sdio_host_intf_write_pkt;
+       adapter->host_intf_read_pkt = rsi_sdio_host_intf_read_pkt;
+       adapter->determine_event_timeout = rsi_sdio_determine_event_timeout;
+       adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register;
+
+#ifdef CONFIG_RSI_DEBUGFS
+       adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
+#endif
+       return status;
+fail:
+       sdio_disable_func(pfunction);
+       sdio_release_host(pfunction);
+       return status;
+}
+
+/**
+ * rsi_probe() - This function is called by kernel when the driver provided
+ *              Vendor and device IDs are matched. All the initialization
+ *              work is done here.
+ * @pfunction: Pointer to the sdio_func structure.
+ * @id: Pointer to sdio_device_id structure.
+ *
+ * Return: 0 on success, 1 on failure.
+ */
+static int rsi_probe(struct sdio_func *pfunction,
+                    const struct sdio_device_id *id)
+{
+       struct rsi_hw *adapter;
+
+       rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
+
+       adapter = rsi_91x_init();
+       if (!adapter) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
+                       __func__);
+               return 1;
+       }
+
+       if (rsi_init_sdio_interface(adapter, pfunction)) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to init sdio interface\n",
+                       __func__);
+               goto fail;
+       }
+
+       if (rsi_sdio_device_init(adapter->priv)) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in device init\n", __func__);
+               sdio_claim_host(pfunction);
+               sdio_disable_func(pfunction);
+               sdio_release_host(pfunction);
+               goto fail;
+       }
+
+       sdio_claim_host(pfunction);
+       if (sdio_claim_irq(pfunction, rsi_handle_interrupt)) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to request IRQ\n", __func__);
+               sdio_release_host(pfunction);
+               goto fail;
+       }
+
+       sdio_release_host(pfunction);
+       rsi_dbg(INIT_ZONE, "%s: Registered Interrupt handler\n", __func__);
+
+       return 0;
+fail:
+       rsi_91x_deinit(adapter);
+       rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
+       return 1;
+}
+
+/**
+ * rsi_disconnect() - This function performs the reverse of the probe function.
+ * @pfunction: Pointer to the sdio_func structure.
+ *
+ * Return: void.
+ */
+static void rsi_disconnect(struct sdio_func *pfunction)
+{
+       struct rsi_hw *adapter = sdio_get_drvdata(pfunction);
+       struct rsi_91x_sdiodev *dev;
+
+       if (!adapter)
+               return;
+
+       dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       dev->write_fail = 2;
+       rsi_mac80211_detach(adapter);
+
+       sdio_claim_host(pfunction);
+       sdio_release_irq(pfunction);
+       sdio_disable_func(pfunction);
+       rsi_91x_deinit(adapter);
+       /* Resetting to take care of the case where the driver is re-loaded */
+       rsi_reset_card(pfunction);
+       sdio_release_host(pfunction);
+}
+
+#ifdef CONFIG_PM
+static int rsi_suspend(struct device *dev)
+{
+       /* Not yet implemented */
+       return -ENOSYS;
+}
+
+static int rsi_resume(struct device *dev)
+{
+       /* Not yet implemented */
+       return -ENOSYS;
+}
+
+static const struct dev_pm_ops rsi_pm_ops = {
+       .suspend = rsi_suspend,
+       .resume = rsi_resume,
+};
+#endif
+
+static const struct sdio_device_id rsi_dev_table[] =  {
+       { SDIO_DEVICE(0x303, 0x100) },
+       { SDIO_DEVICE(0x041B, 0x0301) },
+       { SDIO_DEVICE(0x041B, 0x0201) },
+       { SDIO_DEVICE(0x041B, 0x9330) },
+       { /* Blank */},
+};
+
+static struct sdio_driver rsi_driver = {
+       .name       = "RSI-SDIO WLAN",
+       .probe      = rsi_probe,
+       .remove     = rsi_disconnect,
+       .id_table   = rsi_dev_table,
+#ifdef CONFIG_PM
+       .drv = {
+               .pm = &rsi_pm_ops,
+       }
+#endif
+};
+
+/**
+ * rsi_module_init() - This function registers the sdio module.
+ * @void: Void.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_module_init(void)
+{
+       int ret;
+
+       ret = sdio_register_driver(&rsi_driver);
+       rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
+       return ret;
+}
+
+/**
+ * rsi_module_exit() - This function unregisters the sdio module.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_module_exit(void)
+{
+       sdio_unregister_driver(&rsi_driver);
+       rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
+}
+
+module_init(rsi_module_init);
+module_exit(rsi_module_exit);
+
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_DEVICE_TABLE(sdio, rsi_dev_table);
+MODULE_FIRMWARE(FIRMWARE_RSI9113);
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
new file mode 100644 (file)
index 0000000..f1cb99c
--- /dev/null
@@ -0,0 +1,566 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "rsi_sdio.h"
+#include "rsi_common.h"
+
+/**
+ * rsi_sdio_master_access_msword() - This function sets the AHB master access
+ *                                  MS word in the SDIO slave registers.
+ * @adapter: Pointer to the adapter structure.
+ * @ms_word: MS word that needs to be written.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_sdio_master_access_msword(struct rsi_hw *adapter,
+                                        u16 ms_word)
+{
+       u8 byte;
+       u8 function = 0;
+       int status = 0;
+
+       byte = (u8)(ms_word & 0x00FF);
+
+       rsi_dbg(INIT_ZONE,
+               "%s: MASTER_ACCESS_MSBYTE:0x%x\n", __func__, byte);
+
+       status = rsi_sdio_write_register(adapter,
+                                        function,
+                                        SDIO_MASTER_ACCESS_MSBYTE,
+                                        &byte);
+       if (status) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: fail to access MASTER_ACCESS_MSBYTE\n",
+                       __func__);
+               return -1;
+       }
+
+       byte = (u8)(ms_word >> 8);
+
+       rsi_dbg(INIT_ZONE, "%s:MASTER_ACCESS_LSBYTE:0x%x\n", __func__, byte);
+       status = rsi_sdio_write_register(adapter,
+                                        function,
+                                        SDIO_MASTER_ACCESS_LSBYTE,
+                                        &byte);
+       return status;
+}
+
+/**
+ * rsi_copy_to_card() - This function copies the TA firmware to the card by
+ *                     writing it out in block-sized chunks.
+ * @common: Pointer to the driver private structure.
+ * @fw: Pointer to the firmware value to be written.
+ * @len: length of firmware file.
+ * @num_blocks: Number of blocks to be written to the card.
+ *
+ * Return: 0 on success and -1 on failure.
+ */
+static int rsi_copy_to_card(struct rsi_common *common,
+                           const u8 *fw,
+                           u32 len,
+                           u32 num_blocks)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u32 indx, ii;
+       u32 block_size = dev->tx_blk_size;
+       u32 lsb_address;
+       __le32 data[] = { TA_HOLD_THREAD_VALUE, TA_SOFT_RST_CLR,
+                         TA_PC_ZERO, TA_RELEASE_THREAD_VALUE };
+       u32 address[] = { TA_HOLD_THREAD_REG, TA_SOFT_RESET_REG,
+                         TA_TH0_PC_REG, TA_RELEASE_THREAD_REG };
+       u32 base_address;
+       u16 msb_address;
+
+       base_address = TA_LOAD_ADDRESS;
+       msb_address = base_address >> 16;
+
+       for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
+               lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
+               if (rsi_sdio_write_register_multiple(adapter,
+                                                    lsb_address,
+                                                    (u8 *)(fw + indx),
+                                                    block_size)) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Unable to load %s blk\n", __func__,
+                               FIRMWARE_RSI9113);
+                       return -1;
+               }
+               rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
+               base_address += block_size;
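+               /* When the write pointer crosses a 64 KB boundary, reprogram
+                * the master access MS word with the new upper 16 address
+                * bits.
+                */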
+               if ((base_address >> 16) != msb_address) {
+                       msb_address += 1;
+                       if (rsi_sdio_master_access_msword(adapter,
+                                                         msb_address)) {
+                               rsi_dbg(ERR_ZONE,
+                                       "%s: Unable to set ms word reg\n",
+                                       __func__);
+                               return -1;
+                       }
+               }
+       }
+
+       if (len % block_size) {
+               lsb_address = ((u16) base_address | RSI_SD_REQUEST_MASTER);
+               if (rsi_sdio_write_register_multiple(adapter,
+                                                    lsb_address,
+                                                    (u8 *)(fw + indx),
+                                                    len % block_size)) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Unable to load f/w\n", __func__);
+                       return -1;
+               }
+       }
+       rsi_dbg(INIT_ZONE,
+               "%s: Succesfully loaded TA instructions\n", __func__);
+
+       if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Unable to set ms word to common reg\n",
+                       __func__);
+               return -1;
+       }
+
+       for (ii = 0; ii < ARRAY_SIZE(data); ii++) {
+               /* Bringing TA out of reset */
+               if (rsi_sdio_write_register_multiple(adapter,
+                                                    (address[ii] |
+                                                    RSI_SD_REQUEST_MASTER),
+                                                    (u8 *)&data[ii],
+                                                    4)) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Unable to hold TA threads\n", __func__);
+                       return -1;
+               }
+       }
+
+       rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
+       return 0;
+}
+
+/**
+ * rsi_load_ta_instructions() - This function loads the TA firmware. It
+ *                             requests the firmware file and writes its
+ *                             contents to the card in blocks of data.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_load_ta_instructions(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u32 len;
+       u32 num_blocks;
+       const u8 *fw;
+       const struct firmware *fw_entry = NULL;
+       u32 block_size = dev->tx_blk_size;
+       int status = 0;
+       u32 base_address;
+       u16 msb_address;
+
+       if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Unable to set ms word to common reg\n",
+                       __func__);
+               return -1;
+       }
+       base_address = TA_LOAD_ADDRESS;
+       msb_address = (base_address >> 16);
+
+       if (rsi_sdio_master_access_msword(adapter, msb_address)) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Unable to set ms word reg\n", __func__);
+               return -1;
+       }
+
+       status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
+       if (status < 0) {
+               rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
+                       __func__, FIRMWARE_RSI9113);
+               return status;
+       }
+
+       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw) {
+               release_firmware(fw_entry);
+               return -ENOMEM;
+       }
+       len = fw_entry->size;
+
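+       /* Round the write length up to a 4-byte multiple to match the block
+        * transfers below.
+        */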
+       if (len % 4)
+               len += (4 - (len % 4));
+
+       num_blocks = (len / block_size);
+
+       rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+       rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+
+       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
+       release_firmware(fw_entry);
+       return status;
+}
+
+/**
+ * rsi_process_pkt() - This function reads the rx_blocks register and figures
+ *                    out the size of the rx pkt.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_process_pkt(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       u8 num_blks = 0;
+       u32 rcv_pkt_len = 0;
+       int status = 0;
+
+       status = rsi_sdio_read_register(adapter,
+                                       SDIO_RX_NUM_BLOCKS_REG,
+                                       &num_blks);
+
+       if (status) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Failed to read pkt length from the card:\n",
+                       __func__);
+               return status;
+       }
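+       /* Each RX block is 256 bytes; the register reports how many blocks
+        * are pending.
+        */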
+       rcv_pkt_len = (num_blks * 256);
+
+       common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL);
+       if (!common->rx_data_pkt) {
+               rsi_dbg(ERR_ZONE, "%s: Failed in memory allocation\n",
+                       __func__);
+               return -1;
+       }
+
+       status = rsi_sdio_host_intf_read_pkt(adapter,
+                                            common->rx_data_pkt,
+                                            rcv_pkt_len);
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to read packet from card\n",
+                       __func__);
+               goto fail;
+       }
+
+       status = rsi_read_pkt(common, rcv_pkt_len);
+       kfree(common->rx_data_pkt);
+       return status;
+
+fail:
+       kfree(common->rx_data_pkt);
+       return -1;
+}
+
+/**
+ * rsi_init_sdio_slave_regs() - This function does the actual initialization
+ *                             of SDBUS slave registers.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u8 function = 0;
+       u8 byte;
+       int status = 0;
+
+       if (dev->next_read_delay) {
+               byte = dev->next_read_delay;
+               status = rsi_sdio_write_register(adapter,
+                                                function,
+                                                SDIO_NXT_RD_DELAY2,
+                                                &byte);
+               if (status) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Failed to write SDIO_NXT_RD_DELAY2\n",
+                               __func__);
+                       return -1;
+               }
+       }
+
+       if (dev->sdio_high_speed_enable) {
+               rsi_dbg(INIT_ZONE, "%s: Enabling SDIO High speed\n", __func__);
+               byte = 0x3;
+
+               status = rsi_sdio_write_register(adapter,
+                                                function,
+                                                SDIO_REG_HIGH_SPEED,
+                                                &byte);
+               if (status) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Failed to enable SDIO high speed\n",
+                               __func__);
+                       return -1;
+               }
+       }
+
+       /* This tells SDIO FIFO when to start read to host */
+       rsi_dbg(INIT_ZONE, "%s: Initialzing SDIO read start level\n", __func__);
+       byte = 0x24;
+
+       status = rsi_sdio_write_register(adapter,
+                                        function,
+                                        SDIO_READ_START_LVL,
+                                        &byte);
+       if (status) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Failed to write SDIO_READ_START_LVL\n", __func__);
+               return -1;
+       }
+
+       rsi_dbg(INIT_ZONE, "%s: Initialzing FIFO ctrl registers\n", __func__);
+       byte = (128 - 32);
+
+       status = rsi_sdio_write_register(adapter,
+                                        function,
+                                        SDIO_READ_FIFO_CTL,
+                                        &byte);
+       if (status) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Failed to write SDIO_READ_FIFO_CTL\n", __func__);
+               return -1;
+       }
+
+       byte = 32;
+       status = rsi_sdio_write_register(adapter,
+                                        function,
+                                        SDIO_WRITE_FIFO_CTL,
+                                        &byte);
+       if (status) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Failed to write SDIO_WRITE_FIFO_CTL\n", __func__);
+               return -1;
+       }
+
+       return 0;
+}
+
+/**
+ * rsi_interrupt_handler() - This function reads and processes SDIO interrupts.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+void rsi_interrupt_handler(struct rsi_hw *adapter)
+{
+       struct rsi_common *common = adapter->priv;
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       int status;
+       enum sdio_interrupt_type isr_type;
+       u8 isr_status = 0;
+       u8 fw_status = 0;
+
+       dev->rx_info.sdio_int_counter++;
+
+       do {
+               mutex_lock(&common->tx_rxlock);
+               status = rsi_sdio_read_register(common->priv,
+                                               RSI_FN1_INT_REGISTER,
+                                               &isr_status);
+               if (status) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Failed to Read Intr Status Register\n",
+                               __func__);
+                       mutex_unlock(&common->tx_rxlock);
+                       return;
+               }
+
+               if (isr_status == 0) {
+                       rsi_set_event(&common->tx_thread.event);
+                       dev->rx_info.sdio_intr_status_zero++;
+                       mutex_unlock(&common->tx_rxlock);
+                       return;
+               }
+
+               rsi_dbg(ISR_ZONE, "%s: Intr_status = %x %d %d\n",
+                       __func__, isr_status, (1 << MSDU_PKT_PENDING),
+                       (1 << FW_ASSERT_IND));
+
+               do {
+                       RSI_GET_SDIO_INTERRUPT_TYPE(isr_status, isr_type);
+
+                       switch (isr_type) {
+                       case BUFFER_AVAILABLE:
+                               dev->rx_info.watch_bufferfull_count = 0;
+                               dev->rx_info.buffer_full = false;
+                               dev->rx_info.mgmt_buffer_full = false;
+                               rsi_sdio_ack_intr(common->priv,
+                                                 (1 << PKT_BUFF_AVAILABLE));
+                               rsi_set_event((&common->tx_thread.event));
+                               rsi_dbg(ISR_ZONE,
+                                       "%s: ==> BUFFER_AVILABLE <==\n",
+                                       __func__);
+                               dev->rx_info.buf_avilable_counter++;
+                               break;
+
+                       case FIRMWARE_ASSERT_IND:
+                               rsi_dbg(ERR_ZONE,
+                                       "%s: ==> FIRMWARE Assert <==\n",
+                                       __func__);
+                               status = rsi_sdio_read_register(common->priv,
+                                                       SDIO_FW_STATUS_REG,
+                                                       &fw_status);
+                               if (status) {
+                                       rsi_dbg(ERR_ZONE,
+                                               "%s: Failed to read f/w reg\n",
+                                               __func__);
+                               } else {
+                                       rsi_dbg(ERR_ZONE,
+                                               "%s: Firmware Status is 0x%x\n",
+                                               __func__, fw_status);
+                                       rsi_sdio_ack_intr(common->priv,
+                                                         (1 << FW_ASSERT_IND));
+                               }
+
+                               common->fsm_state = FSM_CARD_NOT_READY;
+                               break;
+
+                       case MSDU_PACKET_PENDING:
+                               rsi_dbg(ISR_ZONE, "Pkt pending interrupt\n");
+                               dev->rx_info.total_sdio_msdu_pending_intr++;
+
+                               status = rsi_process_pkt(common);
+                               if (status) {
+                                       rsi_dbg(ERR_ZONE,
+                                               "%s: Failed to read pkt\n",
+                                               __func__);
+                                       mutex_unlock(&common->tx_rxlock);
+                                       return;
+                               }
+                               break;
+                       default:
+                               rsi_sdio_ack_intr(common->priv, isr_status);
+                               dev->rx_info.total_sdio_unknown_intr++;
+                               rsi_dbg(ISR_ZONE,
+                                       "Unknown Interrupt %x\n",
+                                       isr_status);
+                               isr_status = 0;
+                               break;
+                       }
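+                       /* Clear the cause that was just serviced and keep
+                        * looping until every pending bit from this ISR read
+                        * has been handled.
+                        */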
+                       isr_status ^= BIT(isr_type - 1);
+               } while (isr_status);
+               mutex_unlock(&common->tx_rxlock);
+       } while (1);
+}
+
+/**
+ * rsi_sdio_device_init() - This function initializes the HAL.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+int rsi_sdio_device_init(struct rsi_common *common)
+{
+       if (rsi_load_ta_instructions(common))
+               return -1;
+
+       if (rsi_sdio_master_access_msword(common->priv, MISC_CFG_BASE_ADDR)) {
+               rsi_dbg(ERR_ZONE, "%s: Unable to set ms word reg\n",
+                       __func__);
+               return -1;
+       }
+       rsi_dbg(INIT_ZONE,
+               "%s: Setting ms word to 0x41050000\n", __func__);
+
+       return 0;
+}
+
+/**
+ * rsi_sdio_read_buffer_status_register() - This function reads the buffer
+ *                                         status register and sets the
+ *                                         relevant fields in the
+ *                                         rsi_91x_sdiodev structure.
+ * @adapter: Pointer to the driver hw structure.
+ * @q_num: The Q number whose status is to be found.
+ *
+ * Return: status: -1 on failure or else queue full/stop is indicated.
+ */
+int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num)
+{
+       struct rsi_common *common = adapter->priv;
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+       u8 buf_status = 0;
+       int status = 0;
+
+       status = rsi_sdio_read_register(common->priv,
+                                       RSI_DEVICE_BUFFER_STATUS_REGISTER,
+                                       &buf_status);
+
+       if (status) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Failed to read status register\n", __func__);
+               return -1;
+       }
+
+       if (buf_status & (BIT(PKT_MGMT_BUFF_FULL))) {
+               if (!dev->rx_info.mgmt_buffer_full)
+                       dev->rx_info.mgmt_buf_full_counter++;
+               dev->rx_info.mgmt_buffer_full = true;
+       } else {
+               dev->rx_info.mgmt_buffer_full = false;
+       }
+
+       if (buf_status & (BIT(PKT_BUFF_FULL))) {
+               if (!dev->rx_info.buffer_full)
+                       dev->rx_info.buf_full_counter++;
+               dev->rx_info.buffer_full = true;
+       } else {
+               dev->rx_info.buffer_full = false;
+       }
+
+       if (buf_status & (BIT(PKT_BUFF_SEMI_FULL))) {
+               if (!dev->rx_info.semi_buffer_full)
+                       dev->rx_info.buf_semi_full_counter++;
+               dev->rx_info.semi_buffer_full = true;
+       } else {
+               dev->rx_info.semi_buffer_full = false;
+       }
+
+       if ((q_num == MGMT_SOFT_Q) && (dev->rx_info.mgmt_buffer_full))
+               return QUEUE_FULL;
+
+       if (dev->rx_info.buffer_full)
+               return QUEUE_FULL;
+
+       return QUEUE_NOT_FULL;
+}
+
+/**
+ * rsi_sdio_determine_event_timeout() - This function determines the event
+ *                                     timeout duration.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: timeout duration is returned.
+ */
+int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter)
+{
+       struct rsi_91x_sdiodev *dev =
+               (struct rsi_91x_sdiodev *)adapter->rsi_dev;
+
+       /* Once buffer full is seen, event timeout to occur every 2 msecs */
+       if (dev->rx_info.buffer_full)
+               return 2;
+
+       return EVENT_WAIT_FOREVER;
+}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
new file mode 100644 (file)
index 0000000..bb1bf96
--- /dev/null
@@ -0,0 +1,575 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include "rsi_usb.h"
+
+/**
+ * rsi_usb_card_write() - This function writes to the USB Card.
+ * @adapter: Pointer to the adapter structure.
+ * @buf: Pointer to the buffer from where the data has to be taken.
+ * @len: Length to be written.
+ * @endpoint: Type of endpoint.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_card_write(struct rsi_hw *adapter,
+                             void *buf,
+                             u16 len,
+                             u8 endpoint)
+{
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       int status;
+       s32 transfer;
+
+       status = usb_bulk_msg(dev->usbdev,
+                             usb_sndbulkpipe(dev->usbdev,
+                             dev->bulkout_endpoint_addr[endpoint - 1]),
+                             buf,
+                             len,
+                             &transfer,
+                             HZ * 5);
+
+       if (status < 0) {
+               rsi_dbg(ERR_ZONE,
+                       "Card write failed with error code :%10d\n", status);
+               dev->write_fail = 1;
+       }
+       return status;
+}
+
+/**
+ * rsi_write_multiple() - This function writes multiple bytes of information
+ *                       to the USB card.
+ * @adapter: Pointer to the adapter structure.
+ * @endpoint: Endpoint over which the data has to be written.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of multiple bytes to be written.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_write_multiple(struct rsi_hw *adapter,
+                             u8 endpoint,
+                             u8 *data,
+                             u32 count)
+{
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       u8 *seg = dev->tx_buffer;
+
+       if (dev->write_fail)
+               return 0;
+
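+       /* Management frames are copied in behind the reserved USB TX head
+        * room; data frames are expected to already carry that head room in
+        * front of the payload.
+        */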
+       if (endpoint == MGMT_EP) {
+               memset(seg, 0, RSI_USB_TX_HEAD_ROOM);
+               memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count);
+       } else {
+               seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM);
+       }
+
+       return rsi_usb_card_write(adapter,
+                                 seg,
+                                 count + RSI_USB_TX_HEAD_ROOM,
+                                 endpoint);
+}
+
+/**
+ * rsi_find_bulk_in_and_out_endpoints() - This function finds the bulk IN and
+ *                                       OUT endpoints of the device.
+ * @interface: Pointer to the USB interface structure.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -EINVAL on failure.
+ */
+static int rsi_find_bulk_in_and_out_endpoints(struct usb_interface *interface,
+                                             struct rsi_hw *adapter)
+{
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       struct usb_host_interface *iface_desc;
+       struct usb_endpoint_descriptor *endpoint;
+       __le16 buffer_size;
+       int ii, bep_found = 0;
+
+       iface_desc = &(interface->altsetting[0]);
+
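+       /* Walk the endpoint descriptors, recording the first bulk IN
+        * endpoint and up to MAX_BULK_EP bulk OUT endpoints.
+        */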
+       for (ii = 0; ii < iface_desc->desc.bNumEndpoints; ++ii) {
+               endpoint = &(iface_desc->endpoint[ii].desc);
+
+               if ((!(dev->bulkin_endpoint_addr)) &&
+                   (endpoint->bEndpointAddress & USB_DIR_IN) &&
+                   ((endpoint->bmAttributes &
+                   USB_ENDPOINT_XFERTYPE_MASK) ==
+                   USB_ENDPOINT_XFER_BULK)) {
+                       buffer_size = endpoint->wMaxPacketSize;
+                       dev->bulkin_size = buffer_size;
+                       dev->bulkin_endpoint_addr =
+                               endpoint->bEndpointAddress;
+               }
+
+               if (!dev->bulkout_endpoint_addr[bep_found] &&
+                   !(endpoint->bEndpointAddress & USB_DIR_IN) &&
+                   ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+                     USB_ENDPOINT_XFER_BULK)) {
+                       dev->bulkout_endpoint_addr[bep_found] =
+                               endpoint->bEndpointAddress;
+                       buffer_size = endpoint->wMaxPacketSize;
+                       dev->bulkout_size[bep_found] = buffer_size;
+                       bep_found++;
+               }
+
+               if (bep_found >= MAX_BULK_EP)
+                       break;
+       }
+
+       if (!(dev->bulkin_endpoint_addr) &&
+           (dev->bulkout_endpoint_addr[0]))
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * rsi_usb_reg_read() - This function reads data from the given register
+ *                     address.
+ * @usbdev: Pointer to the usb_device structure.
+ * @reg: Address of the register to be read.
+ * @value: Pointer to the value read back.
+ * @len: Length of the data to be read.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_reg_read(struct usb_device *usbdev,
+                           u32 reg,
+                           u16 *value,
+                           u16 len)
+{
+       u8 temp_buf[4];
+       int status = 0;
+
+       status = usb_control_msg(usbdev,
+                                usb_rcvctrlpipe(usbdev, 0),
+                                USB_VENDOR_REGISTER_READ,
+                                USB_TYPE_VENDOR,
+                                ((reg & 0xffff0000) >> 16), (reg & 0xffff),
+                                (void *)temp_buf,
+                                len,
+                                HZ * 5);
+
+       *value = (temp_buf[0] | (temp_buf[1] << 8));
+       if (status < 0) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Reg read failed with error code :%d\n",
+                       __func__, status);
+       }
+       return status;
+}
+
+/**
+ * rsi_usb_reg_write() - This function writes the given data into the given
+ *                      register address.
+ * @usbdev: Pointer to the usb_device structure.
+ * @reg: Address of the register.
+ * @value: Value to write.
+ * @len: Length of data to be written.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+static int rsi_usb_reg_write(struct usb_device *usbdev,
+                            u32 reg,
+                            u16 value,
+                            u16 len)
+{
+       u8 usb_reg_buf[4];
+       int status = 0;
+
+       usb_reg_buf[0] = (value & 0x00ff);
+       usb_reg_buf[1] = (value & 0xff00) >> 8;
+       usb_reg_buf[2] = 0x0;
+       usb_reg_buf[3] = 0x0;
+
+       status = usb_control_msg(usbdev,
+                                usb_sndctrlpipe(usbdev, 0),
+                                USB_VENDOR_REGISTER_WRITE,
+                                USB_TYPE_VENDOR,
+                                ((reg & 0xffff0000) >> 16),
+                                (reg & 0xffff),
+                                (void *)usb_reg_buf,
+                                len,
+                                HZ * 5);
+       if (status < 0) {
+               rsi_dbg(ERR_ZONE,
+                       "%s: Reg write failed with error code :%d\n",
+                       __func__, status);
+       }
+       return status;
+}
+
+/**
+ * rsi_rx_done_handler() - This function is called when a packet is received
+ *                        from the USB stack. This is the receive-done callback.
+ * @urb: Received URB.
+ *
+ * Return: None.
+ */
+static void rsi_rx_done_handler(struct urb *urb)
+{
+       struct rsi_hw *adapter = urb->context;
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+       if (urb->status)
+               return;
+
+       rsi_set_event(&dev->rx_thread.event);
+}
+
+/**
+ * rsi_rx_urb_submit() - This function submits the given URB to the USB stack.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_rx_urb_submit(struct rsi_hw *adapter)
+{
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       struct urb *urb = dev->rx_usb_urb[0];
+       int status;
+
+       usb_fill_bulk_urb(urb,
+                         dev->usbdev,
+                         usb_rcvbulkpipe(dev->usbdev,
+                               dev->bulkin_endpoint_addr),
+                         urb->transfer_buffer,
+                         3000,
+                         rsi_rx_done_handler,
+                         adapter);
+
+       status = usb_submit_urb(urb, GFP_KERNEL);
+       if (status)
+               rsi_dbg(ERR_ZONE, "%s: Failed in urb submission\n", __func__);
+
+       return status;
+}
+
+/**
+ * rsi_usb_write_register_multiple() - This function writes multiple bytes of
+ *                                    information to multiple registers.
+ * @adapter: Pointer to the adapter structure.
+ * @addr: Address of the register.
+ * @data: Pointer to the data that has to be written.
+ * @count: Number of multiple bytes to be written on to the registers.
+ *
+ * Return: status: 0 on success, -1 on failure.
+ */
+int rsi_usb_write_register_multiple(struct rsi_hw *adapter,
+                                   u32 addr,
+                                   u8 *data,
+                                   u32 count)
+{
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       u8 *buf;
+       u16 transfer;
+       int status = 0;
+
+       buf = kzalloc(4096, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
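+       /* Issue the write as a series of vendor control transfers of at
+        * most 4096 bytes each.
+        */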
+       while (count) {
+               transfer = min_t(int, count, 4096);
+               memcpy(buf, data, transfer);
+               status = usb_control_msg(dev->usbdev,
+                                        usb_sndctrlpipe(dev->usbdev, 0),
+                                        USB_VENDOR_REGISTER_WRITE,
+                                        USB_TYPE_VENDOR,
+                                        ((addr & 0xffff0000) >> 16),
+                                        (addr & 0xffff),
+                                        (void *)buf,
+                                        transfer,
+                                        HZ * 5);
+               if (status < 0) {
+                       rsi_dbg(ERR_ZONE,
+                               "Reg write failed with error code :%d\n",
+                               status);
+                       kfree(buf);
+                       return status;
+               }
+
+               count -= transfer;
+               data += transfer;
+               addr += transfer;
+       }
+
+       kfree(buf);
+       return 0;
+}
+
+/**
+ * rsi_usb_host_intf_write_pkt() - This function writes the packet to the
+ *                                USB card.
+ * @adapter: Pointer to the adapter structure.
+ * @pkt: Pointer to the data to be written on to the card.
+ * @len: Length of the data to be written on to the card.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter,
+                                      u8 *pkt,
+                                      u32 len)
+{
+       u32 queueno = ((pkt[1] >> 4) & 0xf);
+       u8 endpoint;
+
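+       /* Management queue frames go out on the management endpoint,
+        * everything else on the data endpoint.
+        */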
+       endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP);
+
+       return rsi_write_multiple(adapter,
+                                 endpoint,
+                                 (u8 *)pkt,
+                                 len);
+}
+
+/**
+ * rsi_deinit_usb_interface() - This function deinitializes the usb interface.
+ * @adapter: Pointer to the adapter structure.
+ *
+ * Return: None.
+ */
+static void rsi_deinit_usb_interface(struct rsi_hw *adapter)
+{
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
+       rsi_kill_thread(&dev->rx_thread);
+       kfree(adapter->priv->rx_data_pkt);
+       kfree(dev->tx_buffer);
+}
+
+/**
+ * rsi_init_usb_interface() - This function initializes the usb interface.
+ * @adapter: Pointer to the adapter structure.
+ * @pfunction: Pointer to USB interface structure.
+ *
+ * Return: 0 on success, -1 on failure.
+ */
+static int rsi_init_usb_interface(struct rsi_hw *adapter,
+                                 struct usb_interface *pfunction)
+{
+       struct rsi_91x_usbdev *rsi_dev;
+       struct rsi_common *common = adapter->priv;
+       int status;
+
+       rsi_dev = kzalloc(sizeof(*rsi_dev), GFP_KERNEL);
+       if (!rsi_dev)
+               return -ENOMEM;
+
+       adapter->rsi_dev = rsi_dev;
+       rsi_dev->usbdev = interface_to_usbdev(pfunction);
+
+       if (rsi_find_bulk_in_and_out_endpoints(pfunction, adapter))
+               return -EINVAL;
+
+       adapter->device = &pfunction->dev;
+       usb_set_intfdata(pfunction, adapter);
+
+       common->rx_data_pkt = kmalloc(2048, GFP_KERNEL);
+       if (!common->rx_data_pkt) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to allocate memory\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       rsi_dev->tx_buffer = kmalloc(2048, GFP_ATOMIC);
+       if (!rsi_dev->tx_buffer) {
+               status = -ENOMEM;
+               goto fail;
+       }
+
+       rsi_dev->rx_usb_urb[0] = usb_alloc_urb(0, GFP_KERNEL);
+       if (!rsi_dev->rx_usb_urb[0]) {
+               status = -ENOMEM;
+               goto fail;
+       }
+       rsi_dev->rx_usb_urb[0]->transfer_buffer = adapter->priv->rx_data_pkt;
+       rsi_dev->tx_blk_size = 252;
+
+       /* Initializing function callbacks */
+       adapter->rx_urb_submit = rsi_rx_urb_submit;
+       adapter->host_intf_write_pkt = rsi_usb_host_intf_write_pkt;
+       adapter->check_hw_queue_status = rsi_usb_check_queue_status;
+       adapter->determine_event_timeout = rsi_usb_event_timeout;
+
+       rsi_init_event(&rsi_dev->rx_thread.event);
+       status = rsi_create_kthread(common, &rsi_dev->rx_thread,
+                                   rsi_usb_rx_thread, "RX-Thread");
+       if (status) {
+               rsi_dbg(ERR_ZONE, "%s: Unable to init rx thrd\n", __func__);
+               goto fail;
+       }
+
+#ifdef CONFIG_RSI_DEBUGFS
+       /* In USB, one less than the MAX_DEBUGFS_ENTRIES entries is required */
+       adapter->num_debugfs_entries = (MAX_DEBUGFS_ENTRIES - 1);
+#endif
+
+       rsi_dbg(INIT_ZONE, "%s: Enabled the interface\n", __func__);
+       return 0;
+
+fail:
+       kfree(rsi_dev->tx_buffer);
+       kfree(common->rx_data_pkt);
+       return status;
+}
+
+/**
+ * rsi_probe() - This function is called by kernel when the driver provided
+ *              Vendor and device IDs are matched. All the initialization
+ *              work is done here.
+ * @pfunction: Pointer to the USB interface structure.
+ * @id: Pointer to the usb_device_id structure.
+ *
+ * Return: 0 on success, 1 on failure.
+ */
+static int rsi_probe(struct usb_interface *pfunction,
+                    const struct usb_device_id *id)
+{
+       struct rsi_hw *adapter;
+       struct rsi_91x_usbdev *dev;
+       u16 fw_status;
+
+       rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
+
+       adapter = rsi_91x_init();
+       if (!adapter) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to init os intf ops\n",
+                       __func__);
+               return 1;
+       }
+
+       if (rsi_init_usb_interface(adapter, pfunction)) {
+               rsi_dbg(ERR_ZONE, "%s: Failed to init usb interface\n",
+                       __func__);
+               goto err;
+       }
+
+       rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
+
+       dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+
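+       /* Bit 0 of FW_STATUS_REG indicates that firmware is already loaded;
+        * perform the device init (firmware download) only when it is clear.
+        */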
+       if (rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2) < 0)
+               goto err1;
+       else
+               fw_status &= 1;
+
+       if (!fw_status) {
+               if (rsi_usb_device_init(adapter->priv)) {
+                       rsi_dbg(ERR_ZONE, "%s: Failed in device init\n",
+                               __func__);
+                       goto err1;
+               }
+
+               if (rsi_usb_reg_write(dev->usbdev,
+                                     USB_INTERNAL_REG_1,
+                                     RSI_USB_READY_MAGIC_NUM, 1) < 0)
+                       goto err1;
+               rsi_dbg(INIT_ZONE, "%s: Performed device init\n", __func__);
+       }
+
+       if (rsi_rx_urb_submit(adapter))
+               goto err1;
+
+       return 0;
+err1:
+       rsi_deinit_usb_interface(adapter);
+err:
+       rsi_91x_deinit(adapter);
+       rsi_dbg(ERR_ZONE, "%s: Failed in probe...Exiting\n", __func__);
+       return 1;
+}
+
+/**
+ * rsi_disconnect() - This function performs the reverse of the probe function;
+ *                   it deinitializes the driver structures.
+ * @pfunction: Pointer to the USB interface structure.
+ *
+ * Return: None.
+ */
+static void rsi_disconnect(struct usb_interface *pfunction)
+{
+       struct rsi_hw *adapter = usb_get_intfdata(pfunction);
+
+       if (!adapter)
+               return;
+
+       rsi_mac80211_detach(adapter);
+       rsi_deinit_usb_interface(adapter);
+       rsi_91x_deinit(adapter);
+
+       rsi_dbg(INFO_ZONE, "%s: Deinitialization completed\n", __func__);
+}
+
+#ifdef CONFIG_PM
+static int rsi_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       /* Not yet implemented */
+       return -ENOSYS;
+}
+
+static int rsi_resume(struct usb_interface *intf)
+{
+       /* Not yet implemented */
+       return -ENOSYS;
+}
+#endif
+
+static const struct usb_device_id rsi_dev_table[] = {
+       { USB_DEVICE(0x0303, 0x0100) },
+       { USB_DEVICE(0x041B, 0x0301) },
+       { USB_DEVICE(0x041B, 0x0201) },
+       { USB_DEVICE(0x041B, 0x9330) },
+       { /* Blank */},
+};
+
+static struct usb_driver rsi_driver = {
+       .name       = "RSI-USB WLAN",
+       .probe      = rsi_probe,
+       .disconnect = rsi_disconnect,
+       .id_table   = rsi_dev_table,
+#ifdef CONFIG_PM
+       .suspend    = rsi_suspend,
+       .resume     = rsi_resume,
+#endif
+};
+
+/**
+ * rsi_module_init() - This function registers the client driver.
+ * @void: Void.
+ *
+ * Return: 0 on success.
+ */
+static int rsi_module_init(void)
+{
+       int ret;
+
+       ret = usb_register(&rsi_driver);
+       rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
+       return ret;
+}
+
+/**
+ * rsi_module_exit() - This function unregisters the client driver.
+ * @void: Void.
+ *
+ * Return: None.
+ */
+static void rsi_module_exit(void)
+{
+       usb_deregister(&rsi_driver);
+       rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
+}
+
+module_init(rsi_module_init);
+module_exit(rsi_module_exit);
+
+MODULE_AUTHOR("Redpine Signals Inc");
+MODULE_DESCRIPTION("Common USB layer for RSI drivers");
+MODULE_SUPPORTED_DEVICE("RSI-91x");
+MODULE_DEVICE_TABLE(usb, rsi_dev_table);
+MODULE_FIRMWARE(FIRMWARE_RSI9113);
+MODULE_VERSION("0.1");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
new file mode 100644 (file)
index 0000000..1106ce7
--- /dev/null
@@ -0,0 +1,177 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "rsi_usb.h"
+
+/**
+ * rsi_copy_to_card() - This function copies the TA firmware to the card by
+ *                     writing it out in block-sized chunks.
+ * @common: Pointer to the driver private structure.
+ * @fw: Pointer to the firmware value to be written.
+ * @len: length of firmware file.
+ * @num_blocks: Number of blocks to be written to the card.
+ *
+ * Return: 0 on success and -1 on failure.
+ */
+static int rsi_copy_to_card(struct rsi_common *common,
+                           const u8 *fw,
+                           u32 len,
+                           u32 num_blocks)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       u32 indx, ii;
+       u32 block_size = dev->tx_blk_size;
+       u32 lsb_address;
+       u32 base_address;
+
+       base_address = TA_LOAD_ADDRESS;
+
+       for (indx = 0, ii = 0; ii < num_blocks; ii++, indx += block_size) {
+               lsb_address = base_address;
+               if (rsi_usb_write_register_multiple(adapter,
+                                                   lsb_address,
+                                                   (u8 *)(fw + indx),
+                                                   block_size)) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Unable to load %s blk\n", __func__,
+                               FIRMWARE_RSI9113);
+                       return -EIO;
+               }
+               rsi_dbg(INIT_ZONE, "%s: loading block: %d\n", __func__, ii);
+               base_address += block_size;
+       }
+
+       if (len % block_size) {
+               lsb_address = base_address;
+               if (rsi_usb_write_register_multiple(adapter,
+                                                   lsb_address,
+                                                   (u8 *)(fw + indx),
+                                                   len % block_size)) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Unable to load %s blk\n", __func__,
+                               FIRMWARE_RSI9113);
+                       return -EIO;
+               }
+       }
+       rsi_dbg(INIT_ZONE,
+               "%s: Succesfully loaded %s instructions\n", __func__,
+               FIRMWARE_RSI9113);
+
+       rsi_dbg(INIT_ZONE, "%s: loaded firmware\n", __func__);
+       return 0;
+}
+
+/**
+ * rsi_usb_rx_thread() - This is a kernel thread to receive the packets from
+ *                      the USB device.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: None.
+ */
+void rsi_usb_rx_thread(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       int status;
+
+       do {
+               rsi_wait_event(&dev->rx_thread.event, EVENT_WAIT_FOREVER);
+
+               if (atomic_read(&dev->rx_thread.thread_done))
+                       goto out;
+
+               mutex_lock(&common->tx_rxlock);
+               status = rsi_read_pkt(common, 0);
+               if (status) {
+                       rsi_dbg(ERR_ZONE, "%s: Failed to read data", __func__);
+                       mutex_unlock(&common->tx_rxlock);
+                       return;
+               }
+               mutex_unlock(&common->tx_rxlock);
+               rsi_reset_event(&dev->rx_thread.event);
+               if (adapter->rx_urb_submit(adapter)) {
+                       rsi_dbg(ERR_ZONE,
+                               "%s: Failed in urb submission", __func__);
+                       return;
+               }
+       } while (1);
+
+out:
+       rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
+       complete_and_exit(&dev->rx_thread.completion, 0);
+}
+
+
+/**
+ * rsi_load_ta_instructions() - This function loads the TA firmware. It
+ *                             requests the firmware image, pads its length
+ *                             to a multiple of four bytes and copies it to
+ *                             the card in blocks of data.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int rsi_load_ta_instructions(struct rsi_common *common)
+{
+       struct rsi_hw *adapter = common->priv;
+       struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
+       const struct firmware *fw_entry = NULL;
+       u32 block_size = dev->tx_blk_size;
+       const u8 *fw;
+       u32 num_blocks, len;
+       int status = 0;
+
+       status = request_firmware(&fw_entry, FIRMWARE_RSI9113, adapter->device);
+       if (status < 0) {
+               rsi_dbg(ERR_ZONE, "%s Firmware file %s not found\n",
+                       __func__, FIRMWARE_RSI9113);
+               return status;
+       }
+
+       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw) {
+               release_firmware(fw_entry);
+               return -ENOMEM;
+       }
+       len = fw_entry->size;
+
+       if (len % 4)
+               len += (4 - (len % 4));
+
+       num_blocks = (len / block_size);
+
+       rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+       rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+
+       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
+       release_firmware(fw_entry);
+       return status;
+}
+
+/**
+ * rsi_usb_device_init() - This function initializes the HAL.
+ * @common: Pointer to the driver private structure.
+ *
+ * Return: 0 on success, -EIO on failure.
+ */
+int rsi_usb_device_init(struct rsi_common *common)
+{
+       if (rsi_load_ta_instructions(common))
+               return -EIO;
+
+       return 0;
+}
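/* Illustrative userspace check (not part of this patch) of the block
 * accounting done by rsi_load_ta_instructions()/rsi_copy_to_card() above:
 * the firmware length is padded to a 4-byte multiple, split into
 * tx_blk_size chunks, and any tail shorter than a block goes out as one
 * final partial write. The firmware size and block size below are
 * hypothetical values chosen only for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int len = 183642;     /* hypothetical firmware image size */
	unsigned int block_size = 512; /* hypothetical dev->tx_blk_size */
	unsigned int num_blocks, tail;

	if (len % 4)
		len += 4 - (len % 4);  /* padded to 183644 */

	num_blocks = len / block_size; /* 358 full blocks */
	tail = len % block_size;       /* 348-byte final partial write */

	printf("padded len=%u blocks=%u tail=%u\n", len, num_blocks, tail);
	return 0;
}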
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
new file mode 100644 (file)
index 0000000..5e2721f
--- /dev/null
@@ -0,0 +1,126 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_BOOTPARAMS_HEADER_H__
+#define __RSI_BOOTPARAMS_HEADER_H__
+
+#define CRYSTAL_GOOD_TIME                BIT(0)
+#define BOOTUP_MODE_INFO                 BIT(1)
+#define WIFI_TAPLL_CONFIGS               BIT(5)
+#define WIFI_PLL960_CONFIGS              BIT(6)
+#define WIFI_AFEPLL_CONFIGS              BIT(7)
+#define WIFI_SWITCH_CLK_CONFIGS          BIT(8)
+
+#define TA_PLL_M_VAL_20                  8
+#define TA_PLL_N_VAL_20                  1
+#define TA_PLL_P_VAL_20                  4
+
+#define PLL960_M_VAL_20                  0x14
+#define PLL960_N_VAL_20                  0
+#define PLL960_P_VAL_20                  5
+
+#define UMAC_CLK_40MHZ                   40
+
+#define TA_PLL_M_VAL_40                  46
+#define TA_PLL_N_VAL_40                  3
+#define TA_PLL_P_VAL_40                  3
+
+#define PLL960_M_VAL_40                  0x14
+#define PLL960_N_VAL_40                  0
+#define PLL960_P_VAL_40                  5
+
+#define UMAC_CLK_20BW \
+       (((TA_PLL_M_VAL_20 + 1) * 40) / \
+        ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1)))
+#define VALID_20 \
+       (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS)
+#define UMAC_CLK_40BW   \
+       (((TA_PLL_M_VAL_40 + 1) * 40) / \
+        ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1)))
+#define VALID_40 \
+       (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS | \
+        WIFI_TAPLL_CONFIGS | CRYSTAL_GOOD_TIME | BOOTUP_MODE_INFO)
+
+/* structure to store configs related to TAPLL programming */
+struct tapll_info {
+       __le16 pll_reg_1;
+       __le16 pll_reg_2;
+} __packed;
+
+/* structure to store configs related to PLL960 programming */
+struct pll960_info {
+       __le16 pll_reg_1;
+       __le16 pll_reg_2;
+       __le16 pll_reg_3;
+} __packed;
+
+/* structure to store configs related to AFEPLL programming */
+struct afepll_info {
+       __le16 pll_reg;
+} __packed;
+
+/* structure to store configs related to pll configs */
+struct pll_config {
+       struct tapll_info tapll_info_g;
+       struct pll960_info pll960_info_g;
+       struct afepll_info afepll_info_g;
+} __packed;
+
+/* structure to store configs related to UMAC clk programming */
+struct switch_clk {
+       __le16 switch_clk_info;
+       /* If switch_bbp_lmac_clk_reg is set then this value will be programmed
+        * into reg
+        */
+       __le16 bbp_lmac_clk_reg_val;
+       /* if switch_umac_clk is set then this value will be programmed */
+       __le16 umac_clock_reg_config;
+       /* if switch_qspi_clk is set then this value will be programmed */
+       __le16 qspi_uart_clock_reg_config;
+} __packed;
+
+struct device_clk_info {
+       struct pll_config pll_config_g;
+       struct switch_clk switch_clk_g;
+} __packed;
+
+struct bootup_params {
+       __le16 magic_number;
+       __le16 crystal_good_time;
+       __le32 valid;
+       __le32 reserved_for_valids;
+       __le16 bootup_mode_info;
+       /* configuration used for digital loop back */
+       __le16 digital_loop_back_params;
+       __le16 rtls_timestamp_en;
+       __le16 host_spi_intr_cfg;
+       struct device_clk_info device_clk_info[3];
+       /* ulp buckboost wait time  */
+       __le32 buckboost_wakeup_cnt;
+       /* pmu wakeup wait time & WDT EN info */
+       __le16 pmu_wakeup_wait;
+       u8 shutdown_wait_time;
+       /* Sleep clock source selection */
+       u8 pmu_slp_clkout_sel;
+       /* WDT programming values */
+       __le32 wdt_prog_value;
+       /* WDT soc reset delay */
+       __le32 wdt_soc_rst_delay;
+       /* dcdc modes configs */
+       __le32 dcdc_operation_mode;
+       __le32 soc_reset_wait_cnt;
+} __packed;
+#endif
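/* Illustrative userspace check (not part of this patch) of the UMAC clock
 * macros in rsi_boot_params.h above, evaluated with the 20 MHz and 40 MHz
 * TA PLL M/N/P values copied from the same header.
 */
#include <stdio.h>

#define TA_PLL_M_VAL_20 8
#define TA_PLL_N_VAL_20 1
#define TA_PLL_P_VAL_20 4

#define TA_PLL_M_VAL_40 46
#define TA_PLL_N_VAL_40 3
#define TA_PLL_P_VAL_40 3

#define UMAC_CLK_20BW \
	(((TA_PLL_M_VAL_20 + 1) * 40) / \
	 ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1)))
#define UMAC_CLK_40BW \
	(((TA_PLL_M_VAL_40 + 1) * 40) / \
	 ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1)))

int main(void)
{
	/* ((8 + 1) * 40) / ((1 + 1) * (4 + 1)) = 360 / 10 = 36 */
	printf("UMAC_CLK_20BW = %d MHz\n", UMAC_CLK_20BW);
	/* ((46 + 1) * 40) / ((3 + 1) * (3 + 1)) = 1880 / 16 = 117 (integer) */
	printf("UMAC_CLK_40BW = %d MHz\n", UMAC_CLK_40BW);
	return 0;
}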
diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h
new file mode 100644 (file)
index 0000000..f2f7078
--- /dev/null
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_COMMON_H__
+#define __RSI_COMMON_H__
+
+#include <linux/kthread.h>
+
+#define EVENT_WAIT_FOREVER              0
+#define TA_LOAD_ADDRESS                 0x00
+#define FIRMWARE_RSI9113                "rsi_91x.fw"
+#define QUEUE_NOT_FULL                  1
+#define QUEUE_FULL                      0
+
+static inline int rsi_init_event(struct rsi_event *pevent)
+{
+       atomic_set(&pevent->event_condition, 1);
+       init_waitqueue_head(&pevent->event_queue);
+       return 0;
+}
+
+static inline int rsi_wait_event(struct rsi_event *event, u32 timeout)
+{
+       int status = 0;
+
+       if (!timeout)
+               status = wait_event_interruptible(event->event_queue,
+                               (atomic_read(&event->event_condition) == 0));
+       else
+               status = wait_event_interruptible_timeout(event->event_queue,
+                               (atomic_read(&event->event_condition) == 0),
+                               timeout);
+       return status;
+}
+
+static inline void rsi_set_event(struct rsi_event *event)
+{
+       atomic_set(&event->event_condition, 0);
+       wake_up_interruptible(&event->event_queue);
+}
+
+static inline void rsi_reset_event(struct rsi_event *event)
+{
+       atomic_set(&event->event_condition, 1);
+}
+
+static inline int rsi_create_kthread(struct rsi_common *common,
+                                    struct rsi_thread *thread,
+                                    void *func_ptr,
+                                    u8 *name)
+{
+       init_completion(&thread->completion);
+       thread->task = kthread_run(func_ptr, common, name);
+       if (IS_ERR(thread->task))
+               return (int)PTR_ERR(thread->task);
+
+       return 0;
+}
+
+static inline int rsi_kill_thread(struct rsi_thread *handle)
+{
+       atomic_inc(&handle->thread_done);
+       rsi_set_event(&handle->event);
+
+       wait_for_completion(&handle->completion);
+       return kthread_stop(handle->task);
+}
+
+void rsi_mac80211_detach(struct rsi_hw *hw);
+u16 rsi_get_connected_channel(struct rsi_hw *adapter);
+struct rsi_hw *rsi_91x_init(void);
+void rsi_91x_deinit(struct rsi_hw *adapter);
+int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len);
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_debugfs.h b/drivers/net/wireless/rsi/rsi_debugfs.h
new file mode 100644 (file)
index 0000000..580ad3b
--- /dev/null
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_DEBUGFS_H__
+#define __RSI_DEBUGFS_H__
+
+#include "rsi_main.h"
+#include <linux/debugfs.h>
+
+#ifndef CONFIG_RSI_DEBUGFS
+static inline int rsi_init_dbgfs(struct rsi_hw *adapter)
+{
+       return 0;
+}
+
+static inline void rsi_remove_dbgfs(struct rsi_hw *adapter)
+{
+       return;
+}
+#else
+struct rsi_dbg_files {
+       const char *name;
+       umode_t perms;
+       const struct file_operations fops;
+};
+
+struct rsi_debugfs {
+       struct dentry *subdir;
+       struct rsi_dbg_ops *dfs_get_ops;
+       struct dentry *rsi_files[MAX_DEBUGFS_ENTRIES];
+};
+int rsi_init_dbgfs(struct rsi_hw *adapter);
+void rsi_remove_dbgfs(struct rsi_hw *adapter);
+#endif
+#endif
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
new file mode 100644 (file)
index 0000000..2cb73e7
--- /dev/null
@@ -0,0 +1,218 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_MAIN_H__
+#define __RSI_MAIN_H__
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#define ERR_ZONE                        BIT(0)  /* For Error Msgs             */
+#define INFO_ZONE                       BIT(1)  /* For General Status Msgs    */
+#define INIT_ZONE                       BIT(2)  /* For Driver Init Seq Msgs   */
+#define MGMT_TX_ZONE                    BIT(3)  /* For TX Mgmt Path Msgs      */
+#define MGMT_RX_ZONE                    BIT(4)  /* For RX Mgmt Path Msgs      */
+#define DATA_TX_ZONE                    BIT(5)  /* For TX Data Path Msgs      */
+#define DATA_RX_ZONE                    BIT(6)  /* For RX Data Path Msgs      */
+#define FSM_ZONE                        BIT(7)  /* For State Machine Msgs     */
+#define ISR_ZONE                        BIT(8)  /* For Interrupt Msgs         */
+
+#define FSM_CARD_NOT_READY              0
+#define FSM_BOOT_PARAMS_SENT            1
+#define FSM_EEPROM_READ_MAC_ADDR        2
+#define FSM_RESET_MAC_SENT              3
+#define FSM_RADIO_CAPS_SENT             4
+#define FSM_BB_RF_PROG_SENT             5
+#define FSM_MAC_INIT_DONE               6
+
+extern u32 rsi_zone_enabled;
+extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
+
+#define RSI_MAX_VIFS                    1
+#define NUM_EDCA_QUEUES                 4
+#define IEEE80211_ADDR_LEN              6
+#define FRAME_DESC_SZ                   16
+#define MIN_802_11_HDR_LEN              24
+
+#define DATA_QUEUE_WATER_MARK           400
+#define MIN_DATA_QUEUE_WATER_MARK       300
+#define MULTICAST_WATER_MARK            200
+#define MAC_80211_HDR_FRAME_CONTROL     0
+#define WME_NUM_AC                      4
+#define NUM_SOFT_QUEUES                 5
+#define MAX_HW_QUEUES                   8
+#define INVALID_QUEUE                   0xff
+#define MAX_CONTINUOUS_VO_PKTS          8
+#define MAX_CONTINUOUS_VI_PKTS          4
+
+/* Queue information */
+#define RSI_WIFI_MGMT_Q                 0x4
+#define RSI_WIFI_DATA_Q                 0x5
+#define IEEE80211_MGMT_FRAME            0x00
+#define IEEE80211_CTL_FRAME             0x04
+
+#define IEEE80211_QOS_TID               0x0f
+#define IEEE80211_NONQOS_TID            16
+
+#define MAX_DEBUGFS_ENTRIES             4
+
+#define TID_TO_WME_AC(_tid) (      \
+       ((_tid) == 0 || (_tid) == 3) ? BE_Q : \
+       ((_tid) < 3) ? BK_Q : \
+       ((_tid) < 6) ? VI_Q : \
+       VO_Q)
+
+#define WME_AC(_q) (    \
+       ((_q) == BK_Q) ? IEEE80211_AC_BK : \
+       ((_q) == BE_Q) ? IEEE80211_AC_BE : \
+       ((_q) == VI_Q) ? IEEE80211_AC_VI : \
+       IEEE80211_AC_VO)
+
+struct version_info {
+       u16 major;
+       u16 minor;
+       u16 release_num;
+       u16 patch_num;
+} __packed;
+
+struct skb_info {
+       s8 rssi;
+       u32 flags;
+       u16 channel;
+       s8 tid;
+       s8 sta_id;
+};
+
+enum edca_queue {
+       BK_Q,
+       BE_Q,
+       VI_Q,
+       VO_Q,
+       MGMT_SOFT_Q
+};
+
+struct security_info {
+       bool security_enable;
+       u32 ptk_cipher;
+       u32 gtk_cipher;
+};
+
+struct wmm_qinfo {
+       s32 weight;
+       s32 wme_params;
+       s32 pkt_contended;
+};
+
+struct transmit_q_stats {
+       u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 1];
+       u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 1];
+};
+
+struct vif_priv {
+       bool is_ht;
+       bool sgi;
+       u16 seq_start;
+};
+
+struct rsi_event {
+       atomic_t event_condition;
+       wait_queue_head_t event_queue;
+};
+
+struct rsi_thread {
+       void (*thread_function)(void *);
+       struct completion completion;
+       struct task_struct *task;
+       struct rsi_event event;
+       atomic_t thread_done;
+};
+
+struct rsi_hw;
+
+struct rsi_common {
+       struct rsi_hw *priv;
+       struct vif_priv vif_info[RSI_MAX_VIFS];
+
+       bool mgmt_q_block;
+       struct version_info driver_ver;
+       struct version_info fw_ver;
+
+       struct rsi_thread tx_thread;
+       struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1];
+       /* Mutex declaration */
+       struct mutex mutex;
+       /* Mutex used between tx/rx threads */
+       struct mutex tx_rxlock;
+       u8 endpoint;
+
+       /* Channel/band related */
+       u8 band;
+       u8 channel_width;
+
+       u16 rts_threshold;
+       u16 bitrate_mask[2];
+       u32 fixedrate_mask[2];
+
+       u8 rf_reset;
+       struct transmit_q_stats tx_stats;
+       struct security_info secinfo;
+       struct wmm_qinfo tx_qinfo[NUM_EDCA_QUEUES];
+       struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
+       u8 mac_addr[IEEE80211_ADDR_LEN];
+
+       /* state related */
+       u32 fsm_state;
+       bool init_done;
+       u8 bb_rf_prog_count;
+       bool iface_down;
+
+       /* Generic */
+       u8 channel;
+       u8 *rx_data_pkt;
+       u8 mac_id;
+       u8 radio_id;
+       u16 rate_pwr[20];
+       u16 min_rate;
+
+       /* WMM algo related */
+       u8 selected_qnum;
+       u32 pkt_cnt;
+       u8 min_weight;
+};
+
+struct rsi_hw {
+       struct rsi_common *priv;
+       struct ieee80211_hw *hw;
+       struct ieee80211_vif *vifs[RSI_MAX_VIFS];
+       struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
+       struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+
+       struct device *device;
+       u8 sc_nvifs;
+
+#ifdef CONFIG_RSI_DEBUGFS
+       struct rsi_debugfs *dfsentry;
+       u8 num_debugfs_entries;
+#endif
+       void *rsi_dev;
+       int (*host_intf_read_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+       int (*host_intf_write_pkt)(struct rsi_hw *adapter, u8 *pkt, u32 len);
+       int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num);
+       int (*rx_urb_submit)(struct rsi_hw *adapter);
+       int (*determine_event_timeout)(struct rsi_hw *adapter);
+};
+#endif
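/* Illustrative userspace walk-through (not part of this patch) of the
 * TID_TO_WME_AC() mapping from rsi_main.h above: TIDs 0 and 3 map to the
 * best-effort queue, 1-2 to background, 4-5 to video and 6-7 to voice.
 * The enum and macro are copied verbatim from the header.
 */
#include <stdio.h>

enum edca_queue { BK_Q, BE_Q, VI_Q, VO_Q, MGMT_SOFT_Q };

#define TID_TO_WME_AC(_tid) (      \
	((_tid) == 0 || (_tid) == 3) ? BE_Q : \
	((_tid) < 3) ? BK_Q : \
	((_tid) < 6) ? VI_Q : \
	VO_Q)

int main(void)
{
	static const char * const names[] = { "BK_Q", "BE_Q", "VI_Q", "VO_Q" };
	int tid;

	for (tid = 0; tid < 8; tid++)
		printf("tid %d -> %s\n", tid, names[TID_TO_WME_AC(tid)]);
	return 0;
}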
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
new file mode 100644 (file)
index 0000000..ac67c4a
--- /dev/null
@@ -0,0 +1,285 @@
+/**
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_MGMT_H__
+#define __RSI_MGMT_H__
+
+#include <linux/sort.h>
+#include "rsi_boot_params.h"
+#include "rsi_main.h"
+
+#define MAX_MGMT_PKT_SIZE               512
+#define RSI_NEEDED_HEADROOM             80
+#define RSI_RCV_BUFFER_LEN              2000
+
+#define RSI_11B_MODE                    0
+#define RSI_11G_MODE                    BIT(7)
+#define RETRY_COUNT                     8
+#define RETRY_LONG                      4
+#define RETRY_SHORT                     7
+#define WMM_SHORT_SLOT_TIME             9
+#define SIFS_DURATION                   16
+
+#define KEY_TYPE_CLEAR                  0
+#define RSI_PAIRWISE_KEY                1
+#define RSI_GROUP_KEY                   2
+
+/* EEPROM_READ_ADDRESS */
+#define WLAN_MAC_EEPROM_ADDR            40
+#define WLAN_MAC_MAGIC_WORD_LEN         0x01
+#define WLAN_HOST_MODE_LEN              0x04
+#define WLAN_FW_VERSION_LEN             0x08
+#define MAGIC_WORD                      0x5A
+
+/* Receive Frame Types */
+#define TA_CONFIRM_TYPE                 0x01
+#define RX_DOT11_MGMT                   0x02
+#define TX_STATUS_IND                   0x04
+#define PROBEREQ_CONFIRM                2
+#define CARD_READY_IND                  0x00
+
+#define RSI_DELETE_PEER                 0x0
+#define RSI_ADD_PEER                    0x1
+#define START_AMPDU_AGGR                0x1
+#define STOP_AMPDU_AGGR                 0x0
+#define INTERNAL_MGMT_PKT               0x99
+
+#define PUT_BBP_RESET                   0
+#define BBP_REG_WRITE                   0
+#define RF_RESET_ENABLE                 BIT(3)
+#define RATE_INFO_ENABLE                BIT(0)
+#define RSI_BROADCAST_PKT               BIT(9)
+
+#define UPPER_20_ENABLE                 (0x2 << 12)
+#define LOWER_20_ENABLE                 (0x4 << 12)
+#define FULL40M_ENABLE                  0x6
+
+#define RSI_LMAC_CLOCK_80MHZ            0x1
+#define RSI_ENABLE_40MHZ                (0x1 << 3)
+
+#define RX_BA_INDICATION                1
+#define RSI_TBL_SZ                      40
+#define MAX_RETRIES                     8
+
+#define STD_RATE_MCS7                   0x07
+#define STD_RATE_MCS6                   0x06
+#define STD_RATE_MCS5                   0x05
+#define STD_RATE_MCS4                   0x04
+#define STD_RATE_MCS3                   0x03
+#define STD_RATE_MCS2                   0x02
+#define STD_RATE_MCS1                   0x01
+#define STD_RATE_MCS0                   0x00
+#define STD_RATE_54                     0x6c
+#define STD_RATE_48                     0x60
+#define STD_RATE_36                     0x48
+#define STD_RATE_24                     0x30
+#define STD_RATE_18                     0x24
+#define STD_RATE_12                     0x18
+#define STD_RATE_11                     0x16
+#define STD_RATE_09                     0x12
+#define STD_RATE_06                     0x0C
+#define STD_RATE_5_5                    0x0B
+#define STD_RATE_02                     0x04
+#define STD_RATE_01                     0x02
+
+#define RSI_RF_TYPE                     1
+#define RSI_RATE_00                     0x00
+#define RSI_RATE_1                      0x0
+#define RSI_RATE_2                      0x2
+#define RSI_RATE_5_5                    0x4
+#define RSI_RATE_11                     0x6
+#define RSI_RATE_6                      0x8b
+#define RSI_RATE_9                      0x8f
+#define RSI_RATE_12                     0x8a
+#define RSI_RATE_18                     0x8e
+#define RSI_RATE_24                     0x89
+#define RSI_RATE_36                     0x8d
+#define RSI_RATE_48                     0x88
+#define RSI_RATE_54                     0x8c
+#define RSI_RATE_MCS0                   0x100
+#define RSI_RATE_MCS1                   0x101
+#define RSI_RATE_MCS2                   0x102
+#define RSI_RATE_MCS3                   0x103
+#define RSI_RATE_MCS4                   0x104
+#define RSI_RATE_MCS5                   0x105
+#define RSI_RATE_MCS6                   0x106
+#define RSI_RATE_MCS7                   0x107
+#define RSI_RATE_MCS7_SG                0x307
+
+#define BW_20MHZ                        0
+#define BW_40MHZ                        1
+
+#define RSI_SUPP_FILTERS       (FIF_ALLMULTI | FIF_PROBE_REQ |\
+                                FIF_BCN_PRBRESP_PROMISC)
+enum opmode {
+       STA_OPMODE = 1,
+       AP_OPMODE = 2
+};
+
+extern struct ieee80211_rate rsi_rates[12];
+extern const u16 rsi_mcsrates[8];
+
+enum sta_notify_events {
+       STA_CONNECTED = 0,
+       STA_DISCONNECTED,
+       STA_TX_ADDBA_DONE,
+       STA_TX_DELBA,
+       STA_RX_ADDBA_DONE,
+       STA_RX_DELBA
+};
+
+/* Send Frames Types */
+enum cmd_frame_type {
+       TX_DOT11_MGMT,
+       RESET_MAC_REQ,
+       RADIO_CAPABILITIES,
+       BB_PROG_VALUES_REQUEST,
+       RF_PROG_VALUES_REQUEST,
+       WAKEUP_SLEEP_REQUEST,
+       SCAN_REQUEST,
+       TSF_UPDATE,
+       PEER_NOTIFY,
+       BLOCK_UNBLOCK,
+       SET_KEY_REQ,
+       AUTO_RATE_IND,
+       BOOTUP_PARAMS_REQUEST,
+       VAP_CAPABILITIES,
+       EEPROM_READ_TYPE,
+       EEPROM_WRITE,
+       GPIO_PIN_CONFIG,
+       SET_RX_FILTER,
+       AMPDU_IND,
+       STATS_REQUEST_FRAME,
+       BB_BUF_PROG_VALUES_REQ,
+       BBP_PROG_IN_TA,
+       BG_SCAN_PARAMS,
+       BG_SCAN_PROBE_REQ,
+       CW_MODE_REQ,
+       PER_CMD_PKT
+};
+
+struct rsi_mac_frame {
+       __le16 desc_word[8];
+} __packed;
+
+struct rsi_boot_params {
+       __le16 desc_word[8];
+       struct bootup_params bootup_params;
+} __packed;
+
+struct rsi_peer_notify {
+       __le16 desc_word[8];
+       u8 mac_addr[6];
+       __le16 command;
+       __le16 mpdu_density;
+       __le16 reserved;
+       __le32 sta_flags;
+} __packed;
+
+struct rsi_vap_caps {
+       __le16 desc_word[8];
+       u8 mac_addr[6];
+       __le16 keep_alive_period;
+       u8 bssid[6];
+       __le16 reserved;
+       __le32 flags;
+       __le16 frag_threshold;
+       __le16 rts_threshold;
+       __le32 default_mgmt_rate;
+       __le32 default_ctrl_rate;
+       __le32 default_data_rate;
+       __le16 beacon_interval;
+       __le16 dtim_period;
+} __packed;
+
+struct rsi_set_key {
+       __le16 desc_word[8];
+       u8 key[4][32];
+       u8 tx_mic_key[8];
+       u8 rx_mic_key[8];
+} __packed;
+
+struct rsi_auto_rate {
+       __le16 desc_word[8];
+       __le16 failure_limit;
+       __le16 initial_boundary;
+       __le16 max_threshold_limt;
+       __le16 num_supported_rates;
+       __le16 aarf_rssi;
+       __le16 moderate_rate_inx;
+       __le16 collision_tolerance;
+       __le16 supported_rates[40];
+} __packed;
+
+struct qos_params {
+       __le16 cont_win_min_q;
+       __le16 cont_win_max_q;
+       __le16 aifsn_val_q;
+       __le16 txop_q;
+} __packed;
+
+struct rsi_radio_caps {
+       __le16 desc_word[8];
+       struct qos_params qos_params[MAX_HW_QUEUES];
+       u8 num_11n_rates;
+       u8 num_11ac_rates;
+       __le16 gcpd_per_rate[20];
+} __packed;
+
+static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
+{
+       return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
+}
+
+static inline u32 rsi_get_length(u8 *addr, u16 offset)
+{
+       return (le16_to_cpu(*(__le16 *)&addr[offset])) & 0x0fff;
+}
+
+static inline u8 rsi_get_extended_desc(u8 *addr, u16 offset)
+{
+       return le16_to_cpu(*((__le16 *)&addr[offset + 4])) & 0x00ff;
+}
+
+static inline u8 rsi_get_rssi(u8 *addr)
+{
+       return *(u8 *)(addr + FRAME_DESC_SZ);
+}
+
+static inline u8 rsi_get_channel(u8 *addr)
+{
+       return *(char *)(addr + 15);
+}
+
+int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg);
+int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode);
+int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid,
+                                     u16 ssn, u8 buf_size, u8 event);
+int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len,
+                    u8 key_type, u8 key_id, u32 cipher);
+int rsi_set_channel(struct rsi_common *common, u16 chno);
+void rsi_inform_bss_status(struct rsi_common *common, u8 status,
+                          const u8 *bssid, u8 qos_enable, u16 aid);
+void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb);
+int rsi_mac80211_attach(struct rsi_common *common);
+void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb,
+                           int status);
+bool rsi_is_cipher_wep(struct rsi_common *common);
+void rsi_core_qos_processor(struct rsi_common *common);
+void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb);
+int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb);
+int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb);
+#endif
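/* Illustrative userspace sketch (not part of this patch) of how
 * rsi_get_queueno() and rsi_get_length() in rsi_mgmt.h above pick apart
 * the first little-endian descriptor word: bits 0-11 carry the length,
 * bits 12-14 the queue number. The sample descriptor bytes are made up,
 * and get_le16() is a local stand-in for le16_to_cpu() on a byte buffer.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *addr, unsigned int offset)
{
	return (uint16_t)addr[offset] | ((uint16_t)addr[offset + 1] << 8);
}

int main(void)
{
	/* length 500 (0x1f4) in bits 0-11, queue 5 (RSI_WIFI_DATA_Q) in 12-14 */
	uint8_t desc[16] = { 0xf4, 0x51 };
	uint16_t word = get_le16(desc, 0);

	printf("queue  = %u\n", (word & 0x7000) >> 12); /* prints 5 */
	printf("length = %u\n", word & 0x0fff);         /* prints 500 */
	return 0;
}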
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
new file mode 100644 (file)
index 0000000..df4b5e2
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * @section LICENSE
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#ifndef __RSI_SDIO_INTF__
+#define __RSI_SDIO_INTF__
+
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/sdio_ids.h>
+#include "rsi_main.h"
+
+enum sdio_interrupt_type {
+       BUFFER_FULL         = 0x0,
+       BUFFER_AVAILABLE    = 0x1,
+       FIRMWARE_ASSERT_IND = 0x3,
+       MSDU_PACKET_PENDING = 0x4,
+       UNKNOWN_INT         = 0XE
+};
+
+/* Buffer status register related info */
+#define PKT_BUFF_SEMI_FULL                      0
+#define PKT_BUFF_FULL                           1
+#define PKT_MGMT_BUFF_FULL                      2
+#define MSDU_PKT_PENDING                        3
+/* Interrupt Bit Related Macros */
+#define PKT_BUFF_AVAILABLE                      0
+#define FW_ASSERT_IND                           2
+
+#define RSI_DEVICE_BUFFER_STATUS_REGISTER       0xf3
+#define RSI_FN1_INT_REGISTER                    0xf9
+#define RSI_SD_REQUEST_MASTER                   0x10000
+
+/* FOR SD CARD ONLY */
+#define SDIO_RX_NUM_BLOCKS_REG                  0x000F1
+#define SDIO_FW_STATUS_REG                      0x000F2
+#define SDIO_NXT_RD_DELAY2                      0x000F5
+#define SDIO_MASTER_ACCESS_MSBYTE               0x000FA
+#define SDIO_MASTER_ACCESS_LSBYTE               0x000FB
+#define SDIO_READ_START_LVL                     0x000FC
+#define SDIO_READ_FIFO_CTL                      0x000FD
+#define SDIO_WRITE_FIFO_CTL                     0x000FE
+#define SDIO_FUN1_INTR_CLR_REG                  0x0008
+#define SDIO_REG_HIGH_SPEED                     0x0013
+
+#define RSI_GET_SDIO_INTERRUPT_TYPE(_I, TYPE)      \
+       {                                          \
+               TYPE =                             \
+               (_I & (1 << PKT_BUFF_AVAILABLE)) ? \
+               BUFFER_AVAILABLE :                 \
+               (_I & (1 << MSDU_PKT_PENDING)) ?   \
+               MSDU_PACKET_PENDING :              \
+               (_I & (1 << FW_ASSERT_IND)) ?      \
+               FIRMWARE_ASSERT_IND : UNKNOWN_INT; \
+       }
+
+/* common registers in SDIO function1 */
+#define TA_SOFT_RESET_REG            0x0004
+#define TA_TH0_PC_REG                0x0400
+#define TA_HOLD_THREAD_REG           0x0844
+#define TA_RELEASE_THREAD_REG        0x0848
+
+#define TA_SOFT_RST_CLR              0
+#define TA_SOFT_RST_SET              BIT(0)
+#define TA_PC_ZERO                   0
+#define TA_HOLD_THREAD_VALUE         cpu_to_le32(0xF)
+#define TA_RELEASE_THREAD_VALUE      cpu_to_le32(0xF)
+#define TA_BASE_ADDR                 0x2200
+#define MISC_CFG_BASE_ADDR           0x4150
+
+struct receive_info {
+       bool buffer_full;
+       bool semi_buffer_full;
+       bool mgmt_buffer_full;
+       u32 mgmt_buf_full_counter;
+       u32 buf_semi_full_counter;
+       u8 watch_bufferfull_count;
+       u32 sdio_intr_status_zero;
+       u32 sdio_int_counter;
+       u32 total_sdio_msdu_pending_intr;
+       u32 total_sdio_unknown_intr;
+       u32 buf_full_counter;
+       u32 buf_avilable_counter;
+};
+
+struct rsi_91x_sdiodev {
+       struct sdio_func *pfunction;
+       struct task_struct *in_sdio_litefi_irq;
+       struct receive_info rx_info;
+       u32 next_read_delay;
+       u32 sdio_high_speed_enable;
+       u8 sdio_clock_speed;
+       u32 cardcapability;
+       u8 prev_desc[16];
+       u32 tx_blk_size;
+       u8 write_fail;
+};
+
+void rsi_interrupt_handler(struct rsi_hw *adapter);
+int rsi_init_sdio_slave_regs(struct rsi_hw *adapter);
+int rsi_sdio_device_init(struct rsi_common *common);
+int rsi_sdio_read_register(struct rsi_hw *adapter, u32 addr, u8 *data);
+int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter, u8 *pkt, u32 length);
+int rsi_sdio_write_register(struct rsi_hw *adapter, u8 function,
+                           u32 addr, u8 *data);
+int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+                                    u8 *data, u32 count);
+void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit);
+int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter);
+int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num);
+#endif
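/* Illustrative userspace sketch (not part of this patch) of how
 * RSI_GET_SDIO_INTERRUPT_TYPE() in rsi_sdio.h above classifies the
 * function-1 interrupt status: buffer-available is checked first, then
 * MSDU pending, then firmware assert; anything else is UNKNOWN_INT.
 * The enum, bit positions and macro are copied from the header.
 */
#include <stdio.h>

enum sdio_interrupt_type {
	BUFFER_FULL         = 0x0,
	BUFFER_AVAILABLE    = 0x1,
	FIRMWARE_ASSERT_IND = 0x3,
	MSDU_PACKET_PENDING = 0x4,
	UNKNOWN_INT         = 0xE
};

#define PKT_BUFF_AVAILABLE 0
#define FW_ASSERT_IND      2
#define MSDU_PKT_PENDING   3

#define RSI_GET_SDIO_INTERRUPT_TYPE(_I, TYPE)      \
	{                                          \
		TYPE =                             \
		(_I & (1 << PKT_BUFF_AVAILABLE)) ? \
		BUFFER_AVAILABLE :                 \
		(_I & (1 << MSDU_PKT_PENDING)) ?   \
		MSDU_PACKET_PENDING :              \
		(_I & (1 << FW_ASSERT_IND)) ?      \
		FIRMWARE_ASSERT_IND : UNKNOWN_INT; \
	}

int main(void)
{
	enum sdio_interrupt_type type;
	unsigned int status = 1 << MSDU_PKT_PENDING; /* only bit 3 set */

	RSI_GET_SDIO_INTERRUPT_TYPE(status, type);
	printf("type = 0x%x\n", type); /* prints 0x4 (MSDU_PACKET_PENDING) */
	return 0;
}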
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
new file mode 100644 (file)
index 0000000..ebea0c4
--- /dev/null
@@ -0,0 +1,68 @@
+/**
+ * @section LICENSE
+ * Copyright (c) 2014 Redpine Signals Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __RSI_USB_INTF__
+#define __RSI_USB_INTF__
+
+#include <linux/usb.h>
+#include "rsi_main.h"
+#include "rsi_common.h"
+
+#define USB_INTERNAL_REG_1           0x25000
+#define RSI_USB_READY_MAGIC_NUM      0xab
+#define FW_STATUS_REG                0x41050012
+
+#define USB_VENDOR_REGISTER_READ     0x15
+#define USB_VENDOR_REGISTER_WRITE    0x16
+#define RSI_USB_TX_HEAD_ROOM         128
+
+#define MAX_RX_URBS                  1
+#define MAX_BULK_EP                  8
+#define MGMT_EP                      1
+#define DATA_EP                      2
+
+struct rsi_91x_usbdev {
+       struct rsi_thread rx_thread;
+       u8 endpoint;
+       struct usb_device *usbdev;
+       struct usb_interface *pfunction;
+       struct urb *rx_usb_urb[MAX_RX_URBS];
+       u8 *tx_buffer;
+       __le16 bulkin_size;
+       u8 bulkin_endpoint_addr;
+       __le16 bulkout_size[MAX_BULK_EP];
+       u8 bulkout_endpoint_addr[MAX_BULK_EP];
+       u32 tx_blk_size;
+       u8 write_fail;
+};
+
+static inline int rsi_usb_check_queue_status(struct rsi_hw *adapter, u8 q_num)
+{
+       /* In USB, there isn't any need to check the queue status */
+       return QUEUE_NOT_FULL;
+}
+
+static inline int rsi_usb_event_timeout(struct rsi_hw *adapter)
+{
+       return EVENT_WAIT_FOREVER;
+}
+
+int rsi_usb_device_init(struct rsi_common *common);
+int rsi_usb_write_register_multiple(struct rsi_hw *adapter, u32 addr,
+                                   u8 *data, u32 count);
+void rsi_usb_rx_thread(struct rsi_common *common);
+#endif
index caddc1b427a919659200c539552a619af49c5d17..a49c3d73ea2c9a21679e0dc52f746be2b1089c4a 100644 (file)
@@ -125,9 +125,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
 
        tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
        if (unlikely(tout))
-               rt2x00_warn(entry->queue->rt2x00dev,
-                           "TX status timeout for entry %d in queue %d\n",
-                           entry->entry_idx, entry->queue->qid);
+               rt2x00_dbg(entry->queue->rt2x00dev,
+                          "TX status timeout for entry %d in queue %d\n",
+                          entry->entry_idx, entry->queue->qid);
        return tout;
 
 }
@@ -566,8 +566,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
                queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
 
                if (unlikely(rt2x00queue_empty(queue))) {
-                       rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
-                                   qid);
+                       rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+                                  qid);
                        break;
                }
 
@@ -764,7 +764,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        /*
         * Overwrite TX done handler
         */
-       PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
+       INIT_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
 
        return 0;
 }
index 2e3d1645e68bb68fb827070f126beaefb1b7e3c6..90fdb02b55e79debd5bd89b31104e90a260c596c 100644 (file)
@@ -286,7 +286,7 @@ static ssize_t rt2x00debug_read_queue_dump(struct file *file,
        if (retval)
                return retval;
 
-       status = min((size_t)skb->len, length);
+       status = min_t(size_t, skb->len, length);
        if (copy_to_user(buf, skb->data, status)) {
                status = -EFAULT;
                goto exit;
index 30332175bcd8d6abaa788a5da5f7f1d7ca5c1fbe..1ce1d55f0010b48b9360a2716ad1587f060e7971 100644 (file)
@@ -2,11 +2,11 @@
 # RTL818X Wireless LAN device configuration
 #
 config RTL8180
-       tristate "Realtek 8180/8185 PCI support"
+       tristate "Realtek 8180/8185/8187SE PCI support"
        depends on MAC80211 && PCI
        select EEPROM_93CX6
        ---help---
-         This is a driver for RTL8180 and RTL8185 based cards.
+         This is a driver for RTL8180, RTL8185 and RTL8187SE based cards.
          These are PCI based chips found in cards such as:
 
          (RTL8185 802.11g)
index cb4fb8596f0badeada10a7dd8975888990e90c75..08b056db4a3b795282d1d9e878ab400d7090bcf2 100644 (file)
@@ -1,4 +1,4 @@
-rtl8180-objs           := dev.o rtl8225.o sa2400.o max2820.o grf5101.o
+rtl8180-objs           := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
 
 obj-$(CONFIG_RTL8180)  += rtl8180.o
 
index 3867d1470b36aef5664bcab74e85f38702c8315d..98d8256f037788a4d9af76c02a4e939758a08e0e 100644 (file)
@@ -1,15 +1,42 @@
 
-/*
- * Linux device driver for RTL8180 / RTL8185
+/* Linux device driver for RTL8180 / RTL8185 / RTL8187SE
  *
  * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ * Copyright 2007,2014 Andrea Merello <andrea.merello@gmail.com>
  *
  * Based on the r8180 driver, which is:
  * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
  *
  * Thanks to Realtek for their support!
  *
+ ************************************************************************
+ *
+ * The driver was extended to the RTL8187SE in 2014 by
+ * Andrea Merello <andrea.merello@gmail.com>
+ *
+ * based also on:
+ *  - portions of rtl8187se Linux staging driver, Copyright Realtek corp.
+ *  - other GPL, unpublished (until now), Linux driver code,
+ *    Copyright Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ * A huge thanks goes to Sara V. Nari who forgives me when I'm
+ * sitting in front of my laptop at evening, week-end, night...
+ *
+ * A special thanks goes to Antonio Cuni, who helped me with
+ * some python userspace stuff I used to debug RTL8187SE code, and who
+ * bought a laptop with an unsupported Wi-Fi card some years ago...
+ *
+ * Thanks to Larry Finger for writing some code for rtl8187se and for
+ * his suggestions.
+ *
+ * Thanks to Dan Carpenter for reviewing my initial patch and for his
+ * suggestions.
+ *
+ * Thanks to Bernhard Schiffner for his help in testing and for his
+ * suggestions.
+ *
+ ************************************************************************
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 #include "sa2400.h"
 #include "max2820.h"
 #include "grf5101.h"
+#include "rtl8225se.h"
 
 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
 MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
-MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
+MODULE_DESCRIPTION("RTL8180 / RTL8185 / RTL8187SE PCI wireless driver");
 MODULE_LICENSE("GPL");
 
 static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
+
+       /* rtl8187se */
+       { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8199) },
+
        /* rtl8185 */
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
        { PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -85,6 +117,76 @@ static const struct ieee80211_channel rtl818x_channels[] = {
        { .center_freq = 2484 },
 };
 
+/* Queues for rtl8187se card
+ *
+ * name | reg  |  queue
+ *  BC  |  7   |   6
+ *  MG  |  1   |   0
+ *  HI  |  6   |   1
+ *  VO  |  5   |   2
+ *  VI  |  4   |   3
+ *  BE  |  3   |   4
+ *  BK  |  2   |   5
+ *
+ * The complete map for the DMA kick reg, using all queues, is:
+ * static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] =
+ *     {1, 6, 5, 4, 3, 2, 7};
+ *
+ * .. but because four queues are enough for mac80211 QoS, we use this:
+ *
+ * name | reg  |  queue
+ *  BC  |  7   |   4  <- currently not used yet
+ *  MG  |  1   |   x  <- Not used
+ *  HI  |  6   |   x  <- Not used
+ *  VO  |  5   |   0  <- used
+ *  VI  |  4   |   1  <- used
+ *  BE  |  3   |   2  <- used
+ *  BK  |  2   |   3  <- used
+ *
+ * Beacon queue could be used, but this is not finished yet.
+ *
+ * I thought about using the other two queues but I decided not to do this:
+ *
+ * - I'm unsure whether mac80211 will ever try to use more than 4 queues
+ *   by itself.
+ *
+ * - I could route MGMT frames (currently sent over the VO queue) to the MGMT
+ *   queue, but since mac80211 would not know about it, I would probably gain
+ *   some HW priority whenever the VO queue is not empty. That gain is
+ *   limited by the fact that I would have to stop the mac80211 queue whenever
+ *   either the VO or the MGMT queue is full, which would also stop the
+ *   submission of MGMT frames to the driver.
+ *
+ * - I don't know how to set the contention window params in the HW for the
+ *   MGMT and HI-prio queues.
+ */
+
+static const int rtl8187se_queues_map[RTL8187SE_NR_TX_QUEUES] = {5, 4, 3, 2, 7};
+
+/* Queues for rtl8180/rtl8185 cards
+ *
+ * name | reg  |  prio
+ *  BC  |  7   |   3
+ *  HI  |  6   |   0
+ *  NO  |  5   |   1
+ *  LO  |  4   |   2
+ *
+ * The complete map for DMA kick reg using all queue is:
+ * static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {6, 5, 4, 7};
+ *
+ * .. but .. Because the mac80211 needs at least 4 queues for QoS or
+ * otherwise QoS can't be done, we use just one.
+ * Beacon queue could be used, but this is not finished yet.
+ * Actual map is:
+ *
+ * name | reg  |  prio
+ *  BC  |  7   |   1  <- currently not used yet.
+ *  HI  |  6   |   x  <- not used
+ *  NO  |  5   |   x  <- not used
+ *  LO  |  4   |   0  <- used
+ */
+
+static const int rtl8180_queues_map[RTL8180_NR_TX_QUEUES] = {4, 7};
 
 void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
 {
@@ -105,14 +207,30 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
 static void rtl8180_handle_rx(struct ieee80211_hw *dev)
 {
        struct rtl8180_priv *priv = dev->priv;
+       struct rtl818x_rx_cmd_desc *cmd_desc;
        unsigned int count = 32;
        u8 signal, agc, sq;
        dma_addr_t mapping;
 
        while (count--) {
-               struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
+               void *entry = priv->rx_ring + priv->rx_idx * priv->rx_ring_sz;
                struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
-               u32 flags = le32_to_cpu(entry->flags);
+               u32 flags, flags2;
+               u64 tsft;
+
+               if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+                       struct rtl8187se_rx_desc *desc = entry;
+
+                       flags = le32_to_cpu(desc->flags);
+                       flags2 = le32_to_cpu(desc->flags2);
+                       tsft = le64_to_cpu(desc->tsft);
+               } else {
+                       struct rtl8180_rx_desc *desc = entry;
+
+                       flags = le32_to_cpu(desc->flags);
+                       flags2 = le32_to_cpu(desc->flags2);
+                       tsft = le64_to_cpu(desc->tsft);
+               }
 
                if (flags & RTL818X_RX_DESC_FLAG_OWN)
                        return;
@@ -122,7 +240,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                                      RTL818X_RX_DESC_FLAG_RX_ERR)))
                        goto done;
                else {
-                       u32 flags2 = le32_to_cpu(entry->flags2);
                        struct ieee80211_rx_status rx_status = {0};
                        struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_SIZE);
 
@@ -148,19 +265,24 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                        rx_status.antenna = (flags2 >> 15) & 1;
                        rx_status.rate_idx = (flags >> 20) & 0xF;
                        agc = (flags2 >> 17) & 0x7F;
-                       if (priv->r8185) {
+
+                       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
                                if (rx_status.rate_idx > 3)
                                        signal = 90 - clamp_t(u8, agc, 25, 90);
                                else
                                        signal = 95 - clamp_t(u8, agc, 30, 95);
-                       } else {
+                       } else if (priv->chip_family ==
+                                  RTL818X_CHIP_FAMILY_RTL8180) {
                                sq = flags2 & 0xff;
                                signal = priv->rf->calc_rssi(agc, sq);
+                       } else {
+                               /* TODO: rtl8187se rssi */
+                               signal = 10;
                        }
                        rx_status.signal = signal;
                        rx_status.freq = dev->conf.chandef.chan->center_freq;
                        rx_status.band = dev->conf.chandef.chan->band;
-                       rx_status.mactime = le64_to_cpu(entry->tsft);
+                       rx_status.mactime = tsft;
                        rx_status.flag |= RX_FLAG_MACTIME_START;
                        if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
                                rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -174,11 +296,13 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
                }
 
        done:
-               entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
-               entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
+               cmd_desc = entry;
+               cmd_desc->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
+               cmd_desc->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
                                           MAX_RX_SIZE);
                if (priv->rx_idx == 31)
-                       entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
+                       cmd_desc->flags |=
+                               cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
                priv->rx_idx = (priv->rx_idx + 1) % 32;
        }
 }
@@ -218,6 +342,55 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
        }
 }
 
+static irqreturn_t rtl8187se_interrupt(int irq, void *dev_id)
+{
+       struct ieee80211_hw *dev = dev_id;
+       struct rtl8180_priv *priv = dev->priv;
+       u32 reg;
+       unsigned long flags;
+       static int desc_err;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       /* Note: 32-bit interrupt status */
+       reg = rtl818x_ioread32(priv, &priv->map->INT_STATUS_SE);
+       if (unlikely(reg == 0xFFFFFFFF)) {
+               spin_unlock_irqrestore(&priv->lock, flags);
+               return IRQ_HANDLED;
+       }
+
+       rtl818x_iowrite32(priv, &priv->map->INT_STATUS_SE, reg);
+
+       if (reg & IMR_TIMEOUT1)
+               rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
+
+       if (reg & (IMR_TBDOK | IMR_TBDER))
+               rtl8180_handle_tx(dev, 4);
+
+       if (reg & (IMR_TVODOK | IMR_TVODER))
+               rtl8180_handle_tx(dev, 0);
+
+       if (reg & (IMR_TVIDOK | IMR_TVIDER))
+               rtl8180_handle_tx(dev, 1);
+
+       if (reg & (IMR_TBEDOK | IMR_TBEDER))
+               rtl8180_handle_tx(dev, 2);
+
+       if (reg & (IMR_TBKDOK | IMR_TBKDER))
+               rtl8180_handle_tx(dev, 3);
+
+       if (reg & (IMR_ROK | IMR_RER | RTL818X_INT_SE_RX_DU | IMR_RQOSOK))
+               rtl8180_handle_rx(dev);
+       /* The interface sometimes generates several RX DMA descriptor errors
+        * at startup. Do not report these.
+        */
+       if ((reg & RTL818X_INT_SE_RX_DU) && desc_err++ > 2)
+               if (net_ratelimit())
+                       wiphy_err(dev->wiphy, "No RX DMA Descriptor avail\n");
+
+       spin_unlock_irqrestore(&priv->lock, flags);
+       return IRQ_HANDLED;
+}
+
 static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
 {
        struct ieee80211_hw *dev = dev_id;
@@ -234,12 +407,6 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
        rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg);
 
        if (reg & (RTL818X_INT_TXB_OK | RTL818X_INT_TXB_ERR))
-               rtl8180_handle_tx(dev, 3);
-
-       if (reg & (RTL818X_INT_TXH_OK | RTL818X_INT_TXH_ERR))
-               rtl8180_handle_tx(dev, 2);
-
-       if (reg & (RTL818X_INT_TXN_OK | RTL818X_INT_TXN_ERR))
                rtl8180_handle_tx(dev, 1);
 
        if (reg & (RTL818X_INT_TXL_OK | RTL818X_INT_TXL_ERR))
@@ -263,12 +430,14 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
        struct rtl8180_tx_ring *ring;
        struct rtl8180_tx_desc *entry;
        unsigned long flags;
-       unsigned int idx, prio;
+       unsigned int idx, prio, hw_prio;
        dma_addr_t mapping;
        u32 tx_flags;
        u8 rc_flags;
        u16 plcp_len = 0;
        __le16 rts_duration = 0;
+       /* do arithmetic and then convert to le16 */
+       u16 frame_duration = 0;
 
        prio = skb_get_queue_mapping(skb);
        ring = &priv->tx_ring[prio];
@@ -280,7 +449,6 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                kfree_skb(skb);
                dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
                return;
-
        }
 
        tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
@@ -288,7 +456,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                   (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
                   skb->len;
 
-       if (priv->r8185)
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
                tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
                            RTL818X_TX_DESC_FLAG_NO_ENC;
 
@@ -305,7 +473,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
                                                      info);
 
-       if (!priv->r8185) {
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
                unsigned int remainder;
 
                plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
@@ -316,6 +484,18 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
                        plcp_len |= 1 << 15;
        }
 
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               __le16 duration;
+               /* SIFS time (required by HW) is already included by
+                * ieee80211_generic_frame_duration
+                */
+               duration = ieee80211_generic_frame_duration(dev, priv->vif,
+                                       IEEE80211_BAND_2GHZ, skb->len,
+                                       ieee80211_get_tx_rate(dev, info));
+
+               frame_duration =  priv->ack_time + le16_to_cpu(duration);
+       }
+
        spin_lock_irqsave(&priv->lock, flags);
 
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -328,21 +508,91 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
        idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
        entry = &ring->desc[idx];
 
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               entry->frame_duration = cpu_to_le16(frame_duration);
+               entry->frame_len_se = cpu_to_le16(skb->len);
+
+               /* tpc polarity */
+               entry->flags3 = cpu_to_le16(1<<4);
+       } else
+               entry->frame_len = cpu_to_le32(skb->len);
+
        entry->rts_duration = rts_duration;
        entry->plcp_len = cpu_to_le16(plcp_len);
        entry->tx_buf = cpu_to_le32(mapping);
-       entry->frame_len = cpu_to_le32(skb->len);
+
        entry->flags2 = info->control.rates[1].idx >= 0 ?
                ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
        entry->retry_limit = info->control.rates[0].count;
+
+       /* We must be sure that tx_flags is written last, because the HW
+        * looks at it to check whether the rest of the data is valid.
+        */
+       wmb();
        entry->flags = cpu_to_le32(tx_flags);
+       /* We must be sure this has been written before the following HW
+        * register write, because that write makes the HW attempt to
+        * DMA the just-written data.
+        */
+       wmb();
+
        __skb_queue_tail(&ring->queue, skb);
        if (ring->entries - skb_queue_len(&ring->queue) < 2)
                ieee80211_stop_queue(dev, prio);
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               /* just poll: rings are stopped with TPPollStop reg */
+               hw_prio = rtl8187se_queues_map[prio];
+               rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+                        (1 << hw_prio));
+       } else {
+               hw_prio = rtl8180_queues_map[prio];
+               rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING,
+                        (1 << hw_prio) | /* ring to poll  */
+                        (1<<1) | (1<<2));/* stopped rings */
+       }
+}
+
+static void rtl8180_set_anaparam3(struct rtl8180_priv *priv, u16 anaparam3)
+{
+       u8 reg;
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                        RTL818X_EEPROM_CMD_CONFIG);
+
+       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+                reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+       rtl818x_iowrite16(priv, &priv->map->ANAPARAM3, anaparam3);
+
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+                reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                        RTL818X_EEPROM_CMD_NORMAL);
+}
+
+void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2)
+{
+       u8 reg;
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                        RTL818X_EEPROM_CMD_CONFIG);
+
+       reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+                reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+       rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
+
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3,
+                reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                        RTL818X_EEPROM_CMD_NORMAL);
 }
 
 void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
@@ -359,17 +609,171 @@ void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam)
        rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
 }
 
+static void rtl8187se_mac_config(struct ieee80211_hw *dev)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       u8 reg;
+
+       rtl818x_iowrite32(priv, REG_ADDR4(0x1F0), 0);
+       rtl818x_ioread32(priv, REG_ADDR4(0x1F0));
+       rtl818x_iowrite32(priv, REG_ADDR4(0x1F4), 0);
+       rtl818x_ioread32(priv, REG_ADDR4(0x1F4));
+       rtl818x_iowrite8(priv, REG_ADDR1(0x1F8), 0);
+       rtl818x_ioread8(priv, REG_ADDR1(0x1F8));
+       /* Enable DA10 TX power saving */
+       reg = rtl818x_ioread8(priv, &priv->map->PHY_PR);
+       rtl818x_iowrite8(priv, &priv->map->PHY_PR, reg | 0x04);
+       /* Power */
+       rtl818x_iowrite16(priv, PI_DATA_REG, 0x1000);
+       rtl818x_iowrite16(priv, SI_DATA_REG, 0x1000);
+       /* AFE - default to power ON */
+       rtl818x_iowrite16(priv, REG_ADDR2(0x370), 0x0560);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x372), 0x0560);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x374), 0x0DA4);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x376), 0x0DA4);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x378), 0x0560);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x37A), 0x0560);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x37C), 0x00EC);
+       rtl818x_iowrite16(priv, REG_ADDR2(0x37E), 0x00EC);
+       rtl818x_iowrite8(priv, REG_ADDR1(0x24E), 0x01);
+       /* unknown, needed for suspend to RAM resume */
+       rtl818x_iowrite8(priv, REG_ADDR1(0x0A), 0x72);
+}
+
+static void rtl8187se_set_antenna_config(struct ieee80211_hw *dev, u8 def_ant,
+                                        bool diversity)
+{
+       struct rtl8180_priv *priv = dev->priv;
+
+       rtl8225_write_phy_cck(dev, 0x0C, 0x09);
+       if (diversity) {
+               if (def_ant == 1) {
+                       rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
+                       rtl8225_write_phy_cck(dev, 0x11, 0xBB);
+                       rtl8225_write_phy_cck(dev, 0x01, 0xC7);
+                       rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
+                       rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
+               } else { /* main antenna */
+                       rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+                       rtl8225_write_phy_cck(dev, 0x11, 0x9B);
+                       rtl8225_write_phy_cck(dev, 0x01, 0xC7);
+                       rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
+                       rtl8225_write_phy_ofdm(dev, 0x18, 0xB2);
+               }
+       } else { /* disable antenna diversity */
+               if (def_ant == 1) {
+                       rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00);
+                       rtl8225_write_phy_cck(dev, 0x11, 0xBB);
+                       rtl8225_write_phy_cck(dev, 0x01, 0x47);
+                       rtl8225_write_phy_ofdm(dev, 0x0D, 0x54);
+                       rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
+               } else { /* main antenna */
+                       rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+                       rtl8225_write_phy_cck(dev, 0x11, 0x9B);
+                       rtl8225_write_phy_cck(dev, 0x01, 0x47);
+                       rtl8225_write_phy_ofdm(dev, 0x0D, 0x5C);
+                       rtl8225_write_phy_ofdm(dev, 0x18, 0x32);
+               }
+       }
+       /* priv->curr_ant = def_ant; */
+}
+
+static void rtl8180_int_enable(struct ieee80211_hw *dev)
+{
+       struct rtl8180_priv *priv = dev->priv;
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               rtl818x_iowrite32(priv, &priv->map->IMR, IMR_TMGDOK |
+                         IMR_TBDER | IMR_THPDER |
+                         IMR_THPDOK |
+                         IMR_TVODER | IMR_TVODOK |
+                         IMR_TVIDER | IMR_TVIDOK |
+                         IMR_TBEDER | IMR_TBEDOK |
+                         IMR_TBKDER | IMR_TBKDOK |
+                         IMR_RDU | IMR_RER |
+                         IMR_ROK | IMR_RQOSOK);
+       } else {
+               rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+       }
+}
+
+static void rtl8180_int_disable(struct ieee80211_hw *dev)
+{
+       struct rtl8180_priv *priv = dev->priv;
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               rtl818x_iowrite32(priv, &priv->map->IMR, 0);
+       } else {
+               rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+       }
+}
+
+static void rtl8180_conf_basic_rates(struct ieee80211_hw *dev,
+                           u32 rates_mask)
+{
+       struct rtl8180_priv *priv = dev->priv;
+
+       u8 max, min;
+       u16 reg;
+
+       max = fls(rates_mask) - 1;
+       min = ffs(rates_mask) - 1;
+
+       switch (priv->chip_family) {
+
+       case RTL818X_CHIP_FAMILY_RTL8180:
+               /* in 8180 this is NOT a BITMAP */
+               reg = rtl818x_ioread16(priv, &priv->map->BRSR);
+               reg &= ~3;
+               reg |= max;
+               rtl818x_iowrite16(priv, &priv->map->BRSR, reg);
+               break;
+
+       case RTL818X_CHIP_FAMILY_RTL8185:
+               /* in 8185 this is a BITMAP */
+               rtl818x_iowrite16(priv, &priv->map->BRSR, rates_mask);
+               rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (max << 4) | min);
+               break;
+
+       case RTL818X_CHIP_FAMILY_RTL8187SE:
+               /* in 8187se this is a BITMAP */
+               rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, rates_mask);
+               break;
+       }
+}
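
A worked example (not from the patch) of the fls()/ffs() arithmetic above,
using compiler builtins as userspace stand-ins for the kernel helpers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rates_mask = 0x1f3;        /* 1,2,6,9,12,18,24 Mb/s */
            int max = 31 - __builtin_clz(rates_mask);       /* fls() - 1 -> 8 */
            int min = __builtin_ctz(rates_mask);            /* ffs() - 1 -> 0 */

            /* RTL8185 case: BRSR takes the bitmap, RESP_RATE takes max/min */
            printf("BRSR=0x%04x RESP_RATE=0x%02x\n",
                   rates_mask, (max << 4) | min);   /* 0x01f3, 0x80 */
            return 0;
    }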
+
+static void rtl8180_config_cardbus(struct ieee80211_hw *dev)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       u16 reg16;
+       u8 reg8;
+
+       reg8 = rtl818x_ioread8(priv, &priv->map->CONFIG3);
+       reg8 |= 1 << 1;
+       rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg8);
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               rtl818x_iowrite16(priv, FEMR_SE, 0xffff);
+       } else {
+               reg16 = rtl818x_ioread16(priv, &priv->map->FEMR);
+               reg16 |= (1 << 15) | (1 << 14) | (1 << 4);
+               rtl818x_iowrite16(priv, &priv->map->FEMR, reg16);
+       }
+
+}
+
 static int rtl8180_init_hw(struct ieee80211_hw *dev)
 {
        struct rtl8180_priv *priv = dev->priv;
        u16 reg;
+       u32 reg32;
 
        rtl818x_iowrite8(priv, &priv->map->CMD, 0);
        rtl818x_ioread8(priv, &priv->map->CMD);
        msleep(10);
 
        /* reset */
-       rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+       rtl8180_int_disable(dev);
        rtl818x_ioread8(priv, &priv->map->CMD);
 
        reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -390,31 +794,45 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
        msleep(200);
 
        if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) {
-               /* For cardbus */
-               reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
-               reg |= 1 << 1;
-               rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
-               reg = rtl818x_ioread16(priv, &priv->map->FEMR);
-               reg |= (1 << 15) | (1 << 14) | (1 << 4);
-               rtl818x_iowrite16(priv, &priv->map->FEMR, reg);
+               rtl8180_config_cardbus(dev);
        }
 
-       rtl818x_iowrite8(priv, &priv->map->MSR, 0);
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+       else
+               rtl818x_iowrite8(priv, &priv->map->MSR, 0);
 
-       if (!priv->r8185)
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
                rtl8180_set_anaparam(priv, priv->anaparam);
 
        rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
-       rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma);
-       rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
-       rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
-       rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
+       /* mac80211 queues have higher priority for lower indices. The last
+        * queue (which mac80211 is not aware of) is reserved for beacons
+        * (and has the highest priority on the NIC)
+        */
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
+               rtl818x_iowrite32(priv, &priv->map->TBDA,
+                                 priv->tx_ring[1].dma);
+               rtl818x_iowrite32(priv, &priv->map->TLPDA,
+                                 priv->tx_ring[0].dma);
+       } else {
+               rtl818x_iowrite32(priv, &priv->map->TBDA,
+                                 priv->tx_ring[4].dma);
+               rtl818x_iowrite32(priv, &priv->map->TVODA,
+                                 priv->tx_ring[0].dma);
+               rtl818x_iowrite32(priv, &priv->map->TVIDA,
+                                 priv->tx_ring[1].dma);
+               rtl818x_iowrite32(priv, &priv->map->TBEDA,
+                                 priv->tx_ring[2].dma);
+               rtl818x_iowrite32(priv, &priv->map->TBKDA,
+                                 priv->tx_ring[3].dma);
+       }
 
        /* TODO: necessary? specs indicate not */
        rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
        reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
        rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 3));
-       if (priv->r8185) {
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
                reg = rtl818x_ioread8(priv, &priv->map->CONFIG2);
                rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4));
        }
@@ -426,13 +844,17 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
 
        rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
 
-       if (priv->r8185) {
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
                rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
                rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
-               rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
+       } else {
+               rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
 
-               rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+               rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
+               rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
+       }
 
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
                /* TODO: set ClkRun enable? necessary? */
                reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE);
                rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6));
@@ -440,28 +862,90 @@ static int rtl8180_init_hw(struct ieee80211_hw *dev)
                reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
                rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2));
                rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
-       } else {
-               rtl818x_iowrite16(priv, &priv->map->BRSR, 0x1);
-               rtl818x_iowrite8(priv, &priv->map->SECURITY, 0);
+       }
 
-               rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6);
-               rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C);
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+
+               /* set the auto rate fallback bitmask from 1M to 54 Mb/s */
+               rtl818x_iowrite16(priv, ARFR, 0xFFF);
+               rtl818x_ioread16(priv, ARFR);
+
+               /* stop unused queues (no DMA alloc) */
+               rtl818x_iowrite8(priv, &priv->map->TPPOLL_STOP,
+                              RTL818x_TPPOLL_STOP_MG | RTL818x_TPPOLL_STOP_HI);
+
+               rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0x00);
+               rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50);
+
+               rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0);
+
+               /* some black magic here.. */
+               rtl8187se_mac_config(dev);
+
+               rtl818x_iowrite16(priv, RFSW_CTRL, 0x569A);
+               rtl818x_ioread16(priv, RFSW_CTRL);
+
+               rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_ON);
+               rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_ON);
+               rtl8180_set_anaparam3(priv, RTL8225SE_ANAPARAM3);
+
+               rtl818x_iowrite8(priv, &priv->map->CONFIG5,
+                           rtl818x_ioread8(priv, &priv->map->CONFIG5) & 0x7F);
+
+               /* this probably switches the LED on */
+               rtl818x_iowrite8(priv, &priv->map->PGSELECT,
+                           rtl818x_ioread8(priv, &priv->map->PGSELECT) | 0x08);
+
+               rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
+               rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1BFF);
+               rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
+
+               rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x4003);
+
+               /* the reference code MAC hardcode table writes
+                * this reg by doing byte-wide accesses.
+                * It does so only for the lowest and highest bytes..
+                */
+               reg32 = rtl818x_ioread32(priv, &priv->map->RF_PARA);
+               reg32 &= 0x00ffff00;
+               reg32 |= 0xb8000054;
+               rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32);
        }
 
        priv->rf->init(dev);
-       if (priv->r8185)
-               rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
+
+       /* default basic rates are 1,2 Mbps for rtl8180 and 1,2,6,9,12,18,24
+        * Mbps otherwise (bitmasks 0x3 and 0x01f3 respectively).
+        * NOTE: currently the rtl8225 RF code changes basic rates, so we need
+        * to do this after RF init.
+        * TODO: try to find out whether the RF code really needs to do this..
+        */
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+               rtl8180_conf_basic_rates(dev, 0x3);
+       else
+               rtl8180_conf_basic_rates(dev, 0x1f3);
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               rtl8187se_set_antenna_config(dev,
+                                            priv->antenna_diversity_default,
+                                            priv->antenna_diversity_en);
        return 0;
 }
 
 static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
 {
        struct rtl8180_priv *priv = dev->priv;
-       struct rtl8180_rx_desc *entry;
+       struct rtl818x_rx_cmd_desc *entry;
        int i;
 
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               priv->rx_ring_sz = sizeof(struct rtl8187se_rx_desc);
+       else
+               priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
+
        priv->rx_ring = pci_alloc_consistent(priv->pdev,
-                                            sizeof(*priv->rx_ring) * 32,
+                                            priv->rx_ring_sz * 32,
                                             &priv->rx_ring_dma);
 
        if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
@@ -469,20 +953,28 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
                return -ENOMEM;
        }
 
-       memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32);
+       memset(priv->rx_ring, 0, priv->rx_ring_sz * 32);
        priv->rx_idx = 0;
 
        for (i = 0; i < 32; i++) {
                struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
                dma_addr_t *mapping;
-               entry = &priv->rx_ring[i];
-               if (!skb)
-                       return 0;
-
+               entry = priv->rx_ring + priv->rx_ring_sz*i;
+               if (!skb) {
+                       wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
+                       return -ENOMEM;
+               }
                priv->rx_buf[i] = skb;
                mapping = (dma_addr_t *)skb->cb;
                *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
                                          MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+               if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+                       kfree_skb(skb);
+                       wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
+                       return -ENOMEM;
+               }
+
                entry->rx_buf = cpu_to_le32(*mapping);
                entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
                                           MAX_RX_SIZE);
@@ -507,7 +999,7 @@ static void rtl8180_free_rx_ring(struct ieee80211_hw *dev)
                kfree_skb(skb);
        }
 
-       pci_free_consistent(priv->pdev, sizeof(*priv->rx_ring) * 32,
+       pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
                            priv->rx_ring, priv->rx_ring_dma);
        priv->rx_ring = NULL;
 }
@@ -571,7 +1063,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
        if (ret)
                return ret;
 
-       for (i = 0; i < 4; i++)
+       for (i = 0; i < (dev->queues + 1); i++)
                if ((ret = rtl8180_init_tx_ring(dev, i, 16)))
                        goto err_free_rings;
 
@@ -579,23 +1071,28 @@ static int rtl8180_start(struct ieee80211_hw *dev)
        if (ret)
                goto err_free_rings;
 
-       rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma);
-       rtl818x_iowrite32(priv, &priv->map->TBDA, priv->tx_ring[3].dma);
-       rtl818x_iowrite32(priv, &priv->map->THPDA, priv->tx_ring[2].dma);
-       rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
-       rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
-
-       ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               ret = request_irq(priv->pdev->irq, rtl8187se_interrupt,
                          IRQF_SHARED, KBUILD_MODNAME, dev);
+       } else {
+               ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
+                         IRQF_SHARED, KBUILD_MODNAME, dev);
+       }
+
        if (ret) {
                wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
                goto err_free_rings;
        }
 
-       rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
+       rtl8180_int_enable(dev);
 
-       rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
-       rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+       /* in rtl8187se, at the MAR regs offset, there is the management
+        * TX descriptor DMA address..
+        */
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8187SE) {
+               rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0);
+               rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0);
+       }
 
        reg = RTL818X_RX_CONF_ONLYERLPKT |
              RTL818X_RX_CONF_RX_AUTORESETPHY |
@@ -605,27 +1102,42 @@ static int rtl8180_start(struct ieee80211_hw *dev)
              RTL818X_RX_CONF_BROADCAST |
              RTL818X_RX_CONF_NICMAC;
 
-       if (priv->r8185)
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185)
                reg |= RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2;
-       else {
+       else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
                reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE1)
                        ? RTL818X_RX_CONF_CSDM1 : 0;
                reg |= (priv->rfparam & RF_PARAM_CARRIERSENSE2)
                        ? RTL818X_RX_CONF_CSDM2 : 0;
+       } else {
+               reg &= ~(RTL818X_RX_CONF_CSDM1 | RTL818X_RX_CONF_CSDM2);
        }
 
        priv->rx_conf = reg;
        rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
 
-       if (priv->r8185) {
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
                reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
-               reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
-               reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+
+               /* CW is not on a per-packet basis.
+                * in rtl8185 the CW_VALUE reg is used.
+                * in rtl8187se the AC param regs are used.
+                */
+               reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+               /* retry limit IS on a per-packet basis.
+                * the short and long retry limits in the TX_CONF
+                * reg are ignored
+                */
+               reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
                rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
 
                reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
-               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
-               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+               /* TX antenna and TX gain are not on a per-packet basis.
+                * TX Antenna is selected by ANTSEL reg (RX in BB regs).
+                * TX gain is selected with CCK_TX_AGC and OFDM_TX_AGC regs
+                */
+               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
                reg |=  RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
                rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
 
@@ -637,11 +1149,16 @@ static int rtl8180_start(struct ieee80211_hw *dev)
        reg |= (6 << 21 /* MAX TX DMA */) |
               RTL818X_TX_CONF_NO_ICV;
 
-       if (priv->r8185)
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               reg |= 1<<30;  /*  "duration procedure mode" */
+
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180)
                reg &= ~RTL818X_TX_CONF_PROBE_DTS;
        else
                reg &= ~RTL818X_TX_CONF_HW_SEQNUM;
 
+       reg &= ~RTL818X_TX_CONF_DISCW;
+
        /* different meaning, same value on both rtl8185 and rtl8180 */
        reg &= ~RTL818X_TX_CONF_SAT_HWPLCP;
 
@@ -656,7 +1173,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
 
  err_free_rings:
        rtl8180_free_rx_ring(dev);
-       for (i = 0; i < 4; i++)
+       for (i = 0; i < (dev->queues + 1); i++)
                if (priv->tx_ring[i].desc)
                        rtl8180_free_tx_ring(dev, i);
 
@@ -669,7 +1186,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
        u8 reg;
        int i;
 
-       rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
+       rtl8180_int_disable(dev);
 
        reg = rtl818x_ioread8(priv, &priv->map->CMD);
        reg &= ~RTL818X_CMD_TX_ENABLE;
@@ -686,7 +1203,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
        free_irq(priv->pdev->irq, dev);
 
        rtl8180_free_rx_ring(dev);
-       for (i = 0; i < 4; i++)
+       for (i = 0; i < (dev->queues + 1); i++)
                rtl8180_free_tx_ring(dev, i);
 }
 
@@ -794,6 +1311,123 @@ static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
        return 0;
 }
 
+static void rtl8187se_conf_ac_parm(struct ieee80211_hw *dev, u8 queue)
+{
+       const struct ieee80211_tx_queue_params *params;
+       struct rtl8180_priv *priv = dev->priv;
+
+       /* hw value */
+       u32 ac_param;
+
+       u8 aifs;
+       u8 txop;
+       u8 cw_min, cw_max;
+
+       params = &priv->queue_param[queue];
+
+       cw_min = fls(params->cw_min);
+       cw_max = fls(params->cw_max);
+
+       aifs = 10 + params->aifs * priv->slot_time;
+
+       /* TODO: check whether the HW txop is in us (multiply by 32) */
+       txop = params->txop;
+
+       ac_param = txop << AC_PARAM_TXOP_LIMIT_SHIFT |
+               cw_max << AC_PARAM_ECW_MAX_SHIFT |
+               cw_min << AC_PARAM_ECW_MIN_SHIFT |
+               aifs << AC_PARAM_AIFS_SHIFT;
+
+       switch (queue) {
+       case IEEE80211_AC_BK:
+               rtl818x_iowrite32(priv, &priv->map->AC_BK_PARAM, ac_param);
+               break;
+       case IEEE80211_AC_BE:
+               rtl818x_iowrite32(priv, &priv->map->AC_BE_PARAM, ac_param);
+               break;
+       case IEEE80211_AC_VI:
+               rtl818x_iowrite32(priv, &priv->map->AC_VI_PARAM, ac_param);
+               break;
+       case IEEE80211_AC_VO:
+               rtl818x_iowrite32(priv, &priv->map->AC_VO_PARAM, ac_param);
+               break;
+       }
+}
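
A sketch (not from the patch) of how the 32-bit AC parameter word above is
assembled. The shift values 0/8/12/16 and the example EDCA numbers are
assumptions for illustration only; the driver uses its own
AC_PARAM_*_SHIFT defines and the values supplied by mac80211.

    #include <stdint.h>
    #include <stdio.h>

    #define AIFS_SHIFT      0
    #define ECW_MIN_SHIFT   8
    #define ECW_MAX_SHIFT   12
    #define TXOP_SHIFT      16

    int main(void)
    {
            uint8_t slot_time = 9;                  /* short slot, us */
            uint8_t aifsn = 2, ecw_min = 4, ecw_max = 10, txop = 0;
            uint8_t aifs = 10 + aifsn * slot_time;  /* SIFS (10 us) + n * slot */

            unsigned ac_param = (unsigned)txop << TXOP_SHIFT |
                                (unsigned)ecw_max << ECW_MAX_SHIFT |
                                (unsigned)ecw_min << ECW_MIN_SHIFT |
                                (unsigned)aifs << AIFS_SHIFT;

            printf("AC_*_PARAM = 0x%08x\n", ac_param);      /* 0x0000a41c */
            return 0;
    }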
+
+static int rtl8180_conf_tx(struct ieee80211_hw *dev,
+                           struct ieee80211_vif *vif, u16 queue,
+                           const struct ieee80211_tx_queue_params *params)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       u8 cw_min, cw_max;
+
+       /* nothing to do ? */
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+               return 0;
+
+       cw_min = fls(params->cw_min);
+       cw_max = fls(params->cw_max);
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               priv->queue_param[queue] = *params;
+               rtl8187se_conf_ac_parm(dev, queue);
+       } else
+               rtl818x_iowrite8(priv, &priv->map->CW_VAL,
+                                (cw_max << 4) | cw_min);
+       return 0;
+}
+
+static void rtl8180_conf_erp(struct ieee80211_hw *dev,
+                           struct ieee80211_bss_conf *info)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       u8 sifs, difs;
+       int eifs;
+       u8 hw_eifs;
+
+       /* TODO: should we do something ? */
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180)
+               return;
+
+       /* I _hope_ this means 10uS for the HW.
+        * In the reference code it is 0x22 for
+        * both rtl8187L and rtl8187SE
+        */
+       sifs = 0x22;
+
+       if (info->use_short_slot)
+               priv->slot_time = 9;
+       else
+               priv->slot_time = 20;
+
+       /* 10 is SIFS time in uS */
+       difs = 10 + 2 * priv->slot_time;
+       eifs = 10 + difs + priv->ack_time;
+
+       /* HW should use 4uS units for EIFS (I'm sure for rtl8185) */
+       hw_eifs = DIV_ROUND_UP(eifs, 4);
+
+       rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
+       rtl818x_iowrite8(priv, &priv->map->SIFS, sifs);
+       rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
+
+       /* from reference code. set ack timeout reg = eifs reg */
+       rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, hw_eifs);
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               rtl818x_iowrite8(priv, &priv->map->EIFS_8187SE, hw_eifs);
+       else if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8185) {
+               /* rtl8187/rtl8185 HW bug. After EIFS has elapsed,
+                * the HW still waits for DIFS.
+                * HW uses 4uS units for EIFS.
+                */
+               hw_eifs = DIV_ROUND_UP(eifs - difs, 4);
+
+               rtl818x_iowrite8(priv, &priv->map->EIFS, hw_eifs);
+       }
+}
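
A worked example (not from the patch) of the DIFS/EIFS arithmetic above.
The ack_time value is hypothetical; in the driver it comes from mac80211
at the lowest basic rate.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int slot_time = 9;              /* short slot, us */
            int ack_time = 304;             /* assumed value, us */
            int sifs = 10;

            int difs = sifs + 2 * slot_time;        /* 28 us */
            int eifs = sifs + difs + ack_time;      /* 342 us */

            /* 8187SE: EIFS register directly, in 4 us units */
            printf("EIFS_8187SE = %d\n", DIV_ROUND_UP(eifs, 4));            /* 86 */
            /* 8185: the HW adds DIFS again after EIFS, so subtract it */
            printf("EIFS_8185   = %d\n", DIV_ROUND_UP(eifs - difs, 4));     /* 79 */
            return 0;
    }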
+
 static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
                                     struct ieee80211_vif *vif,
                                     struct ieee80211_bss_conf *info,
@@ -818,11 +1452,40 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
                                reg = RTL818X_MSR_INFRA;
                } else
                        reg = RTL818X_MSR_NO_LINK;
+
+               if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+                       reg |= RTL818X_MSR_ENEDCA;
+
                rtl818x_iowrite8(priv, &priv->map->MSR, reg);
        }
 
-       if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
-               priv->rf->conf_erp(dev, info);
+       if (changed & BSS_CHANGED_BASIC_RATES)
+               rtl8180_conf_basic_rates(dev, info->basic_rates);
+
+       if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE)) {
+
+               /* when the preamble changes, the ACK time duration changes,
+                * and ERP must be recalculated. ACK time is calculated at the
+                * lowest rate. Since mac80211 includes the SIFS time we remove
+                * it (-10)
+                */
+               priv->ack_time =
+                       le16_to_cpu(ieee80211_generic_frame_duration(dev,
+                                       priv->vif,
+                                       IEEE80211_BAND_2GHZ, 10,
+                                       &priv->rates[0])) - 10;
+
+               rtl8180_conf_erp(dev, info);
+
+               /* mac80211 supplies aifs_n to the driver and calls the
+                * conf_tx callback when aifs_n changes, NOT when aifs
+                * changes.
+                * Aifs should be recalculated if the slot time changes.
+                */
+               if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+                       for (i = 0; i < 4; i++)
+                               rtl8187se_conf_ac_parm(dev, i);
+               }
+       }
 
        if (changed & BSS_CHANGED_BEACON_ENABLED)
                vif_priv->enable_beacon = info->enable_beacon;
@@ -880,6 +1543,7 @@ static const struct ieee80211_ops rtl8180_ops = {
        .remove_interface       = rtl8180_remove_interface,
        .config                 = rtl8180_config,
        .bss_info_changed       = rtl8180_bss_info_changed,
+       .conf_tx                = rtl8180_conf_tx,
        .prepare_multicast      = rtl8180_prepare_multicast,
        .configure_filter       = rtl8180_configure_filter,
        .get_tsf                = rtl8180_get_tsf,
@@ -887,8 +1551,7 @@ static const struct ieee80211_ops rtl8180_ops = {
 
 static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
 {
-       struct ieee80211_hw *dev = eeprom->data;
-       struct rtl8180_priv *priv = dev->priv;
+       struct rtl8180_priv *priv = eeprom->data;
        u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
 
        eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
@@ -899,8 +1562,7 @@ static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
 
 static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
 {
-       struct ieee80211_hw *dev = eeprom->data;
-       struct rtl8180_priv *priv = dev->priv;
+       struct rtl8180_priv *priv = eeprom->data;
        u8 reg = 2 << 6;
 
        if (eeprom->reg_data_in)
@@ -917,6 +1579,83 @@ static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
        udelay(10);
 }
 
+static void rtl8180_eeprom_read(struct rtl8180_priv *priv)
+{
+       struct eeprom_93cx6 eeprom;
+       int eeprom_cck_table_adr;
+       u16 eeprom_val;
+       int i;
+
+       eeprom.data = priv;
+       eeprom.register_read = rtl8180_eeprom_register_read;
+       eeprom.register_write = rtl8180_eeprom_register_write;
+       if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
+               eeprom.width = PCI_EEPROM_WIDTH_93C66;
+       else
+               eeprom.width = PCI_EEPROM_WIDTH_93C46;
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                       RTL818X_EEPROM_CMD_PROGRAM);
+       rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
+       udelay(10);
+
+       eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
+       eeprom_val &= 0xFF;
+       priv->rf_type = eeprom_val;
+
+       eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
+       priv->csthreshold = eeprom_val >> 8;
+
+       eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)priv->mac_addr, 3);
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               eeprom_cck_table_adr = 0x30;
+       else
+               eeprom_cck_table_adr = 0x10;
+
+       /* CCK TX power */
+       for (i = 0; i < 14; i += 2) {
+               u16 txpwr;
+               eeprom_93cx6_read(&eeprom, eeprom_cck_table_adr + (i >> 1),
+                               &txpwr);
+               priv->channels[i].hw_value = txpwr & 0xFF;
+               priv->channels[i + 1].hw_value = txpwr >> 8;
+       }
+
+       /* OFDM TX power */
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
+               for (i = 0; i < 14; i += 2) {
+                       u16 txpwr;
+                       eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
+                       priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
+                       priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
+               }
+       }
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8180) {
+               __le32 anaparam;
+               eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
+               priv->anaparam = le32_to_cpu(anaparam);
+               eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
+       }
+
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE) {
+               eeprom_93cx6_read(&eeprom, 0x3F, &eeprom_val);
+               priv->antenna_diversity_en = !!(eeprom_val & 0x100);
+               priv->antenna_diversity_default = (eeprom_val & 0xC00) == 0x400;
+
+               eeprom_93cx6_read(&eeprom, 0x7C, &eeprom_val);
+               priv->xtal_out = eeprom_val & 0xF;
+               priv->xtal_in = (eeprom_val & 0xF0) >> 4;
+               priv->xtal_cal = !!(eeprom_val & 0x1000);
+               priv->thermal_meter_val = (eeprom_val & 0xF00) >> 8;
+               priv->thermal_meter_en = !!(eeprom_val & 0x2000);
+       }
+
+       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
+                       RTL818X_EEPROM_CMD_NORMAL);
+}
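
A sketch (not from the patch) of how one 16-bit EEPROM word is unpacked
into two adjacent channels' hw_value fields above (CCK power in the low
byte, OFDM power OR-ed into the high byte); the word contents are made up.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t hw_value[14] = { 0 };
            uint16_t cck_word = 0x1211;     /* assumed: ch2 = 0x12, ch1 = 0x11 */
            uint16_t ofdm_word = 0x2221;    /* assumed: ch2 = 0x22, ch1 = 0x21 */
            int i = 0;                      /* first channel pair */

            hw_value[i]      = cck_word & 0xFF;
            hw_value[i + 1]  = cck_word >> 8;
            hw_value[i]     |= (ofdm_word & 0xFF) << 8;
            hw_value[i + 1] |= ofdm_word & 0xFF00;

            printf("ch1=0x%04x ch2=0x%04x\n",
                   (unsigned)hw_value[0], (unsigned)hw_value[1]);
            /* -> ch1=0x2111 ch2=0x2212 */
            return 0;
    }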
+
 static int rtl8180_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
 {
@@ -924,12 +1663,9 @@ static int rtl8180_probe(struct pci_dev *pdev,
        struct rtl8180_priv *priv;
        unsigned long mem_addr, mem_len;
        unsigned int io_addr, io_len;
-       int err, i;
-       struct eeprom_93cx6 eeprom;
+       int err;
        const char *chip_name, *rf_name = NULL;
        u32 reg;
-       u16 eeprom_val;
-       u8 mac_addr[ETH_ALEN];
 
        err = pci_enable_device(pdev);
        if (err) {
@@ -1011,7 +1747,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
        dev->vif_data_size = sizeof(struct rtl8180_vif);
        dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                        BIT(NL80211_IFTYPE_ADHOC);
-       dev->queues = 1;
        dev->max_signal = 65;
 
        reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
@@ -1019,43 +1754,55 @@ static int rtl8180_probe(struct pci_dev *pdev,
        switch (reg) {
        case RTL818X_TX_CONF_R8180_ABCD:
                chip_name = "RTL8180";
+               priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
                break;
+
        case RTL818X_TX_CONF_R8180_F:
                chip_name = "RTL8180vF";
+               priv->chip_family = RTL818X_CHIP_FAMILY_RTL8180;
                break;
+
        case RTL818X_TX_CONF_R8185_ABC:
                chip_name = "RTL8185";
+               priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
                break;
+
        case RTL818X_TX_CONF_R8185_D:
                chip_name = "RTL8185vD";
+               priv->chip_family = RTL818X_CHIP_FAMILY_RTL8185;
+               break;
+
+       case RTL818X_TX_CONF_RTL8187SE:
+               chip_name = "RTL8187SE";
+               priv->chip_family = RTL818X_CHIP_FAMILY_RTL8187SE;
                break;
+
        default:
                printk(KERN_ERR "%s (rtl8180): Unknown chip! (0x%x)\n",
                       pci_name(pdev), reg >> 25);
                goto err_iounmap;
        }
 
-       priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC;
-       if (priv->r8185) {
+       /* we declare to mac80211 all the queues except for the beacon queue,
+        * which will eventually be handled by the driver.
+        * TX rings are arranged so that the lower the IDX, the higher the
+        * priority, in order to achieve a direct mapping with mac80211;
+        * however, the beacon queue is an exception and it is mapped on the
+        * highest tx ring IDX.
+        */
+       if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+               dev->queues = RTL8187SE_NR_TX_QUEUES - 1;
+       else
+               dev->queues = RTL8180_NR_TX_QUEUES - 1;
+
+       if (priv->chip_family != RTL818X_CHIP_FAMILY_RTL8180) {
                priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
                pci_try_set_mwi(pdev);
        }
 
-       eeprom.data = dev;
-       eeprom.register_read = rtl8180_eeprom_register_read;
-       eeprom.register_write = rtl8180_eeprom_register_write;
-       if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
-               eeprom.width = PCI_EEPROM_WIDTH_93C66;
-       else
-               eeprom.width = PCI_EEPROM_WIDTH_93C46;
-
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_PROGRAM);
-       rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
-       udelay(10);
+       rtl8180_eeprom_read(priv);
 
-       eeprom_93cx6_read(&eeprom, 0x06, &eeprom_val);
-       eeprom_val &= 0xFF;
-       switch (eeprom_val) {
+       switch (priv->rf_type) {
        case 1: rf_name = "Intersil";
                break;
        case 2: rf_name = "RFMD";
@@ -1066,14 +1813,18 @@ static int rtl8180_probe(struct pci_dev *pdev,
                break;
        case 5: priv->rf = &grf5101_rf_ops;
                break;
-       case 9: priv->rf = rtl8180_detect_rf(dev);
+       case 9:
+               if (priv->chip_family == RTL818X_CHIP_FAMILY_RTL8187SE)
+                       priv->rf = rtl8187se_detect_rf(dev);
+               else
+                       priv->rf = rtl8180_detect_rf(dev);
                break;
        case 10:
                rf_name = "RTL8255";
                break;
        default:
                printk(KERN_ERR "%s (rtl8180): Unknown RF! (0x%x)\n",
-                      pci_name(pdev), eeprom_val);
+                      pci_name(pdev), priv->rf_type);
                goto err_iounmap;
        }
 
@@ -1083,42 +1834,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
                goto err_iounmap;
        }
 
-       eeprom_93cx6_read(&eeprom, 0x17, &eeprom_val);
-       priv->csthreshold = eeprom_val >> 8;
-       if (!priv->r8185) {
-               __le32 anaparam;
-               eeprom_93cx6_multiread(&eeprom, 0xD, (__le16 *)&anaparam, 2);
-               priv->anaparam = le32_to_cpu(anaparam);
-               eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
-       }
-
-       eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
-       if (!is_valid_ether_addr(mac_addr)) {
+       if (!is_valid_ether_addr(priv->mac_addr)) {
                printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
                       " randomly generated MAC addr\n", pci_name(pdev));
-               eth_random_addr(mac_addr);
-       }
-       SET_IEEE80211_PERM_ADDR(dev, mac_addr);
-
-       /* CCK TX power */
-       for (i = 0; i < 14; i += 2) {
-               u16 txpwr;
-               eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr);
-               priv->channels[i].hw_value = txpwr & 0xFF;
-               priv->channels[i + 1].hw_value = txpwr >> 8;
-       }
-
-       /* OFDM TX power */
-       if (priv->r8185) {
-               for (i = 0; i < 14; i += 2) {
-                       u16 txpwr;
-                       eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
-                       priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
-                       priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
-               }
+               eth_random_addr(priv->mac_addr);
        }
-
-       rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
+       SET_IEEE80211_PERM_ADDR(dev, priv->mac_addr);
 
        spin_lock_init(&priv->lock);
 
@@ -1130,12 +1851,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
        }
 
        wiphy_info(dev->wiphy, "hwaddr %pm, %s + %s\n",
-                  mac_addr, chip_name, priv->rf->name);
+                  priv->mac_addr, chip_name, priv->rf->name);
 
        return 0;
 
  err_iounmap:
-       iounmap(priv->map);
+       pci_iounmap(pdev, priv->map);
 
  err_free_dev:
        ieee80211_free_hw(dev);
index 30523314da43882a09e2d5bb4511ca0e4c57af6f..291a55970d1ab0ebdaabcc6463a4a458afab307d 100644 (file)
 #define ANAPARAM_PWR1_SHIFT    20
 #define ANAPARAM_PWR1_MASK     (0x7F << ANAPARAM_PWR1_SHIFT)
 
+/* rtl8180/rtl8185 have 3 queues + the beacon queue.
+ * mac80211 can use just one, + beacon = 2 total.
+ */
+#define RTL8180_NR_TX_QUEUES 2
+
+/* rtl8187SE has 6 queues + the beacon queue.
+ * mac80211 can use 4 QoS data queues, + beacon = 5 total
+ */
+#define RTL8187SE_NR_TX_QUEUES 5
+
+/* for static array allocation, it is the max of the above */
+#define RTL818X_NR_TX_QUEUES 5
+
 struct rtl8180_tx_desc {
        __le32 flags;
        __le16 rts_duration;
        __le16 plcp_len;
        __le32 tx_buf;
-       __le32 frame_len;
+       union {
+               __le32 frame_len;
+               struct {
+                       __le16 frame_len_se;
+                       __le16 frame_duration;
+               } __packed;
+       } __packed;
        __le32 next_tx_desc;
        u8 cw;
        u8 retry_limit;
        u8 agc;
        u8 flags2;
-       u32 reserved[2];
+       /* rsvd for 8180/8185.
+        * valid for 8187se but we don't use it
+        */
+       u32 reserved;
+       /* all rsvd for 8180/8185 */
+       __le16 flags3;
+       __le16 frag_qsize;
+} __packed;
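
A quick standalone check (not from the patch) of the union overlay above:
on the 8187SE the 32-bit frame_len slot is reused as two 16-bit fields,
with frame_duration sitting in the upper half. Endianness handling
(__le16/__le32) is omitted here for brevity.

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct tx_len_word {
            union {
                    uint32_t frame_len;             /* rtl8180/rtl8185 view */
                    struct {
                            uint16_t frame_len_se;  /* rtl8187se view */
                            uint16_t frame_duration;
                    };
            };
    };

    int main(void)
    {
            printf("sizeof=%zu offsetof(frame_duration)=%zu\n",
                   sizeof(struct tx_len_word),
                   offsetof(struct tx_len_word, frame_duration));   /* 4, 2 */
            return 0;
    }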
+
+struct rtl818x_rx_cmd_desc {
+       __le32 flags;
+       u32 reserved;
+       __le32 rx_buf;
 } __packed;
 
 struct rtl8180_rx_desc {
        __le32 flags;
        __le32 flags2;
-       union {
-               __le32 rx_buf;
-               __le64 tsft;
-       };
+       __le64 tsft;
+
+} __packed;
+
+struct rtl8187se_rx_desc {
+       __le32 flags;
+       __le64 tsft;
+       __le32 flags2;
+       __le32 flags3;
+       u32 reserved[3];
 } __packed;
 
 struct rtl8180_tx_ring {
@@ -71,28 +108,45 @@ struct rtl8180_priv {
 
        /* rtl8180 driver specific */
        spinlock_t lock;
-       struct rtl8180_rx_desc *rx_ring;
+       void *rx_ring;
+       u8 rx_ring_sz;
        dma_addr_t rx_ring_dma;
        unsigned int rx_idx;
        struct sk_buff *rx_buf[32];
-       struct rtl8180_tx_ring tx_ring[4];
+       struct rtl8180_tx_ring tx_ring[RTL818X_NR_TX_QUEUES];
        struct ieee80211_channel channels[14];
        struct ieee80211_rate rates[12];
        struct ieee80211_supported_band band;
+       struct ieee80211_tx_queue_params queue_param[4];
        struct pci_dev *pdev;
        u32 rx_conf;
-
-       int r8185;
+       u8 slot_time;
+       u16 ack_time;
+
+       enum {
+               RTL818X_CHIP_FAMILY_RTL8180,
+               RTL818X_CHIP_FAMILY_RTL8185,
+               RTL818X_CHIP_FAMILY_RTL8187SE,
+       } chip_family;
        u32 anaparam;
        u16 rfparam;
        u8 csthreshold;
-
+       u8 mac_addr[ETH_ALEN];
+       u8 rf_type;
+       u8 xtal_out;
+       u8 xtal_in;
+       u8 xtal_cal;
+       u8 thermal_meter_val;
+       u8 thermal_meter_en;
+       u8 antenna_diversity_en;
+       u8 antenna_diversity_default;
        /* sequence # */
        u16 seqno;
 };
 
 void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
 void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
+void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
 
 static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
 {
index d60a5f399022447c81130e7edc9e60d45d59a3c0..9bda5bc78edafd72964b6f03772e62f10da01ca9 100644 (file)
@@ -282,6 +282,7 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
 
        msleep(1); /* FIXME: optional? */
 
+       /* TODO: use the set_anaparam2 function from dev.c */
        /* anaparam2 on */
        rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
        reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
@@ -730,32 +731,11 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
        msleep(10);
 }
 
-static void rtl8225_rf_conf_erp(struct ieee80211_hw *dev,
-                               struct ieee80211_bss_conf *info)
-{
-       struct rtl8180_priv *priv = dev->priv;
-
-       if (info->use_short_slot) {
-               rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
-               rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
-               rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
-               rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
-               rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
-       } else {
-               rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
-               rtl818x_iowrite8(priv, &priv->map->SIFS, 0x44);
-               rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
-               rtl818x_iowrite8(priv, &priv->map->EIFS, 81);
-               rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
-       }
-}
-
 static const struct rtl818x_rf_ops rtl8225_ops = {
        .name           = "rtl8225",
        .init           = rtl8225_rf_init,
        .stop           = rtl8225_rf_stop,
        .set_chan       = rtl8225_rf_set_channel,
-       .conf_erp       = rtl8225_rf_conf_erp,
 };
 
 static const struct rtl818x_rf_ops rtl8225z2_ops = {
@@ -763,7 +743,6 @@ static const struct rtl818x_rf_ops rtl8225z2_ops = {
        .init           = rtl8225z2_rf_init,
        .stop           = rtl8225_rf_stop,
        .set_chan       = rtl8225_rf_set_channel,
-       .conf_erp       = rtl8225_rf_conf_erp,
 };
 
 const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.c
new file mode 100644 (file)
index 0000000..fde8986
--- /dev/null
@@ -0,0 +1,475 @@
+
+/* Radio tuning for RTL8225 on RTL8187SE
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on the r8180 and Realtek r8187se drivers, which are:
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
+ *
+ * Also based on the rtl8187 driver, which is:
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/mac80211.h>
+
+#include "rtl8180.h"
+#include "rtl8225se.h"
+
+#define PFX "rtl8225 (se) "
+
+static const u32 RF_GAIN_TABLE[] = {
+       0x0096, 0x0076, 0x0056, 0x0036, 0x0016, 0x01f6, 0x01d6, 0x01b6,
+       0x0196, 0x0176, 0x00F7, 0x00D7, 0x00B7, 0x0097, 0x0077, 0x0057,
+       0x0037, 0x00FB, 0x00DB, 0x00BB, 0x00FF, 0x00E3, 0x00C3, 0x00A3,
+       0x0083, 0x0063, 0x0043, 0x0023, 0x0003, 0x01E3, 0x01C3, 0x01A3,
+       0x0183, 0x0163, 0x0143, 0x0123, 0x0103
+};
+
+static const u8 cck_ofdm_gain_settings[] = {
+       0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
+       0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+       0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
+       0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+       0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
+       0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+};
+
+static const u8 rtl8225se_tx_gain_cck_ofdm[] = {
+       0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
+};
+
+static const u8 rtl8225se_tx_power_cck[] = {
+       0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
+       0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
+       0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
+       0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
+       0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
+       0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
+};
+
+static const u8 rtl8225se_tx_power_cck_ch14[] = {
+       0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
+       0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
+       0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
+       0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
+       0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
+       0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225se_tx_power_ofdm[] = {
+       0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
+};
+
+static const u32 rtl8225se_chan[] = {
+       0x0080, 0x0100, 0x0180, 0x0200, 0x0280, 0x0300, 0x0380,
+       0x0400, 0x0480, 0x0500, 0x0580, 0x0600, 0x0680, 0x074A,
+};
+
+static const u8 rtl8225sez2_tx_power_cck_ch14[] = {
+       0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
+};
+
+static const u8 rtl8225sez2_tx_power_cck_B[] = {
+       0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x04
+};
+
+static const u8 rtl8225sez2_tx_power_cck_A[] = {
+       0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04
+};
+
+static const u8 rtl8225sez2_tx_power_cck[] = {
+       0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
+};
+
+static const u8 ZEBRA_AGC[] = {
+       0x7E, 0x7E, 0x7E, 0x7E, 0x7D, 0x7C, 0x7B, 0x7A,
+       0x79, 0x78, 0x77, 0x76, 0x75, 0x74, 0x73, 0x72,
+       0x71, 0x70, 0x6F, 0x6E, 0x6D, 0x6C, 0x6B, 0x6A,
+       0x69, 0x68, 0x67, 0x66, 0x65, 0x64, 0x63, 0x62,
+       0x48, 0x47, 0x46, 0x45, 0x44, 0x29, 0x28, 0x27,
+       0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x08, 0x07,
+       0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
+       0x0f, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x15, 0x16,
+       0x17, 0x17, 0x18, 0x18, 0x19, 0x1a, 0x1a, 0x1b,
+       0x1b, 0x1c, 0x1c, 0x1d, 0x1d, 0x1d, 0x1e, 0x1e,
+       0x1f, 0x1f, 0x1f, 0x20, 0x20, 0x20, 0x20, 0x21,
+       0x21, 0x21, 0x22, 0x22, 0x22, 0x23, 0x23, 0x24,
+       0x24, 0x25, 0x25, 0x25, 0x26, 0x26, 0x27, 0x27,
+       0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F, 0x2F
+};
+
+static const u8 OFDM_CONFIG[] = {
+       0x10, 0x0F, 0x0A, 0x0C, 0x14, 0xFA, 0xFF, 0x50,
+       0x00, 0x50, 0x00, 0x00, 0x00, 0x5C, 0x00, 0x00,
+       0x40, 0x00, 0x40, 0x00, 0x00, 0x00, 0xA8, 0x26,
+       0x32, 0x33, 0x06, 0xA5, 0x6F, 0x55, 0xC8, 0xBB,
+       0x0A, 0xE1, 0x2C, 0x4A, 0x86, 0x83, 0x34, 0x00,
+       0x4F, 0x24, 0x6F, 0xC2, 0x03, 0x40, 0x80, 0x00,
+       0xC0, 0xC1, 0x58, 0xF1, 0x00, 0xC4, 0x90, 0x3e,
+       0xD8, 0x3C, 0x7B, 0x10, 0x10
+};
+
+static void rtl8187se_three_wire_io(struct ieee80211_hw *dev, u8 *data,
+                                   u8 len, bool write)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       int i;
+       u8 tmp;
+
+       do {
+               for (i = 0; i < 5; i++) {
+                       tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
+                       if (!(tmp & 0x3))
+                               break;
+                       udelay(10);
+               }
+               if (i == 5)
+                       wiphy_err(dev->wiphy, PFX
+                               "CmdReg: 0x%x RE/WE bits aren't clear\n", tmp);
+
+               tmp = rtl818x_ioread8(priv, &priv->map->rf_sw_config) | 0x02;
+               rtl818x_iowrite8(priv, &priv->map->rf_sw_config, tmp);
+
+               tmp = rtl818x_ioread8(priv, REG_ADDR1(0x84)) & 0xF7;
+               rtl818x_iowrite8(priv, REG_ADDR1(0x84), tmp);
+               if (write) {
+                       if (len == 16) {
+                               rtl818x_iowrite16(priv, SW_3W_DB0,
+                                 *(u16 *)data);
+                       } else if (len == 64) {
+                               rtl818x_iowrite32(priv, SW_3W_DB0_4,
+                                 *((u32 *)data));
+                               rtl818x_iowrite32(priv, SW_3W_DB1_4,
+                                 *((u32 *)(data + 4)));
+                       } else
+                               wiphy_err(dev->wiphy, PFX
+                                       "Unimplemented length\n");
+               } else {
+                       rtl818x_iowrite16(priv, SW_3W_DB0, *(u16 *)data);
+               }
+               if (write)
+                       tmp = 2;
+               else
+                       tmp = 1;
+               rtl818x_iowrite8(priv, SW_3W_CMD1, tmp);
+               for (i = 0; i < 5; i++) {
+                       tmp = rtl818x_ioread8(priv, SW_3W_CMD1);
+                       if (!(tmp & 0x3))
+                               break;
+                       udelay(10);
+               }
+               rtl818x_iowrite8(priv, SW_3W_CMD1, 0);
+               if (!write) {
+                       *((u16 *)data) = rtl818x_ioread16(priv, SI_DATA_REG);
+                       *((u16 *)data) &= 0x0FFF;
+               }
+       } while (0);
+}
+
+static u32 rtl8187se_rf_readreg(struct ieee80211_hw *dev, u8 addr)
+{
+       u32 dataread = addr & 0x0F;
+       rtl8187se_three_wire_io(dev, (u8 *)&dataread, 16, 0);
+       return dataread;
+}
+
+static void rtl8187se_rf_writereg(struct ieee80211_hw *dev, u8 addr, u32 data)
+{
+       u32 outdata = (data << 4) | (u32)(addr & 0x0F);
+       rtl8187se_three_wire_io(dev, (u8 *)&outdata, 16, 1);
+}
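
A sketch (not from the patch) of the 3-wire word format the two helpers
above use: the low nibble carries the register address, the upper bits the
data, and readback values are masked to 12 bits. The numbers are
illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t addr = 0x02;
            uint32_t data = 0x088D;

            uint32_t wr_word = (data << 4) | (addr & 0x0F); /* word shifted out */
            uint16_t rd_word = 0x5A3C;      /* hypothetical raw readback */

            printf("write word = 0x%05x\n", (unsigned)wr_word);     /* 0x088d2 */
            printf("read value = 0x%03x\n", (unsigned)(rd_word & 0x0FFF));
            return 0;
    }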
+
+
+static void rtl8225se_write_zebra_agc(struct ieee80211_hw *dev)
+{
+       int i;
+
+       for (i = 0; i < 128; i++) {
+               rtl8225se_write_phy_ofdm(dev, 0xF, ZEBRA_AGC[i]);
+               rtl8225se_write_phy_ofdm(dev, 0xE, i+0x80);
+               rtl8225se_write_phy_ofdm(dev, 0xE, 0);
+       }
+}
+
+static void rtl8187se_write_ofdm_config(struct ieee80211_hw *dev)
+{
+       /* write OFDM_CONFIG table */
+       int i;
+
+       for (i = 0; i < 60; i++)
+               rtl8225se_write_phy_ofdm(dev, i, OFDM_CONFIG[i]);
+
+}
+
+static void rtl8225sez2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       u8 cck_power, ofdm_power;
+
+       cck_power = priv->channels[channel - 1].hw_value & 0xFF;
+       if (cck_power > 35)
+               cck_power = 35;
+       rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
+                        cck_ofdm_gain_settings[cck_power]);
+
+       usleep_range(1000, 5000);
+       ofdm_power = priv->channels[channel - 1].hw_value >> 8;
+       if (ofdm_power > 35)
+               ofdm_power = 35;
+
+       rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
+                        cck_ofdm_gain_settings[ofdm_power]);
+       if (ofdm_power < 12) {
+               rtl8225se_write_phy_ofdm(dev, 7, 0x5C);
+               rtl8225se_write_phy_ofdm(dev, 9, 0x5C);
+       }
+       if (ofdm_power < 18) {
+               rtl8225se_write_phy_ofdm(dev, 7, 0x54);
+               rtl8225se_write_phy_ofdm(dev, 9, 0x54);
+       } else {
+               rtl8225se_write_phy_ofdm(dev, 7, 0x50);
+               rtl8225se_write_phy_ofdm(dev, 9, 0x50);
+       }
+
+       usleep_range(1000, 5000);
+}
+
+static void rtl8187se_write_rf_gain(struct ieee80211_hw *dev)
+{
+       int i;
+
+       for (i = 0; i <= 36; i++) {
+               rtl8187se_rf_writereg(dev, 0x01, i); mdelay(1);
+               rtl8187se_rf_writereg(dev, 0x02, RF_GAIN_TABLE[i]); mdelay(1);
+       }
+}
+
+static void rtl8187se_write_initial_gain(struct ieee80211_hw *dev,
+                                       int init_gain)
+{
+       switch (init_gain) {
+       default:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x26); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
+               break;
+       case 2:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFA); mdelay(1);
+               break;
+       case 3:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x36); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+               break;
+       case 4:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0x86); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+               break;
+       case 5:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x46); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFB); mdelay(1);
+               break;
+       case 6:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0x96); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+               break;
+       case 7:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x56); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0xA6); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+               break;
+       case 8:
+               rtl8225se_write_phy_ofdm(dev, 0x17, 0x66); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x24, 0xB6); mdelay(1);
+               rtl8225se_write_phy_ofdm(dev, 0x05, 0xFC); mdelay(1);
+               break;
+       }
+}
+
+void rtl8225se_rf_init(struct ieee80211_hw *dev)
+{
+       struct rtl8180_priv *priv = dev->priv;
+       u32 rf23, rf24;
+       u8 d_cut = 0;
+       u8 tmp;
+
+       /* Page 1 */
+       rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
+       rf23 = rtl8187se_rf_readreg(dev, 0x08); mdelay(1);
+       rf24 = rtl8187se_rf_readreg(dev, 0x09); mdelay(1);
+       if (rf23 == 0x0818 && rf24 == 0x070C)
+               d_cut = 1;
+
+       wiphy_info(dev->wiphy, "RTL8225-SE version %s\n",
+               d_cut ? "D" : "not-D");
+
+       /* Page 0: reg 0 - 15 */
+       rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x01, 0x06E0); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x03, 0x07F1); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x05, 0x0C72); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x06, 0x0AE6); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x07, 0x00CA); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x08, 0x0E1C); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x09, 0x02F0); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0A, 0x09D0); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0B, 0x01BA); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0C, 0x0640); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0E, 0x0020); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0F, 0x0990); mdelay(1);
+       /* page 1: reg 16-30 */
+       rtl8187se_rf_writereg(dev, 0x00, 0x013F); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x03, 0x0806); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x04, 0x03A7); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x05, 0x059B); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x06, 0x0081); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x07, 0x01A0); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0B, 0x0418); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0C, 0x0FBE); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(1);
+       if (d_cut)
+               rtl8187se_rf_writereg(dev, 0x0E, 0x0807);
+       else
+               rtl8187se_rf_writereg(dev, 0x0E, 0x0806);
+       mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x00, 0x01D7); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x03, 0x0E00); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x04, 0x0E50); mdelay(1);
+
+       rtl8187se_write_rf_gain(dev);
+
+       rtl8187se_rf_writereg(dev, 0x05, 0x0203); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x06, 0x0200); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
+       rtl8187se_rf_writereg(dev, 0x0D, 0x0008); mdelay(11);
+       rtl8187se_rf_writereg(dev, 0x00, 0x0037); mdelay(11);
+       rtl8187se_rf_writereg(dev, 0x04, 0x0160); mdelay(11);
+       rtl8187se_rf_writereg(dev, 0x07, 0x0080); mdelay(11);
+       rtl8187se_rf_writereg(dev, 0x02, 0x088D); mdelay(221);
+       rtl8187se_rf_writereg(dev, 0x00, 0x0137); mdelay(11);
+       rtl8187se_rf_writereg(dev, 0x07, 0x0000); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x07, 0x0180); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x07, 0x0220); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x07, 0x03E0); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x06, 0x00C1); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0A, 0x0001); mdelay(1);
+       if (priv->xtal_cal) {
+               tmp = (priv->xtal_in << 4) | (priv->xtal_out << 1) |
+                     (1 << 11) | (1 << 9);
+               rtl8187se_rf_writereg(dev, 0x0F, tmp);
+               wiphy_info(dev->wiphy, "Xtal cal\n");
+               mdelay(1);
+       } else {
+               wiphy_info(dev->wiphy, "NO Xtal cal\n");
+               rtl8187se_rf_writereg(dev, 0x0F, 0x0ACC);
+               mdelay(1);
+       }
+       /* page 0 */
+       rtl8187se_rf_writereg(dev, 0x00, 0x00BF); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x0D, 0x08DF); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x02, 0x004D); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x04, 0x0975); mdelay(31);
+       rtl8187se_rf_writereg(dev, 0x00, 0x0197); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x05, 0x05AB); mdelay(1);
+
+       rtl8187se_rf_writereg(dev, 0x00, 0x009F); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x01, 0x0000); mdelay(1);
+       rtl8187se_rf_writereg(dev, 0x02, 0x0000); mdelay(1);
+       /* power save parameters */
+       /* TODO: move to dev.c */
+       rtl818x_iowrite8(priv, REG_ADDR1(0x024E),
+                rtl818x_ioread8(priv, REG_ADDR1(0x24E)) & 0x9F);
+       rtl8225se_write_phy_cck(dev, 0x00, 0xC8);
+       rtl8225se_write_phy_cck(dev, 0x06, 0x1C);
+       rtl8225se_write_phy_cck(dev, 0x10, 0x78);
+       rtl8225se_write_phy_cck(dev, 0x2E, 0xD0);
+       rtl8225se_write_phy_cck(dev, 0x2F, 0x06);
+       rtl8225se_write_phy_cck(dev, 0x01, 0x46);
+
+       /* power control */
+       rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x10);
+       rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x1B);
+
+       rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03);
+       rtl8225se_write_phy_ofdm(dev, 0x00, 0x12);
+
+       rtl8225se_write_zebra_agc(dev);
+
+       rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
+
+       rtl8187se_write_ofdm_config(dev);
+
+       /* turn on RF */
+       rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
+       rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
+       /* turn on RF again */
+       rtl8187se_rf_writereg(dev, 0x00, 0x009F); udelay(500);
+       rtl8187se_rf_writereg(dev, 0x04, 0x0972); udelay(500);
+       /* turn on BB */
+       rtl8225se_write_phy_ofdm(dev, 0x10, 0x40);
+       rtl8225se_write_phy_ofdm(dev, 0x12, 0x40);
+
+       rtl8187se_write_initial_gain(dev, 4);
+}
+
+void rtl8225se_rf_stop(struct ieee80211_hw *dev)
+{
+       /* checked for 8187se */
+       struct rtl8180_priv *priv = dev->priv;
+
+       /* turn off BB RXIQ matrix to cut off rx signal */
+       rtl8225se_write_phy_ofdm(dev, 0x10, 0x00);
+       rtl8225se_write_phy_ofdm(dev, 0x12, 0x00);
+       /* turn off RF */
+       rtl8187se_rf_writereg(dev, 0x04, 0x0000);
+       rtl8187se_rf_writereg(dev, 0x00, 0x0000);
+
+       usleep_range(1000, 5000);
+       /* turn off A/D and D/A */
+       rtl8180_set_anaparam(priv, RTL8225SE_ANAPARAM_OFF);
+       rtl8180_set_anaparam2(priv, RTL8225SE_ANAPARAM2_OFF);
+}
+
+void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
+                                  struct ieee80211_conf *conf)
+{
+       int chan =
+               ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
+
+       rtl8225sez2_rf_set_tx_power(dev, chan);
+       rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
+       if ((rtl8187se_rf_readreg(dev, 0x7) & 0x0F80) !=
+               rtl8225se_chan[chan - 1])
+               rtl8187se_rf_writereg(dev, 0x7, rtl8225se_chan[chan - 1]);
+       usleep_range(10000, 20000);
+}
+
+static const struct rtl818x_rf_ops rtl8225se_ops = {
+       .name           = "rtl8225-se",
+       .init           = rtl8225se_rf_init,
+       .stop           = rtl8225se_rf_stop,
+       .set_chan       = rtl8225se_rf_set_channel,
+};
+
+const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *dev)
+{
+       return &rtl8225se_ops;
+}
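The rtl8225se_ops table above is the only interface the rest of the rtl8180/rtl8187se code needs from this new RF backend. A minimal sketch of a hypothetical caller, not part of the patch (example_bring_up_rf() and its sequencing are assumptions; the real wiring lives in the rtl8180 MAC driver):

static int example_bring_up_rf(struct ieee80211_hw *dev,
                               struct ieee80211_conf *conf)
{
        const struct rtl818x_rf_ops *rf = rtl8187se_detect_rf(dev);

        rf->init(dev);                  /* rtl8225se_rf_init() */
        rf->set_chan(dev, conf);        /* tune and set per-channel TX power */
        /* ... pass traffic ... */
        rf->stop(dev);                  /* rtl8225se_rf_stop() */
        return 0;
}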
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8225se.h
new file mode 100644 (file)
index 0000000..2294002
--- /dev/null
@@ -0,0 +1,61 @@
+
+/* Definitions for RTL8187SE hardware
+ *
+ * Copyright 2009 Larry Finger <Larry.Finger@lwfinger.net>
+ * Copyright 2014 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * Based on the r8180 and Realtek r8187se drivers, which are:
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
+ *
+ * Also based on the rtl8187 driver, which is:
+ * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RTL8187SE_RTL8225_H
+#define RTL8187SE_RTL8225_H
+
+#define RTL8225SE_ANAPARAM_ON  0xb0054d00
+#define RTL8225SE_ANAPARAM2_ON 0x000004c6
+
+/* all off except PLL */
+#define RTL8225SE_ANAPARAM_OFF 0xb0054dec
+/* all on including PLL */
+#define RTL8225SE_ANAPARAM_OFF2        0xb0054dfc
+
+#define RTL8225SE_ANAPARAM2_OFF        0x00ff04c6
+
+#define RTL8225SE_ANAPARAM3    0x10
+
+enum rtl8187se_power_state {
+       RTL8187SE_POWER_ON,
+       RTL8187SE_POWER_OFF,
+       RTL8187SE_POWER_SLEEP
+};
+
+static inline void rtl8225se_write_phy_ofdm(struct ieee80211_hw *dev,
+                                         u8 addr, u8 data)
+{
+       rtl8180_write_phy(dev, addr, data);
+}
+
+static inline void rtl8225se_write_phy_cck(struct ieee80211_hw *dev,
+                                        u8 addr, u8 data)
+{
+       rtl8180_write_phy(dev, addr, data | 0x10000);
+}
+
+
+const struct rtl818x_rf_ops *rtl8187se_detect_rf(struct ieee80211_hw *);
+void rtl8225se_rf_stop(struct ieee80211_hw *dev);
+void rtl8225se_rf_set_channel(struct ieee80211_hw *dev,
+                                    struct ieee80211_conf *conf);
+void rtl8225se_rf_conf_erp(struct ieee80211_hw *dev,
+                                 struct ieee80211_bss_conf *info);
+void rtl8225se_rf_init(struct ieee80211_hw *dev);
+
+#endif /* RTL8187SE_RTL8225_H */
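Both inline helpers above funnel into rtl8180_write_phy(); the CCK variant sets bit 16 of the value, which the low-level writer presumably decodes as a CCK/OFDM bank selector. An illustrative equivalent, not part of the patch:

static inline void example_write_phy(struct ieee80211_hw *dev,
                                     u8 addr, u8 data, bool cck)
{
        /* rtl8225se_write_phy_cck(dev, addr, data) behaves like
         * rtl8180_write_phy(dev, addr, data | 0x10000)
         */
        rtl8180_write_phy(dev, addr, cck ? (data | 0x10000) : data);
}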
index fd78df813a8533911ffaeb7c08d53c0a440810f1..0ca17cda48fa1c01b3a8dd2ed98c5fe0c5646640 100644 (file)
@@ -592,7 +592,7 @@ static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
        rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
        rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
        if (priv->is_rtl8187b)
-               rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
+               rtl818x_iowrite8(priv, &priv->map->ANAPARAM3A, anaparam3);
        reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
        rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
        rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -785,7 +785,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
        rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
 
        reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
-       reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+       reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
        rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
 
        /* Auto Rate Fallback Register (ARFR): 1M-54M setting */
@@ -943,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
                rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
 
                reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
-               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
-               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+               reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
                reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
                rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
 
@@ -986,13 +986,13 @@ static int rtl8187_start(struct ieee80211_hw *dev)
        rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
 
        reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
-       reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
-       reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+       reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+       reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
        rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
 
        reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
-       reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
-       reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+       reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+       reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
        reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
        rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
 
@@ -1636,10 +1636,10 @@ static int rtl8187_probe(struct usb_interface *intf,
 
  err_free_dmabuf:
        kfree(priv->io_dmabuf);
- err_free_dev:
-       ieee80211_free_hw(dev);
        usb_set_intfdata(intf, NULL);
        usb_put_dev(udev);
+ err_free_dev:
+       ieee80211_free_hw(dev);
        return err;
 }
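The dev.c hunks above make two separate fixes: the RTL818X_*_SHIFT constants are renamed because they are single-bit masks, not shift counts (they are defined as (1 << n) and always used with &= ~ and |=), and the probe error path is reordered so the USB cleanup and the ieee80211_free_hw() call sit under the appropriate labels. A minimal sketch of the mask idiom, with hypothetical EXAMPLE_* names standing in for the renamed RTL818X_* definitions:

#define EXAMPLE_PERPACKET_CW    (1 << 0)
#define EXAMPLE_PERPACKET_RETRY (1 << 1)

static u8 example_update_cw_conf(u8 reg)
{
        reg &= ~EXAMPLE_PERPACKET_CW;    /* clear per-packet CW control */
        reg |= EXAMPLE_PERPACKET_RETRY;  /* enable per-packet retry limit */
        return reg;
}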
 
index ce23dfd4238148b4b1da2b61c8eaf39409ef710d..45ea4e1c4abe157ad952be2d8a5022efc1e1715a 100644 (file)
 #define RTL818X_H
 
 struct rtl818x_csr {
+
        u8      MAC[6];
        u8      reserved_0[2];
-       __le32  MAR[2];
-       u8      RX_FIFO_COUNT;
-       u8      reserved_1;
-       u8      TX_FIFO_COUNT;
-       u8      BQREQ;
-       u8      reserved_2[4];
+
+       union {
+               __le32  MAR[2];  /* 0x8 */
+
+               struct{ /* rtl8187se */
+                       u8 rf_sw_config; /* 0x8 */
+                       u8 reserved_01[3];
+                       __le32 TMGDA; /* 0xc */
+               } __packed;
+       } __packed;
+
+       union { /*  0x10  */
+               struct {
+                       u8      RX_FIFO_COUNT;
+                       u8      reserved_1;
+                       u8      TX_FIFO_COUNT;
+                       u8      BQREQ;
+               } __packed;
+
+               __le32 TBKDA; /* for 8187se */
+       } __packed;
+
+       __le32 TBEDA; /* 0x14 - for rtl8187se */
+
        __le32  TSFT[2];
-       __le32  TLPDA;
-       __le32  TNPDA;
-       __le32  THPDA;
-       __le16  BRSR;
-       u8      BSSID[6];
-       u8      RESP_RATE;
-       u8      EIFS;
-       u8      reserved_3[1];
-       u8      CMD;
+
+       union { /* 0x20 */
+               __le32  TLPDA;
+               __le32  TVIDA; /* for 8187se */
+       } __packed;
+
+       union { /* 0x24 */
+               __le32  TNPDA;
+               __le32  TVODA; /* for 8187se */
+       } __packed;
+
+       /* hi pri ring for all cards */
+       __le32  THPDA; /* 0x28 */
+
+       union { /* 0x2c */
+               struct {
+                       u8 reserved_2a;
+                       u8 EIFS_8187SE;
+               } __packed;
+
+               __le16  BRSR;
+       } __packed;
+
+       u8      BSSID[6]; /* 0x2e */
+
+       union { /* 0x34 */
+               struct {
+                       u8 RESP_RATE;
+                       u8 EIFS;
+               } __packed;
+               __le16 BRSR_8187SE;
+       } __packed;
+
+       u8      reserved_3[1]; /* 0x36 */
+       u8      CMD; /* 0x37 */
 #define RTL818X_CMD_TX_ENABLE          (1 << 2)
 #define RTL818X_CMD_RX_ENABLE          (1 << 3)
 #define RTL818X_CMD_RESET              (1 << 4)
-       u8      reserved_4[4];
-       __le16  INT_MASK;
-       __le16  INT_STATUS;
+       u8      reserved_4[4]; /* 0x38 */
+       union {
+               struct {
+                       __le16  INT_MASK;
+                       __le16  INT_STATUS;
+               } __packed;
+
+               __le32  INT_STATUS_SE; /* 0x3c */
+       } __packed;
+/* status bits for rtl8187 and rtl8180/8185 */
 #define RTL818X_INT_RX_OK              (1 <<  0)
 #define RTL818X_INT_RX_ERR             (1 <<  1)
 #define RTL818X_INT_TXL_OK             (1 <<  2)
@@ -56,7 +108,34 @@ struct rtl818x_csr {
 #define RTL818X_INT_BEACON             (1 << 13)
 #define RTL818X_INT_TIME_OUT           (1 << 14)
 #define RTL818X_INT_TX_FO              (1 << 15)
-       __le32  TX_CONF;
+/* status bits for rtl8187se */
+#define RTL818X_INT_SE_TIMER3          (1 <<  0)
+#define RTL818X_INT_SE_TIMER2          (1 <<  1)
+#define RTL818X_INT_SE_RQ0SOR          (1 <<  2)
+#define RTL818X_INT_SE_TXBED_OK                (1 <<  3)
+#define RTL818X_INT_SE_TXBED_ERR       (1 <<  4)
+#define RTL818X_INT_SE_TXBE_OK         (1 <<  5)
+#define RTL818X_INT_SE_TXBE_ERR                (1 <<  6)
+#define RTL818X_INT_SE_RX_OK           (1 <<  7)
+#define RTL818X_INT_SE_RX_ERR          (1 <<  8)
+#define RTL818X_INT_SE_TXL_OK          (1 <<  9)
+#define RTL818X_INT_SE_TXL_ERR         (1 << 10)
+#define RTL818X_INT_SE_RX_DU           (1 << 11)
+#define RTL818X_INT_SE_RX_FIFO         (1 << 12)
+#define RTL818X_INT_SE_TXN_OK          (1 << 13)
+#define RTL818X_INT_SE_TXN_ERR         (1 << 14)
+#define RTL818X_INT_SE_TXH_OK          (1 << 15)
+#define RTL818X_INT_SE_TXH_ERR         (1 << 16)
+#define RTL818X_INT_SE_TXB_OK          (1 << 17)
+#define RTL818X_INT_SE_TXB_ERR         (1 << 18)
+#define RTL818X_INT_SE_ATIM_TO         (1 << 19)
+#define RTL818X_INT_SE_BK_TO           (1 << 20)
+#define RTL818X_INT_SE_TIMER1          (1 << 21)
+#define RTL818X_INT_SE_TX_FIFO         (1 << 22)
+#define RTL818X_INT_SE_WAKEUP          (1 << 23)
+#define RTL818X_INT_SE_BK_DMA          (1 << 24)
+#define RTL818X_INT_SE_TMGD_OK         (1 << 30)
+       __le32  TX_CONF; /* 0x40 */
 #define RTL818X_TX_CONF_LOOPBACK_MAC   (1 << 17)
 #define RTL818X_TX_CONF_LOOPBACK_CONT  (3 << 17)
 #define RTL818X_TX_CONF_NO_ICV         (1 << 19)
@@ -68,6 +147,7 @@ struct rtl818x_csr {
 #define RTL818X_TX_CONF_R8185_D                (5 << 25)
 #define RTL818X_TX_CONF_R8187vD                (5 << 25)
 #define RTL818X_TX_CONF_R8187vD_B      (6 << 25)
+#define RTL818X_TX_CONF_RTL8187SE      (6 << 25)
 #define RTL818X_TX_CONF_HWVER_MASK     (7 << 25)
 #define RTL818X_TX_CONF_DISREQQSIZE    (1 << 28)
 #define RTL818X_TX_CONF_PROBE_DTS      (1 << 29)
@@ -122,31 +202,67 @@ struct rtl818x_csr {
        u8      PGSELECT;
        u8      SECURITY;
        __le32  ANAPARAM2;
-       u8      reserved_10[12];
-       __le16  BEACON_INTERVAL;
-       __le16  ATIM_WND;
-       __le16  BEACON_INTERVAL_TIME;
-       __le16  ATIMTR_INTERVAL;
-       u8      PHY_DELAY;
-       u8      CARRIER_SENSE_COUNTER;
-       u8      reserved_11[2];
-       u8      PHY[4];
-       __le16  RFPinsOutput;
-       __le16  RFPinsEnable;
-       __le16  RFPinsSelect;
-       __le16  RFPinsInput;
-       __le32  RF_PARA;
-       __le32  RF_TIMING;
-       u8      GP_ENABLE;
-       u8      GPIO0;
-       u8      GPIO1;
-       u8      reserved_12;
-       __le32  HSSI_PARA;
-       u8      reserved_13[4];
-       u8      TX_AGC_CTL;
-#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT                (1 << 0)
-#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT      (1 << 1)
-#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT                        (1 << 2)
+       u8      reserved_10[8];
+       __le32  IMR;            /* 0x6c - Interrupt mask reg for 8187se */
+#define IMR_TMGDOK      ((1 << 30))
+#define IMR_DOT11HINT  ((1 << 25))     /* 802.11h Measurement Interrupt */
+#define IMR_BCNDMAINT  ((1 << 24))     /* Beacon DMA Interrupt */
+#define IMR_WAKEINT    ((1 << 23))     /* Wake Up Interrupt */
+#define IMR_TXFOVW     ((1 << 22))     /* Tx FIFO Overflow */
+#define IMR_TIMEOUT1   ((1 << 21))     /* Time Out Interrupt 1 */
+#define IMR_BCNINT     ((1 << 20))     /* Beacon Time out */
+#define IMR_ATIMINT    ((1 << 19))     /* ATIM Time Out */
+#define IMR_TBDER      ((1 << 18))     /* Tx Beacon Descriptor Error */
+#define IMR_TBDOK      ((1 << 17))     /* Tx Beacon Descriptor OK */
+#define IMR_THPDER     ((1 << 16))     /* Tx High Priority Descriptor Error */
+#define IMR_THPDOK     ((1 << 15))     /* Tx High Priority Descriptor OK */
+#define IMR_TVODER     ((1 << 14))     /* Tx AC_VO Descriptor Error Int */
+#define IMR_TVODOK     ((1 << 13))     /* Tx AC_VO Descriptor OK Interrupt */
+#define IMR_FOVW       ((1 << 12))     /* Rx FIFO Overflow Interrupt */
+#define IMR_RDU                ((1 << 11))     /* Rx Descriptor Unavailable */
+#define IMR_TVIDER     ((1 << 10))     /* Tx AC_VI Descriptor Error */
+#define IMR_TVIDOK     ((1 << 9))      /* Tx AC_VI Descriptor OK Interrupt */
+#define IMR_RER                ((1 << 8))      /* Rx Error Interrupt */
+#define IMR_ROK                ((1 << 7))      /* Receive OK Interrupt */
+#define IMR_TBEDER     ((1 << 6))      /* Tx AC_BE Descriptor Error */
+#define IMR_TBEDOK     ((1 << 5))      /* Tx AC_BE Descriptor OK */
+#define IMR_TBKDER     ((1 << 4))      /* Tx AC_BK Descriptor Error */
+#define IMR_TBKDOK     ((1 << 3))      /* Tx AC_BK Descriptor OK */
+#define IMR_RQOSOK     ((1 << 2))      /* Rx QoS OK Interrupt */
+#define IMR_TIMEOUT2   ((1 << 1))      /* Time Out Interrupt 2 */
+#define IMR_TIMEOUT3   ((1 << 0))      /* Time Out Interrupt 3 */
+       __le16  BEACON_INTERVAL; /* 0x70 */
+       __le16  ATIM_WND; /*  0x72 */
+       __le16  BEACON_INTERVAL_TIME; /*  0x74 */
+       __le16  ATIMTR_INTERVAL; /*  0x76 */
+       u8      PHY_DELAY; /*  0x78 */
+       u8      CARRIER_SENSE_COUNTER; /* 0x79 */
+       u8      reserved_11[2]; /* 0x7a */
+       u8      PHY[4]; /* 0x7c  */
+       __le16  RFPinsOutput; /* 0x80 */
+       __le16  RFPinsEnable; /* 0x82 */
+       __le16  RFPinsSelect; /* 0x84 */
+       __le16  RFPinsInput;  /* 0x86 */
+       __le32  RF_PARA; /*  0x88 */
+       __le32  RF_TIMING; /*  0x8c */
+       u8      GP_ENABLE; /*  0x90 */
+       u8      GPIO0; /*  0x91 */
+       u8      GPIO1; /*  0x92 */
+       u8      TPPOLL_STOP; /*  0x93 - rtl8187se only */
+#define RTL818x_TPPOLL_STOP_BQ                 (1 << 7)
+#define RTL818x_TPPOLL_STOP_VI                 (1 << 4)
+#define RTL818x_TPPOLL_STOP_VO                 (1 << 5)
+#define RTL818x_TPPOLL_STOP_BE                 (1 << 3)
+#define RTL818x_TPPOLL_STOP_BK                 (1 << 2)
+#define RTL818x_TPPOLL_STOP_MG                 (1 << 1)
+#define RTL818x_TPPOLL_STOP_HI                 (1 << 6)
+
+       __le32  HSSI_PARA; /*  0x94 */
+       u8      reserved_13[4]; /* 0x98 */
+       u8      TX_AGC_CTL; /*  0x9c */
+#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN      (1 << 0)
+#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL    (1 << 1)
+#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT                (1 << 2)
        u8      TX_GAIN_CCK;
        u8      TX_GAIN_OFDM;
        u8      TX_ANTENNA;
@@ -158,8 +274,8 @@ struct rtl818x_csr {
        u8      SLOT;
        u8      reserved_16[5];
        u8      CW_CONF;
-#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT     (1 << 0)
-#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT  (1 << 1)
+#define RTL818X_CW_CONF_PERPACKET_CW   (1 << 0)
+#define RTL818X_CW_CONF_PERPACKET_RETRY        (1 << 1)
        u8      CW_VAL;
        u8      RATE_FALLBACK;
 #define RTL818X_RATE_FALLBACK_ENABLE   (1 << 7)
@@ -167,7 +283,8 @@ struct rtl818x_csr {
        u8      reserved_17[24];
        u8      CONFIG5;
        u8      TX_DMA_POLLING;
-       u8      reserved_18[2];
+       u8      PHY_PR;
+       u8      reserved_18;
        __le16  CWR;
        u8      RETRY_CTR;
        u8      reserved_19[3];
@@ -179,20 +296,64 @@ struct rtl818x_csr {
        __le32  RDSAR;
        __le16  TID_AC_MAP;
        u8      reserved_20[4];
-       u8      ANAPARAM3;
-       u8      reserved_21[5];
-       __le16  FEMR;
-       u8      reserved_22[4];
-       __le16  TALLY_CNT;
-       u8      TALLY_SEL;
+       union {
+               __le16  ANAPARAM3; /* 0xee */
+               u8      ANAPARAM3A; /* for rtl8187 */
+       };
+
+#define AC_PARAM_TXOP_LIMIT_SHIFT      16
+#define AC_PARAM_ECW_MAX_SHIFT         12
+#define AC_PARAM_ECW_MIN_SHIFT         8
+#define AC_PARAM_AIFS_SHIFT            0
+
+       __le32 AC_VO_PARAM; /* 0xf0 */
+
+       union { /* 0xf4 */
+               __le32 AC_VI_PARAM;
+               __le16 FEMR;
+       } __packed;
+
+       union{ /* 0xf8 */
+               __le32  AC_BE_PARAM; /* rtl8187se */
+               struct{
+                       u8      reserved_21[2];
+                       __le16  TALLY_CNT; /* 0xfa */
+               } __packed;
+       } __packed;
+
+       union {
+               u8      TALLY_SEL; /* 0xfc */
+               __le32  AC_BK_PARAM;
+
+       } __packed;
+
 } __packed;
 
+/* Registers at non-standard offsets, far beyond the end of this
+ * struct. They are accessed through the REG_ADDR* helpers below
+ * rather than padding the struct with a long run of "reserved"
+ * fields. RTL8187SE only.
+ */
+#define REG_ADDR1(addr)        ((u8 __iomem *)priv->map + addr)
+#define REG_ADDR2(addr)        ((__le16 __iomem *)priv->map + (addr >> 1))
+#define REG_ADDR4(addr)        ((__le32 __iomem *)priv->map + (addr >> 2))
+
+#define FEMR_SE                REG_ADDR2(0x1D4)
+#define ARFR           REG_ADDR2(0x1E0)
+#define RFSW_CTRL      REG_ADDR2(0x272)
+#define SW_3W_DB0      REG_ADDR2(0x274)
+#define SW_3W_DB0_4    REG_ADDR4(0x274)
+#define SW_3W_DB1      REG_ADDR2(0x278)
+#define SW_3W_DB1_4    REG_ADDR4(0x278)
+#define SW_3W_CMD1     REG_ADDR1(0x27D)
+#define PI_DATA_REG    REG_ADDR2(0x360)
+#define SI_DATA_REG     REG_ADDR2(0x362)
+
 struct rtl818x_rf_ops {
        char *name;
        void (*init)(struct ieee80211_hw *);
        void (*stop)(struct ieee80211_hw *);
        void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
-       void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *);
        u8 (*calc_rssi)(u8 agc, u8 sq);
 };
 
index c2ffce7a907c90cf3fcc01400f50f2850d371533..bf3cf124e4eaac4e16fe9cbcd9299dbbcb9df250 100644 (file)
@@ -5,7 +5,7 @@ menuconfig RTL_CARDS
        ---help---
          This option will enable support for the Realtek mac80211-based
          wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
-         rtl8723eu, and rtl8188eu share some common code.
+         rtl8723ae, rtl8723be, and rtl8188ee share some common code.
 
 if RTL_CARDS
 
@@ -48,12 +48,27 @@ config RTL8723AE
        depends on PCI
        select RTLWIFI
        select RTLWIFI_PCI
+       select RTL8723_COMMON
+       select RTLBTCOEXIST
        ---help---
        This is the driver for Realtek RTL8723AE 802.11n PCIe
        wireless network adapters.
 
        If you choose to build it as a module, it will be called rtl8723ae
 
+config RTL8723BE
+       tristate "Realtek RTL8723BE PCIe Wireless Network Adapter"
+       depends on PCI
+       select RTLWIFI
+       select RTLWIFI_PCI
+       select RTL8723_COMMON
+       select RTLBTCOEXIST
+       ---help---
+       This is the driver for Realtek RTL8723BE 802.11n PCIe
+       wireless network adapters.
+
+       If you choose to build it as a module, it will be called rtl8723be
+
 config RTL8188EE
        tristate "Realtek RTL8188EE Wireless Network Adapter"
        depends on PCI
@@ -101,4 +116,14 @@ config RTL8192C_COMMON
        depends on RTL8192CE || RTL8192CU
        default y
 
+config RTL8723_COMMON
+       tristate
+       depends on RTL8723AE || RTL8723BE
+       default y
+
+config RTLBTCOEXIST
+       tristate
+       depends on RTL8723AE || RTL8723BE
+       default y
+
 endif
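The new RTL8723BE entry, like RTL8723AE above it, selects RTL8723_COMMON and RTLBTCOEXIST; both are hidden tristates that default on whenever one of the RTL8723 drivers is enabled, so the shared rtl8723com and btcoexist code is built automatically. An illustrative outcome when only the new driver is enabled as a module (assumed .config fragment, not taken from a real build):

# rtl8723be pulls in the common pieces via select / default y
CONFIG_RTL_CARDS=m
CONFIG_RTLWIFI=m
CONFIG_RTLWIFI_PCI=m
CONFIG_RTL8723BE=m
CONFIG_RTL8723_COMMON=m
CONFIG_RTLBTCOEXIST=m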
index d56f023a4b90dfadabf45670b2f5c8d0ca1fa46d..bba36a06abcc4c3cf3d896eab771ef0406611993 100644 (file)
@@ -24,6 +24,9 @@ obj-$(CONFIG_RTL8192CU)               += rtl8192cu/
 obj-$(CONFIG_RTL8192SE)                += rtl8192se/
 obj-$(CONFIG_RTL8192DE)                += rtl8192de/
 obj-$(CONFIG_RTL8723AE)                += rtl8723ae/
+obj-$(CONFIG_RTL8723BE)                += rtl8723be/
 obj-$(CONFIG_RTL8188EE)                += rtl8188ee/
+obj-$(CONFIG_RTLBTCOEXIST)     += btcoexist/
+obj-$(CONFIG_RTL8723_COMMON)   += rtl8723com/
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/Makefile b/drivers/net/wireless/rtlwifi/btcoexist/Makefile
new file mode 100644 (file)
index 0000000..47ceecf
--- /dev/null
@@ -0,0 +1,7 @@
+btcoexist-objs :=      halbtc8723b2ant.o       \
+                       halbtcoutsrc.o          \
+                       rtl_btc.o
+
+obj-$(CONFIG_RTLBTCOEXIST) += btcoexist.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/rtlwifi/btcoexist/halbt_precomp.h
new file mode 100644 (file)
index 0000000..d76684e
--- /dev/null
@@ -0,0 +1,75 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
+
+#ifndef        __HALBT_PRECOMP_H__
+#define __HALBT_PRECOMP_H__
+/*************************************************************
+ * include files
+ *************************************************************/
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+
+#include "halbtcoutsrc.h"
+
+#include "halbtc8723b2ant.h"
+
+#define BIT0   0x00000001
+#define BIT1   0x00000002
+#define BIT2   0x00000004
+#define BIT3   0x00000008
+#define BIT4   0x00000010
+#define BIT5   0x00000020
+#define BIT6   0x00000040
+#define BIT7   0x00000080
+#define BIT8   0x00000100
+#define BIT9   0x00000200
+#define BIT10  0x00000400
+#define BIT11  0x00000800
+#define BIT12  0x00001000
+#define BIT13  0x00002000
+#define BIT14  0x00004000
+#define BIT15  0x00008000
+#define BIT16  0x00010000
+#define BIT17  0x00020000
+#define BIT18  0x00040000
+#define BIT19  0x00080000
+#define BIT20  0x00100000
+#define BIT21  0x00200000
+#define BIT22  0x00400000
+#define BIT23  0x00800000
+#define BIT24  0x01000000
+#define BIT25  0x02000000
+#define BIT26  0x04000000
+#define BIT27  0x08000000
+#define BIT28  0x10000000
+#define BIT29  0x20000000
+#define BIT30  0x40000000
+#define BIT31  0x80000000
+
+#endif /* __HALBT_PRECOMP_H__ */
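The BIT0..BIT31 constants above are plain single-bit masks, duplicating what BIT(n) from <linux/bitops.h> already provides; they are presumably kept so the imported vendor coexistence code can be used with minimal editing. A hedged compile-time sanity check, illustrative only and not part of the patch:

#include <linux/bitops.h>
#include <linux/bug.h>

static inline void example_check_bit_macros(void)
{
        /* BITn in halbt_precomp.h is expected to equal BIT(n) */
        BUILD_BUG_ON(BIT14 != BIT(14));
        BUILD_BUG_ON(BIT31 != BIT(31));
}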
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
new file mode 100644 (file)
index 0000000..d916ab9
--- /dev/null
@@ -0,0 +1,3698 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+/***************************************************************
+ * Description:
+ *
+ * This file is for RTL8723B Co-exist mechanism
+ *
+ * History
+ * 2012/11/15 Cosa first check in.
+ *
+ **************************************************************/
+/**************************************************************
+ * include files
+ **************************************************************/
+#include "halbt_precomp.h"
+/**************************************************************
+ * Global variables, these are static variables
+ **************************************************************/
+static struct coex_dm_8723b_2ant glcoex_dm_8723b_2ant;
+static struct coex_dm_8723b_2ant *coex_dm = &glcoex_dm_8723b_2ant;
+static struct coex_sta_8723b_2ant glcoex_sta_8723b_2ant;
+static struct coex_sta_8723b_2ant *coex_sta = &glcoex_sta_8723b_2ant;
+
+static const char *const glbt_info_src_8723b_2ant[] = {
+       "BT Info[wifi fw]",
+       "BT Info[bt rsp]",
+       "BT Info[bt auto report]",
+};
+
+static u32 glcoex_ver_date_8723b_2ant = 20130731;
+static u32 glcoex_ver_8723b_2ant = 0x3b;
+
+/**************************************************************
+ * local function proto type if needed
+ **************************************************************/
+/**************************************************************
+ * local function start with btc8723b2ant_
+ **************************************************************/
+static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+                                    u8 rssi_thresh1)
+{
+       s32 bt_rssi = 0;
+       u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
+
+       bt_rssi = coex_sta->bt_rssi;
+
+       if (level_num == 2) {
+               if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+                   (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+                       if (bt_rssi >= rssi_thresh +
+                                      BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+                               bt_rssi_state = BTC_RSSI_STATE_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "switch to High\n");
+                       } else {
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "stay at Low\n");
+                       }
+               } else {
+                       if (bt_rssi < rssi_thresh) {
+                               bt_rssi_state = BTC_RSSI_STATE_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "switch to Low\n");
+                       } else {
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "stay at High\n");
+                       }
+               }
+       } else if (level_num == 3) {
+               if (rssi_thresh > rssi_thresh1) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                 "[BTCoex], BT Rssi thresh error!!\n");
+                       return coex_sta->pre_bt_rssi_state;
+               }
+
+               if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
+                   (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
+                       if (bt_rssi >= rssi_thresh +
+                                      BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+                               bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "switch to Medium\n");
+                       } else {
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "stay at Low\n");
+                       }
+               } else if ((coex_sta->pre_bt_rssi_state ==
+                                               BTC_RSSI_STATE_MEDIUM) ||
+                          (coex_sta->pre_bt_rssi_state ==
+                                               BTC_RSSI_STATE_STAY_MEDIUM)) {
+                       if (bt_rssi >= rssi_thresh1 +
+                                      BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+                               bt_rssi_state = BTC_RSSI_STATE_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "switch to High\n");
+                       } else if (bt_rssi < rssi_thresh) {
+                               bt_rssi_state = BTC_RSSI_STATE_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "switch to Low\n");
+                       } else {
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "stay at Medium\n");
+                       }
+               } else {
+                       if (bt_rssi < rssi_thresh1) {
+                               bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "switch to Medium\n");
+                       } else {
+                               bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
+                                         "[BTCoex], BT Rssi state "
+                                         "stay at High\n");
+                       }
+               }
+       }
+
+       coex_sta->pre_bt_rssi_state = bt_rssi_state;
+
+       return bt_rssi_state;
+}
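The BT RSSI state machine above adds the BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT tolerance only when climbing out of a LOW (or MEDIUM) state, which gives the thresholds hysteresis so the reported state does not flap while the RSSI hovers near a boundary. A stripped-down sketch of the two-level case, illustrative only and not part of the patch:

static u8 example_two_level_rssi_state(u8 prev, s32 rssi, s32 thresh, s32 tol)
{
        if (prev == BTC_RSSI_STATE_LOW || prev == BTC_RSSI_STATE_STAY_LOW)
                return (rssi >= thresh + tol) ? BTC_RSSI_STATE_HIGH
                                              : BTC_RSSI_STATE_STAY_LOW;

        return (rssi < thresh) ? BTC_RSSI_STATE_LOW
                               : BTC_RSSI_STATE_STAY_HIGH;
}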
+
+static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
+                                      u8 index, u8 level_num,
+                                      u8 rssi_thresh, u8 rssi_thresh1)
+{
+       s32 wifi_rssi = 0;
+       u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
+
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+
+       if (level_num == 2) {
+               if ((coex_sta->pre_wifi_rssi_state[index] ==
+                                               BTC_RSSI_STATE_LOW) ||
+                   (coex_sta->pre_wifi_rssi_state[index] ==
+                                               BTC_RSSI_STATE_STAY_LOW)) {
+                       if (wifi_rssi >= rssi_thresh +
+                                        BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+                               wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "switch to High\n");
+                       } else {
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "stay at Low\n");
+                       }
+               } else {
+                       if (wifi_rssi < rssi_thresh) {
+                               wifi_rssi_state = BTC_RSSI_STATE_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "switch to Low\n");
+                       } else {
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "stay at High\n");
+                       }
+               }
+       } else if (level_num == 3) {
+               if (rssi_thresh > rssi_thresh1) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
+                                 "[BTCoex], wifi RSSI thresh error!!\n");
+                       return coex_sta->pre_wifi_rssi_state[index];
+               }
+
+               if ((coex_sta->pre_wifi_rssi_state[index] ==
+                                               BTC_RSSI_STATE_LOW) ||
+                   (coex_sta->pre_wifi_rssi_state[index] ==
+                                               BTC_RSSI_STATE_STAY_LOW)) {
+                       if (wifi_rssi >= rssi_thresh +
+                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+                               wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "switch to Medium\n");
+                       } else {
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "stay at Low\n");
+                       }
+               } else if ((coex_sta->pre_wifi_rssi_state[index] ==
+                                               BTC_RSSI_STATE_MEDIUM) ||
+                          (coex_sta->pre_wifi_rssi_state[index] ==
+                                               BTC_RSSI_STATE_STAY_MEDIUM)) {
+                       if (wifi_rssi >= rssi_thresh1 +
+                                        BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
+                               wifi_rssi_state = BTC_RSSI_STATE_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "switch to High\n");
+                       } else if (wifi_rssi < rssi_thresh) {
+                               wifi_rssi_state = BTC_RSSI_STATE_LOW;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "switch to Low\n");
+                       } else {
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "stay at Medium\n");
+                       }
+               } else {
+                       if (wifi_rssi < rssi_thresh1) {
+                               wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "switch to Medium\n");
+                       } else {
+                               wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_WIFI_RSSI_STATE,
+                                         "[BTCoex], wifi RSSI state "
+                                         "stay at High\n");
+                       }
+               }
+       }
+
+       coex_sta->pre_wifi_rssi_state[index] = wifi_rssi_state;
+
+       return wifi_rssi_state;
+}
+
+static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
+{
+       u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
+       u32 reg_hp_tx = 0, reg_hp_rx = 0;
+       u32 reg_lp_tx = 0, reg_lp_rx = 0;
+
+       reg_hp_txrx = 0x770;
+       reg_lp_txrx = 0x774;
+
+       u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
+       reg_hp_tx = u32tmp & MASKLWORD;
+       reg_hp_rx = (u32tmp & MASKHWORD) >> 16;
+
+       u32tmp = btcoexist->btc_read_4byte(btcoexist, reg_lp_txrx);
+       reg_lp_tx = u32tmp & MASKLWORD;
+       reg_lp_rx = (u32tmp & MASKHWORD) >> 16;
+
+       coex_sta->high_priority_tx = reg_hp_tx;
+       coex_sta->high_priority_rx = reg_hp_rx;
+       coex_sta->low_priority_tx = reg_lp_tx;
+       coex_sta->low_priority_rx = reg_lp_rx;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+                 "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+                 reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
+                 "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+                 reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+
+       /* reset counter */
+       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+}
+
+static bool btc8723b2ant_is_wifi_status_changed(struct btc_coexist *btcoexist)
+{
+       static bool pre_wifi_busy;
+       static bool pre_under_4way;
+       static bool pre_bt_hs_on;
+       bool wifi_busy = false, under_4way = false, bt_hs_on = false;
+       bool wifi_connected = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+                          &under_4way);
+
+       if (wifi_connected) {
+               if (wifi_busy != pre_wifi_busy) {
+                       pre_wifi_busy = wifi_busy;
+                       return true;
+               }
+
+               if (under_4way != pre_under_4way) {
+                       pre_under_4way = under_4way;
+                       return true;
+               }
+
+               if (bt_hs_on != pre_bt_hs_on) {
+                       pre_bt_hs_on = bt_hs_on;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
+{
+       /*struct btc_stack_info *stack_info = &btcoexist->stack_info;*/
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       bool bt_hs_on = false;
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1) /* profile from bt patch */
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+       bt_link_info->bt_link_exist = coex_sta->bt_link_exist;
+       bt_link_info->sco_exist = coex_sta->sco_exist;
+       bt_link_info->a2dp_exist = coex_sta->a2dp_exist;
+       bt_link_info->pan_exist = coex_sta->pan_exist;
+       bt_link_info->hid_exist = coex_sta->hid_exist;
+
+       /* work around for HS mode. */
+       if (bt_hs_on) {
+               bt_link_info->pan_exist = true;
+               bt_link_info->bt_link_exist = true;
+       }
+#else  /* profile from bt stack */
+       bt_link_info->bt_link_exist = stack_info->bt_link_exist;
+       bt_link_info->sco_exist = stack_info->sco_exist;
+       bt_link_info->a2dp_exist = stack_info->a2dp_exist;
+       bt_link_info->pan_exist = stack_info->pan_exist;
+       bt_link_info->hid_exist = stack_info->hid_exist;
+
+       /*for win-8 stack HID report error*/
+       if (!stack_info->hid_exist)
+               stack_info->hid_exist = coex_sta->hid_exist;
+       /*sync  BTInfo with BT firmware and stack*/
+       /* when stack HID report error, here we use the info from bt fw.*/
+       if (!stack_info->bt_link_exist)
+               stack_info->bt_link_exist = coex_sta->bt_link_exist;
+#endif
+       /* check if Sco only */
+       if (bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+           !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+               bt_link_info->sco_only = true;
+       else
+               bt_link_info->sco_only = false;
+
+       /* check if A2dp only */
+       if (!bt_link_info->sco_exist && bt_link_info->a2dp_exist &&
+           !bt_link_info->pan_exist && !bt_link_info->hid_exist)
+               bt_link_info->a2dp_only = true;
+       else
+               bt_link_info->a2dp_only = false;
+
+       /* check if Pan only */
+       if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+           bt_link_info->pan_exist && !bt_link_info->hid_exist)
+               bt_link_info->pan_only = true;
+       else
+               bt_link_info->pan_only = false;
+
+       /* check if Hid only */
+       if (!bt_link_info->sco_exist && !bt_link_info->a2dp_exist &&
+           !bt_link_info->pan_exist && bt_link_info->hid_exist)
+               bt_link_info->hid_only = true;
+       else
+               bt_link_info->hid_only = false;
+}
+
+static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
+{
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       bool bt_hs_on = false;
+       u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
+       u8 num_of_diff_profile = 0;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+
+       if (!bt_link_info->bt_link_exist) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], No BT link exists!!!\n");
+               return algorithm;
+       }
+
+       if (bt_link_info->sco_exist)
+               num_of_diff_profile++;
+       if (bt_link_info->hid_exist)
+               num_of_diff_profile++;
+       if (bt_link_info->pan_exist)
+               num_of_diff_profile++;
+       if (bt_link_info->a2dp_exist)
+               num_of_diff_profile++;
+
+       if (num_of_diff_profile == 1) {
+               if (bt_link_info->sco_exist) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], SCO only\n");
+                       algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+               } else {
+                       if (bt_link_info->hid_exist) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], HID only\n");
+                               algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+                       } else if (bt_link_info->a2dp_exist) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], A2DP only\n");
+                               algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
+                       } else if (bt_link_info->pan_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], PAN(HS) only\n");
+                                       algorithm =
+                                               BT_8723B_2ANT_COEX_ALGO_PANHS;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], PAN(EDR) only\n");
+                                       algorithm =
+                                               BT_8723B_2ANT_COEX_ALGO_PANEDR;
+                               }
+                       }
+               }
+       } else if (num_of_diff_profile == 2) {
+               if (bt_link_info->sco_exist) {
+                       if (bt_link_info->hid_exist) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], SCO + HID\n");
+                               algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                       } else if (bt_link_info->a2dp_exist) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], SCO + A2DP ==> SCO\n");
+                               algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                       } else if (bt_link_info->pan_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + PAN(HS)\n");
+                                       algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + PAN(EDR)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               }
+                       }
+               } else {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->a2dp_exist) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], HID + A2DP\n");
+                               algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+                       } else if (bt_link_info->hid_exist &&
+                                  bt_link_info->pan_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], HID + PAN(HS)\n");
+                                       algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], HID + PAN(EDR)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               }
+                       } else if (bt_link_info->pan_exist &&
+                                  bt_link_info->a2dp_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], A2DP + PAN(HS)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex],A2DP + PAN(EDR)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
+                               }
+                       }
+               }
+       } else if (num_of_diff_profile == 3) {
+               if (bt_link_info->sco_exist) {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->a2dp_exist) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], SCO + HID + A2DP"
+                                         " ==> HID\n");
+                               algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                       } else if (bt_link_info->hid_exist &&
+                                  bt_link_info->pan_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + HID + "
+                                                 "PAN(HS)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + HID + "
+                                                 "PAN(EDR)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               }
+                       } else if (bt_link_info->pan_exist &&
+                                  bt_link_info->a2dp_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + A2DP + "
+                                                 "PAN(HS)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + A2DP + "
+                                                 "PAN(EDR) ==> HID\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               }
+                       }
+               } else {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->pan_exist &&
+                           bt_link_info->a2dp_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], HID + A2DP + "
+                                                 "PAN(HS)\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], HID + A2DP + "
+                                                 "PAN(EDR)\n");
+                                       algorithm =
+                                       BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
+                               }
+                       }
+               }
+       } else if (num_of_diff_profile >= 3) {
+               if (bt_link_info->sco_exist) {
+                       if (bt_link_info->hid_exist &&
+                           bt_link_info->pan_exist &&
+                           bt_link_info->a2dp_exist) {
+                               if (bt_hs_on) {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], Error!!! SCO + HID"
+                                                 " + A2DP + PAN(HS)\n");
+                               } else {
+                                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                                 "[BTCoex], SCO + HID + A2DP +"
+                                                 " PAN(EDR)==>PAN(EDR)+HID\n");
+                                       algorithm =
+                                           BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
+                               }
+                       }
+               }
+       }
+       return algorithm;
+}
+
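+/* Decide whether BT should lower its Tx power: only when wifi is
+ * connected, and either the BT HS RSSI is above 37 (HS mode) or the
+ * BT RSSI state is high/stay-high.
+ */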
+static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
+{
+       bool ret = false;
+       bool bt_hs_on = false, wifi_connected = false;
+       s32 bt_hs_rssi = 0;
+       u8 bt_rssi_state;
+
+       if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on))
+               return false;
+       if (!btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                               &wifi_connected))
+               return false;
+       if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
+               return false;
+
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       if (wifi_connected) {
+               if (bt_hs_on) {
+                       if (bt_hs_rssi > 37) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                                         "[BTCoex], Need to decrease bt "
+                                         "power for HS mode!!\n");
+                               ret = true;
+                       }
+               } else {
+                       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                                         "[BTCoex], Need to decrease bt "
+                                         "power for Wifi is connected!!\n");
+                               ret = true;
+                       }
+               }
+       }
+
+       return ret;
+}
+
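+/* H2C command 0x64: program the firmware DAC swing level. */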
+static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
+                                               u8 dac_swing_lvl)
+{
+       u8 h2c_parameter[1] = {0};
+
+       /* There are several types of DAC swing:
+        * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
+        */
+       h2c_parameter[0] = dac_swing_lvl;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+
+       btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
+}
+
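+/* H2C command 0x62: BIT1 asks the firmware to decrease BT Tx power. */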
+static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
+                                          bool dec_bt_pwr)
+{
+       u8 h2c_parameter[1] = {0};
+
+       h2c_parameter[0] = 0;
+
+       if (dec_bt_pwr)
+               h2c_parameter[0] |= BIT1;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+                 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+
+       btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
+}
+
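+/* Cached wrapper: unless force_exec is set, skip the H2C when the
+ * requested dec_bt_pwr state equals the previously programmed one.
+ */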
+static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
+                                   bool force_exec, bool dec_bt_pwr)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                 "[BTCoex], %s Dec BT power = %s\n",
+                 (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+       coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+                         coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+
+               if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
+                       return;
+       }
+       btc8723b2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
+
+       coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
+}
+
+static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
+                                         bool force_exec, u8 fw_dac_swing_lvl)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                 "[BTCoex], %s set FW Dac Swing level = %d\n",
+                 (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+       coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], preFwDacSwingLvl=%d, "
+                         "curFwDacSwingLvl=%d\n",
+                         coex_dm->pre_fw_dac_swing_lvl,
+                         coex_dm->cur_fw_dac_swing_lvl);
+
+               if (coex_dm->pre_fw_dac_swing_lvl ==
+                  coex_dm->cur_fw_dac_swing_lvl)
+                       return;
+       }
+
+       btc8723b2ant_set_fw_dac_swing_level(btcoexist,
+                                           coex_dm->cur_fw_dac_swing_lvl);
+       coex_dm->pre_fw_dac_swing_lvl = coex_dm->cur_fw_dac_swing_lvl;
+}
+
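+/* Shrink the RF Rx LPF corner by writing RF register 0x1e, or restore
+ * the value saved in coex_dm->bt_rf0x1e_backup once initialization is done.
+ */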
+static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
+                                                bool rx_rf_shrink_on)
+{
+       if (rx_rf_shrink_on) {
+               /* Shrink RF Rx LPF corner */
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+                                         0xfffff, 0xffffc);
+       } else {
+               /* Resume RF Rx LPF corner */
+               /* After init, restore from coex_dm->bt_rf0x1e_backup */
+               if (btcoexist->initilized) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                                 "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
+                                                 0xfffff,
+                                                 coex_dm->bt_rf0x1e_backup);
+               }
+       }
+}
+
+static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool rx_rf_shrink_on)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+                 "[BTCoex], %s turn Rx RF Shrink = %s\n",
+                 (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+                 "ON" : "OFF"));
+       coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+                         "[BTCoex], bPreRfRxLpfShrink=%d, "
+                         "bCurRfRxLpfShrink=%d\n",
+                         coex_dm->pre_rf_rx_lpf_shrink,
+                         coex_dm->cur_rf_rx_lpf_shrink);
+
+               if (coex_dm->pre_rf_rx_lpf_shrink ==
+                   coex_dm->cur_rf_rx_lpf_shrink)
+                       return;
+       }
+       btc8723b2ant_set_sw_rf_rx_lpf_corner(btcoexist,
+                                            coex_dm->cur_rf_rx_lpf_shrink);
+
+       coex_dm->pre_rf_rx_lpf_shrink = coex_dm->cur_rf_rx_lpf_shrink;
+}
+
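+/* H2C command 0x69, opcode 0x6 (Retry_Penalty): when low_penalty_ra is
+ * set, request a lower retry penalty for MCS7/6/5 (OFDM 54/48/36).
+ */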
+static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
+                                       bool low_penalty_ra)
+{
+       u8 h2c_parameter[6] = {0};
+
+       h2c_parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */
+
+       if (low_penalty_ra) {
+               h2c_parameter[1] |= BIT0;
+               /* normal rate except MCS7/6/5, OFDM54/48/36 */
+               h2c_parameter[2] = 0x00;
+               h2c_parameter[3] = 0xf7;  /* MCS7 or OFDM54 */
+               h2c_parameter[4] = 0xf8;  /* MCS6 or OFDM48 */
+               h2c_parameter[5] = 0xf9;  /* MCS5 or OFDM36 */
+       }
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], set WiFi Low-Penalty Retry: %s\n",
+                 (low_penalty_ra ? "ON!!" : "OFF!!"));
+
+       btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
+}
+
+static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
+                                       bool force_exec, bool low_penalty_ra)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+                 "[BTCoex], %s turn LowPenaltyRA = %s\n",
+                 (force_exec ? "force to" : ""), (low_penalty_ra ?
+                 "ON" : "OFF"));
+       coex_dm->cur_low_penalty_ra = low_penalty_ra;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+                         "[BTCoex], bPreLowPenaltyRa=%d, "
+                         "bCurLowPenaltyRa=%d\n",
+                         coex_dm->pre_low_penalty_ra,
+                         coex_dm->cur_low_penalty_ra);
+
+               if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
+                       return;
+       }
+       btc8723b_set_penalty_txrate(btcoexist, coex_dm->cur_low_penalty_ra);
+
+       coex_dm->pre_low_penalty_ra = coex_dm->cur_low_penalty_ra;
+}
+
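+/* Write the software DAC swing level into bits 0x3e of register 0x883. */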
+static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
+                                          u32 level)
+{
+       u8 val = (u8)level;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                 "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
+}
+
+static void btc8723b2ant_set_sw_fulltime_dac_swing(struct btc_coexist *btcoex,
+                                                  bool sw_dac_swing_on,
+                                                  u32 sw_dac_swing_lvl)
+{
+       if (sw_dac_swing_on)
+               btc8723b2ant_set_dac_swing_reg(btcoex, sw_dac_swing_lvl);
+       else
+               btc8723b2ant_set_dac_swing_reg(btcoex, 0x18);
+}
+
+static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool dac_swing_on,
+                                  u32 dac_swing_lvl)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+                 "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+                 (force_exec ? "force to" : ""),
+                 (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+       coex_dm->cur_dac_swing_on = dac_swing_on;
+       coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+                         "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
+                         " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+                         coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
+                         coex_dm->cur_dac_swing_on,
+                         coex_dm->cur_dac_swing_lvl);
+
+               if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
+                   (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
+                       return;
+       }
+       mdelay(30);
+       btc8723b2ant_set_sw_fulltime_dac_swing(btcoexist, dac_swing_on,
+                                              dac_swing_lvl);
+
+       coex_dm->pre_dac_swing_on = coex_dm->cur_dac_swing_on;
+       coex_dm->pre_dac_swing_lvl = coex_dm->cur_dac_swing_lvl;
+}
+
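+/* Switch between the coex and default BB AGC gain tables (0xc78) and the
+ * matching RF gain entries (RF registers 0x3b and 0x40), then report the
+ * RSSI adjustment value (8 when the table is on, 0 otherwise) to wifi.
+ */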
+static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
+                                      bool agc_table_en)
+{
+       u8 rssi_adjust_val = 0;
+
+       /*  BB AGC Gain Table */
+       if (agc_table_en) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], BB Agc Table On!\n");
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6b1D0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6a1E0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
+       } else {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], BB Agc Table Off!\n");
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa71D0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa61E0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa51F0001);
+               btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa4200001);
+       }
+
+       /* RF Gain */
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
+       if (agc_table_en) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], Agc Table On!\n");
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+                                         0xfffff, 0x38fff);
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+                                         0xfffff, 0x38ffe);
+       } else {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], Agc Table Off!\n");
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+                                         0xfffff, 0x380c3);
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
+                                         0xfffff, 0x28ce6);
+       }
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
+
+       if (agc_table_en) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], Agc Table On!\n");
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+                                         0xfffff, 0x38fff);
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+                                         0xfffff, 0x38ffe);
+       } else {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                         "[BTCoex], Agc Table Off!\n");
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+                                         0xfffff, 0x380c3);
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
+                                         0xfffff, 0x28ce6);
+       }
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x0);
+
+       /* set rssiAdjustVal for wifi module. */
+       if (agc_table_en)
+               rssi_adjust_val = 8;
+       btcoexist->btc_set(btcoexist, BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+                          &rssi_adjust_val);
+}
+
+static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
+                                  bool force_exec, bool agc_table_en)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+                 "[BTCoex], %s %s Agc Table\n",
+                 (force_exec ? "force to" : ""),
+                 (agc_table_en ? "Enable" : "Disable"));
+       coex_dm->cur_agc_table_en = agc_table_en;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+                         "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+                         coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+
+               if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
+                       return;
+       }
+       btc8723b2ant_set_agc_table(btcoexist, agc_table_en);
+
+       coex_dm->pre_agc_table_en = coex_dm->cur_agc_table_en;
+}
+
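+/* Write the four coexistence table registers 0x6c0/0x6c4/0x6c8/0x6cc. */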
+static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
+                                       u32 val0x6c0, u32 val0x6c4,
+                                       u32 val0x6c8, u8 val0x6cc)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                 "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+       btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                 "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+       btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                 "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+       btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
+                 "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+       btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
+}
+
+static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
+                                   bool force_exec, u32 val0x6c0,
+                                   u32 val0x6c4, u32 val0x6c8,
+                                   u8 val0x6cc)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
+                 "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
+                 " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+                 (force_exec ? "force to" : ""), val0x6c0,
+                 val0x6c4, val0x6c8, val0x6cc);
+       coex_dm->cur_val0x6c0 = val0x6c0;
+       coex_dm->cur_val0x6c4 = val0x6c4;
+       coex_dm->cur_val0x6c8 = val0x6c8;
+       coex_dm->cur_val0x6cc = val0x6cc;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+                         "[BTCoex], preVal0x6c0=0x%x, "
+                         "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
+                         "preVal0x6cc=0x%x !!\n",
+                         coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+                         coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
+                         "[BTCoex], curVal0x6c0=0x%x, "
+                         "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
+                         "curVal0x6cc=0x%x !!\n",
+                         coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+                         coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+
+               if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
+                   (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
+                   (coex_dm->pre_val0x6c8 == coex_dm->cur_val0x6c8) &&
+                   (coex_dm->pre_val0x6cc == coex_dm->cur_val0x6cc))
+                       return;
+       }
+       btc8723b2ant_set_coex_table(btcoexist, val0x6c0, val0x6c4,
+                                   val0x6c8, val0x6cc);
+
+       coex_dm->pre_val0x6c0 = coex_dm->cur_val0x6c0;
+       coex_dm->pre_val0x6c4 = coex_dm->cur_val0x6c4;
+       coex_dm->pre_val0x6c8 = coex_dm->cur_val0x6c8;
+       coex_dm->pre_val0x6cc = coex_dm->cur_val0x6cc;
+}
+
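+/* Select one of the predefined coexistence table presets (type 0-12). */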
+static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
+                                  bool force_exec, u8 type)
+{
+       switch (type) {
+       case 0:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x55555555, 0xffff, 0x3);
+               break;
+       case 1:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55555555,
+                                       0x5afa5afa, 0xffff, 0x3);
+               break;
+       case 2:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5a5a5a5a,
+                                       0x5a5a5a5a, 0xffff, 0x3);
+               break;
+       case 3:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0xaaaaaaaa,
+                                       0xaaaaaaaa, 0xffff, 0x3);
+               break;
+       case 4:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0xffffffff,
+                                       0xffffffff, 0xffff, 0x3);
+               break;
+       case 5:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5fff5fff,
+                                       0x5fff5fff, 0xffff, 0x3);
+               break;
+       case 6:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5a5a5a5a, 0xffff, 0x3);
+               break;
+       case 7:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5afa5afa, 0xffff, 0x3);
+               break;
+       case 8:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x5aea5aea,
+                                       0x5aea5aea, 0xffff, 0x3);
+               break;
+       case 9:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5aea5aea, 0xffff, 0x3);
+               break;
+       case 10:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5aff5aff, 0xffff, 0x3);
+               break;
+       case 11:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5a5f5a5f, 0xffff, 0x3);
+               break;
+       case 12:
+               btc8723b2ant_coex_table(btcoexist, force_exec, 0x55ff55ff,
+                                       0x5f5f5f5f, 0xffff, 0x3);
+               break;
+       default:
+               break;
+       }
+}
+
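+/* H2C command 0x63: BIT0 tells the firmware that BT should ignore
+ * WLAN activity.
+ */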
+static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                               bool enable)
+{
+       u8 h2c_parameter[1] = {0};
+
+       if (enable)
+               h2c_parameter[0] |= BIT0;/* function enable*/
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], set FW for BT Ignore Wlan_Act, "
+                 "FW write 0x63=0x%x\n", h2c_parameter[0]);
+
+       btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
+}
+
+static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
+                                        bool force_exec, bool enable)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                 "[BTCoex], %s turn Ignore WlanAct %s\n",
+                 (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       coex_dm->cur_ignore_wlan_act = enable;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], bPreIgnoreWlanAct = %d, "
+                         "bCurIgnoreWlanAct = %d!!\n",
+                         coex_dm->pre_ignore_wlan_act,
+                         coex_dm->cur_ignore_wlan_act);
+
+               if (coex_dm->pre_ignore_wlan_act ==
+                   coex_dm->cur_ignore_wlan_act)
+                       return;
+       }
+       btc8723b2ant_set_fw_ignore_wlan_act(btcoexist, enable);
+
+       coex_dm->pre_ignore_wlan_act = coex_dm->cur_ignore_wlan_act;
+}
+
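+/* H2C command 0x60: send the 5-byte PS TDMA parameter set to the
+ * firmware and mirror it in coex_dm->ps_tdma_para[].
+ */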
+static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
+                                       u8 byte2, u8 byte3, u8 byte4, u8 byte5)
+{
+       u8 h2c_parameter[5];
+
+       h2c_parameter[0] = byte1;
+       h2c_parameter[1] = byte2;
+       h2c_parameter[2] = byte3;
+       h2c_parameter[3] = byte4;
+       h2c_parameter[4] = byte5;
+
+       coex_dm->ps_tdma_para[0] = byte1;
+       coex_dm->ps_tdma_para[1] = byte2;
+       coex_dm->ps_tdma_para[2] = byte3;
+       coex_dm->ps_tdma_para[3] = byte4;
+       coex_dm->ps_tdma_para[4] = byte5;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+                 h2c_parameter[0],
+                 h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+                 h2c_parameter[3] << 8 | h2c_parameter[4]);
+
+       btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
+}
+
+static void btc8723b2ant_sw_mechanism1(struct btc_coexist *btcoexist,
+                                      bool shrink_rx_lpf, bool low_penalty_ra,
+                                      bool limited_dig, bool bt_lna_constrain)
+{
+       btc8723b2ant_rf_shrink(btcoexist, NORMAL_EXEC, shrink_rx_lpf);
+       btc8723b2ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
+}
+
+static void btc8723b2ant_sw_mechanism2(struct btc_coexist *btcoexist,
+                                      bool agc_table_shift, bool adc_backoff,
+                                      bool sw_dac_swing, u32 dac_swing_lvl)
+{
+       btc8723b2ant_agc_table(btcoexist, NORMAL_EXEC, agc_table_shift);
+       btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, sw_dac_swing,
+                              dac_swing_lvl);
+}
+
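+/* Cached PS TDMA control: map the requested type to one of the predefined
+ * 5-byte parameter sets (or a TDMA-off pattern) and skip the H2C when
+ * neither the on/off state nor the type changed and force_exec is false.
+ */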
+static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
+                            bool turn_on, u8 type)
+{
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                 "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+                 (force_exec ? "force to" : ""),
+                 (turn_on ? "ON" : "OFF"), type);
+       coex_dm->cur_ps_tdma_on = turn_on;
+       coex_dm->cur_ps_tdma = type;
+
+       if (!force_exec) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+                         coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+                         coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+
+               if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
+                   (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
+                       return;
+       }
+       if (turn_on) {
+               switch (type) {
+               case 1:
+               default:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+                                                   0x1a, 0xe1, 0x90);
+                       break;
+               case 2:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+                                                   0x12, 0xe1, 0x90);
+                       break;
+               case 3:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0xf1, 0x90);
+                       break;
+               case 4:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
+                                                   0x03, 0xf1, 0x90);
+                       break;
+               case 5:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+                                                   0x1a, 0x60, 0x90);
+                       break;
+               case 6:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+                                                   0x12, 0x60, 0x90);
+                       break;
+               case 7:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                                                   0x3, 0x70, 0x90);
+                       break;
+               case 8:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x10,
+                                                   0x3, 0x70, 0x90);
+                       break;
+               case 9:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+                                                   0x1a, 0xe1, 0x90);
+                       break;
+               case 10:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+                                                   0x12, 0xe1, 0x90);
+                       break;
+               case 11:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+                                                   0xa, 0xe1, 0x90);
+                       break;
+               case 12:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+                                                   0x5, 0xe1, 0x90);
+                       break;
+               case 13:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+                                                   0x1a, 0x60, 0x90);
+                       break;
+               case 14:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x12,
+                                                   0x12, 0x60, 0x90);
+                       break;
+               case 15:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0xa,
+                                                   0xa, 0x60, 0x90);
+                       break;
+               case 16:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+                                                   0x5, 0x60, 0x90);
+                       break;
+               case 17:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xa3, 0x2f,
+                                                   0x2f, 0x60, 0x90);
+                       break;
+               case 18:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x5,
+                                                   0x5, 0xe1, 0x90);
+                       break;
+               case 19:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+                                                   0x25, 0xe1, 0x90);
+                       break;
+               case 20:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x25,
+                                                   0x25, 0x60, 0x90);
+                       break;
+               case 21:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x15,
+                                                   0x03, 0x70, 0x90);
+                       break;
+               case 71:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1a,
+                                                   0x1a, 0xe1, 0x90);
+                       break;
+               }
+       } else {
+               /* disable PS tdma */
+               switch (type) {
+               case 0:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x40, 0x0);
+                       break;
+               case 1:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x48, 0x0);
+                       break;
+               default:
+                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0x0, 0x0, 0x0,
+                                                   0x40, 0x0);
+                       break;
+               }
+       }
+
+       /* update pre state */
+       coex_dm->pre_ps_tdma_on = coex_dm->cur_ps_tdma_on;
+       coex_dm->pre_ps_tdma = coex_dm->cur_ps_tdma;
+}
+
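+/* Turn every coex mechanism off: firmware (PS TDMA, DAC swing level,
+ * BT power), software (RF shrink, low-penalty RA, AGC/DAC swing) and
+ * hardware (RF 0x1 cleared, coex table type 0).
+ */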
+static void btc8723b2ant_coex_alloff(struct btc_coexist *btcoexist)
+{
+       /* fw all off */
+       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       /* sw all off */
+       btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+       /* hw all off */
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+}
+
+static void btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+       /* force to reset coex mechanism */
+
+       btc8723b2ant_ps_tdma(btcoexist, FORCE_EXEC, false, 1);
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+       btc8723b2ant_dec_bt_pwr(btcoexist, FORCE_EXEC, false);
+
+       btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+}
+
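+/* During BT inquiry/page: disable wifi low power, use coex table 7 with
+ * PS TDMA type 3 when wifi is connected (table 0 with TDMA off otherwise),
+ * then back up register 0x948 for later recovery and write 0x280 to it.
+ */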
+static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
+{
+       bool wifi_connected = false;
+       bool low_pwr_disable = true;
+
+       btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                          &low_pwr_disable);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+
+       if (wifi_connected) {
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+       } else {
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+       }
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, FORCE_EXEC, 6);
+       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btc8723b2ant_sw_mechanism1(btcoexist, false, false, false, false);
+       btc8723b2ant_sw_mechanism2(btcoexist, false, false, false, 0x18);
+
+       coex_dm->need_recover_0x948 = true;
+       coex_dm->backup_0x948 = btcoexist->btc_read_2byte(btcoexist, 0x948);
+
+       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+}
+
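+/* Handle the "common" cases (wifi not connected, or one side idle) with a
+ * fixed configuration and return true; return false when both wifi and BT
+ * are busy, or when BT HS operation is detected in the connected cases.
+ */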
+static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
+{
+       bool common = false, wifi_connected = false;
+       bool wifi_busy = false;
+       bool bt_hs_on = false, low_pwr_disable = false;
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                          &wifi_connected);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+
+       if (!wifi_connected) {
+               low_pwr_disable = false;
+               btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
+                                  &low_pwr_disable);
+
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], Wifi non-connected idle!!\n");
+
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+                                         0x0);
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+               btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+               btc8723b2ant_sw_mechanism1(btcoexist, false, false, false,
+                                          false);
+               btc8723b2ant_sw_mechanism2(btcoexist, false, false, false,
+                                          0x18);
+
+               common = true;
+       } else {
+               if (BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+                   coex_dm->bt_status) {
+                       low_pwr_disable = false;
+                       btcoexist->btc_set(btcoexist,
+                                          BTC_SET_ACT_DISABLE_LOW_POWER,
+                                          &low_pwr_disable);
+
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Wifi connected + "
+                                 "BT non connected-idle!!\n");
+
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+                                                 0xfffff, 0x0);
+                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+                       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+                                                     0xb);
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+                                               false);
+
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+
+                       common = true;
+               } else if (BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE ==
+                          coex_dm->bt_status) {
+                       low_pwr_disable = true;
+                       btcoexist->btc_set(btcoexist,
+                                          BTC_SET_ACT_DISABLE_LOW_POWER,
+                                          &low_pwr_disable);
+
+                       if (bt_hs_on)
+                               return false;
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Wifi connected + "
+                                 "BT connected-idle!!\n");
+
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+                                                 0xfffff, 0x0);
+                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 0);
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+                       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+                                                     0xb);
+                       btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC,
+                                               false);
+
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+
+                       common = true;
+               } else {
+                       low_pwr_disable = true;
+                       btcoexist->btc_set(btcoexist,
+                                          BTC_SET_ACT_DISABLE_LOW_POWER,
+                                          &low_pwr_disable);
+
+                       if (wifi_busy) {
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], Wifi Connected-Busy + "
+                                         "BT Busy!!\n");
+                               common = false;
+                       } else {
+                               if (bt_hs_on)
+                                       return false;
+
+                               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                         "[BTCoex], Wifi Connected-Idle + "
+                                         "BT Busy!!\n");
+
+                               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
+                                                         0x1, 0xfffff, 0x0);
+                               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC,
+                                                      7);
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 21);
+                               btc8723b2ant_fw_dac_swing_lvl(btcoexist,
+                                                             NORMAL_EXEC,
+                                                             0xb);
+                               if (btc8723b_need_dec_pwr(btcoexist))
+                                       btc8723b2ant_dec_bt_pwr(btcoexist,
+                                                               NORMAL_EXEC,
+                                                               true);
+                               else
+                                       btc8723b2ant_dec_bt_pwr(btcoexist,
+                                                               NORMAL_EXEC,
+                                                               false);
+                               btc8723b2ant_sw_mechanism1(btcoexist, false,
+                                                          false, false,
+                                                          false);
+                               btc8723b2ant_sw_mechanism2(btcoexist, false,
+                                                          false, false,
+                                                          0x18);
+                               common = true;
+                       }
+               }
+       }
+
+       return common;
+}
+
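+/* PS TDMA adjustment for max interval == 1: first map the current TDMA
+ * type into the interval-1 set for the given tx_pause state, then step to
+ * the neighbouring type when result is -1 or +1.
+ */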
+static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
+                         s32 result)
+{
+       /* Set PS TDMA for max interval == 1 */
+       if (tx_pause) {
+               BTC_PRINT(BTC_MSG_ALGORITHM,
+                         ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], TxPause = 1\n");
+
+               if (coex_dm->cur_ps_tdma == 71) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 5);
+                       coex_dm->tdma_adj_type = 5;
+               } else if (coex_dm->cur_ps_tdma == 1) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 5);
+                       coex_dm->tdma_adj_type = 5;
+               } else if (coex_dm->cur_ps_tdma == 2) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 6);
+                       coex_dm->tdma_adj_type = 6;
+               } else if (coex_dm->cur_ps_tdma == 3) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 7);
+                       coex_dm->tdma_adj_type = 7;
+               } else if (coex_dm->cur_ps_tdma == 4) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 8);
+                       coex_dm->tdma_adj_type = 8;
+               } else if (coex_dm->cur_ps_tdma == 9) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 13);
+                       coex_dm->tdma_adj_type = 13;
+               } else if (coex_dm->cur_ps_tdma == 10) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 14);
+                       coex_dm->tdma_adj_type = 14;
+               } else if (coex_dm->cur_ps_tdma == 11) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 15);
+                       coex_dm->tdma_adj_type = 15;
+               } else if (coex_dm->cur_ps_tdma == 12) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                            true, 16);
+                       coex_dm->tdma_adj_type = 16;
+               }
+
+               if (result == -1) {
+                       if (coex_dm->cur_ps_tdma == 5) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 6);
+                               coex_dm->tdma_adj_type = 6;
+                       } else if (coex_dm->cur_ps_tdma == 6) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 7) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 8);
+                               coex_dm->tdma_adj_type = 8;
+                       } else if (coex_dm->cur_ps_tdma == 13) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 14);
+                               coex_dm->tdma_adj_type = 14;
+                       } else if (coex_dm->cur_ps_tdma == 14) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 15) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 16);
+                               coex_dm->tdma_adj_type = 16;
+                       }
+               }  else if (result == 1) {
+                       if (coex_dm->cur_ps_tdma == 8) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 7) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 6);
+                               coex_dm->tdma_adj_type = 6;
+                       } else if (coex_dm->cur_ps_tdma == 6) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 5);
+                               coex_dm->tdma_adj_type = 5;
+                       } else if (coex_dm->cur_ps_tdma == 16) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 15) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 14);
+                               coex_dm->tdma_adj_type = 14;
+                       } else if (coex_dm->cur_ps_tdma == 14) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 13);
+                               coex_dm->tdma_adj_type = 13;
+                       }
+               }
+       } else {
+               BTC_PRINT(BTC_MSG_ALGORITHM,
+                         ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], TxPause = 0\n");
+               if (coex_dm->cur_ps_tdma == 5) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
+                       coex_dm->tdma_adj_type = 71;
+               } else if (coex_dm->cur_ps_tdma == 6) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+                       coex_dm->tdma_adj_type = 2;
+               } else if (coex_dm->cur_ps_tdma == 7) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+                       coex_dm->tdma_adj_type = 3;
+               } else if (coex_dm->cur_ps_tdma == 8) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+                       coex_dm->tdma_adj_type = 4;
+               } else if (coex_dm->cur_ps_tdma == 13) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+                       coex_dm->tdma_adj_type = 9;
+               } else if (coex_dm->cur_ps_tdma == 14) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+                       coex_dm->tdma_adj_type = 10;
+               } else if (coex_dm->cur_ps_tdma == 15) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+                       coex_dm->tdma_adj_type = 11;
+               } else if (coex_dm->cur_ps_tdma == 16) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+                       coex_dm->tdma_adj_type = 12;
+               }
+
+               if (result == -1) {
+                       if (coex_dm->cur_ps_tdma == 71) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 1);
+                               coex_dm->tdma_adj_type = 1;
+                       } else if (coex_dm->cur_ps_tdma == 1) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 2);
+                               coex_dm->tdma_adj_type = 2;
+                       } else if (coex_dm->cur_ps_tdma == 2) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                       } else if (coex_dm->cur_ps_tdma == 3) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 4);
+                               coex_dm->tdma_adj_type = 4;
+                       } else if (coex_dm->cur_ps_tdma == 9) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 10);
+                               coex_dm->tdma_adj_type = 10;
+                       } else if (coex_dm->cur_ps_tdma == 10) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                       } else if (coex_dm->cur_ps_tdma == 11) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 12);
+                               coex_dm->tdma_adj_type = 12;
+                       }
+               }  else if (result == 1) {
+                       int tmp = coex_dm->cur_ps_tdma;
+
+                       switch (tmp) {
+                       case 4:
+                       case 3:
+                       case 2:
+                       case 12:
+                       case 11:
+                       case 10:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, tmp - 1);
+                               coex_dm->tdma_adj_type = tmp - 1;
+                               break;
+                       case 1:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 71);
+                               coex_dm->tdma_adj_type = 71;
+                               break;
+                       }
+               }
+       }
+}
+
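+/* As set_tdma_int1(), but for max interval == 2. */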
+static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
+                         s32 result)
+{
+       /* Set PS TDMA for max interval == 2 */
+       if (tx_pause) {
+               BTC_PRINT(BTC_MSG_ALGORITHM,
+                         ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], TxPause = 1\n");
+               if (coex_dm->cur_ps_tdma == 1) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+                       coex_dm->tdma_adj_type = 6;
+               } else if (coex_dm->cur_ps_tdma == 2) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
+                       coex_dm->tdma_adj_type = 6;
+               } else if (coex_dm->cur_ps_tdma == 3) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+                       coex_dm->tdma_adj_type = 7;
+               } else if (coex_dm->cur_ps_tdma == 4) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+                       coex_dm->tdma_adj_type = 8;
+               } else if (coex_dm->cur_ps_tdma == 9) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+                       coex_dm->tdma_adj_type = 14;
+               } else if (coex_dm->cur_ps_tdma == 10) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+                       coex_dm->tdma_adj_type = 14;
+               } else if (coex_dm->cur_ps_tdma == 11) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+                       coex_dm->tdma_adj_type = 15;
+               } else if (coex_dm->cur_ps_tdma == 12) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+                       coex_dm->tdma_adj_type = 16;
+               }
+               if (result == -1) {
+                       if (coex_dm->cur_ps_tdma == 5) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 6);
+                               coex_dm->tdma_adj_type = 6;
+                       } else if (coex_dm->cur_ps_tdma == 6) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 7) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 8);
+                               coex_dm->tdma_adj_type = 8;
+                       } else if (coex_dm->cur_ps_tdma == 13) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 14);
+                               coex_dm->tdma_adj_type = 14;
+                       } else if (coex_dm->cur_ps_tdma == 14) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 15) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 16);
+                               coex_dm->tdma_adj_type = 16;
+                       }
+               } else if (result == 1) {
+                       if (coex_dm->cur_ps_tdma == 8) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 7) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 6);
+                               coex_dm->tdma_adj_type = 6;
+                       } else if (coex_dm->cur_ps_tdma == 6) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 6);
+                               coex_dm->tdma_adj_type = 6;
+                       } else if (coex_dm->cur_ps_tdma == 16) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 15) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 14);
+                               coex_dm->tdma_adj_type = 14;
+                       } else if (coex_dm->cur_ps_tdma == 14) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 14);
+                               coex_dm->tdma_adj_type = 14;
+                       }
+               }
+       } else {
+               BTC_PRINT(BTC_MSG_ALGORITHM,
+                         ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], TxPause = 0\n");
+               if (coex_dm->cur_ps_tdma == 5) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+                       coex_dm->tdma_adj_type = 2;
+               } else if (coex_dm->cur_ps_tdma == 6) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
+                       coex_dm->tdma_adj_type = 2;
+               } else if (coex_dm->cur_ps_tdma == 7) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+                       coex_dm->tdma_adj_type = 3;
+               } else if (coex_dm->cur_ps_tdma == 8) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+                       coex_dm->tdma_adj_type = 4;
+               } else if (coex_dm->cur_ps_tdma == 13) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+                       coex_dm->tdma_adj_type = 10;
+               } else if (coex_dm->cur_ps_tdma == 14) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 10);
+                       coex_dm->tdma_adj_type = 10;
+               } else if (coex_dm->cur_ps_tdma == 15) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+                       coex_dm->tdma_adj_type = 11;
+               } else if (coex_dm->cur_ps_tdma == 16) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+                       coex_dm->tdma_adj_type = 12;
+               }
+               if (result == -1) {
+                       if (coex_dm->cur_ps_tdma == 1) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 2);
+                               coex_dm->tdma_adj_type = 2;
+                       } else if (coex_dm->cur_ps_tdma == 2) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                       } else if (coex_dm->cur_ps_tdma == 3) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 4);
+                               coex_dm->tdma_adj_type = 4;
+                       } else if (coex_dm->cur_ps_tdma == 9) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 10);
+                               coex_dm->tdma_adj_type = 10;
+                       } else if (coex_dm->cur_ps_tdma == 10) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                       } else if (coex_dm->cur_ps_tdma == 11) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 12);
+                               coex_dm->tdma_adj_type = 12;
+                       }
+               } else if (result == 1) {
+                       if (coex_dm->cur_ps_tdma == 4) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                       } else if (coex_dm->cur_ps_tdma == 3) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 2);
+                               coex_dm->tdma_adj_type = 2;
+                       } else if (coex_dm->cur_ps_tdma == 2) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 2);
+                               coex_dm->tdma_adj_type = 2;
+                       } else if (coex_dm->cur_ps_tdma == 12) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                       } else if (coex_dm->cur_ps_tdma == 11) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 10);
+                               coex_dm->tdma_adj_type = 10;
+                       } else if (coex_dm->cur_ps_tdma == 10) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 10);
+                               coex_dm->tdma_adj_type = 10;
+                       }
+               }
+       }
+}
+
+static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
+                         s32 result)
+{
+       /* Set PS TDMA for max interval == 3 */
+       if (tx_pause) {
+               BTC_PRINT(BTC_MSG_ALGORITHM,
+                         ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], TxPause = 1\n");
+               if (coex_dm->cur_ps_tdma == 1) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+                       coex_dm->tdma_adj_type = 7;
+               } else if (coex_dm->cur_ps_tdma == 2) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+                       coex_dm->tdma_adj_type = 7;
+               } else if (coex_dm->cur_ps_tdma == 3) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
+                       coex_dm->tdma_adj_type = 7;
+               } else if (coex_dm->cur_ps_tdma == 4) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 8);
+                       coex_dm->tdma_adj_type = 8;
+               } else if (coex_dm->cur_ps_tdma == 9) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+                       coex_dm->tdma_adj_type = 15;
+               } else if (coex_dm->cur_ps_tdma == 10) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+                       coex_dm->tdma_adj_type = 15;
+               } else if (coex_dm->cur_ps_tdma == 11) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 15);
+                       coex_dm->tdma_adj_type = 15;
+               } else if (coex_dm->cur_ps_tdma == 12) {
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 16);
+                       coex_dm->tdma_adj_type = 16;
+               }
+               if (result == -1) {
+                       if (coex_dm->cur_ps_tdma == 5) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 6) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 7) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 8);
+                               coex_dm->tdma_adj_type = 8;
+                       } else if (coex_dm->cur_ps_tdma == 13) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 14) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 15) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 16);
+                               coex_dm->tdma_adj_type = 16;
+                       }
+               } else if (result == 1) {
+                       if (coex_dm->cur_ps_tdma == 8) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 7) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 6) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 7);
+                               coex_dm->tdma_adj_type = 7;
+                       } else if (coex_dm->cur_ps_tdma == 16) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 15) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       } else if (coex_dm->cur_ps_tdma == 14) {
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 15);
+                               coex_dm->tdma_adj_type = 15;
+                       }
+               }
+       } else {
+               BTC_PRINT(BTC_MSG_ALGORITHM,
+                         ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], TxPause = 0\n");
+               switch (coex_dm->cur_ps_tdma) {
+               case 5:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+                       coex_dm->tdma_adj_type = 3;
+                       break;
+               case 6:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+                       coex_dm->tdma_adj_type = 3;
+                       break;
+               case 7:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
+                       coex_dm->tdma_adj_type = 3;
+                       break;
+               case 8:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 4);
+                       coex_dm->tdma_adj_type = 4;
+                       break;
+               case 13:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+                       coex_dm->tdma_adj_type = 11;
+                       break;
+               case 14:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+                       coex_dm->tdma_adj_type = 11;
+                       break;
+               case 15:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 11);
+                       coex_dm->tdma_adj_type = 11;
+                       break;
+               case 16:
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 12);
+                       coex_dm->tdma_adj_type = 12;
+                       break;
+               }
+               if (result == -1) {
+                       switch (coex_dm->cur_ps_tdma) {
+                       case 1:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                               break;
+                       case 2:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                               break;
+                       case 3:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 4);
+                               coex_dm->tdma_adj_type = 4;
+                               break;
+                       case 9:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                               break;
+                       case 10:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                               break;
+                       case 11:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 12);
+                               coex_dm->tdma_adj_type = 12;
+                               break;
+                       }
+               } else if (result == 1) {
+                       switch (coex_dm->cur_ps_tdma) {
+                       case 4:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                               break;
+                       case 3:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                               break;
+                       case 2:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 3);
+                               coex_dm->tdma_adj_type = 3;
+                               break;
+                       case 12:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                               break;
+                       case 11:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                               break;
+                       case 10:
+                               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                                    true, 11);
+                               coex_dm->tdma_adj_type = 11;
+                               break;
+                       }
+               }
+       }
+}
+
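+/*
+ * Adaptive wifi/BT TDMA duration tuning.  The first call establishes a
+ * baseline PS TDMA case from sco_hid/tx_pause/max_interval; later calls
+ * read the BT retry count from BT_Info and set result to +1 (increase wifi
+ * duration) or -1 (decrease it), which set_tdma_int1/2/3() translate into
+ * a new PS TDMA case.  If the programmed PS TDMA drifts from tdma_adj_type
+ * (e.g. during scan/dhcp), it is restored once scan/link/roam are idle.
+ */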
+static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
+                                              bool sco_hid, bool tx_pause,
+                                              u8 max_interval)
+{
+       static s32 up, dn, m, n, wait_count;
+       /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
+       s32 result;
+       u8 retry_count = 0;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
+                 "[BTCoex], TdmaDurationAdjust()\n");
+
+       if (!coex_dm->auto_tdma_adjust) {
+               coex_dm->auto_tdma_adjust = true;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               if (sco_hid) {
+                       if (tx_pause) {
+                               if (max_interval == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 13);
+                                       coex_dm->tdma_adj_type = 13;
+                               } else if (max_interval == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 14);
+                                       coex_dm->tdma_adj_type = 14;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->tdma_adj_type = 15;
+                               } else {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 15);
+                                       coex_dm->tdma_adj_type = 15;
+                               }
+                       } else {
+                               if (max_interval == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 9);
+                                       coex_dm->tdma_adj_type = 9;
+                               } else if (max_interval == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 10);
+                                       coex_dm->tdma_adj_type = 10;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->tdma_adj_type = 11;
+                               } else {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 11);
+                                       coex_dm->tdma_adj_type = 11;
+                               }
+                       }
+               } else {
+                       if (tx_pause) {
+                               if (max_interval == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 5);
+                                       coex_dm->tdma_adj_type = 5;
+                               } else if (max_interval == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 6);
+                                       coex_dm->tdma_adj_type = 6;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->tdma_adj_type = 7;
+                               } else {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 7);
+                                       coex_dm->tdma_adj_type = 7;
+                               }
+                       } else {
+                               if (max_interval == 1) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 1);
+                                       coex_dm->tdma_adj_type = 1;
+                               } else if (max_interval == 2) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 2);
+                                       coex_dm->tdma_adj_type = 2;
+                               } else if (max_interval == 3) {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->tdma_adj_type = 3;
+                               } else {
+                                       btc8723b2ant_ps_tdma(btcoexist,
+                                                            NORMAL_EXEC,
+                                                            true, 3);
+                                       coex_dm->tdma_adj_type = 3;
+                               }
+                       }
+               }
+
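+               /*
+                * Reset the adaptive counters: up counts consecutive
+                * retry-free periods, dn counts periods with retries, and
+                * n (= 3 * m) is the number of clean periods needed before
+                * the wifi duration is increased again.
+                */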
+               up = 0;
+               dn = 0;
+               m = 1;
+               n = 3;
+               result = 0;
+               wait_count = 0;
+       } else {
+               /* acquire the BT TRx retry count from BT_Info byte2 */
+               retry_count = coex_sta->bt_retry_cnt;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], retry_count = %d\n", retry_count);
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+                         up, dn, m, n, wait_count);
+               result = 0;
+               wait_count++;
+               /* no retry in the last 2-second duration */
+               if (retry_count == 0) {
+                       up++;
+                       dn--;
+
+                       if (dn <= 0)
+                               dn = 0;
+
+                       if (up >= n) {
+                               wait_count = 0;
+                               n = 3;
+                               up = 0;
+                               dn = 0;
+                               result = 1;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_TRACE_FW_DETAIL,
+                                         "[BTCoex], Increase wifi "
+                                         "duration!!\n");
+                       }
+               } else if (retry_count <= 3) {
+                       /* <= 3 retries in the last 2-second duration */
+                       up--;
+                       dn++;
+
+                       if (up <= 0)
+                               up = 0;
+
+                       if (dn == 2) {
+                               if (wait_count <= 2)
+                                       m++;
+                               else
+                                       m = 1;
+
+                               if (m >= 20)
+                                       m = 20;
+
+                               n = 3 * m;
+                               up = 0;
+                               dn = 0;
+                               wait_count = 0;
+                               result = -1;
+                               BTC_PRINT(BTC_MSG_ALGORITHM,
+                                         ALGO_TRACE_FW_DETAIL,
+                                         "[BTCoex], Decrease wifi duration "
+                                         "for retry_counter <= 3!!\n");
+                       }
+               } else {
+                       if (wait_count == 1)
+                               m++;
+                       else
+                               m = 1;
+
+                       if (m >= 20)
+                               m = 20;
+
+                       n = 3 * m;
+                       up = 0;
+                       dn = 0;
+                       wait_count = 0;
+                       result = -1;
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                                 "[BTCoex], Decrease wifi duration "
+                                 "for retry_counter>3!!\n");
+               }
+
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], max Interval = %d\n", max_interval);
+               if (max_interval == 1)
+                       set_tdma_int1(btcoexist, tx_pause, result);
+               else if (max_interval == 2)
+                       set_tdma_int2(btcoexist, tx_pause, result);
+               else if (max_interval == 3)
+                       set_tdma_int3(btcoexist, tx_pause, result);
+       }
+
+       /* If the current PsTdma does not match the recorded one (e.g. during
+        * scan or dhcp), adjust it back to the previously recorded one.
+        */
+       if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
+               bool scan = false, link = false, roam = false;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                         "[BTCoex], PsTdma type mismatch!!!, "
+                         "curPsTdma=%d, recordPsTdma=%d\n",
+                         coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+
+               btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+               btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+               btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+
+               if (!scan && !link && !roam)
+                       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
+                                            coex_dm->tdma_adj_type);
+               else
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
+                                 "[BTCoex], roaming/link/scan is in"
+                                 " progress, will adjust next time!!!\n");
+       }
+}
+
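+/*
+ * The per-profile action handlers below follow a common skeleton: sample
+ * the wifi (and usually BT) RSSI state, set RF reg 0x1 and the FW DAC swing
+ * level, decrease BT power when required, choose a coex table and PS TDMA
+ * setting for the profile, then apply the sw mechanisms according to the
+ * wifi bandwidth and RSSI state.
+ */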
+/* SCO only or SCO+PAN(HS) */
+static void btc8723b2ant_action_sco(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+       /* for SCO quality at 11b/g mode */
+       if (BTC_WIFI_BW_LEGACY == wifi_bw)
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 2);
+       else /* for SCO quality & wifi performance balance at 11n mode */
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 8);
+
+       /* for voice quality */
+       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 0);
+
+       /* sw mechanism */
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x4);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  true, 0x4);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  true, 0x4);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  true, 0x4);
+               }
+       }
+}
+
+static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+       if (BTC_WIFI_BW_LEGACY == wifi_bw) /* for HID at 11b/g mode */
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+       else /* for HID quality & wifi performance balance at 11n mode */
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 9);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 9);
+       else
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13);
+
+       /* sw mechanism */
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+/* A2DP only / PAN(EDR) only / A2DP+PAN(HS) */
+static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+               btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+                                                 false, 1);
+       else
+               btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 1);
+
+       /* sw mechanism */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+static void btc8723b2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+       btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 2);
+
+       /* sw mechanism */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 10);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 1);
+       else
+               btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
+
+       /* sw mechanism */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+/* PAN(HS) only */
+static void btc8723b2ant_action_pan_hs(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+       btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+/* PAN(EDR)+A2DP */
+static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 12);
+               if (BTC_WIFI_BW_HT40 == wifi_bw)
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+                                                         true, 3);
+               else
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, false,
+                                                         false, 3);
+       } else {
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+               btc8723b2ant_tdma_duration_adjust(btcoexist, false, true, 3);
+       }
+
+       /* sw mechanism */
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, false,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               if (BTC_WIFI_BW_HT40 == wifi_bw) {
+                       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+                                                     3);
+                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+                                                 0xfffff, 0x780);
+               } else {
+                       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC,
+                                                     6);
+                       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+                       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
+                                                 0xfffff, 0x0);
+               }
+               btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+       } else {
+               btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+               btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 11);
+               btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
+                                         0x0);
+               btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+       }
+
+       /* sw mechanism */
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+/* HID+A2DP+PAN(EDR) */
+static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                      0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+               if (BTC_WIFI_BW_HT40 == wifi_bw)
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+                                                         true, 2);
+               else
+                       btc8723b2ant_tdma_duration_adjust(btcoexist, true,
+                                                         false, 3);
+       } else {
+               btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 3);
+       }
+
+       /* sw mechanism */
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
+static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
+{
+       u8 wifi_rssi_state, bt_rssi_state;
+       u32 wifi_bw;
+
+       wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
+                                                         0, 2, 15, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 35, 0);
+
+       btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
+
+       btc8723b2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
+
+       if (btc8723b_need_dec_pwr(btcoexist))
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
+       else
+               btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, false);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+
+       btc8723b_coex_tbl_type(btcoexist, NORMAL_EXEC, 7);
+
+       if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
+           (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+               btc8723b2ant_tdma_duration_adjust(btcoexist, true, false, 2);
+       else
+               btc8723b2ant_tdma_duration_adjust(btcoexist, true, true, 2);
+
+       /* sw mechanism */
+       if (BTC_WIFI_BW_HT40 == wifi_bw) {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, true, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       } else {
+               if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
+                   (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, true, false,
+                                                  false, 0x18);
+               } else {
+                       btc8723b2ant_sw_mechanism1(btcoexist, false, true,
+                                                  false, false);
+                       btc8723b2ant_sw_mechanism2(btcoexist, false, false,
+                                                  false, 0x18);
+               }
+       }
+}
+
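+/*
+ * Top-level dispatcher: bails out under manual control or IPS, treats BT
+ * inquiry/page scan as a special case, and otherwise maps the detected
+ * algorithm (SCO/HID/A2DP/PAN combinations) to its action handler,
+ * falling back to btc8723b2ant_coex_alloff() for undefined cases.
+ */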
+static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
+{
+       u8 algorithm = 0;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                 "[BTCoex], RunCoexistMechanism()===>\n");
+
+       if (btcoexist->manual_control) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], RunCoexistMechanism(), "
+                         "return for Manual CTRL <===\n");
+               return;
+       }
+
+       if (coex_sta->under_ips) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], wifi is under IPS !!!\n");
+               return;
+       }
+
+       algorithm = btc8723b2ant_action_algorithm(btcoexist);
+       if (coex_sta->c2h_bt_inquiry_page &&
+           (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BT is under inquiry/page scan !!\n");
+               btc8723b2ant_action_bt_inquiry(btcoexist);
+               return;
+       } else {
+               if (coex_dm->need_recover_0x948) {
+                       coex_dm->need_recover_0x948 = false;
+                       btcoexist->btc_write_2byte(btcoexist, 0x948,
+                                                  coex_dm->backup_0x948);
+               }
+       }
+
+       coex_dm->cur_algorithm = algorithm;
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+                 coex_dm->cur_algorithm);
+
+       if (btc8723b2ant_is_common_action(btcoexist)) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], Action 2-Ant common.\n");
+               coex_dm->auto_tdma_adjust = false;
+       } else {
+               if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], preAlgorithm=%d, "
+                                 "curAlgorithm=%d\n", coex_dm->pre_algorithm,
+                                 coex_dm->cur_algorithm);
+                       coex_dm->auto_tdma_adjust = false;
+               }
+               switch (coex_dm->cur_algorithm) {
+               case BT_8723B_2ANT_COEX_ALGO_SCO:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+                       btc8723b2ant_action_sco(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_HID:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+                       btc8723b2ant_action_hid(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_A2DP:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = A2DP.\n");
+                       btc8723b2ant_action_a2dp(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = A2DP+PAN(HS).\n");
+                       btc8723b2ant_action_a2dp_pan_hs(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_PANEDR:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = PAN(EDR).\n");
+                       btc8723b2ant_action_pan_edr(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_PANHS:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = HS mode.\n");
+                       btc8723b2ant_action_pan_hs(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = PAN+A2DP.\n");
+                       btc8723b2ant_action_pan_edr_a2dp(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = PAN(EDR)+HID.\n");
+                       btc8723b2ant_action_pan_edr_hid(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = HID+A2DP+PAN.\n");
+                       btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
+                       break;
+               case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = HID+A2DP.\n");
+                       btc8723b2ant_action_hid_a2dp(btcoexist);
+                       break;
+               default:
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], Action 2-Ant, "
+                                 "algorithm = coexist All Off!!\n");
+                       btc8723b2ant_coex_alloff(btcoexist);
+                       break;
+               }
+               coex_dm->pre_algorithm = coex_dm->cur_algorithm;
+       }
+}
+
+/*********************************************************************
+ *  workaround functions start with wa_btc8723b2ant_
+ *********************************************************************/
+/*********************************************************************
+ *  extern functions start with ex_halbtc8723b2ant_
+ *********************************************************************/
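+/*
+ * One-time HW setup for 2-antenna coexistence: backs up RF 0x1e, hands
+ * antenna control to WL/BT (0x4c[24:23]), forces GNT_BT low, programs the
+ * external antenna switch via H2C 0x65 according to firmware version and
+ * antenna position, then loads the PTA coex table and enables counter
+ * statistics.
+ */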
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
+{
+       struct btc_board_info *board_info = &btcoexist->board_info;
+       u32 u32tmp = 0, fw_ver;
+       u8 u8tmp = 0;
+       u8 h2c_parameter[2] = {0};
+
+       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                 "[BTCoex], 2Ant Init HW Config!!\n");
+
+       /* backup rf 0x1e value */
+       coex_dm->bt_rf0x1e_backup = btcoexist->btc_get_rf_reg(btcoexist,
+                                                             BTC_RF_A, 0x1e,
+                                                             0xfffff);
+
+       /* 0x4c[23]=0, 0x4c[24]=1  Antenna control by WL/BT */
+       u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+       u32tmp &= ~BIT23;
+       u32tmp |= BIT24;
+       btcoexist->btc_write_4byte(btcoexist, 0x4c, u32tmp);
+
+       btcoexist->btc_write_1byte(btcoexist, 0x974, 0xff);
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x944, 0x3, 0x3);
+       btcoexist->btc_write_1byte(btcoexist, 0x930, 0x77);
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x67, 0x20, 0x1);
+
+       /* Antenna switch control parameter */
+       /* btcoexist->btc_write_4byte(btcoexist, 0x858, 0x55555555);*/
+
+       /*Force GNT_BT to low*/
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+       btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+
+       /* 0x790[5:0]=0x5 */
+       u8tmp = btcoexist->btc_read_1byte(btcoexist, 0x790);
+       u8tmp &= 0xc0;
+       u8tmp |= 0x5;
+       btcoexist->btc_write_1byte(btcoexist, 0x790, u8tmp);
+
+       /* Antenna config */
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+
+       /* ext switch for fw ver < 0xc00 */
+       if (fw_ver < 0xc00) {
+               if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+                       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+                                                          0x3, 0x1);
+                       /*Main Ant to  BT for IPS case 0x4c[23]=1*/
+                       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+                                                          0x1);
+
+                       /*tell firmware "no antenna inverse"*/
+                       h2c_parameter[0] = 0;
+                       h2c_parameter[1] = 1;  /* ext switch type */
+                       btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+                                               h2c_parameter);
+               } else {
+                       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c,
+                                                          0x3, 0x2);
+                       /*Aux Ant to  BT for IPS case 0x4c[23]=1*/
+                       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1,
+                                                          0x0);
+
+                       /*tell firmware "antenna inverse"*/
+                       h2c_parameter[0] = 1;
+                       h2c_parameter[1] = 1;  /*ext switch type*/
+                       btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+                                               h2c_parameter);
+               }
+       } else {
+               /*ext switch always at s1 (if exist) */
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x92c, 0x3, 0x1);
+               /*Main Ant to  BT for IPS case 0x4c[23]=1*/
+               btcoexist->btc_write_1byte_bitmask(btcoexist, 0x64, 0x1, 0x1);
+
+               if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+                       /*tell firmware "no antenna inverse"*/
+                       h2c_parameter[0] = 0;
+                       h2c_parameter[1] = 0;  /*ext switch type*/
+                       btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+                                               h2c_parameter);
+               } else {
+                       /*tell firmware "antenna inverse"*/
+                       h2c_parameter[0] = 1;
+                       h2c_parameter[1] = 0;  /*ext switch type*/
+                       btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+                                               h2c_parameter);
+               }
+       }
+
+       /* PTA parameter */
+       btc8723b_coex_tbl_type(btcoexist, FORCE_EXEC, 0);
+
+       /* Enable counter statistics */
+       /*0x76e[3] =1, WLAN_Act control by PTA*/
+       btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
+       btcoexist->btc_write_1byte(btcoexist, 0x778, 0x3);
+       btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
+}
+
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
+{
+       BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                 "[BTCoex], Coex Mechanism Init!!\n");
+       btc8723b2ant_init_coex_dm(btcoexist);
+}
+
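+/*
+ * Dumps the current coexistence state (versions, channel/RSSI, link and
+ * profile info, sw/fw mechanism settings and a set of raw registers) into
+ * the debug CLI buffer via CL_SPRINTF/CL_PRINTF.
+ */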
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
+{
+       struct btc_board_info *board_info = &btcoexist->board_info;
+       struct btc_stack_info *stack_info = &btcoexist->stack_info;
+       struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
+       u8 *cli_buf = btcoexist->cli_buf;
+       u8 u8tmp[4], i, bt_info_ext, ps_tdma_case = 0;
+       u32 u32tmp[4];
+       bool roam = false, scan = false;
+       bool link = false, wifi_under_5g = false;
+       bool bt_hs_on = false, wifi_busy = false;
+       s32 wifi_rssi = 0, bt_hs_rssi = 0;
+       u32 wifi_bw, wifi_traffic_dir, fa_ofdm, fa_cck;
+       u8 wifi_dot11_chnl, wifi_hs_chnl;
+       u32 fw_ver = 0, bt_patch_ver = 0;
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                  "\r\n ============[BT Coexist info]============");
+       CL_PRINTF(cli_buf);
+
+       if (btcoexist->manual_control) {
+               CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                          "\r\n ==========[Under Manual Control]============");
+               CL_PRINTF(cli_buf);
+               CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                          "\r\n ==========================================");
+               CL_PRINTF(cli_buf);
+       }
+
+       if (!board_info->bt_exist) {
+               CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n BT not exists !!!");
+               CL_PRINTF(cli_buf);
+               return;
+       }
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+                  "Ant PG number/ Ant mechanism:",
+                  board_info->pg_ant_num, board_info->btdm_ant_num);
+       CL_PRINTF(cli_buf);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %d",
+                  "BT stack/ hci ext ver",
+                  ((stack_info->profile_notified) ? "Yes" : "No"),
+                  stack_info->hci_version);
+       CL_PRINTF(cli_buf);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                  "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                  "CoexVer/ FwVer/ PatchVer",
+                  glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+                  fw_ver, bt_patch_ver, bt_patch_ver);
+       CL_PRINTF(cli_buf);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
+                          &wifi_dot11_chnl);
+       btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d(%d)",
+                  "Dot11 channel / HsChnl(HsMode)",
+                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+       CL_PRINTF(cli_buf);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x ",
+                  "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info[0],
+                  coex_dm->wifi_chnl_info[1], coex_dm->wifi_chnl_info[2]);
+       CL_PRINTF(cli_buf);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
+       btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+                  "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+       CL_PRINTF(cli_buf);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+                  "Wifi link/ roam/ scan", link, roam, scan);
+       CL_PRINTF(cli_buf);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_BUSY, &wifi_busy);
+       btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+                          &wifi_traffic_dir);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s / %s/ %s ",
+                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+                  ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
+                  (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
+                  ((!wifi_busy) ? "idle" :
+                  ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
+                  "uplink" : "downlink")));
+       CL_PRINTF(cli_buf);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d / %d / %d / %d",
+                  "SCO/HID/PAN/A2DP",
+                  bt_link_info->sco_exist, bt_link_info->hid_exist,
+                  bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+       CL_PRINTF(cli_buf);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
+
+       bt_info_ext = coex_sta->bt_info_ext;
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s",
+                  "BT Info A2DP rate",
+                  (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
+       CL_PRINTF(cli_buf);
+
+       for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
+               if (coex_sta->bt_info_c2h_cnt[i]) {
+                       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                                  "\r\n %-35s = %02x %02x %02x "
+                                  "%02x %02x %02x %02x(%d)",
+                                  glbt_info_src_8723b_2ant[i],
+                                  coex_sta->bt_info_c2h[i][0],
+                                  coex_sta->bt_info_c2h[i][1],
+                                  coex_sta->bt_info_c2h[i][2],
+                                  coex_sta->bt_info_c2h[i][3],
+                                  coex_sta->bt_info_c2h[i][4],
+                                  coex_sta->bt_info_c2h[i][5],
+                                  coex_sta->bt_info_c2h[i][6],
+                                  coex_sta->bt_info_c2h_cnt[i]);
+                       CL_PRINTF(cli_buf);
+               }
+       }
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/%s",
+                  "PS state, IPS/LPS",
+                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+       CL_PRINTF(cli_buf);
+       btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
+
+       /* Sw mechanism */
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                  "\r\n %-35s", "============[Sw mechanism]============");
+       CL_PRINTF(cli_buf);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d ",
+                  "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+                  coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+       CL_PRINTF(cli_buf);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+       CL_PRINTF(cli_buf);
+
+       /* Fw mechanism */
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+                  "============[Fw mechanism]============");
+       CL_PRINTF(cli_buf);
+
+       ps_tdma_case = coex_dm->cur_ps_tdma;
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                  "\r\n %-35s = %02x %02x %02x %02x %02x case-%d (auto:%d)",
+                  "PS TDMA", coex_dm->ps_tdma_para[0],
+                  coex_dm->ps_tdma_para[1], coex_dm->ps_tdma_para[2],
+                  coex_dm->ps_tdma_para[3], coex_dm->ps_tdma_para[4],
+                  ps_tdma_case, coex_dm->auto_tdma_adjust);
+       CL_PRINTF(cli_buf);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d ",
+                  "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+                  coex_dm->cur_ignore_wlan_act);
+       CL_PRINTF(cli_buf);
+
+       /* Hw setting */
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s",
+                  "============[Hw setting]============");
+       CL_PRINTF(cli_buf);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x",
+                  "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+       CL_PRINTF(cli_buf);
+
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+                  "0x778/0x880[29:25]", u8tmp[0],
+                  (u32tmp[0]&0x3e000000) >> 25);
+       CL_PRINTF(cli_buf);
+
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
+       u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x948/ 0x67[5] / 0x765",
+                  u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]);
+       CL_PRINTF(cli_buf);
+
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
+       u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
+       u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+                  u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+       CL_PRINTF(cli_buf);
+
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
+       u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
+       u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+                  ((u8tmp[0] & 0x8)>>3), u8tmp[1],
+                  ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+       CL_PRINTF(cli_buf);
+
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+       CL_PRINTF(cli_buf);
+
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x",
+                  "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+       CL_PRINTF(cli_buf);
+
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
+       u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
+       u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0xda8);
+       u32tmp[3] = btcoexist->btc_read_4byte(btcoexist, 0xcf0);
+
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
+       u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
+
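+       /*
+        * OFDM false alarms are summed from 16-bit fields of the
+        * 0xda0/0xda4/0xda8/0xcf0 reads above; the CCK false-alarm count
+        * is assembled from 0xa5b (high byte) and 0xa5c (low byte).
+        */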
+       fa_ofdm = ((u32tmp[0]&0xffff0000) >> 16) +
+                 ((u32tmp[1]&0xffff0000) >> 16) +
+                  (u32tmp[1] & 0xffff) +
+                  (u32tmp[2] & 0xffff) +
+                 ((u32tmp[3]&0xffff0000) >> 16) +
+                  (u32tmp[3] & 0xffff);
+       fa_cck = (u8tmp[0] << 8) + u8tmp[1];
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                  "OFDM-CCA/OFDM-FA/CCK-FA",
+                  u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+       CL_PRINTF(cli_buf);
+
+       u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
+       u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
+       u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
+       u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE,
+                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+                  u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+       CL_PRINTF(cli_buf);
+
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+                  "0x770(high-pri rx/tx)",
+                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+       CL_PRINTF(cli_buf);
+       CL_SPRINTF(cli_buf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d",
+                  "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+                  coex_sta->low_priority_tx);
+       CL_PRINTF(cli_buf);
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
+       btc8723b2ant_monitor_bt_ctr(btcoexist);
+#endif
+       btcoexist->btc_disp_dbg_msg(btcoexist,
+                                   BTC_DBG_DISP_COEX_STATISTICS);
+}
+
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       if (BTC_IPS_ENTER == type) {
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], IPS ENTER notify\n");
+               coex_sta->under_ips = true;
+               btc8723b2ant_coex_alloff(btcoexist);
+       } else if (BTC_IPS_LEAVE == type) {
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], IPS LEAVE notify\n");
+               coex_sta->under_ips = false;
+       }
+}
+
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       if (BTC_LPS_ENABLE == type) {
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], LPS ENABLE notify\n");
+               coex_sta->under_lps = true;
+       } else if (BTC_LPS_DISABLE == type) {
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], LPS DISABLE notify\n");
+               coex_sta->under_lps = false;
+       }
+}
+
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       if (BTC_SCAN_START == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], SCAN START notify\n");
+       else if (BTC_SCAN_FINISH == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], SCAN FINISH notify\n");
+}
+
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       if (BTC_ASSOCIATE_START == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], CONNECT START notify\n");
+       else if (BTC_ASSOCIATE_FINISH == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], CONNECT FINISH notify\n");
+}
+
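+/*
+ * Media status notification: on connect in 2.4G the 3-byte channel info
+ * {connected flag, central channel, bandwidth code} is sent to BT via
+ * H2C 0x66, e.g. HT40 on channel 6 gives {0x1, 0x06, 0x30}
+ * (logged as 0x10630).
+ */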
+void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       u8 h2c_parameter[3] = {0};
+       u32 wifi_bw;
+       u8 wifi_central_chnl;
+
+       if (BTC_MEDIA_CONNECT == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], MEDIA connect notify\n");
+       else
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], MEDIA disconnect notify\n");
+
+       /* only in 2.4G do we need to inform BT of the channel mask */
+       btcoexist->btc_get(btcoexist,
+               BTC_GET_U1_WIFI_CENTRAL_CHNL, &wifi_central_chnl);
+       if ((BTC_MEDIA_CONNECT == type) &&
+           (wifi_central_chnl <= 14)) {
+               h2c_parameter[0] = 0x1;
+               h2c_parameter[1] = wifi_central_chnl;
+               btcoexist->btc_get(btcoexist,
+                       BTC_GET_U4_WIFI_BW, &wifi_bw);
+               if (BTC_WIFI_BW_HT40 == wifi_bw)
+                       h2c_parameter[2] = 0x30;
+               else
+                       h2c_parameter[2] = 0x20;
+       }
+
+       coex_dm->wifi_chnl_info[0] = h2c_parameter[0];
+       coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
+       coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
+                 "[BTCoex], FW write 0x66=0x%x\n",
+                 h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                 h2c_parameter[2]);
+
+       btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
+}
+
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+                                             u8 type)
+{
+       if (type == BTC_PACKET_DHCP)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex], DHCP Packet notify\n");
+}
+
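+/*
+ * Parses a BT info C2H report: byte 0 low nibble selects the report
+ * source, byte 1 carries the BT_INFO_8723B_2ANT_B_* connection/profile
+ * bits, byte 2[3:0] is the retry count, byte 3 maps to RSSI (val * 2 + 10)
+ * and byte 4 is the extended info. The decoded state refreshes bt_status
+ * and then re-runs the coexistence mechanism.
+ */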
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+                                      u8 *tmpbuf, u8 length)
+{
+       u8 bt_info = 0;
+       u8 i, rsp_source = 0;
+       bool bt_busy = false, limited_dig = false;
+       bool wifi_connected = false;
+
+       coex_sta->c2h_bt_info_req_sent = false;
+
+       rsp_source = tmpbuf[0]&0xf;
+       if (rsp_source >= BT_INFO_SRC_8723B_2ANT_MAX)
+               rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
+       coex_sta->bt_info_c2h_cnt[rsp_source]++;
+
+       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                 "[BTCoex], Bt info[%d], length=%d, hex data=[",
+                 rsp_source, length);
+       for (i = 0; i < length; i++) {
+               coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
+               if (i == 1)
+                       bt_info = tmpbuf[i];
+               if (i == length-1)
+                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                                 "0x%02x]\n", tmpbuf[i]);
+               else
+                       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                                 "0x%02x, ", tmpbuf[i]);
+       }
+
+       if (btcoexist->manual_control) {
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BtInfoNotify(), "
+                         "return for Manual CTRL<===\n");
+               return;
+       }
+
+       if (BT_INFO_SRC_8723B_2ANT_WIFI_FW != rsp_source) {
+               coex_sta->bt_retry_cnt =        /* [3:0]*/
+                       coex_sta->bt_info_c2h[rsp_source][2] & 0xf;
+
+               coex_sta->bt_rssi =
+                       coex_sta->bt_info_c2h[rsp_source][3] * 2 + 10;
+
+               coex_sta->bt_info_ext =
+                       coex_sta->bt_info_c2h[rsp_source][4];
+
+               /* Here we need to resend some wifi info to BT
+                * because BT was reset and lost the info.
+                */
+               if ((coex_sta->bt_info_ext & BIT1)) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], BT ext info bit1 check,"
+                                 " send wifi BW&Chnl to BT!!\n");
+                       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
+                                          &wifi_connected);
+                       if (wifi_connected)
+                               btc8723b_med_stat_notify(btcoexist,
+                                                        BTC_MEDIA_CONNECT);
+                       else
+                               btc8723b_med_stat_notify(btcoexist,
+                                                        BTC_MEDIA_DISCONNECT);
+               }
+
+               if ((coex_sta->bt_info_ext & BIT3)) {
+                       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                                 "[BTCoex], BT ext info bit3 check, "
+                                 "set BT NOT to ignore Wlan active!!\n");
+                       btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
+                                                    false);
+               } else {
+                       /* BT already does NOT ignore WLAN activity, do nothing here. */
+               }
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+               if ((coex_sta->bt_info_ext & BIT4)) {
+                       /* BT auto report already enabled, do nothing*/
+               } else {
+                       btc8723b2ant_bt_auto_report(btcoexist, FORCE_EXEC,
+                                                   true);
+               }
+#endif
+       }
+
+       /* check BIT2 first ==> check if bt is under inquiry or page scan*/
+       if (bt_info & BT_INFO_8723B_2ANT_B_INQ_PAGE)
+               coex_sta->c2h_bt_inquiry_page = true;
+       else
+               coex_sta->c2h_bt_inquiry_page = false;
+
+       /* set link exist status*/
+       if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+               coex_sta->bt_link_exist = false;
+               coex_sta->pan_exist = false;
+               coex_sta->a2dp_exist = false;
+               coex_sta->hid_exist = false;
+               coex_sta->sco_exist = false;
+       } else { /*  connection exists */
+               coex_sta->bt_link_exist = true;
+               if (bt_info & BT_INFO_8723B_2ANT_B_FTP)
+                       coex_sta->pan_exist = true;
+               else
+                       coex_sta->pan_exist = false;
+               if (bt_info & BT_INFO_8723B_2ANT_B_A2DP)
+                       coex_sta->a2dp_exist = true;
+               else
+                       coex_sta->a2dp_exist = false;
+               if (bt_info & BT_INFO_8723B_2ANT_B_HID)
+                       coex_sta->hid_exist = true;
+               else
+                       coex_sta->hid_exist = false;
+               if (bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO)
+                       coex_sta->sco_exist = true;
+               else
+                       coex_sta->sco_exist = false;
+       }
+
+       btc8723b2ant_update_bt_link_info(btcoexist);
+
+       if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
+               coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BtInfoNotify(), "
+                         "BT Non-Connected idle!!!\n");
+       /* connection exists but not busy */
+       } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
+               coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+       } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
+                  (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
+               coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+       } else if (bt_info & BT_INFO_8723B_2ANT_B_ACL_BUSY) {
+               coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+       } else {
+               coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
+               BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                         "[BTCoex], BtInfoNotify(), "
+                         "BT Non-Defined state!!!\n");
+       }
+
+       if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
+           (BT_8723B_2ANT_BT_STATUS_SCO_BUSY == coex_dm->bt_status) ||
+           (BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY == coex_dm->bt_status)) {
+               bt_busy = true;
+               limited_dig = true;
+       } else {
+               bt_busy = false;
+               limited_dig = false;
+       }
+
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_TRAFFIC_BUSY, &bt_busy);
+
+       coex_dm->limited_dig = limited_dig;
+       btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_LIMITED_DIG, &limited_dig);
+
+       btc8723b2ant_run_coexist_mechanism(btcoexist);
+}
+
+void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+                                              u8 type)
+{
+       if (BTC_STACK_OP_INQ_PAGE_PAIR_START == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex],StackOP Inquiry/page/pair start notify\n");
+       else if (BTC_STACK_OP_INQ_PAGE_PAIR_FINISH == type)
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
+                         "[BTCoex],StackOP Inquiry/page/pair finish notify\n");
+}
+
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
+{
+       BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+
+       btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
+       btc8723b_med_stat_notify(btcoexist, BTC_MEDIA_DISCONNECT);
+}
+
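+/*
+ * Periodic maintenance: prints version info a few times after init and,
+ * depending on BT_AUTO_REPORT_ONLY_8723B_2ANT, either polls BT info and
+ * counters itself or re-runs the coex mechanism when the Wi-Fi status
+ * changed or auto TDMA adjustment is active.
+ */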
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist)
+{
+       struct btc_board_info *board_info = &btcoexist->board_info;
+       struct btc_stack_info *stack_info = &btcoexist->stack_info;
+       static u8 dis_ver_info_cnt;
+       u32 fw_ver = 0, bt_patch_ver = 0;
+
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                 "[BTCoex], =========================="
+                 "Periodical===========================\n");
+
+       if (dis_ver_info_cnt <= 5) {
+               dis_ver_info_cnt += 1;
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                         "[BTCoex], ****************************"
+                         "************************************\n");
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                         "[BTCoex], Ant PG Num/ Ant Mech/ "
+                         "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
+                         board_info->btdm_ant_num, board_info->btdm_ant_pos);
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                         "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                         ((stack_info->profile_notified) ? "Yes" : "No"),
+                         stack_info->hci_version);
+               btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
+                                  &bt_patch_ver);
+               btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                         "[BTCoex], CoexVer/ FwVer/ PatchVer = "
+                         "%d_%x/ 0x%x/ 0x%x(%d)\n",
+                         glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+                         fw_ver, bt_patch_ver, bt_patch_ver);
+               BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
+                         "[BTCoex], *****************************"
+                         "***********************************\n");
+       }
+
+#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
+       btc8723b2ant_query_bt_info(btcoexist);
+       btc8723b2ant_monitor_bt_ctr(btcoexist);
+       btc8723b2ant_monitor_bt_enable_disable(btcoexist);
+#else
+       if (btc8723b2ant_is_wifi_status_changed(btcoexist) ||
+           coex_dm->auto_tdma_adjust)
+               btc8723b2ant_run_coexist_mechanism(btcoexist);
+#endif
+}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.h
new file mode 100644 (file)
index 0000000..e0ad8e5
--- /dev/null
@@ -0,0 +1,173 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#ifndef _HAL8723B_2_ANT
+#define _HAL8723B_2_ANT
+
+/************************************************************************
+ * The following is for 8723B 2Ant BT Co-exist definition
+ ************************************************************************/
+#define        BT_AUTO_REPORT_ONLY_8723B_2ANT                  1
+
+#define        BT_INFO_8723B_2ANT_B_FTP                        BIT7
+#define        BT_INFO_8723B_2ANT_B_A2DP                       BIT6
+#define        BT_INFO_8723B_2ANT_B_HID                        BIT5
+#define        BT_INFO_8723B_2ANT_B_SCO_BUSY                   BIT4
+#define        BT_INFO_8723B_2ANT_B_ACL_BUSY                   BIT3
+#define        BT_INFO_8723B_2ANT_B_INQ_PAGE                   BIT2
+#define        BT_INFO_8723B_2ANT_B_SCO_ESCO                   BIT1
+#define        BT_INFO_8723B_2ANT_B_CONNECTION                 BIT0
+
+#define BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT            2
+
+enum BT_INFO_SRC_8723B_2ANT {
+       BT_INFO_SRC_8723B_2ANT_WIFI_FW                  = 0x0,
+       BT_INFO_SRC_8723B_2ANT_BT_RSP                   = 0x1,
+       BT_INFO_SRC_8723B_2ANT_BT_ACTIVE_SEND           = 0x2,
+       BT_INFO_SRC_8723B_2ANT_MAX
+};
+
+enum BT_8723B_2ANT_BT_STATUS {
+       BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE      = 0x0,
+       BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE          = 0x1,
+       BT_8723B_2ANT_BT_STATUS_INQ_PAGE                = 0x2,
+       BT_8723B_2ANT_BT_STATUS_ACL_BUSY                = 0x3,
+       BT_8723B_2ANT_BT_STATUS_SCO_BUSY                = 0x4,
+       BT_8723B_2ANT_BT_STATUS_ACL_SCO_BUSY            = 0x5,
+       BT_8723B_2ANT_BT_STATUS_MAX
+};
+
+enum BT_8723B_2ANT_COEX_ALGO {
+       BT_8723B_2ANT_COEX_ALGO_UNDEFINED               = 0x0,
+       BT_8723B_2ANT_COEX_ALGO_SCO                     = 0x1,
+       BT_8723B_2ANT_COEX_ALGO_HID                     = 0x2,
+       BT_8723B_2ANT_COEX_ALGO_A2DP                    = 0x3,
+       BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS              = 0x4,
+       BT_8723B_2ANT_COEX_ALGO_PANEDR                  = 0x5,
+       BT_8723B_2ANT_COEX_ALGO_PANHS                   = 0x6,
+       BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP             = 0x7,
+       BT_8723B_2ANT_COEX_ALGO_PANEDR_HID              = 0x8,
+       BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR         = 0x9,
+       BT_8723B_2ANT_COEX_ALGO_HID_A2DP                = 0xa,
+       BT_8723B_2ANT_COEX_ALGO_MAX                     = 0xb,
+};
+
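+/*
+ * The pre_xxx/cur_xxx member pairs appear to track the value last
+ * programmed versus the value currently requested (for example,
+ * pre_algorithm/cur_algorithm are compared in
+ * btc8723b2ant_run_coexist_mechanism() to detect a change).
+ */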
+struct coex_dm_8723b_2ant {
+       /* fw mechanism */
+       bool pre_dec_bt_pwr;
+       bool cur_dec_bt_pwr;
+       u8 pre_fw_dac_swing_lvl;
+       u8 cur_fw_dac_swing_lvl;
+       bool cur_ignore_wlan_act;
+       bool pre_ignore_wlan_act;
+       u8 pre_ps_tdma;
+       u8 cur_ps_tdma;
+       u8 ps_tdma_para[5];
+       u8 tdma_adj_type;
+       bool reset_tdma_adjust;
+       bool auto_tdma_adjust;
+       bool pre_ps_tdma_on;
+       bool cur_ps_tdma_on;
+       bool pre_bt_auto_report;
+       bool cur_bt_auto_report;
+
+       /* sw mechanism */
+       bool pre_rf_rx_lpf_shrink;
+       bool cur_rf_rx_lpf_shrink;
+       u32 bt_rf0x1e_backup;
+       bool pre_low_penalty_ra;
+       bool cur_low_penalty_ra;
+       bool pre_dac_swing_on;
+       u32 pre_dac_swing_lvl;
+       bool cur_dac_swing_on;
+       u32 cur_dac_swing_lvl;
+       bool pre_adc_back_off;
+       bool cur_adc_back_off;
+       bool pre_agc_table_en;
+       bool cur_agc_table_en;
+       u32 pre_val0x6c0;
+       u32 cur_val0x6c0;
+       u32 pre_val0x6c4;
+       u32 cur_val0x6c4;
+       u32 pre_val0x6c8;
+       u32 cur_val0x6c8;
+       u8 pre_val0x6cc;
+       u8 cur_val0x6cc;
+       bool limited_dig;
+
+       /* algorithm related */
+       u8 pre_algorithm;
+       u8 cur_algorithm;
+       u8 bt_status;
+       u8 wifi_chnl_info[3];
+
+       bool need_recover_0x948;
+       u16 backup_0x948;
+};
+
+struct coex_sta_8723b_2ant {
+       bool bt_link_exist;
+       bool sco_exist;
+       bool a2dp_exist;
+       bool hid_exist;
+       bool pan_exist;
+
+       bool under_lps;
+       bool under_ips;
+       u32 high_priority_tx;
+       u32 high_priority_rx;
+       u32 low_priority_tx;
+       u32 low_priority_rx;
+       u8 bt_rssi;
+       u8 pre_bt_rssi_state;
+       u8 pre_wifi_rssi_state[4];
+       bool c2h_bt_info_req_sent;
+       u8 bt_info_c2h[BT_INFO_SRC_8723B_2ANT_MAX][10];
+       u32 bt_info_c2h_cnt[BT_INFO_SRC_8723B_2ANT_MAX];
+       bool c2h_bt_inquiry_page;
+       u8 bt_retry_cnt;
+       u8 bt_info_ext;
+};
+
+/*********************************************************************
+ * The following is interface which will notify coex module.
+ *********************************************************************/
+void ex_halbtc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type);
+void btc8723b_med_stat_notify(struct btc_coexist *btcoexist, u8 type);
+void ex_halbtc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
+                                             u8 type);
+void ex_halbtc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
+                                      u8 *tmpbuf, u8 length);
+void ex_halbtc8723b2ant_stack_operation_notify(struct btc_coexist *btcoexist,
+                                              u8 type);
+void ex_halbtc8723b2ant_halt_notify(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_periodical(struct btc_coexist *btcoexist);
+void ex_halbtc8723b2ant_display_coex_info(struct btc_coexist *btcoexist);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
new file mode 100644 (file)
index 0000000..b6722de
--- /dev/null
@@ -0,0 +1,1011 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ ******************************************************************************/
+
+#include "halbt_precomp.h"
+
+/***********************************************
+ *             Global variables
+ ***********************************************/
+
+struct btc_coexist gl_bt_coexist;
+
+u32 btc_dbg_type[BTC_MSG_MAX];
+static u8 btc_dbg_buf[100];
+
+/***************************************************
+ *             Debug related function
+ ***************************************************/
+static bool halbtc_is_bt_coexist_available(struct btc_coexist *btcoexist)
+{
+       if (!btcoexist->binded || NULL == btcoexist->adapter)
+               return false;
+
+       return true;
+}
+
+static bool halbtc_is_wifi_busy(struct rtl_priv *rtlpriv)
+{
+       if (rtlpriv->link_info.busytraffic)
+               return true;
+       else
+               return false;
+}
+
+static void halbtc_dbg_init(void)
+{
+       u8 i;
+
+       for (i = 0; i < BTC_MSG_MAX; i++)
+               btc_dbg_type[i] = 0;
+
+       btc_dbg_type[BTC_MSG_INTERFACE] =
+/*                     INTF_INIT                               | */
+/*                     INTF_NOTIFY                             | */
+                       0;
+
+       btc_dbg_type[BTC_MSG_ALGORITHM] =
+/*                     ALGO_BT_RSSI_STATE                      | */
+/*                     ALGO_WIFI_RSSI_STATE                    | */
+/*                     ALGO_BT_MONITOR                         | */
+/*                     ALGO_TRACE                              | */
+/*                     ALGO_TRACE_FW                           | */
+/*                     ALGO_TRACE_FW_DETAIL                    | */
+/*                     ALGO_TRACE_FW_EXEC                      | */
+/*                     ALGO_TRACE_SW                           | */
+/*                     ALGO_TRACE_SW_DETAIL                    | */
+/*                     ALGO_TRACE_SW_EXEC                      | */
+                       0;
+}
+
+static bool halbtc_is_bt40(struct rtl_priv *adapter)
+{
+       struct rtl_priv *rtlpriv = adapter;
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool is_ht40 = true;
+       enum ht_channel_width bw = rtlphy->current_chan_bw;
+
+       if (bw == HT_CHANNEL_WIDTH_20)
+               is_ht40 = false;
+       else if (bw == HT_CHANNEL_WIDTH_20_40)
+               is_ht40 = true;
+
+       return is_ht40;
+}
+
+static bool halbtc_legacy(struct rtl_priv *adapter)
+{
+       struct rtl_priv *rtlpriv = adapter;
+       struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+       bool is_legacy = false;
+
+       if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
+               is_legacy = true;
+
+       return is_legacy;
+}
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter)
+{
+       struct rtl_priv *rtlpriv = adapter;
+
+       if (rtlpriv->link_info.tx_busy_traffic)
+               return true;
+       else
+               return false;
+}
+
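+/* Maps the PHY bandwidth and wireless mode to a BTC_WIFI_BW_* value. */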
+static u32 halbtc_get_wifi_bw(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv =
+               (struct rtl_priv *)btcoexist->adapter;
+       u32 wifi_bw = BTC_WIFI_BW_HT20;
+
+       if (halbtc_is_bt40(rtlpriv)) {
+               wifi_bw = BTC_WIFI_BW_HT40;
+       } else {
+               if (halbtc_legacy(rtlpriv))
+                       wifi_bw = BTC_WIFI_BW_LEGACY;
+               else
+                       wifi_bw = BTC_WIFI_BW_HT20;
+       }
+       return wifi_bw;
+}
+
+static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_phy  *rtlphy = &(rtlpriv->phy);
+       u8 chnl = 1;
+
+       if (rtlphy->current_channel != 0)
+               chnl = rtlphy->current_channel;
+       BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
+                 "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+       return chnl;
+}
+
+static void halbtc_leave_lps(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv;
+       struct rtl_ps_ctl *ppsc;
+       bool ap_enable = false;
+
+       rtlpriv = btcoexist->adapter;
+       ppsc = rtl_psc(rtlpriv);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+                          &ap_enable);
+
+       if (ap_enable) {
+               pr_info("halbtc_leave_lps()<--dont leave lps under AP mode\n");
+               return;
+       }
+
+       btcoexist->bt_info.bt_ctrl_lps = true;
+       btcoexist->bt_info.bt_lps_on = false;
+}
+
+static void halbtc_enter_lps(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv;
+       struct rtl_ps_ctl *ppsc;
+       bool ap_enable = false;
+
+       rtlpriv = btcoexist->adapter;
+       ppsc = rtl_psc(rtlpriv);
+
+       btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+                          &ap_enable);
+
+       if (ap_enable) {
+               pr_info("halbtc_enter_lps()<--dont enter lps under AP mode\n");
+               return;
+       }
+
+       btcoexist->bt_info.bt_ctrl_lps = true;
+       btcoexist->bt_info.bt_lps_on = true;
+}
+
+static void halbtc_normal_lps(struct btc_coexist *btcoexist)
+{
+       if (btcoexist->bt_info.bt_ctrl_lps) {
+               btcoexist->bt_info.bt_lps_on = false;
+               btcoexist->bt_info.bt_ctrl_lps = false;
+       }
+}
+
+static void halbtc_leave_low_power(void)
+{
+}
+
+static void halbtc_nomal_low_power(void)
+{
+}
+
+static void halbtc_disable_low_power(void)
+{
+}
+
+static void halbtc_aggregation_check(void)
+{
+}
+
+static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist)
+{
+       return 0;
+}
+
+static s32 halbtc_get_wifi_rssi(struct rtl_priv *adapter)
+{
+       struct rtl_priv *rtlpriv = adapter;
+       s32     undec_sm_pwdb = 0;
+
+       if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+               undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+       else /* associated entry pwdb */
+               undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+       return undec_sm_pwdb;
+}
+
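+/*
+ * Query callback: fills out_buf, whose effective type (bool/s32/u32/u8)
+ * depends on get_type; returns false when the coex context is not bound.
+ */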
+static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtlpriv);
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       bool *bool_tmp = (bool *)out_buf;
+       int *s32_tmp = (int *)out_buf;
+       u32 *u32_tmp = (u32 *)out_buf;
+       u8 *u8_tmp = (u8 *)out_buf;
+       bool tmp = false;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return false;
+
+       switch (get_type) {
+       case BTC_GET_BL_HS_OPERATION:
+               *bool_tmp = false;
+               break;
+       case BTC_GET_BL_HS_CONNECTING:
+               *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_CONNECTED:
+               if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+                       tmp = true;
+               *bool_tmp = tmp;
+               break;
+       case BTC_GET_BL_WIFI_BUSY:
+               if (halbtc_is_wifi_busy(rtlpriv))
+                       *bool_tmp = true;
+               else
+                       *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_SCAN:
+               if (mac->act_scanning)
+                       *bool_tmp = true;
+               else
+                       *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_LINK:
+               if (mac->link_state == MAC80211_LINKING)
+                       *bool_tmp = true;
+               else
+                       *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_ROAM:      /*TODO*/
+               if (mac->link_state == MAC80211_LINKING)
+                       *bool_tmp = true;
+               else
+                       *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_4_WAY_PROGRESS:    /*TODO*/
+               *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_UNDER_5G:
+               *bool_tmp = false; /*TODO*/
+               break;
+       case BTC_GET_BL_WIFI_DHCP:      /*TODO*/
+               break;
+       case BTC_GET_BL_WIFI_SOFTAP_IDLE:
+               *bool_tmp = true;
+               break;
+       case BTC_GET_BL_WIFI_SOFTAP_LINKING:
+               *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_IN_EARLY_SUSPEND:
+               *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
+               *bool_tmp = false;
+               break;
+       case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
+               if (NO_ENCRYPTION == rtlpriv->sec.pairwise_enc_algorithm)
+                       *bool_tmp = false;
+               else
+                       *bool_tmp = true;
+               break;
+       case BTC_GET_BL_WIFI_UNDER_B_MODE:
+               *bool_tmp = false; /*TODO*/
+               break;
+       case BTC_GET_BL_EXT_SWITCH:
+               *bool_tmp = false;
+               break;
+       case BTC_GET_S4_WIFI_RSSI:
+               *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+               break;
+       case BTC_GET_S4_HS_RSSI:        /*TODO*/
+               *s32_tmp = halbtc_get_wifi_rssi(rtlpriv);
+               break;
+       case BTC_GET_U4_WIFI_BW:
+               *u32_tmp = halbtc_get_wifi_bw(btcoexist);
+               break;
+       case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
+               if (halbtc_is_wifi_uplink(rtlpriv))
+                       *u32_tmp = BTC_WIFI_TRAFFIC_TX;
+               else
+                       *u32_tmp = BTC_WIFI_TRAFFIC_RX;
+               break;
+       case BTC_GET_U4_WIFI_FW_VER:
+               *u32_tmp = rtlhal->fw_version;
+               break;
+       case BTC_GET_U4_BT_PATCH_VER:
+               *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
+               break;
+       case BTC_GET_U1_WIFI_DOT11_CHNL:
+               *u8_tmp = rtlphy->current_channel;
+               break;
+       case BTC_GET_U1_WIFI_CENTRAL_CHNL:
+               *u8_tmp = halbtc_get_wifi_central_chnl(btcoexist);
+               break;
+       case BTC_GET_U1_WIFI_HS_CHNL:
+               *u8_tmp = 1;/*BT_OperateChnl(rtlpriv);*/
+               break;
+       case BTC_GET_U1_MAC_PHY_MODE:
+               *u8_tmp = BTC_MP_UNKNOWN;
+               break;
+
+               /************* 1Ant **************/
+       case BTC_GET_U1_LPS_MODE:
+               *u8_tmp = btcoexist->pwr_mode_val[0];
+               break;
+
+       default:
+               break;
+       }
+
+       return true;
+}
+
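+/* Counterpart of halbtc_get(): the BTC_SET_* id either stores the value
+ * passed in in_buf into btcoexist->bt_info or triggers one of the action
+ * helpers (LPS control, aggregation check, etc.). */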
+static bool halbtc_set(void *void_btcoexist, u8 set_type, void *in_buf)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)void_btcoexist;
+       bool *bool_tmp = (bool *)in_buf;
+       u8 *u8_tmp = (u8 *)in_buf;
+       u32 *u32_tmp = (u32 *)in_buf;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return false;
+
+       switch (set_type) {
+       /* set some bool type variables. */
+       case BTC_SET_BL_BT_DISABLE:
+               btcoexist->bt_info.bt_disabled = *bool_tmp;
+               break;
+       case BTC_SET_BL_BT_TRAFFIC_BUSY:
+               btcoexist->bt_info.bt_busy = *bool_tmp;
+               break;
+       case BTC_SET_BL_BT_LIMITED_DIG:
+               btcoexist->bt_info.limited_dig = *bool_tmp;
+               break;
+       case BTC_SET_BL_FORCE_TO_ROAM:
+               btcoexist->bt_info.force_to_roam = *bool_tmp;
+               break;
+       case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
+               btcoexist->bt_info.reject_agg_pkt = *bool_tmp;
+               break;
+       case BTC_SET_BL_BT_CTRL_AGG_SIZE:
+               btcoexist->bt_info.b_bt_ctrl_buf_size = *bool_tmp;
+               break;
+       case BTC_SET_BL_INC_SCAN_DEV_NUM:
+               btcoexist->bt_info.increase_scan_dev_num = *bool_tmp;
+               break;
+               /* set some u1Byte type variables. */
+       case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
+               btcoexist->bt_info.rssi_adjust_for_agc_table_on = *u8_tmp;
+               break;
+       case BTC_SET_U1_AGG_BUF_SIZE:
+               btcoexist->bt_info.agg_buf_size = *u8_tmp;
+               break;
+               /* the following cases trigger actions */
+       case BTC_SET_ACT_GET_BT_RSSI:
+               /*BTHCI_SendGetBtRssiEvent(rtlpriv);*/
+               break;
+       case BTC_SET_ACT_AGGREGATE_CTRL:
+               halbtc_aggregation_check();
+               break;
+
+               /* 1Ant */
+       case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
+               btcoexist->bt_info.rssi_adjust_for_1ant_coex_type = *u8_tmp;
+               break;
+       case BTC_SET_UI_SCAN_SIG_COMPENSATION:
+       /*      rtlpriv->mlmepriv.scan_compensation = *u8_tmp;  */
+               break;
+       case BTC_SET_U1_1ANT_LPS:
+               btcoexist->bt_info.lps_1ant = *u8_tmp;
+               break;
+       case BTC_SET_U1_1ANT_RPWM:
+               btcoexist->bt_info.rpwm_1ant = *u8_tmp;
+               break;
+       /* the following cases trigger actions */
+       case BTC_SET_ACT_LEAVE_LPS:
+               halbtc_leave_lps(btcoexist);
+               break;
+       case BTC_SET_ACT_ENTER_LPS:
+               halbtc_enter_lps(btcoexist);
+               break;
+       case BTC_SET_ACT_NORMAL_LPS:
+               halbtc_normal_lps(btcoexist);
+               break;
+       case BTC_SET_ACT_DISABLE_LOW_POWER:
+               halbtc_disable_low_power();
+               break;
+       case BTC_SET_ACT_UPDATE_ra_mask:
+               btcoexist->bt_info.ra_mask = *u32_tmp;
+               break;
+       case BTC_SET_ACT_SEND_MIMO_PS:
+               break;
+       case BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT:
+               btcoexist->bt_info.force_exec_pwr_cmd_cnt++;
+               break;
+       case BTC_SET_ACT_CTRL_BT_INFO: /*wait for 8812/8821*/
+               break;
+       case BTC_SET_ACT_CTRL_BT_COEX:
+               break;
+       default:
+               break;
+       }
+
+       return true;
+}
+
+static void halbtc_display_coex_statistics(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_bt_link_info(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_bt_fw_info(struct btc_coexist *btcoexist)
+{
+}
+
+static void halbtc_display_fw_pwr_mode_cmd(struct btc_coexist *btcoexist)
+{
+}
+
+/************************************************************
+ *             IO related functions
+ ************************************************************/
+static u8 halbtc_read_1byte(void *bt_context, u32 reg_addr)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       return  rtl_read_byte(rtlpriv, reg_addr);
+}
+
+static u16 halbtc_read_2byte(void *bt_context, u32 reg_addr)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       return  rtl_read_word(rtlpriv, reg_addr);
+}
+
+static u32 halbtc_read_4byte(void *bt_context, u32 reg_addr)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       return  rtl_read_dword(rtlpriv, reg_addr);
+}
+
+static void halbtc_write_1byte(void *bt_context, u32 reg_addr, u8 data)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
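+/* Masked byte write: unless bit_mask covers the whole register (MASKDWORD),
+ * read the current value, shift the new data up to the lowest set mask bit
+ * and merge it in before writing back. */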
+static void halbtc_bitmask_write_1byte(void *bt_context, u32 reg_addr,
+                                      u32 bit_mask, u8 data)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 original_value, bit_shift = 0;
+       u8 i;
+
+       if (bit_mask != MASKDWORD) { /* masked write: read-modify-write */
+               original_value = rtl_read_byte(rtlpriv, reg_addr);
+               for (i = 0; i <= 7; i++) {
+                       if ((bit_mask>>i) & 0x1)
+                               break;
+               }
+               bit_shift = i;
+               data = (original_value & (~bit_mask)) |
+                       ((data << bit_shift) & bit_mask);
+       }
+       rtl_write_byte(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_write_2byte(void *bt_context, u32 reg_addr, u16 data)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       rtl_write_word(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_write_4byte(void *bt_context, u32 reg_addr, u32 data)
+{
+       struct btc_coexist *btcoexist =
+               (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       rtl_write_dword(rtlpriv, reg_addr, data);
+}
+
+static void halbtc_set_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask,
+                            u32 data)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       rtl_set_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask, data);
+}
+
+static u32 halbtc_get_bbreg(void *bt_context, u32 reg_addr, u32 bit_mask)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       return rtl_get_bbreg(rtlpriv->mac80211.hw, reg_addr, bit_mask);
+}
+
+static void halbtc_set_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+                            u32 bit_mask, u32 data)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       rtl_set_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask, data);
+}
+
+static u32 halbtc_get_rfreg(void *bt_context, u8 rf_path, u32 reg_addr,
+                           u32 bit_mask)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       return rtl_get_rfreg(rtlpriv->mac80211.hw, rf_path, reg_addr, bit_mask);
+}
+
+static void halbtc_fill_h2c_cmd(void *bt_context, u8 element_id,
+                               u32 cmd_len, u8 *cmd_buf)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, element_id,
+                                       cmd_len, cmd_buf);
+}
+
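+/* Dispatch a debug-display request; the per-topic halbtc_display_*()
+ * helpers above are still empty stubs in this version. */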
+static void halbtc_display_dbg_msg(void *bt_context, u8 disp_type)
+{
+       struct btc_coexist *btcoexist = (struct btc_coexist *)bt_context;
+       switch (disp_type) {
+       case BTC_DBG_DISP_COEX_STATISTICS:
+               halbtc_display_coex_statistics(btcoexist);
+               break;
+       case BTC_DBG_DISP_BT_LINK_INFO:
+               halbtc_display_bt_link_info(btcoexist);
+               break;
+       case BTC_DBG_DISP_BT_FW_VER:
+               halbtc_display_bt_fw_info(btcoexist);
+               break;
+       case BTC_DBG_DISP_FW_PWR_MODE_CMD:
+               halbtc_display_fw_pwr_mode_cmd(btcoexist);
+               break;
+       default:
+               break;
+       }
+}
+
+/*****************************************************************
+ *         Extern functions called by other modules
+ *****************************************************************/
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter)
+{
+       struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+       btcoexist->statistics.cnt_bind++;
+
+       halbtc_dbg_init();
+
+       if (btcoexist->binded)
+               return false;
+       else
+               btcoexist->binded = true;
+
+#if (defined(CONFIG_PCI_HCI))
+       btcoexist->chip_interface = BTC_INTF_PCI;
+#elif (defined(CONFIG_USB_HCI))
+       btcoexist->chip_interface = BTC_INTF_USB;
+#elif (defined(CONFIG_SDIO_HCI))
+       btcoexist->chip_interface = BTC_INTF_SDIO;
+#elif (defined(CONFIG_GSPI_HCI))
+       btcoexist->chip_interface = BTC_INTF_GSPI;
+#else
+       btcoexist->chip_interface = BTC_INTF_UNKNOWN;
+#endif
+
+       if (NULL == btcoexist->adapter)
+               btcoexist->adapter = adapter;
+
+       btcoexist->stack_info.profile_notified = false;
+
+       btcoexist->btc_read_1byte = halbtc_read_1byte;
+       btcoexist->btc_write_1byte = halbtc_write_1byte;
+       btcoexist->btc_write_1byte_bitmask = halbtc_bitmask_write_1byte;
+       btcoexist->btc_read_2byte = halbtc_read_2byte;
+       btcoexist->btc_write_2byte = halbtc_write_2byte;
+       btcoexist->btc_read_4byte = halbtc_read_4byte;
+       btcoexist->btc_write_4byte = halbtc_write_4byte;
+
+       btcoexist->btc_set_bb_reg = halbtc_set_bbreg;
+       btcoexist->btc_get_bb_reg = halbtc_get_bbreg;
+
+       btcoexist->btc_set_rf_reg = halbtc_set_rfreg;
+       btcoexist->btc_get_rf_reg = halbtc_get_rfreg;
+
+       btcoexist->btc_fill_h2c = halbtc_fill_h2c_cmd;
+       btcoexist->btc_disp_dbg_msg = halbtc_display_dbg_msg;
+
+       btcoexist->btc_get = halbtc_get;
+       btcoexist->btc_set = halbtc_set;
+
+       btcoexist->cli_buf = &btc_dbg_buf[0];
+
+       btcoexist->bt_info.b_bt_ctrl_buf_size = false;
+       btcoexist->bt_info.agg_buf_size = 5;
+
+       btcoexist->bt_info.increase_scan_dev_num = false;
+       return true;
+}
+
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       btcoexist->statistics.cnt_init_hw_config++;
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_init_hwconfig(btcoexist);
+}
+
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       btcoexist->statistics.cnt_init_coex_dm++;
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_init_coex_dm(btcoexist);
+
+       btcoexist->initilized = true;
+}
+
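+/* The *_notify() wrappers below bump a statistics counter, return early
+ * when coexistence is unavailable (most also honour manual_control) and
+ * forward the event to the RTL8723BE-specific handler, usually bracketed
+ * by halbtc_leave_low_power()/halbtc_nomal_low_power(). */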
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 ips_type;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_ips_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       if (ERFOFF == type)
+               ips_type = BTC_IPS_ENTER;
+       else
+               ips_type = BTC_IPS_LEAVE;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_ips_notify(btcoexist, ips_type);
+
+       halbtc_nomal_low_power();
+}
+
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 lps_type;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_lps_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       if (EACTIVE == type)
+               lps_type = BTC_LPS_DISABLE;
+       else
+               lps_type = BTC_LPS_ENABLE;
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_lps_notify(btcoexist, lps_type);
+}
+
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 scan_type;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_scan_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       if (type)
+               scan_type = BTC_SCAN_START;
+       else
+               scan_type = BTC_SCAN_FINISH;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_scan_notify(btcoexist, scan_type);
+
+       halbtc_nomal_low_power();
+}
+
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 asso_type;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_connect_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       if (action)
+               asso_type = BTC_ASSOCIATE_START;
+       else
+               asso_type = BTC_ASSOCIATE_FINISH;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_connect_notify(btcoexist, asso_type);
+}
+
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+                                enum _RT_MEDIA_STATUS media_status)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 status;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_media_status_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       if (RT_MEDIA_CONNECT == media_status)
+               status = BTC_MEDIA_CONNECT;
+       else
+               status = BTC_MEDIA_DISCONNECT;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               btc8723b_med_stat_notify(btcoexist, status);
+
+       halbtc_nomal_low_power();
+}
+
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 packet_type;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_special_packet_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       packet_type = BTC_PACKET_DHCP;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_special_packet_notify(btcoexist,
+                                                        packet_type);
+
+       halbtc_nomal_low_power();
+}
+
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist,
+                            u8 *tmp_buf, u8 length)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_bt_info_notify++;
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_bt_info_notify(btcoexist, tmp_buf, length);
+}
+
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 stack_op_type;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_stack_operation_notify++;
+       if (btcoexist->manual_control)
+               return;
+
+       stack_op_type = BTC_STACK_OP_NONE;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_stack_operation_notify(btcoexist,
+                                                         stack_op_type);
+
+       halbtc_nomal_low_power();
+}
+
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_halt_notify(btcoexist);
+}
+
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
+{
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+}
+
+void exhalbtc_periodical(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_periodical++;
+
+       halbtc_leave_low_power();
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_periodical(btcoexist);
+
+       halbtc_nomal_low_power();
+}
+
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist,
+                         u8 code, u8 len, u8 *data)
+{
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+       btcoexist->statistics.cnt_dbg_ctrl++;
+}
+
+void exhalbtc_stack_update_profile_info(void)
+{
+}
+
+void exhalbtc_update_min_bt_rssi(char bt_rssi)
+{
+       struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       btcoexist->stack_info.min_bt_rssi = bt_rssi;
+}
+
+void exhalbtc_set_hci_version(u16 hci_version)
+{
+       struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       btcoexist->stack_info.hci_version = hci_version;
+}
+
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version)
+{
+       struct btc_coexist *btcoexist = &gl_bt_coexist;
+
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       btcoexist->bt_info.bt_real_fw_ver = bt_patch_version;
+       btcoexist->bt_info.bt_hci_ver = bt_hci_version;
+}
+
+void exhalbtc_set_bt_exist(bool bt_exist)
+{
+       gl_bt_coexist.board_info.bt_exist = bt_exist;
+}
+
+void exhalbtc_set_chip_type(u8 chip_type)
+{
+       switch (chip_type) {
+       default:
+       case BT_2WIRE:
+       case BT_ISSC_3WIRE:
+       case BT_ACCEL:
+       case BT_RTL8756:
+               gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_UNDEF;
+               break;
+       case BT_CSR_BC4:
+               gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC4;
+               break;
+       case BT_CSR_BC8:
+               gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_CSR_BC8;
+               break;
+       case BT_RTL8723A:
+               gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723A;
+               break;
+       case BT_RTL8821A:
+               gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8821;
+               break;
+       case BT_RTL8723B:
+               gl_bt_coexist.board_info.bt_chip_type = BTC_CHIP_RTL8723B;
+               break;
+       }
+}
+
+void exhalbtc_set_ant_num(u8 type, u8 ant_num)
+{
+       if (BT_COEX_ANT_TYPE_PG == type) {
+               gl_bt_coexist.board_info.pg_ant_num = ant_num;
+               gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+       } else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+               gl_bt_coexist.board_info.btdm_ant_num = ant_num;
+       }
+}
+
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       if (!halbtc_is_bt_coexist_available(btcoexist))
+               return;
+
+       if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE)
+               ex_halbtc8723b2ant_display_coex_info(btcoexist);
+}
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
new file mode 100644 (file)
index 0000000..871fc3c
--- /dev/null
@@ -0,0 +1,559 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2012  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+#ifndef        __HALBTC_OUT_SRC_H__
+#define __HALBTC_OUT_SRC_H__
+
+#include       "../wifi.h"
+
+#define                NORMAL_EXEC                             false
+#define                FORCE_EXEC                              true
+
+#define                BTC_RF_A                                RF90_PATH_A
+#define                BTC_RF_B                                RF90_PATH_B
+#define                BTC_RF_C                                RF90_PATH_C
+#define                BTC_RF_D                                RF90_PATH_D
+
+#define                BTC_SMSP                                SINGLEMAC_SINGLEPHY
+#define                BTC_DMDP                                DUALMAC_DUALPHY
+#define                BTC_DMSP                                DUALMAC_SINGLEPHY
+#define                BTC_MP_UNKNOWN                          0xff
+
+#define                IN
+#define                OUT
+
+#define                BT_TMP_BUF_SIZE                         100
+
+#define                BT_COEX_ANT_TYPE_PG                     0
+#define                BT_COEX_ANT_TYPE_ANTDIV                 1
+#define                BT_COEX_ANT_TYPE_DETECTED               2
+
+#define                BTC_MIMO_PS_STATIC                      0
+#define                BTC_MIMO_PS_DYNAMIC                     1
+
+#define                BTC_RATE_DISABLE                        0
+#define                BTC_RATE_ENABLE                         1
+
+#define                BTC_ANT_PATH_WIFI                       0
+#define                BTC_ANT_PATH_BT                         1
+#define                BTC_ANT_PATH_PTA                        2
+
+enum btc_chip_interface {
+       BTC_INTF_UNKNOWN        = 0,
+       BTC_INTF_PCI            = 1,
+       BTC_INTF_USB            = 2,
+       BTC_INTF_SDIO           = 3,
+       BTC_INTF_GSPI           = 4,
+       BTC_INTF_MAX
+};
+
+enum BTC_CHIP_TYPE {
+       BTC_CHIP_UNDEF          = 0,
+       BTC_CHIP_CSR_BC4        = 1,
+       BTC_CHIP_CSR_BC8        = 2,
+       BTC_CHIP_RTL8723A       = 3,
+       BTC_CHIP_RTL8821        = 4,
+       BTC_CHIP_RTL8723B       = 5,
+       BTC_CHIP_MAX
+};
+
+enum BTC_MSG_TYPE {
+       BTC_MSG_INTERFACE       = 0x0,
+       BTC_MSG_ALGORITHM       = 0x1,
+       BTC_MSG_MAX
+};
+extern u32 btc_dbg_type[];
+
+/* following is for BTC_MSG_INTERFACE */
+#define                INTF_INIT                               BIT0
+#define                INTF_NOTIFY                             BIT2
+
+/* following is for BTC_MSG_ALGORITHM */
+#define                ALGO_BT_RSSI_STATE                      BIT0
+#define                ALGO_WIFI_RSSI_STATE                    BIT1
+#define                ALGO_BT_MONITOR                         BIT2
+#define                ALGO_TRACE                              BIT3
+#define                ALGO_TRACE_FW                           BIT4
+#define                ALGO_TRACE_FW_DETAIL                    BIT5
+#define                ALGO_TRACE_FW_EXEC                      BIT6
+#define                ALGO_TRACE_SW                           BIT7
+#define                ALGO_TRACE_SW_DETAIL                    BIT8
+#define                ALGO_TRACE_SW_EXEC                      BIT9
+
+#define        CL_SPRINTF      snprintf
+#define        CL_PRINTF       printk
+
+#define        BTC_PRINT(dbgtype, dbgflag, printstr, ...)              \
+       do {                                                    \
+               if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+                       printk(printstr, ##__VA_ARGS__);        \
+               }                                               \
+       } while (0)
+
+#define        BTC_PRINT_F(dbgtype, dbgflag, printstr, ...)            \
+       do {                                                    \
+               if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
+                       pr_info("%s: ", __func__);      \
+                       printk(printstr, ##__VA_ARGS__);        \
+               }                                               \
+       } while (0)
+
+#define        BTC_PRINT_ADDR(dbgtype, dbgflag, printstr, _ptr)        \
+       do {                                                    \
+               if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {        \
+                       int __i;                                \
+                       u8 *__ptr = (u8 *)_ptr;                 \
+                       printk printstr;                        \
+                       for (__i = 0; __i < 6; __i++)           \
+                               printk("%02X%s", __ptr[__i], (__i == 5) ? \
+                                      "" : "-");               \
+                       pr_info("\n");                          \
+               }                                               \
+       } while (0)
+
+#define BTC_PRINT_DATA(dbgtype, dbgflag, _titlestring, _hexdata, _hexdatalen) \
+       do {                                                            \
+               if (unlikely(btc_dbg_type[dbgtype] & dbgflag))  {       \
+                       int __i;                                        \
+                       u8 *__ptr = (u8 *)_hexdata;                     \
+                       printk(_titlestring);                           \
+                       for (__i = 0; __i < (int)_hexdatalen; __i++) {  \
+                               printk("%02X%s", __ptr[__i], (((__i + 1) % 4) \
+                                                       == 0) ? "  " : " ");\
+                               if (((__i + 1) % 16) == 0)              \
+                                       printk("\n");                   \
+                       }                                               \
+                       pr_debug("\n");                                 \
+               }                                                       \
+       } while (0)
+
+enum btc_power_save_type {
+       BTC_PS_WIFI_NATIVE = 0,
+       BTC_PS_LPS_ON = 1,
+       BTC_PS_LPS_OFF = 2,
+       BTC_PS_LPS_MAX
+};
+
+struct btc_board_info {
+       /* The following is some board information */
+       u8 bt_chip_type;
+       u8 pg_ant_num;  /* pg ant number */
+       u8 btdm_ant_num;        /* ant number for btdm */
+       u8 btdm_ant_pos;
+       bool bt_exist;
+};
+
+enum btc_dbg_opcode {
+       BTC_DBG_SET_COEX_NORMAL = 0x0,
+       BTC_DBG_SET_COEX_WIFI_ONLY = 0x1,
+       BTC_DBG_SET_COEX_BT_ONLY = 0x2,
+       BTC_DBG_MAX
+};
+
+enum btc_rssi_state {
+       BTC_RSSI_STATE_HIGH = 0x0,
+       BTC_RSSI_STATE_MEDIUM = 0x1,
+       BTC_RSSI_STATE_LOW = 0x2,
+       BTC_RSSI_STATE_STAY_HIGH = 0x3,
+       BTC_RSSI_STATE_STAY_MEDIUM = 0x4,
+       BTC_RSSI_STATE_STAY_LOW = 0x5,
+       BTC_RSSI_MAX
+};
+
+enum btc_wifi_role {
+       BTC_ROLE_STATION = 0x0,
+       BTC_ROLE_AP = 0x1,
+       BTC_ROLE_IBSS = 0x2,
+       BTC_ROLE_HS_MODE = 0x3,
+       BTC_ROLE_MAX
+};
+
+enum btc_wifi_bw_mode {
+       BTC_WIFI_BW_LEGACY = 0x0,
+       BTC_WIFI_BW_HT20 = 0x1,
+       BTC_WIFI_BW_HT40 = 0x2,
+       BTC_WIFI_BW_MAX
+};
+
+enum btc_wifi_traffic_dir {
+       BTC_WIFI_TRAFFIC_TX = 0x0,
+       BTC_WIFI_TRAFFIC_RX = 0x1,
+       BTC_WIFI_TRAFFIC_MAX
+};
+
+enum btc_wifi_pnp {
+       BTC_WIFI_PNP_WAKE_UP = 0x0,
+       BTC_WIFI_PNP_SLEEP = 0x1,
+       BTC_WIFI_PNP_MAX
+};
+
+
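+/* Query ids used with the btc_get() hook; the prefix in each name encodes
+ * how the out_buf argument is interpreted (BL = bool, S4 = s32, U4 = u32,
+ * U1 = u8). */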
+enum btc_get_type {
+       /* type bool */
+       BTC_GET_BL_HS_OPERATION,
+       BTC_GET_BL_HS_CONNECTING,
+       BTC_GET_BL_WIFI_CONNECTED,
+       BTC_GET_BL_WIFI_BUSY,
+       BTC_GET_BL_WIFI_SCAN,
+       BTC_GET_BL_WIFI_LINK,
+       BTC_GET_BL_WIFI_DHCP,
+       BTC_GET_BL_WIFI_SOFTAP_IDLE,
+       BTC_GET_BL_WIFI_SOFTAP_LINKING,
+       BTC_GET_BL_WIFI_IN_EARLY_SUSPEND,
+       BTC_GET_BL_WIFI_ROAM,
+       BTC_GET_BL_WIFI_4_WAY_PROGRESS,
+       BTC_GET_BL_WIFI_UNDER_5G,
+       BTC_GET_BL_WIFI_AP_MODE_ENABLE,
+       BTC_GET_BL_WIFI_ENABLE_ENCRYPTION,
+       BTC_GET_BL_WIFI_UNDER_B_MODE,
+       BTC_GET_BL_EXT_SWITCH,
+
+       /* type s4Byte */
+       BTC_GET_S4_WIFI_RSSI,
+       BTC_GET_S4_HS_RSSI,
+
+       /* type u32 */
+       BTC_GET_U4_WIFI_BW,
+       BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
+       BTC_GET_U4_WIFI_FW_VER,
+       BTC_GET_U4_BT_PATCH_VER,
+
+       /* type u1Byte */
+       BTC_GET_U1_WIFI_DOT11_CHNL,
+       BTC_GET_U1_WIFI_CENTRAL_CHNL,
+       BTC_GET_U1_WIFI_HS_CHNL,
+       BTC_GET_U1_MAC_PHY_MODE,
+
+       /* for 1Ant */
+       BTC_GET_U1_LPS_MODE,
+       BTC_GET_BL_BT_SCO_BUSY,
+
+       /* for test mode */
+       BTC_GET_DRIVER_TEST_CFG,
+       BTC_GET_MAX
+};
+
+
+enum btc_set_type {
+       /* type bool */
+       BTC_SET_BL_BT_DISABLE,
+       BTC_SET_BL_BT_TRAFFIC_BUSY,
+       BTC_SET_BL_BT_LIMITED_DIG,
+       BTC_SET_BL_FORCE_TO_ROAM,
+       BTC_SET_BL_TO_REJ_AP_AGG_PKT,
+       BTC_SET_BL_BT_CTRL_AGG_SIZE,
+       BTC_SET_BL_INC_SCAN_DEV_NUM,
+
+       /* type u1Byte */
+       BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON,
+       BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE,
+       BTC_SET_UI_SCAN_SIG_COMPENSATION,
+       BTC_SET_U1_AGG_BUF_SIZE,
+
+       /* type trigger some action */
+       BTC_SET_ACT_GET_BT_RSSI,
+       BTC_SET_ACT_AGGREGATE_CTRL,
+
+       /********* for 1Ant **********/
+       /* type bool */
+       BTC_SET_BL_BT_SCO_BUSY,
+       /* type u1Byte */
+       BTC_SET_U1_1ANT_LPS,
+       BTC_SET_U1_1ANT_RPWM,
+       /* type trigger some action */
+       BTC_SET_ACT_LEAVE_LPS,
+       BTC_SET_ACT_ENTER_LPS,
+       BTC_SET_ACT_NORMAL_LPS,
+       BTC_SET_ACT_INC_FORCE_EXEC_PWR_CMD_CNT,
+       BTC_SET_ACT_DISABLE_LOW_POWER,
+       BTC_SET_ACT_UPDATE_ra_mask,
+       BTC_SET_ACT_SEND_MIMO_PS,
+       /* BT Coex related */
+       BTC_SET_ACT_CTRL_BT_INFO,
+       BTC_SET_ACT_CTRL_BT_COEX,
+       /***************************/
+       BTC_SET_MAX
+};
+
+enum btc_dbg_disp_type {
+       BTC_DBG_DISP_COEX_STATISTICS = 0x0,
+       BTC_DBG_DISP_BT_LINK_INFO = 0x1,
+       BTC_DBG_DISP_BT_FW_VER = 0x2,
+       BTC_DBG_DISP_FW_PWR_MODE_CMD = 0x3,
+       BTC_DBG_DISP_MAX
+};
+
+enum btc_notify_type_ips {
+       BTC_IPS_LEAVE = 0x0,
+       BTC_IPS_ENTER = 0x1,
+       BTC_IPS_MAX
+};
+
+enum btc_notify_type_lps {
+       BTC_LPS_DISABLE = 0x0,
+       BTC_LPS_ENABLE = 0x1,
+       BTC_LPS_MAX
+};
+
+enum btc_notify_type_scan {
+       BTC_SCAN_FINISH = 0x0,
+       BTC_SCAN_START = 0x1,
+       BTC_SCAN_MAX
+};
+
+enum btc_notify_type_associate {
+       BTC_ASSOCIATE_FINISH = 0x0,
+       BTC_ASSOCIATE_START = 0x1,
+       BTC_ASSOCIATE_MAX
+};
+
+enum btc_notify_type_media_status {
+       BTC_MEDIA_DISCONNECT = 0x0,
+       BTC_MEDIA_CONNECT = 0x1,
+       BTC_MEDIA_MAX
+};
+
+enum btc_notify_type_special_packet {
+       BTC_PACKET_UNKNOWN = 0x0,
+       BTC_PACKET_DHCP = 0x1,
+       BTC_PACKET_ARP = 0x2,
+       BTC_PACKET_EAPOL = 0x3,
+       BTC_PACKET_MAX
+};
+
+enum btc_notify_type_stack_operation {
+       BTC_STACK_OP_NONE = 0x0,
+       BTC_STACK_OP_INQ_PAGE_PAIR_START = 0x1,
+       BTC_STACK_OP_INQ_PAGE_PAIR_FINISH = 0x2,
+       BTC_STACK_OP_MAX
+};
+
+
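+/* Callback types for the hooks that exhalbtc_initlize_variables() wires up
+ * in struct btc_coexist, giving the chip-agnostic coexistence code access
+ * to register I/O, H2C commands and driver state without calling rtlwifi
+ * directly. */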
+typedef u8 (*bfp_btc_r1)(void *btc_context, u32 reg_addr);
+
+typedef u16 (*bfp_btc_r2)(void *btc_context, u32 reg_addr);
+
+typedef u32 (*bfp_btc_r4)(void *btc_context, u32 reg_addr);
+
+typedef void (*bfp_btc_w1)(void *btc_context, u32 reg_addr, u8 data);
+
+typedef void (*bfp_btc_w1_bit_mak)(void *btc_context, u32 reg_addr,
+                                  u32 bit_mask, u8 data1b);
+
+typedef void (*bfp_btc_w2)(void *btc_context, u32 reg_addr, u16 data);
+
+typedef void (*bfp_btc_w4)(void *btc_context, u32 reg_addr, u32 data);
+
+typedef void (*bfp_btc_wr_1byte_bit_mask)(void *btc_context, u32 reg_addr,
+                                         u8 bit_mask, u8 data);
+
+typedef void (*bfp_btc_set_bb_reg)(void *btc_context, u32 reg_addr,
+                                  u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_bb_reg)(void *btc_context, u32 reg_addr,
+                                 u32 bit_mask);
+
+typedef void (*bfp_btc_set_rf_reg)(void *btc_context, u8 rf_path, u32 reg_addr,
+                                  u32 bit_mask, u32 data);
+
+typedef u32 (*bfp_btc_get_rf_reg)(void *btc_context, u8 rf_path,
+                                 u32 reg_addr, u32 bit_mask);
+
+typedef void (*bfp_btc_fill_h2c)(void *btc_context, u8 element_id,
+                                u32 cmd_len, u8 *cmd_buffer);
+
+typedef        bool (*bfp_btc_get)(void *btcoexist, u8 get_type, void *out_buf);
+
+typedef        bool (*bfp_btc_set)(void *btcoexist, u8 set_type, void *in_buf);
+
+typedef void (*bfp_btc_disp_dbg_msg)(void *btcoexist, u8 disp_type);
+
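+/* BT state shared between the rtlwifi driver and the coexistence
+ * algorithm; mostly written through halbtc_set() and read back by the
+ * driver via the rtl_btc_is_*() helpers. */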
+struct btc_bt_info {
+       bool bt_disabled;
+       u8 rssi_adjust_for_agc_table_on;
+       u8 rssi_adjust_for_1ant_coex_type;
+       bool bt_busy;
+       u8 agg_buf_size;
+       bool limited_dig;
+       bool reject_agg_pkt;
+       bool b_bt_ctrl_buf_size;
+       bool increase_scan_dev_num;
+       u16 bt_hci_ver;
+       u16 bt_real_fw_ver;
+       u8 bt_fw_ver;
+
+       /* the following is for 1Ant solution */
+       bool bt_ctrl_lps;
+       bool bt_pwr_save_mode;
+       bool bt_lps_on;
+       bool force_to_roam;
+       u8 force_exec_pwr_cmd_cnt;
+       u8 lps_1ant;
+       u8 rpwm_1ant;
+       u32 ra_mask;
+};
+
+struct btc_stack_info {
+       bool profile_notified;
+       u16 hci_version;        /* stack hci version */
+       u8 num_of_link;
+       bool bt_link_exist;
+       bool sco_exist;
+       bool acl_exist;
+       bool a2dp_exist;
+       bool hid_exist;
+       u8 num_of_hid;
+       bool pan_exist;
+       bool unknown_acl_exist;
+       char min_bt_rssi;
+};
+
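+/* Simple call counters bumped by the exhalbtc_*() entry points. */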
+struct btc_statistics {
+       u32 cnt_bind;
+       u32 cnt_init_hw_config;
+       u32 cnt_init_coex_dm;
+       u32 cnt_ips_notify;
+       u32 cnt_lps_notify;
+       u32 cnt_scan_notify;
+       u32 cnt_connect_notify;
+       u32 cnt_media_status_notify;
+       u32 cnt_special_packet_notify;
+       u32 cnt_bt_info_notify;
+       u32 cnt_periodical;
+       u32 cnt_stack_operation_notify;
+       u32 cnt_dbg_ctrl;
+};
+
+struct btc_bt_link_info {
+       bool bt_link_exist;
+       bool sco_exist;
+       bool sco_only;
+       bool a2dp_exist;
+       bool a2dp_only;
+       bool hid_exist;
+       bool hid_only;
+       bool pan_exist;
+       bool pan_only;
+};
+
+enum btc_antenna_pos {
+       BTC_ANTENNA_AT_MAIN_PORT = 0x1,
+       BTC_ANTENNA_AT_AUX_PORT = 0x2,
+};
+
+struct btc_coexist {
+       /* make sure only one adapter can bind the data context  */
+       bool binded;
+       /* default adapter */
+       void *adapter;
+       struct btc_board_info board_info;
+       /* some bt info referenced by non-bt module */
+       struct btc_bt_info bt_info;
+       struct btc_stack_info stack_info;
+       enum btc_chip_interface chip_interface;
+       struct btc_bt_link_info bt_link_info;
+
+       bool initilized;
+       bool stop_coex_dm;
+       bool manual_control;
+       u8 *cli_buf;
+       struct btc_statistics statistics;
+       u8 pwr_mode_val[10];
+
+       /* function pointers - io related */
+       bfp_btc_r1 btc_read_1byte;
+       bfp_btc_w1 btc_write_1byte;
+       bfp_btc_w1_bit_mak btc_write_1byte_bitmask;
+       bfp_btc_r2 btc_read_2byte;
+       bfp_btc_w2 btc_write_2byte;
+       bfp_btc_r4 btc_read_4byte;
+       bfp_btc_w4 btc_write_4byte;
+
+       bfp_btc_set_bb_reg btc_set_bb_reg;
+       bfp_btc_get_bb_reg btc_get_bb_reg;
+
+
+       bfp_btc_set_rf_reg btc_set_rf_reg;
+       bfp_btc_get_rf_reg btc_get_rf_reg;
+
+       bfp_btc_fill_h2c btc_fill_h2c;
+
+       bfp_btc_disp_dbg_msg btc_disp_dbg_msg;
+
+       bfp_btc_get btc_get;
+       bfp_btc_set btc_set;
+};
+
+bool halbtc_is_wifi_uplink(struct rtl_priv *adapter);
+
+extern struct btc_coexist gl_bt_coexist;
+
+bool exhalbtc_initlize_variables(struct rtl_priv *adapter);
+void exhalbtc_init_hw_config(struct btc_coexist *btcoexist);
+void exhalbtc_init_coex_dm(struct btc_coexist *btcoexist);
+void exhalbtc_ips_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_lps_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_scan_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_connect_notify(struct btc_coexist *btcoexist, u8 action);
+void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist,
+                                enum _RT_MEDIA_STATUS media_status);
+void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 pkt_type);
+void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf,
+                            u8 length);
+void exhalbtc_stack_operation_notify(struct btc_coexist *btcoexist, u8 type);
+void exhalbtc_halt_notify(struct btc_coexist *btcoexist);
+void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state);
+void exhalbtc_periodical(struct btc_coexist *btcoexist);
+void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len,
+                         u8 *data);
+void exhalbtc_stack_update_profile_info(void);
+void exhalbtc_set_hci_version(u16 hci_version);
+void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+void exhalbtc_update_min_bt_rssi(char bt_rssi);
+void exhalbtc_set_bt_exist(bool bt_exist);
+void exhalbtc_set_chip_type(u8 chip_type);
+void exhalbtc_set_ant_num(u8 type, u8 ant_num);
+void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+                                 u8 *rssi_wifi, u8 *rssi_bt);
+void exhalbtc_lps_leave(struct btc_coexist *btcoexist);
+void exhalbtc_low_wifi_traffic_notify(struct btc_coexist *btcoexist);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
new file mode 100644 (file)
index 0000000..0ab94fe
--- /dev/null
@@ -0,0 +1,218 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2013  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "rtl_btc.h"
+#include "halbt_precomp.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
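+/* Ops table handed back by rtl_btc_get_ops_pointer(); most hooks forward
+ * to the matching exhalbtc_* entry point on the global gl_bt_coexist
+ * context. */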
+static struct rtl_btc_ops rtl_btc_operation = {
+       .btc_init_variables = rtl_btc_init_variables,
+       .btc_init_hal_vars = rtl_btc_init_hal_vars,
+       .btc_init_hw_config = rtl_btc_init_hw_config,
+       .btc_ips_notify = rtl_btc_ips_notify,
+       .btc_scan_notify = rtl_btc_scan_notify,
+       .btc_connect_notify = rtl_btc_connect_notify,
+       .btc_mediastatus_notify = rtl_btc_mediastatus_notify,
+       .btc_periodical = rtl_btc_periodical,
+       .btc_halt_notify = rtl_btc_halt_notify,
+       .btc_btinfo_notify = rtl_btc_btinfo_notify,
+       .btc_is_limited_dig = rtl_btc_is_limited_dig,
+       .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo,
+       .btc_is_bt_disabled = rtl_btc_is_bt_disabled,
+};
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv)
+{
+       exhalbtc_initlize_variables(rtlpriv);
+}
+
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+{
+       u8 ant_num;
+       u8 bt_exist;
+       u8 bt_type;
+
+       ant_num = rtl_get_hwpg_ant_num(rtlpriv);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                "%s, antNum is %d\n", __func__, ant_num);
+
+       bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                "%s, bt_exist is %d\n", __func__, bt_exist);
+       exhalbtc_set_bt_exist(bt_exist);
+
+       bt_type = rtl_get_hwpg_bt_type(rtlpriv);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s, bt_type is %d\n",
+                __func__, bt_type);
+       exhalbtc_set_chip_type(bt_type);
+
+       exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
+}
+
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+{
+       exhalbtc_init_hw_config(&gl_bt_coexist);
+       exhalbtc_init_coex_dm(&gl_bt_coexist);
+}
+
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type)
+{
+       exhalbtc_ips_notify(&gl_bt_coexist, type);
+}
+
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype)
+{
+       exhalbtc_scan_notify(&gl_bt_coexist, scantype);
+}
+
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action)
+{
+       exhalbtc_connect_notify(&gl_bt_coexist, action);
+}
+
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
+                               enum _RT_MEDIA_STATUS mstatus)
+{
+       exhalbtc_mediastatus_notify(&gl_bt_coexist, mstatus);
+}
+
+void rtl_btc_periodical(struct rtl_priv *rtlpriv)
+{
+       exhalbtc_periodical(&gl_bt_coexist);
+}
+
+void rtl_btc_halt_notify(void)
+{
+       exhalbtc_halt_notify(&gl_bt_coexist);
+}
+
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
+{
+       exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length);
+}
+
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv)
+{
+       return gl_bt_coexist.bt_info.limited_dig;
+}
+
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv)
+{
+       bool bt_change_edca = false;
+       u32 cur_edca_val;
+       u32 edca_bt_hs_uplink = 0x5ea42b, edca_bt_hs_downlink = 0x5ea42b;
+       u32 edca_hs = 0;
+       u32 edca_addr = 0x504;
+
+       cur_edca_val = rtl_read_dword(rtlpriv, edca_addr);
+       if (halbtc_is_wifi_uplink(rtlpriv)) {
+               if (cur_edca_val != edca_bt_hs_uplink) {
+                       edca_hs = edca_bt_hs_uplink;
+                       bt_change_edca = true;
+               }
+       } else {
+               if (cur_edca_val != edca_bt_hs_downlink) {
+                       edca_hs = edca_bt_hs_downlink;
+                       bt_change_edca = true;
+               }
+       }
+
+       if (bt_change_edca)
+               rtl_write_dword(rtlpriv, edca_addr, edca_hs);
+
+       return true;
+}
+
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv)
+{
+       if (gl_bt_coexist.bt_info.bt_disabled)
+               return true;
+       else
+               return false;
+}
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
+{
+       return &rtl_btc_operation;
+}
+EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+       u8 num;
+
+       if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+               num = 2;
+       else
+               num = 1;
+
+       return num;
+}
+
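+/* Report the link state in the RT_MEDIA_* form used by the coexistence
+ * code: connected when associated or when operating as an IBSS node. */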
+enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       enum _RT_MEDIA_STATUS    m_status = RT_MEDIA_DISCONNECT;
+
+       u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
+
+       if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+               m_status = RT_MEDIA_CONNECT;
+
+       return m_status;
+}
+
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
+{
+       return rtlpriv->btcoexist.btc_info.btcoexist;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+       return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger    <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
+
+static int __init rtl_btcoexist_module_init(void)
+{
+       return 0;
+}
+
+static void __exit rtl_btcoexist_module_exit(void)
+{
+       return;
+}
+
+module_init(rtl_btcoexist_module_init);
+module_exit(rtl_btcoexist_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.h
new file mode 100644 (file)
index 0000000..805b22c
--- /dev/null
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2010  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL_BTC_H__
+#define __RTL_BTC_H__
+
+#include "halbt_precomp.h"
+
+void rtl_btc_init_variables(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv);
+void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv);
+void rtl_btc_ips_notify(struct rtl_priv *rtlpriv, u8 type);
+void rtl_btc_scan_notify(struct rtl_priv *rtlpriv, u8 scantype);
+void rtl_btc_connect_notify(struct rtl_priv *rtlpriv, u8 action);
+void rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv,
+                               enum _RT_MEDIA_STATUS mstatus);
+void rtl_btc_periodical(struct rtl_priv *rtlpriv);
+void rtl_btc_halt_notify(void);
+void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length);
+bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv);
+bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv);
+
+struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw);
+
+#endif
index 2d337a0c3df027c86742af4367c0c00fe64d2f49..4ec424f26672028550ab8b19b944d451766ee08c 100644 (file)
 
 #include <linux/export.h>
 
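+/* Addresses 0xfe..0xf9 are treated as delay markers rather than real
+ * registers and are mapped to the matching mdelay()/udelay(); the
+ * rtl_rfreg_delay() and rtl_bb_delay() helpers below apply the same
+ * convention before doing a real RF/BB write. */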
+void rtl_addr_delay(u32 addr)
+{
+       if (addr == 0xfe)
+               mdelay(50);
+       else if (addr == 0xfd)
+               mdelay(5);
+       else if (addr == 0xfc)
+               mdelay(1);
+       else if (addr == 0xfb)
+               udelay(50);
+       else if (addr == 0xfa)
+               udelay(5);
+       else if (addr == 0xf9)
+               udelay(1);
+}
+EXPORT_SYMBOL(rtl_addr_delay);
+
+void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+                    u32 mask, u32 data)
+{
+       if (addr == 0xfe) {
+               mdelay(50);
+       } else if (addr == 0xfd) {
+               mdelay(5);
+       } else if (addr == 0xfc) {
+               mdelay(1);
+       } else if (addr == 0xfb) {
+               udelay(50);
+       } else if (addr == 0xfa) {
+               udelay(5);
+       } else if (addr == 0xf9) {
+               udelay(1);
+       } else {
+               rtl_set_rfreg(hw, rfpath, addr, mask, data);
+               udelay(1);
+       }
+}
+EXPORT_SYMBOL(rtl_rfreg_delay);
+
+void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
+{
+       if (addr == 0xfe) {
+               mdelay(50);
+       } else if (addr == 0xfd) {
+               mdelay(5);
+       } else if (addr == 0xfc) {
+               mdelay(1);
+       } else if (addr == 0xfb) {
+               udelay(50);
+       } else if (addr == 0xfa) {
+               udelay(5);
+       } else if (addr == 0xf9) {
+               udelay(1);
+       } else {
+               rtl_set_bbreg(hw, addr, MASKDWORD, data);
+               udelay(1);
+       }
+}
+EXPORT_SYMBOL(rtl_bb_delay);
+
 void rtl_fw_cb(const struct firmware *firmware, void *context)
 {
        struct ieee80211_hw *hw = context;
@@ -475,20 +535,40 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u32 rx_conf;
 
        *new_flags &= RTL_SUPPORTED_FILTERS;
        if (!changed_flags)
                return;
 
+       /* If the SSID has not been set to the hw, don't check the BSSID.
+        * This path only matters for scanning while linked; for the linked
+        * and no-link cases the BSSID check is configured when the network
+        * type is set. */
+       if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
+               (mac->link_state >= MAC80211_LINKED)) {
+               if (mac->opmode != NL80211_IFTYPE_AP &&
+                   mac->opmode != NL80211_IFTYPE_MESH_POINT) {
+                       if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+                               rtlpriv->cfg->ops->set_chk_bssid(hw, false);
+                       } else {
+                               rtlpriv->cfg->ops->set_chk_bssid(hw, true);
+                       }
+               }
+       }
+
+       /* must be called after set_chk_bssid since that function modifies the
+        * RCR register too. */
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
+
        /*TODO: we disable broadcast now, so enable here */
        if (changed_flags & FIF_ALLMULTI) {
                if (*new_flags & FIF_ALLMULTI) {
-                       mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
+                       rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
                            rtlpriv->cfg->maps[MAC_RCR_AB];
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Enable receive multicast frame\n");
                } else {
-                       mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
+                       rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
                                          rtlpriv->cfg->maps[MAC_RCR_AB]);
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Disable receive multicast frame\n");
@@ -497,39 +577,25 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
 
        if (changed_flags & FIF_FCSFAIL) {
                if (*new_flags & FIF_FCSFAIL) {
-                       mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+                       rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Enable receive FCS error frame\n");
                } else {
-                       mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
+                       rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Disable receive FCS error frame\n");
                }
        }
 
-       /* if ssid not set to hw don't check bssid
-        * here just used for linked scanning, & linked
-        * and nolink check bssid is set in set network_type */
-       if ((changed_flags & FIF_BCN_PRBRESP_PROMISC) &&
-               (mac->link_state >= MAC80211_LINKED)) {
-               if (mac->opmode != NL80211_IFTYPE_AP &&
-                   mac->opmode != NL80211_IFTYPE_MESH_POINT) {
-                       if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
-                               rtlpriv->cfg->ops->set_chk_bssid(hw, false);
-                       } else {
-                               rtlpriv->cfg->ops->set_chk_bssid(hw, true);
-                       }
-               }
-       }
 
        if (changed_flags & FIF_CONTROL) {
                if (*new_flags & FIF_CONTROL) {
-                       mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
+                       rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
 
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Enable receive control frame\n");
                } else {
-                       mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
+                       rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Disable receive control frame\n");
                }
@@ -537,15 +603,17 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
 
        if (changed_flags & FIF_OTHER_BSS) {
                if (*new_flags & FIF_OTHER_BSS) {
-                       mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
+                       rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Enable receive other BSS's frame\n");
                } else {
-                       mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
+                       rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
                        RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                                 "Disable receive other BSS's frame\n");
                }
        }
+
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&rx_conf));
 }
 static int rtl_op_sta_add(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
@@ -738,6 +806,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                                rtlpriv->cfg->ops->linked_set_reg(hw);
                        rcu_read_lock();
                        sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
+                       if (!sta) {
+                               pr_err("ieee80211_find_sta returned NULL\n");
+                               rcu_read_unlock();
+                               goto out;
+                       }
 
                        if (vif->type == NL80211_IFTYPE_STATION && sta)
                                rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
@@ -892,7 +965,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 
                        mac->basic_rates = basic_rates;
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
-                                       (u8 *) (&basic_rates));
+                                       (u8 *)(&basic_rates));
                }
                rcu_read_unlock();
        }
@@ -906,6 +979,11 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                if (bss_conf->assoc) {
                        if (ppsc->fwctrl_lps) {
                                u8 mstatus = RT_MEDIA_CONNECT;
+                               u8 keep_alive = 10;
+                               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                HW_VAR_KEEP_ALIVE,
+                                                &keep_alive);
+
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                      HW_VAR_H2C_FW_JOINBSSRPT,
                                                      &mstatus);
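A minimal sketch of the receive-filter pattern introduced above in rtl_op_configure_filter(): the RCR is now treated as live hardware state and updated with a read-modify-write through the get_hw_reg/set_hw_reg ops instead of a cached mac->rx_conf. Only HW_VAR_RCR and those ops come from the patch; the helper name and its single-bit interface below are illustrative.

static void demo_update_rcr_bit(struct ieee80211_hw *hw, u32 bit, bool enable)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 rx_conf;

	/* read the current receive configuration from the HAL */
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)&rx_conf);

	if (enable)
		rx_conf |= bit;
	else
		rx_conf &= ~bit;

	/* write the whole word back once the bit is settled */
	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&rx_conf);
}

Reading the register back also keeps the filter consistent with set_chk_bssid(), which rewrites the RCR on its own.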
index 2fe46a1b4f1f12c3e77ba985d59c4aaff1d88357..027e75374dcc9467c4b01040060d6defaa6d6f55 100644 (file)
@@ -41,5 +41,9 @@
 
 extern const struct ieee80211_ops rtl_ops;
 void rtl_fw_cb(const struct firmware *firmware, void *context);
+void rtl_addr_delay(u32 addr);
+void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
+                    u32 mask, u32 data);
+void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
 
 #endif
index d7aa165fe6776cb64e178f3ec6db0eeb7e9c255c..dae55257f0e8bde44fb772173f7d573fceab6d65 100644 (file)
@@ -811,19 +811,19 @@ done:
                if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
                        return;
                tmp_one = 1;
-               rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
                                            HW_DESC_RXBUFF_ADDR,
                                            (u8 *)&bufferaddress);
-               rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
                                            HW_DESC_RXPKT_LEN,
                                            (u8 *)&rtlpci->rxbuffersize);
 
                if (index == rtlpci->rxringcount - 1)
-                       rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
+                       rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
                                                    HW_DESC_RXERO,
                                                    &tmp_one);
 
-               rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXOWN,
                                            &tmp_one);
 
                index = (index + 1) % rtlpci->rxringcount;
@@ -983,6 +983,8 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        struct sk_buff *pskb = NULL;
        struct rtl_tx_desc *pdesc = NULL;
        struct rtl_tcb_desc tcb_desc;
+       /*This is for new trx flow*/
+       struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
        u8 temp_one = 1;
 
        memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
@@ -1004,11 +1006,12 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
        info = IEEE80211_SKB_CB(pskb);
        pdesc = &ring->desc[0];
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
-               info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
+                                       (u8 *)pbuffer_desc, info, NULL, pskb,
+                                       BEACON_QUEUE, &tcb_desc);
 
        __skb_queue_tail(&ring->queue, pskb);
 
-       rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
+       rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
                                    &temp_one);
 
        return;
@@ -1066,7 +1069,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
        mac->current_ampdu_factor = 3;
 
        /*QOS*/
-       rtlpci->acm_method = eAcmWay2_SW;
+       rtlpci->acm_method = EACMWAY2_SW;
 
        /*task */
        tasklet_init(&rtlpriv->works.irq_tasklet,
@@ -1113,7 +1116,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
                                              ((i + 1) % entries) *
                                              sizeof(*ring);
 
-               rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(ring[i]),
                                            true, HW_DESC_TX_NEXTDESC_ADDR,
                                            (u8 *)&nextdescaddress);
        }
@@ -1188,19 +1191,19 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
                                dev_kfree_skb_any(skb);
                                return 1;
                        }
-                       rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+                       rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                                    HW_DESC_RXBUFF_ADDR,
                                                    (u8 *)&bufferaddress);
-                       rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
+                       rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                                    HW_DESC_RXPKT_LEN,
                                                    (u8 *)&rtlpci->
                                                    rxbuffersize);
-                       rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+                       rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                                    HW_DESC_RXOWN,
                                                    &tmp_one);
                }
 
-               rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
                                            HW_DESC_RXERO, &tmp_one);
        }
        return 0;
@@ -1331,7 +1334,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
 
                        for (i = 0; i < rtlpci->rxringcount; i++) {
                                entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
-                               rtlpriv->cfg->ops->set_desc((u8 *) entry,
+                               rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
                                                            false,
                                                            HW_DESC_RXOWN,
                                                            &tmp_one);
@@ -1424,6 +1427,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct rtl8192_tx_ring *ring;
        struct rtl_tx_desc *pdesc;
+       struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
        u8 idx;
        u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
        unsigned long flags;
@@ -1464,17 +1468,22 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
                idx = 0;
 
        pdesc = &ring->desc[idx];
-       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
-                       true, HW_DESC_OWN);
+       if (rtlpriv->use_new_trx_flow) {
+               ptx_bd_desc = &ring->buffer_desc[idx];
+       } else {
+               own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
+                               true, HW_DESC_OWN);
 
-       if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-                        "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
-                        hw_queue, ring->idx, idx,
-                        skb_queue_len(&ring->queue));
+               if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
+                                hw_queue, ring->idx, idx,
+                                skb_queue_len(&ring->queue));
 
-               spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-               return skb->len;
+                       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+                                              flags);
+                       return skb->len;
+               }
        }
 
        if (ieee80211_is_data_qos(fc)) {
@@ -1494,17 +1503,20 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
                rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
 
        rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
-                       info, sta, skb, hw_queue, ptcb_desc);
+                       (u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
 
        __skb_queue_tail(&ring->queue, skb);
 
-       rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
-                                   HW_DESC_OWN, &temp_one);
-
+       if (rtlpriv->use_new_trx_flow) {
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
+                                           HW_DESC_OWN, &hw_queue);
+       } else {
+               rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
+                                           HW_DESC_OWN, &temp_one);
+       }
 
        if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
            hw_queue != BEACON_QUEUE) {
-
                RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
                         "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
                         hw_queue, ring->idx, idx,
@@ -1841,6 +1853,65 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
        return true;
 }
 
+static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+       int ret;
+
+       ret = pci_enable_msi(rtlpci->pdev);
+       if (ret < 0)
+               return ret;
+
+       ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+                         IRQF_SHARED, KBUILD_MODNAME, hw);
+       if (ret < 0) {
+               pci_disable_msi(rtlpci->pdev);
+               return ret;
+       }
+
+       rtlpci->using_msi = true;
+
+       RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+                "MSI Interrupt Mode!\n");
+       return 0;
+}
+
+static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+       int ret;
+
+       ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
+                         IRQF_SHARED, KBUILD_MODNAME, hw);
+       if (ret < 0)
+               return ret;
+
+       rtlpci->using_msi = false;
+       RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
+                "Pin-based Interrupt Mode!\n");
+       return 0;
+}
+
+static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
+{
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
+       int ret;
+
+       if (rtlpci->msi_support) {
+               ret = rtl_pci_intr_mode_msi(hw);
+               if (ret < 0)
+                       ret = rtl_pci_intr_mode_legacy(hw);
+       } else {
+               ret = rtl_pci_intr_mode_legacy(hw);
+       }
+       return ret;
+}
+
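To illustrate how a chip driver opts in to the new interrupt path, a hedged sketch follows; demo_init_sw_vars() is a hypothetical hook, while the msi_support flag and the MSI-then-legacy fallback in rtl_pci_intr_mode_decide() come from the patch (rtl8188ee sets the flag the same way in its init_sw_vars further down).

static int demo_init_sw_vars(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	/* ask for MSI; rtl_pci_intr_mode_decide() falls back to INTx if it fails */
	rtlpci->msi_support = true;
	return 0;
}

On teardown, rtl_pci_disconnect() mirrors the setup by calling pci_disable_msi() only when using_msi was actually set.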
 int rtl_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *id)
 {
@@ -1983,8 +2054,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
        }
 
        rtlpci = rtl_pcidev(pcipriv);
-       err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
-                         IRQF_SHARED, KBUILD_MODNAME, hw);
+       err = rtl_pci_intr_mode_decide(hw);
        if (err) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                         "%s: failed to register IRQ handler\n",
@@ -2052,6 +2122,9 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
                rtlpci->irq_alloc = 0;
        }
 
+       if (rtlpci->using_msi)
+               pci_disable_msi(rtlpci->pdev);
+
        list_del(&rtlpriv->list);
        if (rtlpriv->io.pci_mem_start != 0) {
                pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
index d3262ec45d230f0f8abd1744501a004006c48198..90174a814a6d26fc6e384171afc54324b98a0abd 100644 (file)
@@ -137,12 +137,22 @@ struct rtl_tx_cmd_desc {
        u32 dword[16];
 } __packed;
 
+/* In the new TRX flow the buffer descriptor is a new concept,
+ * while the TX wifi info corresponds to the TX descriptor of the
+ * old flow and the RX wifi info to the RX descriptor of the old flow.
+ */
+struct rtl_tx_buffer_desc {
+       u32 dword[8]; /*seg = 4*/
+} __packed;
+
 struct rtl8192_tx_ring {
        struct rtl_tx_desc *desc;
        dma_addr_t dma;
        unsigned int idx;
        unsigned int entries;
        struct sk_buff_head queue;
+       /*add for new trx flow*/
+       struct rtl_tx_buffer_desc *buffer_desc; /*tx buffer descriptor*/
 };
 
 struct rtl8192_rx_ring {
@@ -199,6 +209,10 @@ struct rtl_pci {
 
        u16 shortretry_limit;
        u16 longretry_limit;
+
+       /* MSI support */
+       bool msi_support;
+       bool using_msi;
 };
 
 struct mp_adapter {
index d1c0191a195b909e5095fde8807eb671b307e374..50504942ded151f40a198099b6fd31013774ca93 100644 (file)
 #include "base.h"
 #include "ps.h"
 
+/*     Description:
+ *             This routine handles Power Configuration CMD parsing
+ *             for the RTL8723/RTL8188E series ICs.
+ *     Assumption:
+ *             The command tables follow the specific format released by HW SD.
+ */
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+                             u8 faversion, u8 interface_type,
+                             struct wlan_pwr_cfg pwrcfgcmd[])
+{
+       struct wlan_pwr_cfg cfg_cmd = {0};
+       bool polling_bit = false;
+       u32 ary_idx = 0;
+       u8 value = 0;
+       u32 offset = 0;
+       u32 polling_count = 0;
+       u32 max_polling_cnt = 5000;
+
+       do {
+               cfg_cmd = pwrcfgcmd[ary_idx];
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                       "rtl_hal_pwrseqcmdparsing(): offset(%#x), cut_msk(%#x), fab_msk(%#x), "
+                       "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+                       GET_PWR_CFG_OFFSET(cfg_cmd),
+                       GET_PWR_CFG_CUT_MASK(cfg_cmd),
+                       GET_PWR_CFG_FAB_MASK(cfg_cmd),
+                       GET_PWR_CFG_INTF_MASK(cfg_cmd),
+                       GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+                       GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+
+               if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
+                   (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
+                   (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
+                       switch (GET_PWR_CFG_CMD(cfg_cmd)) {
+                       case PWR_CMD_READ:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                       "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
+                               break;
+                       case PWR_CMD_WRITE:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                       "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
+                               offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+                               /*Read the value from system register*/
+                               value = rtl_read_byte(rtlpriv, offset);
+                               value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
+                               value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
+                                         GET_PWR_CFG_MASK(cfg_cmd));
+
+                               /*Write the value back to system register*/
+                               rtl_write_byte(rtlpriv, offset, value);
+                               break;
+                       case PWR_CMD_POLLING:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                       "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
+                               polling_bit = false;
+                               offset = GET_PWR_CFG_OFFSET(cfg_cmd);
+
+                               do {
+                                       value = rtl_read_byte(rtlpriv, offset);
+
+                                       value &= GET_PWR_CFG_MASK(cfg_cmd);
+                                       if (value ==
+                                           (GET_PWR_CFG_VALUE(cfg_cmd)
+                                           & GET_PWR_CFG_MASK(cfg_cmd)))
+                                               polling_bit = true;
+                                       else
+                                               udelay(10);
+
+                                       if (polling_count++ > max_polling_cnt)
+                                               return false;
+                               } while (!polling_bit);
+                               break;
+                       case PWR_CMD_DELAY:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                       "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+                               if (GET_PWR_CFG_VALUE(cfg_cmd) ==
+                                   PWRSEQ_DELAY_US)
+                                       udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+                               else
+                                       mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
+                               break;
+                       case PWR_CMD_END:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                        "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+                               return true;
+                       default:
+                               RT_ASSERT(false,
+                                        "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
+                               break;
+                       }
+
+               }
+               ary_idx++;
+       } while (1);
+
+       return true;
+}
+EXPORT_SYMBOL(rtl_hal_pwrseqcmdparsing);
+
 bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -659,7 +759,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
                           unsigned int len)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct ieee80211_mgmt *mgmt = (void *)data;
+       struct ieee80211_mgmt *mgmt = data;
        struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
        u8 *pos, *end, *ie;
        u16 noa_len;
@@ -758,7 +858,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
                              unsigned int len)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct ieee80211_mgmt *mgmt = (void *)data;
+       struct ieee80211_mgmt *mgmt = data;
        struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
        u8 noa_num, index, i, noa_index = 0;
        u8 *pos, *end, *ie;
@@ -850,9 +950,8 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
        switch (p2p_ps_state) {
        case P2P_PS_DISABLE:
                p2pinfo->p2p_ps_state = p2p_ps_state;
-               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
-                                (u8 *)(&p2p_ps_state));
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+                                             &p2p_ps_state);
 
                p2pinfo->noa_index = 0;
                p2pinfo->ctwindow = 0;
@@ -864,7 +963,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
                                rtlps->smart_ps = 2;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                         HW_VAR_H2C_FW_PWRMODE,
-                                        (u8 *)(&rtlps->pwr_mode));
+                                        &rtlps->pwr_mode);
                        }
                }
                break;
@@ -877,12 +976,12 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
                                        rtlps->smart_ps = 0;
                                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                                 HW_VAR_H2C_FW_PWRMODE,
-                                                (u8 *)(&rtlps->pwr_mode));
+                                                &rtlps->pwr_mode);
                                }
                        }
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
-                                (u8 *)(&p2p_ps_state));
+                                &p2p_ps_state);
                }
                break;
        case P2P_PS_SCAN:
@@ -892,7 +991,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
                        p2pinfo->p2p_ps_state = p2p_ps_state;
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                 HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
-                                (u8 *)(&p2p_ps_state));
+                                &p2p_ps_state);
                }
                break;
        default:
@@ -912,7 +1011,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       struct ieee80211_hdr *hdr = (void *)data;
+       struct ieee80211_hdr *hdr = data;
 
        if (!mac->p2p)
                return;
index 88bd76ea88f7621dd92cb4ff495ee22e9dc53912..3bd41f958974308e6b9a3b25a9670541cd3f56eb 100644 (file)
 
 #define MAX_SW_LPS_SLEEP_INTV  5
 
+/*---------------------------------------------
+ * The value of cmd: 4 bits
+ *---------------------------------------------
+ */
+#define    PWR_CMD_READ                0x00
+#define    PWR_CMD_WRITE       0x01
+#define    PWR_CMD_POLLING     0x02
+#define    PWR_CMD_DELAY       0x03
+#define    PWR_CMD_END         0x04
+
+/* define the base address of each block */
+#define        PWR_BASEADDR_MAC        0x00
+#define        PWR_BASEADDR_USB        0x01
+#define        PWR_BASEADDR_PCIE       0x02
+#define        PWR_BASEADDR_SDIO       0x03
+
+#define        PWR_FAB_ALL_MSK         (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+#define        PWR_CUT_TESTCHIP_MSK    BIT(0)
+#define        PWR_CUT_A_MSK           BIT(1)
+#define        PWR_CUT_B_MSK           BIT(2)
+#define        PWR_CUT_C_MSK           BIT(3)
+#define        PWR_CUT_D_MSK           BIT(4)
+#define        PWR_CUT_E_MSK           BIT(5)
+#define        PWR_CUT_F_MSK           BIT(6)
+#define        PWR_CUT_G_MSK           BIT(7)
+#define        PWR_CUT_ALL_MSK         0xFF
+#define PWR_INTF_SDIO_MSK      BIT(0)
+#define PWR_INTF_USB_MSK       BIT(1)
+#define PWR_INTF_PCI_MSK       BIT(2)
+#define PWR_INTF_ALL_MSK       (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+enum pwrseq_delay_unit {
+       PWRSEQ_DELAY_US,
+       PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+       u16 offset;
+       u8 cut_msk;
+       u8 fab_msk:4;
+       u8 interface_msk:4;
+       u8 base:4;
+       u8 cmd:4;
+       u8 msk;
+       u8 value;
+};
+
+#define        GET_PWR_CFG_OFFSET(__PWR_CMD)   (__PWR_CMD.offset)
+#define        GET_PWR_CFG_CUT_MASK(__PWR_CMD) (__PWR_CMD.cut_msk)
+#define        GET_PWR_CFG_FAB_MASK(__PWR_CMD) (__PWR_CMD.fab_msk)
+#define        GET_PWR_CFG_INTF_MASK(__PWR_CMD)        (__PWR_CMD.interface_msk)
+#define        GET_PWR_CFG_BASE(__PWR_CMD)     (__PWR_CMD.base)
+#define        GET_PWR_CFG_CMD(__PWR_CMD)      (__PWR_CMD.cmd)
+#define        GET_PWR_CFG_MASK(__PWR_CMD)     (__PWR_CMD.msk)
+#define        GET_PWR_CFG_VALUE(__PWR_CMD)    (__PWR_CMD.value)
+
+bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+                             u8 fab_version, u8 interface_type,
+                             struct wlan_pwr_cfg pwrcfgcmd[]);
+
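For illustration, a command table consumed by this parser could look like the sketch below; the register offsets and bits are invented, while the field order, the masks, and the PWR_CMD_* opcodes (terminated by PWR_CMD_END) come from the definitions above.

/* hypothetical enable flow: write one MAC register, poll a ready bit, stop */
static const struct wlan_pwr_cfg demo_enable_flow[] = {
	/* offset, cut_msk, fab_msk, intf_msk, base, cmd, msk, value */
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},
	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},
	{0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,
	 0, PWR_CMD_END, 0, 0},
};

A driver would hand such a table to rtl_hal_pwrseqcmdparsing() together with its cut, fab, and interface masks, which is exactly what the rtl8188ee init and poweroff paths do with Rtl8188E_NIC_ENABLE_FLOW and friends below.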
 bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
                         enum rf_pwrstate state_toset, u32 changesource);
 bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
index a98acefb8c06a3802290c9130de368b8effcab83..ee28a1a3d0100deeb0b3a50296109e814528a85e 100644 (file)
@@ -260,8 +260,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
        kfree(rate_priv);
 }
 
-static struct rate_control_ops rtl_rate_ops = {
-       .module = NULL,
+static const struct rate_control_ops rtl_rate_ops = {
        .name = "rtl_rc",
        .alloc = rtl_rate_alloc,
        .free = rtl_rate_free,
index 5b194e97f4b3947e4a4af5c0b057c9b4689f87da..a85419a37651fcdb372607acefdb22b0f690fb31 100644 (file)
@@ -5,7 +5,6 @@ rtl8188ee-objs :=               \
                led.o           \
                phy.o           \
                pwrseq.o        \
-               pwrseqcmd.o     \
                rf.o            \
                sw.o            \
                table.o         \
index a6184b6e1d57ff50bca6a4f4aa16ac2871dc295f..f8daa61cf1c32df10b6ff69428d8aa4e434f090d 100644 (file)
@@ -235,7 +235,7 @@ void rtl88e_dm_txpower_track_adjust(struct ieee80211_hw *hw,
        u8 pwr_val = 0;
        u8 cck_base = rtldm->swing_idx_cck_base;
        u8 cck_val = rtldm->swing_idx_cck;
-       u8 ofdm_base = rtldm->swing_idx_ofdm_base;
+       u8 ofdm_base = rtldm->swing_idx_ofdm_base[0];
        u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
 
        if (type == 0) {
@@ -726,7 +726,7 @@ static void rtl88e_dm_pwdb_monitor(struct ieee80211_hw *hw)
        static u64 last_rx;
        long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
 
-       if (rtlhal->oem_id == RT_CID_819x_HP) {
+       if (rtlhal->oem_id == RT_CID_819X_HP) {
                u64 cur_txok_cnt = 0;
                u64 cur_rxok_cnt = 0;
                cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok;
@@ -851,9 +851,8 @@ static void rtl88e_dm_check_edca_turbo(struct ieee80211_hw *hw)
        } else {
                if (rtlpriv->dm.current_turbo_edca) {
                        u8 tmp = AC0_BE;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_AC_PARAM,
-                                                     (u8 *)(&tmp));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+                                                     &tmp);
                        rtlpriv->dm.current_turbo_edca = false;
                }
        }
@@ -912,7 +911,7 @@ static void rtl88e_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
        for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
                if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
                        ofdm_old[0] = (u8) i;
-                       rtldm->swing_idx_ofdm_base = (u8)i;
+                       rtldm->swing_idx_ofdm_base[0] = (u8)i;
                        RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
                                 "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
                                 ROFDM0_XATXIQIMBAL,
index 557bc5b8327eef6d9b998fcc90e52270d84e6077..4f9376ad473966c9b19c986080e73e047a40adde 100644 (file)
@@ -119,7 +119,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
                             enum version_8188e version, u8 *buffer, u32 size)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 *buf_ptr = (u8 *)buffer;
+       u8 *buf_ptr = buffer;
        u32 page_no, remain;
        u32 page, offset;
 
@@ -213,7 +213,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                return 1;
 
        pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
-       pfwdata = (u8 *)rtlhal->pfirmware;
+       pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
        RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                 "normal Firmware SIZE %d\n", fwsize);
index e06971be7df77e047e8d869d046a126d0e987d14..94cd9df98381008e53f6ecb60cbcb7f56a6aef1b 100644 (file)
@@ -41,7 +41,6 @@
 #include "fw.h"
 #include "led.h"
 #include "hw.h"
-#include "pwrseqcmd.h"
 #include "pwrseq.h"
 
 #define LLT_CONFIG             5
@@ -148,8 +147,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
        }
 
        if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
-               rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
-                                             (u8 *)(&rpwm_val));
+               rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
                if (FW_PS_IS_ACK(rpwm_val)) {
                        isr_regaddr = REG_HISR;
                        content = rtl_read_dword(rtlpriv, isr_regaddr);
@@ -226,7 +224,7 @@ static void _rtl88ee_set_fw_clock_off(struct ieee80211_hw *hw,
                        rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
                        rtl_write_word(rtlpriv, REG_HISR, 0x0100);
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
-                                                     (u8 *)(&rpwm_val));
+                                                     &rpwm_val);
                        spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
                        rtlhal->fw_clk_change_in_progress = false;
                        spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
@@ -274,15 +272,14 @@ static void _rtl88ee_fwlps_leave(struct ieee80211_hw *hw)
                _rtl88ee_set_fw_clock_on(hw, rpwm_val, false);
                rtlhal->allow_sw_to_change_hwclc = false;
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
-                                             (u8 *)(&fw_pwrmode));
+                                             &fw_pwrmode);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
                                              (u8 *)(&fw_current_inps));
        } else {
                rpwm_val = FW_PS_STATE_ALL_ON_88E;      /* RF on */
-               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
-                                             (u8 *)(&rpwm_val));
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
-                                             (u8 *)(&fw_pwrmode));
+                                             &fw_pwrmode);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
                                              (u8 *)(&fw_current_inps));
        }
@@ -301,7 +298,7 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
                                              (u8 *)(&fw_current_inps));
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
-                                             (u8 *)(&ppsc->fwctrl_psmode));
+                                             &ppsc->fwctrl_psmode);
                rtlhal->allow_sw_to_change_hwclc = true;
                _rtl88ee_set_fw_clock_off(hw, rpwm_val);
        } else {
@@ -309,9 +306,8 @@ static void _rtl88ee_fwlps_enter(struct ieee80211_hw *hw)
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
                                              (u8 *)(&fw_current_inps));
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
-                                             (u8 *)(&ppsc->fwctrl_psmode));
-               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
-                                             (u8 *)(&rpwm_val));
+                                             &ppsc->fwctrl_psmode);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
        }
 }
 
@@ -420,12 +416,12 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
 
                for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
-                                                     (u8 *)(&e_aci));
+                                                     &e_aci);
                }
                break; }
        case HW_VAR_ACK_PREAMBLE:{
                u8 reg_tmp;
-               u8 short_preamble = (bool) (*(u8 *)val);
+               u8 short_preamble = (bool)*val;
                reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL+2);
                if (short_preamble) {
                        reg_tmp |= 0x02;
@@ -436,13 +432,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break; }
        case HW_VAR_WPA_CONFIG:
-               rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *)val));
+               rtl_write_byte(rtlpriv, REG_SECCFG, *val);
                break;
        case HW_VAR_AMPDU_MIN_SPACE:{
                u8 min_spacing_to_set;
                u8 sec_min_space;
 
-               min_spacing_to_set = *((u8 *)val);
+               min_spacing_to_set = *val;
                if (min_spacing_to_set <= 7) {
                        sec_min_space = 0;
 
@@ -465,7 +461,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HW_VAR_SHORTGI_DENSITY:{
                u8 density_to_set;
 
-               density_to_set = *((u8 *)val);
+               density_to_set = *val;
                mac->min_space_cfg |= (density_to_set << 3);
 
                RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -483,7 +479,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
 
                reg = regtoset_normal;
 
-               factor = *((u8 *)val);
+               factor = *val;
                if (factor <= 3) {
                        factor = (1 << (factor + 2));
                        if (factor > 0xf)
@@ -506,15 +502,15 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break; }
        case HW_VAR_AC_PARAM:{
-               u8 e_aci = *((u8 *)val);
+               u8 e_aci = *val;
                rtl88e_dm_init_edca_turbo(hw);
 
-               if (rtlpci->acm_method != eAcmWay2_SW)
+               if (rtlpci->acm_method != EACMWAY2_SW)
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
-                                                     (u8 *)(&e_aci));
+                                                     &e_aci);
                break; }
        case HW_VAR_ACM_CTRL:{
-               u8 e_aci = *((u8 *)val);
+               u8 e_aci = *val;
                union aci_aifsn *p_aci_aifsn =
                    (union aci_aifsn *)(&(mac->ac[0].aifs));
                u8 acm = p_aci_aifsn->f.acm;
@@ -567,7 +563,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtlpci->receive_config = ((u32 *)(val))[0];
                break;
        case HW_VAR_RETRY_LIMIT:{
-               u8 retry_limit = ((u8 *)(val))[0];
+               u8 retry_limit = *val;
 
                rtl_write_word(rtlpriv, REG_RL,
                               retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -580,7 +576,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtlefuse->efuse_usedbytes = *((u16 *)val);
                break;
        case HW_VAR_EFUSE_USAGE:
-               rtlefuse->efuse_usedpercentage = *((u8 *)val);
+               rtlefuse->efuse_usedpercentage = *val;
                break;
        case HW_VAR_IO_CMD:
                rtl88e_phy_set_io_cmd(hw, (*(enum io_type *)val));
@@ -592,15 +588,13 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                udelay(1);
 
                if (rpwm_val & BIT(7)) {
-                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
-                                      (*(u8 *)val));
+                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
                } else {
-                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
-                                      ((*(u8 *)val) | BIT(7)));
+                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
                }
                break; }
        case HW_VAR_H2C_FW_PWRMODE:
-               rtl88e_set_fw_pwrmode_cmd(hw, (*(u8 *)val));
+               rtl88e_set_fw_pwrmode_cmd(hw, *val);
                break;
        case HW_VAR_FW_PSMODE_STATUS:
                ppsc->fw_current_inpsmode = *((bool *)val);
@@ -617,7 +611,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        _rtl88ee_fwlps_leave(hw);
                 break; }
        case HW_VAR_H2C_FW_JOINBSSRPT:{
-               u8 mstatus = (*(u8 *)val);
+               u8 mstatus = *val;
                u8 tmp, tmp_reg422, uval;
                u8 count = 0, dlbcn_count = 0;
                bool recover = false;
@@ -668,10 +662,10 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        }
                        rtl_write_byte(rtlpriv, REG_CR + 1, (tmp & ~(BIT(0))));
                }
-               rtl88e_set_fw_joinbss_report_cmd(hw, (*(u8 *)val));
+               rtl88e_set_fw_joinbss_report_cmd(hw, *val);
                break; }
        case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
-               rtl88e_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+               rtl88e_set_p2p_ps_offload_cmd(hw, *val);
                break;
        case HW_VAR_AID:{
                u16 u2btmp;
@@ -681,7 +675,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                               mac->assoc_id));
                break; }
        case HW_VAR_CORRECT_TSF:{
-               u8 btype_ibss = ((u8 *)(val))[0];
+               u8 btype_ibss = *val;
 
                if (btype_ibss == true)
                        _rtl88ee_stop_tx_beacon(hw);
@@ -815,11 +809,11 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
 
        rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
        /* HW Power on sequence */
-       if (!rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
-                                       PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
-                                       Rtl8188E_NIC_ENABLE_FLOW)) {
+       if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+                                     PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+                                     Rtl8188E_NIC_ENABLE_FLOW)) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
-                        "init MAC Fail as rtl88_hal_pwrseqcmdparsing\n");
+                        "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
                return false;
        }
 
@@ -1025,9 +1019,20 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
        bool rtstatus = true;
        int err = 0;
        u8 tmp_u1b, u1byte;
+       unsigned long flags;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Rtl8188EE hw init\n");
        rtlpriv->rtlhal.being_init_adapter = true;
+       /* As this function can take a very long time (up to 350 ms)
+        * and can be called with irqs disabled, re-enable the irqs
+        * so that other devices can continue to be serviced.
+        *
+        * It is safe to do so since our own interrupts will only be
+        * enabled in a subsequent step.
+        */
+       local_save_flags(flags);
+       local_irq_enable();
+
        rtlpriv->intf_ops->disable_aspm(hw);
 
        tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CLKR+1);
@@ -1043,7 +1048,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
        if (rtstatus != true) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
                err = 1;
-               return err;
+               goto exit;
        }
 
        err = rtl88e_download_fw(hw, false);
@@ -1051,8 +1056,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "Failed to download FW. Init HW without FW now..\n");
                err = 1;
-               rtlhal->fw_ready = false;
-               return err;
+               goto exit;
        } else {
                rtlhal->fw_ready = true;
        }
@@ -1097,7 +1101,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
        if (ppsc->rfpwr_state == ERFON) {
                if ((rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV) ||
                    ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) &&
-                   (rtlhal->oem_id == RT_CID_819x_HP))) {
+                   (rtlhal->oem_id == RT_CID_819X_HP))) {
                        rtl88e_phy_set_rfpath_switch(hw, true);
                        rtlpriv->dm.fat_table.rx_idle_ant = MAIN_ANT;
                } else {
@@ -1135,10 +1139,12 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
        }
        rtl_write_byte(rtlpriv, REG_NAV_CTRL+2,  ((30000+127)/128));
        rtl88e_dm_init(hw);
+exit:
+       local_irq_restore(flags);
        rtlpriv->rtlhal.being_init_adapter = false;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "end of Rtl8188EE hw init %x\n",
                 err);
-       return 0;
+       return err;
 }
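The interrupt window added above can be read in isolation as the short sketch below; demo_long_init() is hypothetical, and only the local_save_flags()/local_irq_enable()/local_irq_restore() bracketing and the single exit path come from the patch.

static int demo_long_init(void)
{
	unsigned long flags;
	int err = 0;

	/* the caller may hold irqs off; open a window so other devices are serviced */
	local_save_flags(flags);
	local_irq_enable();

	/* ... several hundred ms of hardware bring-up, setting err on failure ... */

	/* restore the caller's irq state on every exit path */
	local_irq_restore(flags);
	return err;
}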
 
 static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
@@ -1235,12 +1241,13 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
 void rtl88ee_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       u32 reg_rcr = rtlpci->receive_config;
+       u32 reg_rcr;
 
        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;
 
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
        if (check_bssid == true) {
                reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1345,9 +1352,9 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
        }
        rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+1, 0xFF);
 
-       rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
-                                  PWR_INTF_PCI_MSK,
-                                  Rtl8188E_NIC_LPS_ENTER_FLOW);
+       rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+                                PWR_INTF_PCI_MSK,
+                                Rtl8188E_NIC_LPS_ENTER_FLOW);
 
        rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
 
@@ -1361,8 +1368,8 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
        u1b_tmp = rtl_read_byte(rtlpriv, REG_32K_CTRL);
        rtl_write_byte(rtlpriv, REG_32K_CTRL, (u1b_tmp & (~BIT(0))));
 
-       rtl88_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
-                                  PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+       rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+                                PWR_INTF_PCI_MSK, Rtl8188E_NIC_DISABLE_FLOW);
 
        u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL+1);
        rtl_write_byte(rtlpriv, REG_RSV_CTRL+1, (u1b_tmp & (~BIT(3))));
@@ -1816,7 +1823,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
        /*customer ID*/
-       rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+       rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
        if (rtlefuse->eeprom_oemid == 0xFF)
                rtlefuse->eeprom_oemid = 0;
 
@@ -1833,7 +1840,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 "dev_addr: %pM\n", rtlefuse->dev_addr);
        /*channel plan */
-       rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+       rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
        /* set channel plan to world wide 13 */
        rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
        /*tx power*/
@@ -1845,7 +1852,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
                                                 rtlefuse->autoload_failflag,
                                                 hwinfo);
        /*board type*/
-       rtlefuse->board_type = (((*(u8 *)&hwinfo[jj]) & 0xE0) >> 5);
+       rtlefuse->board_type = (hwinfo[jj] & 0xE0) >> 5;
        /*Wake on wlan*/
        rtlefuse->wowlan_enable = ((hwinfo[kk] & 0x40) >> 6);
        /*parse xtal*/
@@ -1872,15 +1879,15 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
                case EEPROM_CID_DEFAULT:
                        if (rtlefuse->eeprom_did == 0x8179) {
                                if (rtlefuse->eeprom_svid == 0x1025) {
-                                       rtlhal->oem_id = RT_CID_819x_Acer;
+                                       rtlhal->oem_id = RT_CID_819X_ACER;
                                } else if ((rtlefuse->eeprom_svid == 0x10EC &&
                                            rtlefuse->eeprom_smid == 0x0179) ||
                                            (rtlefuse->eeprom_svid == 0x17AA &&
                                            rtlefuse->eeprom_smid == 0x0179)) {
-                                       rtlhal->oem_id = RT_CID_819x_Lenovo;
+                                       rtlhal->oem_id = RT_CID_819X_LENOVO;
                                } else if (rtlefuse->eeprom_svid == 0x103c &&
                                         rtlefuse->eeprom_smid == 0x197d) {
-                                       rtlhal->oem_id = RT_CID_819x_HP;
+                                       rtlhal->oem_id = RT_CID_819X_HP;
                                } else {
                                        rtlhal->oem_id = RT_CID_DEFAULT;
                                }
@@ -1892,7 +1899,7 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
                        rtlhal->oem_id = RT_CID_TOSHIBA;
                        break;
                case EEPROM_CID_QMI:
-                       rtlhal->oem_id = RT_CID_819x_QMI;
+                       rtlhal->oem_id = RT_CID_819X_QMI;
                        break;
                case EEPROM_CID_WHQL:
                default:
@@ -1911,14 +1918,14 @@ static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
        pcipriv->ledctl.led_opendrain = true;
 
        switch (rtlhal->oem_id) {
-       case RT_CID_819x_HP:
+       case RT_CID_819X_HP:
                pcipriv->ledctl.led_opendrain = true;
                break;
-       case RT_CID_819x_Lenovo:
+       case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
        case RT_CID_TOSHIBA:
        case RT_CID_CCX:
-       case RT_CID_819x_Acer:
+       case RT_CID_819X_ACER:
        case RT_CID_WHQL:
        default:
                break;
@@ -2211,8 +2218,7 @@ void rtl88ee_update_channel_access_setting(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        u16 sifs_timer;
 
-       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
-                                     (u8 *)&mac->slot_time);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
        if (!mac->ht_enable)
                sifs_timer = 0x0a0a;
        else
index d67f9c731cc4600e57d774e7125d80c945e196b6..1cd6c16d597ee40a01e3b529ea4faca80f22c773 100644 (file)
@@ -29,6 +29,7 @@
 
 #include "../wifi.h"
 #include "../pci.h"
+#include "../core.h"
 #include "../ps.h"
 #include "reg.h"
 #include "def.h"
@@ -151,18 +152,7 @@ static bool config_bb_with_pgheader(struct ieee80211_hw *hw,
                        v2 = table_pg[i + 1];
 
                        if (v1 < 0xcdcdcdcd) {
-                               if (table_pg[i] == 0xfe)
-                                       mdelay(50);
-                               else if (table_pg[i] == 0xfd)
-                                       mdelay(5);
-                               else if (table_pg[i] == 0xfc)
-                                       mdelay(1);
-                               else if (table_pg[i] == 0xfb)
-                                       udelay(50);
-                               else if (table_pg[i] == 0xfa)
-                                       udelay(5);
-                               else if (table_pg[i] == 0xf9)
-                                       udelay(1);
+                               rtl_addr_delay(table_pg[i]);
 
                                store_pwrindex_offset(hw, table_pg[i],
                                                      table_pg[i + 1],
@@ -672,24 +662,9 @@ static void _rtl8188e_config_rf_reg(struct ieee80211_hw *hw,
                                    u32 addr, u32 data, enum radio_path rfpath,
                                    u32 regaddr)
 {
-       if (addr == 0xffe) {
-               mdelay(50);
-       } else if (addr == 0xfd) {
-               mdelay(5);
-       } else if (addr == 0xfc) {
-               mdelay(1);
-       } else if (addr == 0xfb) {
-               udelay(50);
-       } else if (addr == 0xfa) {
-               udelay(5);
-       } else if (addr == 0xf9) {
-               udelay(1);
-       } else {
-               rtl_set_rfreg(hw, rfpath, regaddr,
-                             RFREG_OFFSET_MASK,
-                             data);
-               udelay(1);
-       }
+       rtl_rfreg_delay(hw, rfpath, regaddr,
+                       RFREG_OFFSET_MASK,
+                       data);
 }
 
 static void rtl88_config_s(struct ieee80211_hw *hw,
@@ -702,28 +677,6 @@ static void rtl88_config_s(struct ieee80211_hw *hw,
                                addr | maskforphyset);
 }
 
-static void _rtl8188e_config_bb_reg(struct ieee80211_hw *hw,
-                                   u32 addr, u32 data)
-{
-       if (addr == 0xfe) {
-               mdelay(50);
-       } else if (addr == 0xfd) {
-               mdelay(5);
-       } else if (addr == 0xfc) {
-               mdelay(1);
-       } else if (addr == 0xfb) {
-               udelay(50);
-       } else if (addr == 0xfa) {
-               udelay(5);
-       } else if (addr == 0xf9) {
-               udelay(1);
-       } else {
-               rtl_set_bbreg(hw, addr, MASKDWORD, data);
-               udelay(1);
-       }
-}
-
-
 #define NEXT_PAIR(v1, v2, i)                           \
        do {                                            \
                i += 2; v1 = array_table[i];            \
@@ -795,7 +748,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
                v1 = array_table[i];
                v2 = array_table[i + 1];
                if (v1 < 0xcdcdcdcd) {
-                       _rtl8188e_config_bb_reg(hw, v1, v2);
+                       rtl_bb_delay(hw, v1, v2);
                } else {/*This line is the start line of branch.*/
                        if (!check_cond(hw, array_table[i])) {
                                /*Discard the following (offset, data) pairs*/
@@ -811,7 +764,7 @@ static void set_baseband_phy_config(struct ieee80211_hw *hw)
                                while (v2 != 0xDEAD &&
                                       v2 != 0xCDEF &&
                                       v2 != 0xCDCD && i < arraylen - 2) {
-                                       _rtl8188e_config_bb_reg(hw, v1, v2);
+                                       rtl_bb_delay(hw, v1, v2);
                                        NEXT_PAIR(v1, v2, i);
                                }
 
@@ -1002,7 +955,7 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                        }
                }
 
-               if (rtlhal->oem_id == RT_CID_819x_HP)
+               if (rtlhal->oem_id == RT_CID_819X_HP)
                        rtl88_config_s(hw, 0x52, 0x7E4BD);
 
                break;
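The duplicated delay ladders deleted above (and the matching one in config_bb_with_pgheader earlier in this file) collapse into the shared rtl_addr_delay()/rtl_bb_delay()/rtl_rfreg_delay() helpers declared near the top of this series. As a sketch inferred from the removed branches rather than copied from core.c, the address-to-delay mapping presumably is:

void rtl_addr_delay(u32 addr)
{
	/* pseudo-addresses 0xfe..0xf9 encode delays rather than real registers */
	if (addr == 0xfe)
		mdelay(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		udelay(50);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
}

rtl_bb_delay() and rtl_rfreg_delay() would then fall through to rtl_set_bbreg()/rtl_set_rfreg() for ordinary addresses, exactly as the removed else branches did.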
index 028ec6dd52b4920c7e8db9ad2c2a98e92f509c43..32e135ab9a63fcda64369ea3b0776c0e13175d68 100644 (file)
@@ -30,7 +30,6 @@
 #ifndef __RTL8723E_PWRSEQ_H__
 #define __RTL8723E_PWRSEQ_H__
 
-#include "pwrseqcmd.h"
 /*
        Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
        There are 6 HW Power States:
index d849abf7d94a864980de709265b4af238773ba4f..7af85cfa8f8706f0c67c0bf2082d11889fa662d9 100644 (file)
 #define        BWORD1                                  0xc
 #define        BWORD                                   0xf
 
-#define        MASKBYTE0                               0xff
-#define        MASKBYTE1                               0xff00
-#define        MASKBYTE2                               0xff0000
-#define        MASKBYTE3                               0xff000000
-#define        MASKHWORD                               0xffff0000
-#define        MASKLWORD                               0x0000ffff
-#define        MASKDWORD                               0xffffffff
-#define        MASK12BITS                              0xfff
-#define        MASKH4BITS                              0xf0000000
-#define MASKOFDM_D                             0xffc00000
-#define        MASKCCK                                 0x3f3f3f3f
-
-#define        MASK4BITS                               0x0f
-#define        MASK20BITS                              0xfffff
-#define RFREG_OFFSET_MASK                      0xfffff
-
 #define        BENABLE                                 0x1
 #define        BDISABLE                                0x0
 
index 347af1e4f438e57cf2c37b8975169a7c92941681..1b4101bf9974e8243124f70f40e295e841a3f0a1 100644 (file)
@@ -93,6 +93,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        u8 tid;
 
        rtl8188ee_bt_reg_init(hw);
+       rtlpci->msi_support = true;
 
        rtlpriv->dm.dm_initialgain_enable = 1;
        rtlpriv->dm.dm_flag = 0;
index aece6c9cccf1b50febc16049f4fd962c5c834aa7..06ef47cd62038cc9695078441d545be0ca1b9348 100644 (file)
@@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
                        /* During testing, hdr was NULL */
                        return false;
                }
-               if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+               if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
                    (ieee80211_has_protected(hdr->frame_control)))
                        rx_status->flag &= ~RX_FLAG_DECRYPTED;
                else
@@ -489,16 +489,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
 
 void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info,
-                         struct ieee80211_sta *sta,
-                         struct sk_buff *skb,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta, struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-       u8 *pdesc = (u8 *)pdesc_tx;
+       u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
        unsigned int buf_len = 0;
@@ -717,7 +716,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
 
        SET_TX_DESC_OWN(pdesc, 1);
 
-       SET_TX_DESC_PKT_SIZE((u8 *)pdesc, (u16)(skb->len));
+       SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
 
        SET_TX_DESC_FIRST_SEG(pdesc, 1);
        SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -734,7 +733,8 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
                      pdesc, TX_DESC_SIZE);
 }
 
-void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val)
 {
        if (istx == true) {
                switch (desc_name) {
index 21ca33a7c770b83e338bd735084d417444810efc..8c2609412d2cc26d92b7efcccf85ef0281def89d 100644 (file)
@@ -777,15 +777,15 @@ struct rx_desc_88e {
 
 void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info,
-                         struct ieee80211_sta *sta,
-                         struct sk_buff *skb,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+                         struct ieee80211_sta *sta, struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
 bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
                           struct rtl_stats *status,
                           struct ieee80211_rx_status *rx_status,
                           u8 *pdesc, struct sk_buff *skb);
-void rtl88ee_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val);
 u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name);
 void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
 void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
index 2eb0b38384dd7cef1323f5817be961954d31f514..55adf043aef7e250759d9c993b224217ad974d37 100644 (file)
@@ -319,7 +319,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        u8 e_aci = *(val);
                        rtl92c_dm_init_edca_turbo(hw);
 
-                       if (rtlpci->acm_method != eAcmWay2_SW)
+                       if (rtlpci->acm_method != EACMWAY2_SW)
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                              HW_VAR_ACM_CTRL,
                                                              (&e_aci));
@@ -476,7 +476,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
-               rtl92c_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+               rtl92c_set_p2p_ps_offload_cmd(hw, *val);
                break;
        case HW_VAR_AID:{
                        u16 u2btmp;
@@ -521,30 +521,32 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                                (u8 *)(&fw_current_inps));
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_H2C_FW_PWRMODE,
-                                               (u8 *)(&ppsc->fwctrl_psmode));
+                                               &ppsc->fwctrl_psmode);
 
                                rtlpriv->cfg->ops->set_hw_reg(hw,
-                                               HW_VAR_SET_RPWM,
-                                               (u8 *)(&rpwm_val));
+                                                             HW_VAR_SET_RPWM,
+                                                             &rpwm_val);
                        } else {
                                rpwm_val = 0x0C;        /* RF on */
                                fw_pwrmode = FW_PS_ACTIVE_MODE;
                                fw_current_inps = false;
                                rtlpriv->cfg->ops->set_hw_reg(hw,
-                                               HW_VAR_SET_RPWM,
-                                               (u8 *)(&rpwm_val));
+                                                             HW_VAR_SET_RPWM,
+                                                             &rpwm_val);
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_H2C_FW_PWRMODE,
-                                               (u8 *)(&fw_pwrmode));
+                                               &fw_pwrmode);
 
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                HW_VAR_FW_PSMODE_STATUS,
                                                (u8 *)(&fw_current_inps));
                        }
                break; }
+       case HW_VAR_KEEP_ALIVE:
+               break;
        default:
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case not processed\n");
+                        "switch case %d not processed\n", variable);
                break;
        }
 }
@@ -1214,11 +1216,13 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
 void rtl92ce_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+       u32 reg_rcr;
 
        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;
 
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
        if (check_bssid) {
                reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1734,7 +1738,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
                        if (rtlefuse->eeprom_did == 0x8176) {
                                if ((rtlefuse->eeprom_svid == 0x103C &&
                                     rtlefuse->eeprom_smid == 0x1629))
-                                       rtlhal->oem_id = RT_CID_819x_HP;
+                                       rtlhal->oem_id = RT_CID_819X_HP;
                                else
                                        rtlhal->oem_id = RT_CID_DEFAULT;
                        } else {
@@ -1745,7 +1749,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
                        rtlhal->oem_id = RT_CID_TOSHIBA;
                        break;
                case EEPROM_CID_QMI:
-                       rtlhal->oem_id = RT_CID_819x_QMI;
+                       rtlhal->oem_id = RT_CID_819X_QMI;
                        break;
                case EEPROM_CID_WHQL:
                default:
@@ -1764,14 +1768,14 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        switch (rtlhal->oem_id) {
-       case RT_CID_819x_HP:
+       case RT_CID_819X_HP:
                pcipriv->ledctl.led_opendrain = true;
                break;
-       case RT_CID_819x_Lenovo:
+       case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
        case RT_CID_TOSHIBA:
        case RT_CID_CCX:
-       case RT_CID_819x_Acer:
+       case RT_CID_819X_ACER:
        case RT_CID_WHQL:
        default:
                break;
index 73262ca3864b2c219a14e3c8085cbdcb32fc32c3..98b22303c84d1849da10221e80cfce7d87568c5a 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../ps.h"
+#include "../core.h"
 #include "reg.h"
 #include "def.h"
 #include "hw.h"
@@ -198,18 +199,7 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
        }
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_reg_arraylen; i = i + 2) {
-                       if (phy_regarray_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table[i]);
                        rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
                                      phy_regarray_table[i + 1]);
                        udelay(1);
@@ -245,18 +235,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
 
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
-                       if (phy_regarray_table_pg[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table_pg[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table_pg[i]);
 
                        _rtl92c_store_pwrIndex_diffrate_offset(hw,
                                               phy_regarray_table_pg[i],
@@ -305,46 +284,16 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        switch (rfpath) {
        case RF90_PATH_A:
                for (i = 0; i < radioa_arraylen; i = i + 2) {
-                       if (radioa_array_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (radioa_array_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radioa_array_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radioa_array_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radioa_array_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radioa_array_table[i] == 0xf9)
-                               udelay(1);
-                       else {
-                               rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
-                                             RFREG_OFFSET_MASK,
-                                             radioa_array_table[i + 1]);
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radioa_array_table[i + 1]);
                }
                break;
        case RF90_PATH_B:
                for (i = 0; i < radiob_arraylen; i = i + 2) {
-                       if (radiob_array_table[i] == 0xfe) {
-                               mdelay(50);
-                       } else if (radiob_array_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radiob_array_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radiob_array_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radiob_array_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radiob_array_table[i] == 0xf9)
-                               udelay(1);
-                       else {
-                               rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
-                                             RFREG_OFFSET_MASK,
-                                             radiob_array_table[i + 1]);
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radiob_array_table[i + 1]);
                }
                break;
        case RF90_PATH_C:
@@ -355,6 +304,8 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case not processed\n");
                break;
+       default:
+               break;
        }
        return true;
 }
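
The hunks above replace the inline 0xfe..0xf9 delay ladders with calls to the rtl_addr_delay()/rtl_rfreg_delay() helpers pulled in via "../core.h". The following is a minimal sketch of what such helpers consolidate, reconstructed only from the removed lines shown here; the example_* names and bodies are illustrative, not the merged implementation, and rtl_set_rfreg()/enum radio_path are assumed to come from the driver's wifi.h.

/* Sketch: consolidate the magic delay entries used in the register tables. */
#include <linux/delay.h>

static void example_addr_delay(u32 addr)
{
	/* Table entries 0xf9..0xfe encode delays rather than register writes. */
	if (addr == 0xfe)
		mdelay(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		udelay(50);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
}

static void example_rfreg_delay(struct ieee80211_hw *hw, enum radio_path path,
				u32 addr, u32 mask, u32 data)
{
	if (addr >= 0xf9 && addr <= 0xfe) {
		example_addr_delay(addr);
	} else {
		rtl_set_rfreg(hw, path, addr, mask, data);
		/* 1 us settle time between consecutive BB/RF writes. */
		udelay(1);
	}
}
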
index 8922ecb47ad245cdb7d1b759f44c558f97e52173..ed703a1b3b7c1c54490efb803e804000a2d43342 100644 (file)
 #define        BWORD1                                  0xc
 #define        BWORD                                   0xf
 
-#define        MASKBYTE0                               0xff
-#define        MASKBYTE1                               0xff00
-#define        MASKBYTE2                               0xff0000
-#define        MASKBYTE3                               0xff000000
-#define        MASKHWORD                               0xffff0000
-#define        MASKLWORD                               0x0000ffff
-#define        MASKDWORD                               0xffffffff
-#define        MASK12BITS                              0xfff
-#define        MASKH4BITS                              0xf0000000
-#define MASKOFDM_D                             0xffc00000
-#define        MASKCCK                                 0x3f3f3f3f
-
-#define        MASK4BITS                               0x0f
-#define        MASK20BITS                              0xfffff
-#define RFREG_OFFSET_MASK                      0xfffff
-
 #define        BENABLE                                 0x1
 #define        BDISABLE                                0x0
 
index 52abf0a862fa70f26ec26ff62d2017f0a11ce5c9..8f04817cb7ec810bc70dea859895788fd9259687 100644 (file)
@@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
                        /* In testing, hdr was NULL here */
                        return false;
                }
-               if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+               if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
                    (ieee80211_has_protected(hdr->frame_control)))
                        rx_status->flag &= ~RX_FLAG_DECRYPTED;
                else
@@ -426,7 +426,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
 
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *tcb_desc)
@@ -666,7 +666,8 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
                      "H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
 }
 
-void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val)
 {
        if (istx) {
                switch (desc_name) {
index a7cdd514cb2e2bfd6a86aa629a997a3d1c5f5db8..9a39ec4204dda77ef6e268be0396d3399ffc33f8 100644 (file)
@@ -711,8 +711,8 @@ struct rx_desc_92c {
 } __packed;
 
 void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
-                         struct ieee80211_hdr *hdr,
-                         u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_hdr *hdr, u8 *pdesc,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
@@ -720,7 +720,8 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
                           struct rtl_stats *stats,
                           struct ieee80211_rx_status *rx_status,
                           u8 *pdesc, struct sk_buff *skb);
-void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val);
 u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
 void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
 void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
index 468bf73cc883f41bf26222cc7aa52e17c5ed96a3..68b5c7e92cfbc2c6a76580a2d1dbb3297730c64f 100644 (file)
@@ -394,7 +394,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
                        if (rtlefuse->eeprom_did == 0x8176) {
                                if ((rtlefuse->eeprom_svid == 0x103C &&
                                     rtlefuse->eeprom_smid == 0x1629))
-                                       rtlhal->oem_id = RT_CID_819x_HP;
+                                       rtlhal->oem_id = RT_CID_819X_HP;
                                else
                                        rtlhal->oem_id = RT_CID_DEFAULT;
                        } else {
@@ -405,7 +405,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
                        rtlhal->oem_id = RT_CID_TOSHIBA;
                        break;
                case EEPROM_CID_QMI:
-                       rtlhal->oem_id = RT_CID_819x_QMI;
+                       rtlhal->oem_id = RT_CID_819X_QMI;
                        break;
                case EEPROM_CID_WHQL:
                default:
@@ -423,14 +423,14 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        switch (rtlhal->oem_id) {
-       case RT_CID_819x_HP:
+       case RT_CID_819X_HP:
                usb_priv->ledctl.led_opendrain = true;
                break;
-       case RT_CID_819x_Lenovo:
+       case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
        case RT_CID_TOSHIBA:
        case RT_CID_CCX:
-       case RT_CID_819x_Acer:
+       case RT_CID_819X_ACER:
        case RT_CID_WHQL:
        default:
                break;
@@ -985,6 +985,17 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        int err = 0;
        static bool iqk_initialized;
+       unsigned long flags;
+
+       /* As this function can take a very long time (up to 350 ms)
+        * and can be called with irqs disabled, reenable the irqs
+        * to let the other devices continue being serviced.
+        *
+        * It is safe to do so since our own interrupts will only be enabled
+        * in a subsequent step.
+        */
+       local_save_flags(flags);
+       local_irq_enable();
 
        rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
        err = _rtl92cu_init_mac(hw);
@@ -997,7 +1008,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "Failed to download FW. Init HW without FW now..\n");
                err = 1;
-               return err;
+               goto exit;
        }
        rtlhal->last_hmeboxnum = 0; /* h2c */
        _rtl92cu_phy_param_tab_init(hw);
@@ -1034,6 +1045,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        _InitPABias(hw);
        _update_mac_setting(hw);
        rtl92c_dm_init(hw);
+exit:
+       local_irq_restore(flags);
        return err;
 }
 
@@ -1379,11 +1392,13 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+       u32 reg_rcr;
 
        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;
 
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
        if (check_bssid) {
                u8 tmp;
                if (IS_NORMAL_CHIP(rtlhal->version)) {
@@ -1795,7 +1810,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                          e_aci);
                                break;
                        }
-                       if (rtlusb->acm_method != eAcmWay2_SW)
+                       if (rtlusb->acm_method != EACMWAY2_SW)
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                         HW_VAR_ACM_CTRL, &e_aci);
                        break;
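
The rtl92cu_hw_init() hunk above temporarily re-enables interrupts around the long (up to 350 ms) initialisation and restores the caller's IRQ state via the new exit label. A minimal sketch of that save/enable/restore pattern follows; long_running_init() is a hypothetical placeholder, not a function from the merged tree.

/* Sketch: briefly re-enable IRQs around long, sleep-free init work that
 * may be entered with interrupts disabled, then restore the prior state. */
#include <linux/irqflags.h>

static int guarded_long_init(void)
{
	unsigned long flags;
	int err;

	local_save_flags(flags);	/* remember the caller's IRQ state */
	local_irq_enable();		/* let other devices keep being serviced */

	err = long_running_init();	/* placeholder for the lengthy work */

	local_irq_restore(flags);	/* put the IRQ state back as we found it */
	return err;
}
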
index 0c09240eadccdee5fe6fd6ab266c561d7019684b..9831ff1128ca93385377a302187ee5c0d887b56d 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../ps.h"
+#include "../core.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -188,18 +189,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
        }
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_reg_arraylen; i = i + 2) {
-                       if (phy_regarray_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table[i]);
                        rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
                                      phy_regarray_table[i + 1]);
                        udelay(1);
@@ -236,18 +226,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
        phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata;
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
-                       if (phy_regarray_table_pg[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table_pg[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table_pg[i]);
                        _rtl92c_store_pwrIndex_diffrate_offset(hw,
                                                  phy_regarray_table_pg[i],
                                                  phy_regarray_table_pg[i + 1],
@@ -294,46 +273,16 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        switch (rfpath) {
        case RF90_PATH_A:
                for (i = 0; i < radioa_arraylen; i = i + 2) {
-                       if (radioa_array_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (radioa_array_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radioa_array_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radioa_array_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radioa_array_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radioa_array_table[i] == 0xf9)
-                               udelay(1);
-                       else {
-                               rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
-                                             RFREG_OFFSET_MASK,
-                                             radioa_array_table[i + 1]);
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radioa_array_table[i + 1]);
                }
                break;
        case RF90_PATH_B:
                for (i = 0; i < radiob_arraylen; i = i + 2) {
-                       if (radiob_array_table[i] == 0xfe) {
-                               mdelay(50);
-                       } else if (radiob_array_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radiob_array_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radiob_array_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radiob_array_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radiob_array_table[i] == 0xf9)
-                               udelay(1);
-                       else {
-                               rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
-                                             RFREG_OFFSET_MASK,
-                                             radiob_array_table[i + 1]);
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radiob_array_table[i + 1]);
                }
                break;
        case RF90_PATH_C:
@@ -344,6 +293,8 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "switch case not processed\n");
                break;
+       default:
+               break;
        }
        return true;
 }
index 1bc21ccfa71b85f671afebb8d2826e43be45f9e9..035e0dc3922caf99311cb42c3dadea8e255c587c 100644 (file)
@@ -495,7 +495,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
 
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb,
                          u8 queue_index,
index 725c53accc5839bbd30af97e753ab6b8d040ee48..fd8051dcd98a0295c4838750c6e4a3aded6920cb 100644 (file)
@@ -420,7 +420,7 @@ struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *,
                                           struct sk_buff_head *);
 void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb,
                          u8 queue_index,
index 7908e1c85819409091abd71ba5b57595947ff909..304c443b89b261b3a633f276962e35ef536998f7 100644 (file)
@@ -194,15 +194,15 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
        rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
        rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
 
-       ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, BMASKDWORD);
+       ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
        falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
        falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, BMASKDWORD);
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
        falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, BMASKDWORD);
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
        falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
        falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
-       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, BMASKDWORD);
+       ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
        falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
        falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
                                      falsealm_cnt->cnt_rate_illegal +
@@ -214,9 +214,9 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
        if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
                /* hold cck counter */
                rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
-               ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, BMASKBYTE0);
+               ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
                falsealm_cnt->cnt_cck_fail = ret_value;
-               ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, BMASKBYTE3);
+               ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
                falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
                rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
        } else {
@@ -331,11 +331,11 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
        if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
                if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
                        rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
-                       rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
+                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
                        rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
                } else {
                        rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
-                       rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
+                       rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
                        rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
                }
                de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
@@ -722,7 +722,7 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
        RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
                 "===> Rx Gain %x\n", u4tmp);
        for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
-               rtl_set_rfreg(hw, i, 0x3C, BRFREGOFFSETMASK,
+               rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
                              (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
 }
 
@@ -737,7 +737,7 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
        /* Query CCK default setting From 0xa24 */
        rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
        temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
-                                BMASKDWORD) & BMASKCCK;
+                                MASKDWORD) & MASKCCK;
        rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
        for (i = 0; i < CCK_TABLE_LENGTH; i++) {
                if (rtlpriv->dm.cck_inch14) {
@@ -896,9 +896,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
                rf = 1;
        if (thermalvalue) {
                ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
-                                     BMASKDWORD) & BMASKOFDM_D;
+                                     MASKDWORD) & MASKOFDM_D;
                for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
-                       if (ele_d == (ofdmswing_table[i] & BMASKOFDM_D)) {
+                       if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
                                ofdm_index_old[0] = (u8) i;
 
                                RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
@@ -910,10 +910,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
                }
                if (is2t) {
                        ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
-                                             BMASKDWORD) & BMASKOFDM_D;
+                                             MASKDWORD) & MASKOFDM_D;
                        for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
                                if (ele_d ==
-                                   (ofdmswing_table[i] & BMASKOFDM_D)) {
+                                   (ofdmswing_table[i] & MASKOFDM_D)) {
                                        ofdm_index_old[1] = (u8) i;
                                        RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
                                                 DBG_LOUD,
@@ -1091,10 +1091,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
                                value32 = (ele_d << 22) | ((ele_c & 0x3F) <<
                                          16) | ele_a;
                                rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
-                                             BMASKDWORD, value32);
+                                             MASKDWORD, value32);
 
                                value32 = (ele_c & 0x000003C0) >> 6;
-                               rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+                               rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
                                              value32);
 
                                value32 = ((val_x * ele_d) >> 7) & 0x01;
@@ -1103,10 +1103,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
 
                        } else {
                                rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
-                                             BMASKDWORD,
+                                             MASKDWORD,
                                              ofdmswing_table
                                              [(u8)ofdm_index[0]]);
-                               rtl_set_bbreg(hw, ROFDM0_XCTxAFE, BMASKH4BITS,
+                               rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS,
                                              0x00);
                                rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
                                              BIT(24), 0x00);
@@ -1204,21 +1204,21 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
                                                  ele_a;
                                        rtl_set_bbreg(hw,
                                                      ROFDM0_XBTxIQIMBALANCE,
-                                                     BMASKDWORD, value32);
+                                                     MASKDWORD, value32);
                                        value32 = (ele_c & 0x000003C0) >> 6;
                                        rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
-                                                     BMASKH4BITS, value32);
+                                                     MASKH4BITS, value32);
                                        value32 = ((val_x * ele_d) >> 7) & 0x01;
                                        rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
                                                      BIT(28), value32);
                                } else {
                                        rtl_set_bbreg(hw,
                                                      ROFDM0_XBTxIQIMBALANCE,
-                                                     BMASKDWORD,
+                                                     MASKDWORD,
                                                      ofdmswing_table
                                                      [(u8) ofdm_index[1]]);
                                        rtl_set_bbreg(hw, ROFDM0_XDTxAFE,
-                                                     BMASKH4BITS, 0x00);
+                                                     MASKH4BITS, 0x00);
                                        rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
                                                      BIT(28), 0x00);
                                }
@@ -1229,10 +1229,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
                        }
                        RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
                                 "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
-                                rtl_get_bbreg(hw, 0xc80, BMASKDWORD),
-                                rtl_get_bbreg(hw, 0xc94, BMASKDWORD),
+                                rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+                                rtl_get_bbreg(hw, 0xc94, MASKDWORD),
                                 rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
-                                              BRFREGOFFSETMASK));
+                                              RFREG_OFFSET_MASK));
                }
                if ((delta_iqk > rtlefuse->delta_iqk) &&
                    (rtlefuse->delta_iqk != 0)) {
index c4a7db9135d6e3850dcd8490e5e9165807178b64..2b08671004a0aa88b4c6ea270ebc4e7c8368aee6 100644 (file)
@@ -318,7 +318,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HW_VAR_AC_PARAM: {
                u8 e_aci = *val;
                rtl92d_dm_init_edca_turbo(hw);
-               if (rtlpci->acm_method != eAcmWay2_SW)
+               if (rtlpci->acm_method != EACMWAY2_SW)
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
                                                      &e_aci);
                break;
@@ -985,9 +985,9 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
        /* set default value after initialize RF,  */
        rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
        rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
-                       RF_CHNLBW, BRFREGOFFSETMASK);
+                       RF_CHNLBW, RFREG_OFFSET_MASK);
        rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
-                       RF_CHNLBW, BRFREGOFFSETMASK);
+                       RF_CHNLBW, RFREG_OFFSET_MASK);
 
        /*---- Set CCK and OFDM Block "ON"----*/
        if (rtlhal->current_bandtype == BAND_ON_2_4G)
@@ -1035,7 +1035,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
 
                                tmp_rega = rtl_get_rfreg(hw,
                                                  (enum radio_path)RF90_PATH_A,
-                                                 0x2a, BMASKDWORD);
+                                                 0x2a, MASKDWORD);
 
                                if (((tmp_rega & BIT(11)) == BIT(11)))
                                        break;
@@ -1138,11 +1138,13 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
 void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       u32 reg_rcr = rtlpci->receive_config;
+       u32 reg_rcr;
 
        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;
+
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
        if (check_bssid) {
                reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
@@ -1332,13 +1334,13 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
        /* c. ========RF OFF sequence==========  */
        /* 0x88c[23:20] = 0xf. */
        rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
-       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
 
        /* APSD_CTRL 0x600[7:0] = 0x40 */
        rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
 
        /* Close antenna 0,0xc04,0xd04 */
-       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0);
+       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0);
        rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0);
 
        /*  SYS_FUNC_EN 0x02[7:0] = 0xE2   reset BB state machine */
index 13196cc4b1d380279e7ce3096b5861ba14087b81..3d1f0dd4e52d89825710544078895752895b2c76 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../ps.h"
+#include "../core.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -242,7 +243,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
        else if (rtlhal->during_mac0init_radiob)
                /* mac0 use phy1 write radio_b. */
                dbi_direct = BIT(3) | BIT(2);
-       if (bitmask != BMASKDWORD) {
+       if (bitmask != MASKDWORD) {
                if (rtlhal->during_mac1init_radioa ||
                    rtlhal->during_mac0init_radiob)
                        originalvalue = rtl92de_read_dword_dbi(hw,
@@ -275,20 +276,20 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
        u32 retvalue;
 
        newoffset = offset;
-       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD);
+       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
        if (rfpath == RF90_PATH_A)
                tmplong2 = tmplong;
        else
-               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD);
+               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
        tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
                (newoffset << 23) | BLSSIREADEDGE;
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
                tmplong & (~BLSSIREADEDGE));
        udelay(10);
-       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, BMASKDWORD, tmplong2);
+       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
        udelay(50);
        udelay(50);
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, BMASKDWORD,
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
                tmplong | BLSSIREADEDGE);
        udelay(10);
        if (rfpath == RF90_PATH_A)
@@ -321,7 +322,7 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
        newoffset = offset;
        /* T65 RF */
        data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
-       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, BMASKDWORD, data_and_addr);
+       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
        RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
                 rfpath, pphyreg->rf3wire_offset, data_and_addr);
 }
@@ -362,7 +363,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
                return;
        spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
        if (rtlphy->rf_mode != RF_OP_BY_FW) {
-               if (bitmask != BRFREGOFFSETMASK) {
+               if (bitmask != RFREG_OFFSET_MASK) {
                        original_value = _rtl92d_phy_rf_serial_read(hw,
                                rfpath, regaddr);
                        bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
@@ -567,19 +568,8 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                 " ===> phy:Rtl819XPHY_REG_Array_PG\n");
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_reg_arraylen; i = i + 2) {
-                       if (phy_regarray_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table[i] == 0xf9)
-                               udelay(1);
-                       rtl_set_bbreg(hw, phy_regarray_table[i], BMASKDWORD,
+                       rtl_addr_delay(phy_regarray_table[i]);
+                       rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
                                      phy_regarray_table[i + 1]);
                        udelay(1);
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
@@ -591,7 +581,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                if (rtlhal->interfaceindex == 0) {
                        for (i = 0; i < agctab_arraylen; i = i + 2) {
                                rtl_set_bbreg(hw, agctab_array_table[i],
-                                       BMASKDWORD,
+                                       MASKDWORD,
                                        agctab_array_table[i + 1]);
                                /* Add 1us delay between BB/RF register
                                 * setting. */
@@ -607,7 +597,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                        if (rtlhal->current_bandtype == BAND_ON_2_4G) {
                                for (i = 0; i < agctab_arraylen; i = i + 2) {
                                        rtl_set_bbreg(hw, agctab_array_table[i],
-                                               BMASKDWORD,
+                                               MASKDWORD,
                                                agctab_array_table[i + 1]);
                                        /* Add 1us delay between BB/RF register
                                         * setting. */
@@ -623,7 +613,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
                                for (i = 0; i < agctab_5garraylen; i = i + 2) {
                                        rtl_set_bbreg(hw,
                                                agctab_5garray_table[i],
-                                               BMASKDWORD,
+                                               MASKDWORD,
                                                agctab_5garray_table[i + 1]);
                                        /* Add 1us delay between BB/RF register
                                         * setting. */
@@ -705,18 +695,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
        phy_regarray_table_pg = rtl8192de_phy_reg_array_pg;
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
-                       if (phy_regarray_table_pg[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table_pg[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table_pg[i]);
                        _rtl92d_store_pwrindex_diffrate_offset(hw,
                                phy_regarray_table_pg[i],
                                phy_regarray_table_pg[i + 1],
@@ -843,54 +822,16 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        switch (rfpath) {
        case RF90_PATH_A:
                for (i = 0; i < radioa_arraylen; i = i + 2) {
-                       if (radioa_array_table[i] == 0xfe) {
-                               mdelay(50);
-                       } else if (radioa_array_table[i] == 0xfd) {
-                               /* delay_ms(5); */
-                               mdelay(5);
-                       } else if (radioa_array_table[i] == 0xfc) {
-                               /* delay_ms(1); */
-                               mdelay(1);
-                       } else if (radioa_array_table[i] == 0xfb) {
-                               udelay(50);
-                       } else if (radioa_array_table[i] == 0xfa) {
-                               udelay(5);
-                       } else if (radioa_array_table[i] == 0xf9) {
-                               udelay(1);
-                       } else {
-                               rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
-                                             BRFREGOFFSETMASK,
-                                             radioa_array_table[i + 1]);
-                               /*  Add 1us delay between BB/RF register set. */
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radioa_array_table[i + 1]);
                }
                break;
        case RF90_PATH_B:
                for (i = 0; i < radiob_arraylen; i = i + 2) {
-                       if (radiob_array_table[i] == 0xfe) {
-                               /* Delay specific ms. Only RF configuration
-                                * requires delay. */
-                               mdelay(50);
-                       } else if (radiob_array_table[i] == 0xfd) {
-                               /* delay_ms(5); */
-                               mdelay(5);
-                       } else if (radiob_array_table[i] == 0xfc) {
-                               /* delay_ms(1); */
-                               mdelay(1);
-                       } else if (radiob_array_table[i] == 0xfb) {
-                               udelay(50);
-                       } else if (radiob_array_table[i] == 0xfa) {
-                               udelay(5);
-                       } else if (radiob_array_table[i] == 0xf9) {
-                               udelay(1);
-                       } else {
-                               rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
-                                             BRFREGOFFSETMASK,
-                                             radiob_array_table[i + 1]);
-                               /*  Add 1us delay between BB/RF register set. */
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radiob_array_table[i + 1]);
                }
                break;
        case RF90_PATH_C:
@@ -911,13 +852,13 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
 
        rtlphy->default_initialgain[0] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, BMASKBYTE0);
+           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
        rtlphy->default_initialgain[1] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, BMASKBYTE0);
+           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
        rtlphy->default_initialgain[2] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, BMASKBYTE0);
+           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
        rtlphy->default_initialgain[3] =
-           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, BMASKBYTE0);
+           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                 "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
                 rtlphy->default_initialgain[0],
@@ -925,9 +866,9 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
                 rtlphy->default_initialgain[2],
                 rtlphy->default_initialgain[3]);
        rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
-                                             BMASKBYTE0);
+                                             MASKBYTE0);
        rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
-                                             BMASKDWORD);
+                                             MASKDWORD);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                 "Default framesync (0x%x) = 0x%x\n",
                 ROFDM0_RXDETECTOR3, rtlphy->framesync);
@@ -1106,7 +1047,7 @@ static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
 {
        rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0);
        rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0);
-       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x00);
+       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
        rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
 }
 
@@ -1168,7 +1109,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        u32 imr_num = MAX_RF_IMR_INDEX;
-       u32 rfmask = BRFREGOFFSETMASK;
+       u32 rfmask = RFREG_OFFSET_MASK;
        u8 group, i;
        unsigned long flag = 0;
 
@@ -1211,7 +1152,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
                        for (i = 0; i < imr_num; i++) {
                                rtl_set_rfreg(hw, (enum radio_path)rfpath,
                                              rf_reg_for_5g_swchnl_normal[i],
-                                             BRFREGOFFSETMASK,
+                                             RFREG_OFFSET_MASK,
                                              rf_imr_param_normal[0][0][i]);
                        }
                        rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1329,7 +1270,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
                        if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
                                rtl_set_rfreg(hw, (enum radio_path)path,
                                              rf_reg_for_c_cut_5g[i],
-                                             BRFREGOFFSETMASK, 0xE439D);
+                                             RFREG_OFFSET_MASK, 0xE439D);
                        } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
                                u4tmp2 = (rf_reg_pram_c_5g[index][i] &
                                     0x7FF) | (u4tmp << 11);
@@ -1337,11 +1278,11 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
                                        u4tmp2 &= ~(BIT(7) | BIT(6));
                                rtl_set_rfreg(hw, (enum radio_path)path,
                                              rf_reg_for_c_cut_5g[i],
-                                             BRFREGOFFSETMASK, u4tmp2);
+                                             RFREG_OFFSET_MASK, u4tmp2);
                        } else {
                                rtl_set_rfreg(hw, (enum radio_path)path,
                                              rf_reg_for_c_cut_5g[i],
-                                             BRFREGOFFSETMASK,
+                                             RFREG_OFFSET_MASK,
                                              rf_reg_pram_c_5g[index][i]);
                        }
                        RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1351,7 +1292,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
                                 path, index,
                                 rtl_get_rfreg(hw, (enum radio_path)path,
                                               rf_reg_for_c_cut_5g[i],
-                                              BRFREGOFFSETMASK));
+                                              RFREG_OFFSET_MASK));
                }
                if (need_pwr_down)
                        _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1381,7 +1322,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
                                     i++) {
                                        rtl_set_rfreg(hw, rfpath,
                                                rf_for_c_cut_5g_internal_pa[i],
-                                               BRFREGOFFSETMASK,
+                                               RFREG_OFFSET_MASK,
                                                rf_pram_c_5g_int_pa[index][i]);
                                        RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
                                                 "offset 0x%x value 0x%x path %d index %d\n",
@@ -1422,13 +1363,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
                        if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
                                rtl_set_rfreg(hw, (enum radio_path)path,
                                        rf_reg_for_c_cut_2g[i],
-                                       BRFREGOFFSETMASK,
+                                       RFREG_OFFSET_MASK,
                                        (rf_reg_param_for_c_cut_2g[index][i] |
                                        BIT(17)));
                        else
                                rtl_set_rfreg(hw, (enum radio_path)path,
                                              rf_reg_for_c_cut_2g[i],
-                                             BRFREGOFFSETMASK,
+                                             RFREG_OFFSET_MASK,
                                              rf_reg_param_for_c_cut_2g
                                              [index][i]);
                        RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
@@ -1438,14 +1379,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
                                 rf_reg_mask_for_c_cut_2g[i], path, index,
                                 rtl_get_rfreg(hw, (enum radio_path)path,
                                               rf_reg_for_c_cut_2g[i],
-                                              BRFREGOFFSETMASK));
+                                              RFREG_OFFSET_MASK));
                }
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                        "cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
                        rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
 
                rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
-                             BRFREGOFFSETMASK,
+                             RFREG_OFFSET_MASK,
                              rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
                if (need_pwr_down)
                        _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1493,41 +1434,41 @@ static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
        /* path-A IQK setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path-A IQK setting!\n");
        if (rtlhal->interfaceindex == 0) {
-               rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c1f);
-               rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c1f);
+               rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
+               rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
        } else {
-               rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c22);
        }
-       rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140102);
-       rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x28160206);
+       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
+       rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160206);
        /* path-B IQK setting */
        if (configpathb) {
-               rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x10008c22);
-               rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140102);
-               rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x28160206);
+               rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
+               rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
+               rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160206);
        }
        /* LO calibration setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "LO calibration setting!\n");
-       rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
        /* One shot, path A LOK & IQK */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "One shot, path A LOK & IQK!\n");
-       rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
-       rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
        /* delay x ms */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                "Delay %d ms for One shot, path A LOK & IQK\n",
                IQK_DELAY_TIME);
        mdelay(IQK_DELAY_TIME);
        /* Check failed */
-       regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+       regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xeac = 0x%x\n", regeac);
-       rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+       rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xe94 = 0x%x\n", rege94);
-       rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+       rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xe9c = 0x%x\n", rege9c);
-       regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+       regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xea4 = 0x%x\n", regea4);
        if (!(regeac & BIT(28)) && (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
            (((rege9c & 0x03FF0000) >> 16) != 0x42))
@@ -1563,42 +1504,42 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path A IQK!\n");
        /* path-A IQK setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path-A IQK setting!\n");
-       rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
-       rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
-       rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82140307);
-       rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68160960);
+       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
+       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
+       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140307);
+       rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68160960);
        /* path-B IQK setting */
        if (configpathb) {
-               rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
-               rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
-               rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82110000);
-               rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68110000);
+               rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
+               rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
+               rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82110000);
+               rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68110000);
        }
        /* LO calibration setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "LO calibration setting!\n");
-       rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
        /* path-A PA on */
-       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x07000f60);
-       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD, 0x66e60e30);
+       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60);
+       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30);
        for (i = 0; i < retrycount; i++) {
                /* One shot, path A LOK & IQK */
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                        "One shot, path A LOK & IQK!\n");
-               rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf9000000);
-               rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+               rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+               rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
                /* delay x ms */
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                        "Delay %d ms for One shot, path A LOK & IQK.\n",
                        IQK_DELAY_TIME);
                mdelay(IQK_DELAY_TIME * 10);
                /* Check failed */
-               regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+               regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xeac = 0x%x\n", regeac);
-               rege94 = rtl_get_bbreg(hw, 0xe94, BMASKDWORD);
+               rege94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xe94 = 0x%x\n", rege94);
-               rege9c = rtl_get_bbreg(hw, 0xe9c, BMASKDWORD);
+               rege9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xe9c = 0x%x\n", rege9c);
-               regea4 = rtl_get_bbreg(hw, 0xea4, BMASKDWORD);
+               regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xea4 = 0x%x\n", regea4);
                if (!(regeac & TxOKBit) &&
                     (((rege94 & 0x03FF0000) >> 16) != 0x142)) {
@@ -1620,9 +1561,9 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
                }
        }
        /* path A PA off */
-       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
                      rtlphy->iqk_bb_backup[0]);
-       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BMASKDWORD,
+       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD,
                      rtlphy->iqk_bb_backup[1]);
        return result;
 }
@@ -1637,22 +1578,22 @@ static u8 _rtl92d_phy_pathb_iqk(struct ieee80211_hw *hw)
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path B IQK!\n");
        /* One shot, path B LOK & IQK */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "One shot, path A LOK & IQK!\n");
-       rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000002);
-       rtl_set_bbreg(hw, 0xe60, BMASKDWORD, 0x00000000);
+       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
+       rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
        /* delay x ms  */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
        mdelay(IQK_DELAY_TIME);
        /* Check failed */
-       regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+       regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xeac = 0x%x\n", regeac);
-       regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+       regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xeb4 = 0x%x\n", regeb4);
-       regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+       regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xebc = 0x%x\n", regebc);
-       regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+       regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xec4 = 0x%x\n", regec4);
-       regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+       regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xecc = 0x%x\n", regecc);
        if (!(regeac & BIT(31)) && (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
            (((regebc & 0x03FF0000) >> 16) != 0x42))
@@ -1680,31 +1621,31 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path B IQK!\n");
        /* path-A IQK setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path-A IQK setting!\n");
-       rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x18008c1f);
-       rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x18008c1f);
-       rtl_set_bbreg(hw, 0xe38, BMASKDWORD, 0x82110000);
-       rtl_set_bbreg(hw, 0xe3c, BMASKDWORD, 0x68110000);
+       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x18008c1f);
+       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x18008c1f);
+       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82110000);
+       rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x68110000);
 
        /* path-B IQK setting */
-       rtl_set_bbreg(hw, 0xe50, BMASKDWORD, 0x18008c2f);
-       rtl_set_bbreg(hw, 0xe54, BMASKDWORD, 0x18008c2f);
-       rtl_set_bbreg(hw, 0xe58, BMASKDWORD, 0x82140307);
-       rtl_set_bbreg(hw, 0xe5c, BMASKDWORD, 0x68160960);
+       rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x18008c2f);
+       rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x18008c2f);
+       rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140307);
+       rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x68160960);
 
        /* LO calibration setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "LO calibration setting!\n");
-       rtl_set_bbreg(hw, 0xe4c, BMASKDWORD, 0x00462911);
+       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
 
        /* path-B PA on */
-       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD, 0x0f600700);
-       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD, 0x061f0d30);
+       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
+       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
 
        for (i = 0; i < retrycount; i++) {
                /* One shot, path B LOK & IQK */
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                        "One shot, path A LOK & IQK!\n");
-               rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xfa000000);
-               rtl_set_bbreg(hw, 0xe48, BMASKDWORD, 0xf8000000);
+               rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xfa000000);
+               rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
 
                /* delay x ms */
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -1712,15 +1653,15 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
                mdelay(IQK_DELAY_TIME * 10);
 
                /* Check failed */
-               regeac = rtl_get_bbreg(hw, 0xeac, BMASKDWORD);
+               regeac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xeac = 0x%x\n", regeac);
-               regeb4 = rtl_get_bbreg(hw, 0xeb4, BMASKDWORD);
+               regeb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xeb4 = 0x%x\n", regeb4);
-               regebc = rtl_get_bbreg(hw, 0xebc, BMASKDWORD);
+               regebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xebc = 0x%x\n", regebc);
-               regec4 = rtl_get_bbreg(hw, 0xec4, BMASKDWORD);
+               regec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xec4 = 0x%x\n", regec4);
-               regecc = rtl_get_bbreg(hw, 0xecc, BMASKDWORD);
+               regecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "0xecc = 0x%x\n", regecc);
                if (!(regeac & BIT(31)) &&
                    (((regeb4 & 0x03FF0000) >> 16) != 0x142))
@@ -1738,9 +1679,9 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
        }
 
        /* path B PA off */
-       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BMASKDWORD,
+       rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
                      rtlphy->iqk_bb_backup[0]);
-       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BMASKDWORD,
+       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD,
                      rtlphy->iqk_bb_backup[2]);
        return result;
 }
@@ -1754,7 +1695,7 @@ static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
 
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Save ADDA parameters.\n");
        for (i = 0; i < regnum; i++)
-               adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], BMASKDWORD);
+               adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
 }
 
 static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
@@ -1779,7 +1720,7 @@ static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                "Reload ADDA power saving parameters !\n");
        for (i = 0; i < regnum; i++)
-               rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, adda_backup[i]);
+               rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]);
 }
 
 static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
@@ -1807,7 +1748,7 @@ static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
                pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
                    0x04db25a4 : 0x0b1b25a4;
        for (i = 0; i < IQK_ADDA_REG_NUM; i++)
-               rtl_set_bbreg(hw, adda_reg[i], BMASKDWORD, pathon);
+               rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
 }
 
 static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
@@ -1830,9 +1771,9 @@ static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path-A standby mode!\n");
 
-       rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x0);
-       rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD, 0x00010000);
-       rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+       rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
 }
 
 static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
@@ -1843,8 +1784,8 @@ static void _rtl92d_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
        mode = pi_mode ? 0x01000100 : 0x01000000;
-       rtl_set_bbreg(hw, 0x820, BMASKDWORD, mode);
-       rtl_set_bbreg(hw, 0x828, BMASKDWORD, mode);
+       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
 }
 
 static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
@@ -1875,7 +1816,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
 
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "IQK for 2.4G :Start!!!\n");
        if (t == 0) {
-               bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+               bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "==>0x%08x\n", bbvalue);
                RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
                        is2t ? "2T2R" : "1T1R");
@@ -1898,40 +1839,40 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
                _rtl92d_phy_pimode_switch(hw, true);
 
        rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
-       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
-       rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
-       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22204000);
+       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+       rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000);
        rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
        if (is2t) {
-               rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, BMASKDWORD,
+               rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD,
                              0x00010000);
-               rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, BMASKDWORD,
+               rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD,
                              0x00010000);
        }
        /* MAC settings */
        _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
                                            rtlphy->iqk_mac_backup);
        /* Page B init */
-       rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
        if (is2t)
-               rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+               rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
        /* IQ calibration setting */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "IQK setting!\n");
-       rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
-       rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x01007c00);
-       rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
        for (i = 0; i < retrycount; i++) {
                patha_ok = _rtl92d_phy_patha_iqk(hw, is2t);
                if (patha_ok == 0x03) {
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "Path A IQK Success!!\n");
-                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
                                        0x3FF0000) >> 16;
-                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
                                        0x3FF0000) >> 16;
-                       result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+                       result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
                                        0x3FF0000) >> 16;
-                       result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+                       result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
                                        0x3FF0000) >> 16;
                        break;
                } else if (i == (retrycount - 1) && patha_ok == 0x01) {
@@ -1939,9 +1880,9 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "Path A IQK Only  Tx Success!!\n");
 
-                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
                                        0x3FF0000) >> 16;
-                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
                                        0x3FF0000) >> 16;
                }
        }
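
Every IQK result capture above follows the same pattern: read the report register with MASKDWORD and keep bits 25:16, the 10-bit calibration value. A hypothetical helper capturing that pattern (the driver itself open-codes the expression at every call site):

/* Hypothetical helper, not part of this patch: extracts the 10-bit IQK
 * result carried in bits 25:16 of report registers such as 0xe94/0xe9c/
 * 0xea4/0xeac (path A) and 0xeb4/0xebc/0xec4/0xecc (path B).
 */
static inline long iqk_result_10bit(struct ieee80211_hw *hw, u32 reg)
{
	return (rtl_get_bbreg(hw, reg, MASKDWORD) & 0x03FF0000) >> 16;
}
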
@@ -1957,22 +1898,22 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
                                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                        "Path B IQK Success!!\n");
                                result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
-                                              BMASKDWORD) & 0x3FF0000) >> 16;
+                                              MASKDWORD) & 0x3FF0000) >> 16;
                                result[t][5] = (rtl_get_bbreg(hw, 0xebc,
-                                              BMASKDWORD) & 0x3FF0000) >> 16;
+                                              MASKDWORD) & 0x3FF0000) >> 16;
                                result[t][6] = (rtl_get_bbreg(hw, 0xec4,
-                                              BMASKDWORD) & 0x3FF0000) >> 16;
+                                              MASKDWORD) & 0x3FF0000) >> 16;
                                result[t][7] = (rtl_get_bbreg(hw, 0xecc,
-                                              BMASKDWORD) & 0x3FF0000) >> 16;
+                                              MASKDWORD) & 0x3FF0000) >> 16;
                                break;
                        } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
                                /* Tx IQK OK */
                                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                        "Path B Only Tx IQK Success!!\n");
                                result[t][4] = (rtl_get_bbreg(hw, 0xeb4,
-                                              BMASKDWORD) & 0x3FF0000) >> 16;
+                                              MASKDWORD) & 0x3FF0000) >> 16;
                                result[t][5] = (rtl_get_bbreg(hw, 0xebc,
-                                              BMASKDWORD) & 0x3FF0000) >> 16;
+                                              MASKDWORD) & 0x3FF0000) >> 16;
                        }
                }
                if (0x00 == pathb_ok)
@@ -1984,7 +1925,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                "IQK:Back to BB mode, load original value!\n");
 
-       rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
        if (t != 0) {
                /* Switch back BB to SI mode after finish IQ Calibration. */
                if (!rtlphy->rfpi_enable)
@@ -2004,8 +1945,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
                                                          rtlphy->iqk_bb_backup,
                                                          IQK_BB_REG_NUM - 1);
                /* load 0xe30 IQC default value */
-               rtl_set_bbreg(hw, 0xe30, BMASKDWORD, 0x01008c00);
-               rtl_set_bbreg(hw, 0xe34, BMASKDWORD, 0x01008c00);
+               rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+               rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
        }
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "<==\n");
 }
@@ -2042,7 +1983,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "IQK for 5G NORMAL:Start!!!\n");
        mdelay(IQK_DELAY_TIME * 20);
        if (t == 0) {
-               bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, BMASKDWORD);
+               bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "==>0x%08x\n", bbvalue);
                RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
                        is2t ? "2T2R" : "1T1R");
@@ -2072,38 +2013,38 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
        if (!rtlphy->rfpi_enable)
                _rtl92d_phy_pimode_switch(hw, true);
        rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(24), 0x00);
-       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKDWORD, 0x03a05600);
-       rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, BMASKDWORD, 0x000800e4);
-       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, BMASKDWORD, 0x22208000);
+       rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+       rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+       rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000);
        rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
 
        /* Page B init */
-       rtl_set_bbreg(hw, 0xb68, BMASKDWORD, 0x0f600000);
+       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
        if (is2t)
-               rtl_set_bbreg(hw, 0xb6c, BMASKDWORD, 0x0f600000);
+               rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x0f600000);
        /* IQ calibration setting  */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "IQK setting!\n");
-       rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0x80800000);
-       rtl_set_bbreg(hw, 0xe40, BMASKDWORD, 0x10007c00);
-       rtl_set_bbreg(hw, 0xe44, BMASKDWORD, 0x01004800);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x10007c00);
+       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
        patha_ok = _rtl92d_phy_patha_iqk_5g_normal(hw, is2t);
        if (patha_ok == 0x03) {
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path A IQK Success!!\n");
-               result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+               result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
                                0x3FF0000) >> 16;
-               result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+               result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
                                0x3FF0000) >> 16;
-               result[t][2] = (rtl_get_bbreg(hw, 0xea4, BMASKDWORD) &
+               result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
                                0x3FF0000) >> 16;
-               result[t][3] = (rtl_get_bbreg(hw, 0xeac, BMASKDWORD) &
+               result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
                                0x3FF0000) >> 16;
        } else if (patha_ok == 0x01) {  /* Tx IQK OK */
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                        "Path A IQK Only  Tx Success!!\n");
 
-               result[t][0] = (rtl_get_bbreg(hw, 0xe94, BMASKDWORD) &
+               result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
                                0x3FF0000) >> 16;
-               result[t][1] = (rtl_get_bbreg(hw, 0xe9c, BMASKDWORD) &
+               result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
                                0x3FF0000) >> 16;
        } else {
                RTPRINT(rtlpriv, FINIT, INIT_IQK,  "Path A IQK Fail!!\n");
@@ -2116,20 +2057,20 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
                if (pathb_ok == 0x03) {
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "Path B IQK Success!!\n");
-                       result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+                       result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
                             0x3FF0000) >> 16;
-                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
                             0x3FF0000) >> 16;
-                       result[t][6] = (rtl_get_bbreg(hw, 0xec4, BMASKDWORD) &
+                       result[t][6] = (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
                             0x3FF0000) >> 16;
-                       result[t][7] = (rtl_get_bbreg(hw, 0xecc, BMASKDWORD) &
+                       result[t][7] = (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
                             0x3FF0000) >> 16;
                } else if (pathb_ok == 0x01) { /* Tx IQK OK */
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "Path B Only Tx IQK Success!!\n");
-                       result[t][4] = (rtl_get_bbreg(hw, 0xeb4, BMASKDWORD) &
+                       result[t][4] = (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
                             0x3FF0000) >> 16;
-                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, BMASKDWORD) &
+                       result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
                             0x3FF0000) >> 16;
                } else {
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -2140,7 +2081,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
        /* Back to BB mode, load original value */
        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                "IQK:Back to BB mode, load original value!\n");
-       rtl_set_bbreg(hw, 0xe28, BMASKDWORD, 0);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
        if (t != 0) {
                if (is2t)
                        _rtl92d_phy_reload_adda_registers(hw, iqk_bb_reg,
@@ -2240,7 +2181,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
                return;
        } else if (iqk_ok) {
                oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
-                       BMASKDWORD) >> 22) & 0x3FF;     /* OFDM0_D */
+                       MASKDWORD) >> 22) & 0x3FF;      /* OFDM0_D */
                val_x = result[final_candidate][0];
                if ((val_x & 0x00000200) != 0)
                        val_x = val_x | 0xFFFFFC00;
@@ -2271,7 +2212,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
                                      ((val_y * oldval_0 >> 7) & 0x1));
                RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
                        rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE,
-                                     BMASKDWORD));
+                                     MASKDWORD));
                if (txonly) {
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,  "only Tx OK\n");
                        return;
@@ -2299,7 +2240,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
                return;
        } else if (iqk_ok) {
                oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE,
-                                         BMASKDWORD) >> 22) & 0x3FF;
+                                         MASKDWORD) >> 22) & 0x3FF;
                val_x = result[final_candidate][4];
                if ((val_x & 0x00000200) != 0)
                        val_x = val_x | 0xFFFFFC00;
@@ -2657,7 +2598,7 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
                rf_mode[index] = rtl_read_byte(rtlpriv, offset);
                /* 2. Set RF mode = standby mode */
                rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
-                             BRFREGOFFSETMASK, 0x010000);
+                             RFREG_OFFSET_MASK, 0x010000);
                if (rtlpci->init_ready) {
                        /* switch CV-curve control by LC-calibration */
                        rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
@@ -2667,16 +2608,16 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
                                      0x08000, 0x01);
                }
                u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
-                                 BRFREGOFFSETMASK);
+                                 RFREG_OFFSET_MASK);
                while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
                        mdelay(50);
                        timecount += 50;
                        u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
-                                             RF_SYN_G6, BRFREGOFFSETMASK);
+                                             RF_SYN_G6, RFREG_OFFSET_MASK);
                }
                RTPRINT(rtlpriv, FINIT, INIT_IQK,
                        "PHY_LCK finish delay for %d ms=2\n", timecount);
-               u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, BRFREGOFFSETMASK);
+               u4tmp = rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
                if (index == 0 && rtlhal->interfaceindex == 0) {
                        RTPRINT(rtlpriv, FINIT, INIT_IQK,
                                "path-A / 5G LCK\n");
@@ -2696,9 +2637,9 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
                                      0x7f, i);
 
                        rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
-                               BRFREGOFFSETMASK, 0x0);
+                               RFREG_OFFSET_MASK, 0x0);
                        readval = rtl_get_rfreg(hw, (enum radio_path)index,
-                                         0x4F, BRFREGOFFSETMASK);
+                                         0x4F, RFREG_OFFSET_MASK);
                        curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
                        /* reg 0x4f [4:0] */
                        /* reg 0x50 [19:10] */
@@ -2912,7 +2853,7 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                                }
                                rtl_set_rfreg(hw, (enum radio_path)rfpath,
                                              currentcmd->para1,
-                                             BRFREGOFFSETMASK,
+                                             RFREG_OFFSET_MASK,
                                              rtlphy->rfreg_chnlval[rfpath]);
                                _rtl92d_phy_reload_imr_setting(hw, channel,
                                                               rfpath);
@@ -2960,7 +2901,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
        if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
            rtlhal->bandset == BAND_ON_BOTH) {
                ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
-                                         BMASKDWORD);
+                                         MASKDWORD);
                if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
                        rtl92d_phy_switch_wirelessband(hw, BAND_ON_5G);
                else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
@@ -3112,7 +3053,7 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
        /* a.   TXPAUSE 0x522[7:0] = 0xFF  Pause MAC TX queue  */
        rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
        /* b.   RF path 0 offset 0x00 = 0x00  disable RF  */
-       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
        /* c.   APSD_CTRL 0x600[7:0] = 0x40 */
        rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
        /* d. APSD_CTRL 0x600[7:0] = 0x00
@@ -3120,12 +3061,12 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
         * RF path 0 offset 0x00 = 0x00
         * APSD_CTRL 0x600[7:0] = 0x40
         * */
-       u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+       u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
        while (u4btmp != 0 && delay > 0) {
                rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
-               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, BRFREGOFFSETMASK, 0x00);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
                rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
-               u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, BRFREGOFFSETMASK);
+               u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
                delay--;
        }
        if (delay == 0) {
@@ -3468,9 +3409,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
                /* 5G LAN ON */
                rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
                /* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */
-               rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+               rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
                              0x40000100);
-               rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+               rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
                              0x40000100);
                if (rtlhal->macphymode == DUALMAC_DUALPHY) {
                        rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3524,16 +3465,16 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
                rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
                /* TX BB gain shift,Just for testchip,0xc80,0xc88 */
                if (rtlefuse->internal_pa_5g[0])
-                       rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+                       rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
                                      0x2d4000b5);
                else
-                       rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, BMASKDWORD,
+                       rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD,
                                      0x20000080);
                if (rtlefuse->internal_pa_5g[1])
-                       rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+                       rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
                                      0x2d4000b5);
                else
-                       rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, BMASKDWORD,
+                       rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD,
                                      0x20000080);
                if (rtlhal->macphymode == DUALMAC_DUALPHY) {
                        rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
@@ -3560,8 +3501,8 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
                }
        }
        /* update IQK related settings */
-       rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, BMASKDWORD, 0x40000100);
-       rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, BMASKDWORD, 0x40000100);
+       rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+       rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
        rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00);
        rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
                      BIT(26) | BIT(24), 0x00);
@@ -3590,7 +3531,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
        /* DMDP */
        if (rtlphy->rf_type == RF_1T1R) {
                /* Use antenna 0,0xc04,0xd04 */
-               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x11);
+               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11);
                rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
 
                /* enable ad/da clock1 for dual-phy reg0x888 */
@@ -3612,7 +3553,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
        } else {
                /* Single PHY */
                /* Use antenna 0 & 1,0xc04,0xd04 */
-               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x33);
+               rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
                rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
                /* disable ad/da clock1,0x888 */
                rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
@@ -3620,9 +3561,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
        for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
             rfpath++) {
                rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
-                                               RF_CHNLBW, BRFREGOFFSETMASK);
+                                               RF_CHNLBW, RFREG_OFFSET_MASK);
                rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
-                       BRFREGOFFSETMASK);
+                       RFREG_OFFSET_MASK);
        }
        for (i = 0; i < 2; i++)
                RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
index b7498c5bafc56f25e887831e9e2f9640e811794c..7f29b8d765b37cf22e1a8cf362144d4229a2f583 100644 (file)
 #define        BWORD1                                  0xc
 #define        BDWORD                                  0xf
 
-#define        BMASKBYTE0                              0xff
-#define        BMASKBYTE1                              0xff00
-#define        BMASKBYTE2                              0xff0000
-#define        BMASKBYTE3                              0xff000000
-#define        BMASKHWORD                              0xffff0000
-#define        BMASKLWORD                              0x0000ffff
-#define        BMASKDWORD                              0xffffffff
-#define        BMASK12BITS                             0xfff
-#define        BMASKH4BITS                             0xf0000000
-#define BMASKOFDM_D                            0xffc00000
-#define        BMASKCCK                                0x3f3f3f3f
-
-#define BRFREGOFFSETMASK                       0xfffff
-
 #endif
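
The hunk above removes the rtl8192de-local BMASK*/BRFREGOFFSETMASK defines; every user in the preceding hunks is switched to the MASK*/RFREG_OFFSET_MASK names, presumably provided with identical values by the shared rtlwifi headers. A before/after sketch using registers that appear in this patch (assumed equivalences taken from the values removed above: BMASKBYTE0 = 0xff, BMASKDWORD = 0xffffffff, BRFREGOFFSETMASK = 0xfffff):

static void example_mask_rename(struct ieee80211_hw *hw)
{
	/* was: rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, BMASKBYTE0, 0x00); */
	rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);

	/* was: rtl_get_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BRFREGOFFSETMASK); */
	rtl_get_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK);
}
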
index 20144e0b4142d9f53e9f892a49c415c013160cd0..6a6ac540d5b505edf81bb34ee93e9effdd664230 100644 (file)
@@ -125,7 +125,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
        }
 
        tmpval = tx_agc[RF90_PATH_A] & 0xff;
-       rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, BMASKBYTE1, tmpval);
+       rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
        RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
                "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
                tmpval, RTXAGC_A_CCK1_MCS32);
@@ -135,7 +135,7 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
                tmpval, RTXAGC_B_CCK11_A_CCK2_11);
        tmpval = tx_agc[RF90_PATH_B] >> 24;
-       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, BMASKBYTE0, tmpval);
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
        RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
                "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
                tmpval, RTXAGC_B_CCK11_A_CCK2_11);
@@ -360,7 +360,7 @@ static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
                        regoffset = regoffset_a[index];
                else
                        regoffset = regoffset_b[index];
-               rtl_set_bbreg(hw, regoffset, BMASKDWORD, writeval);
+               rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
                RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
                        "Set 0x%x = %08x\n", regoffset, writeval);
                if (((get_rf_type(rtlphy) == RF_2T2R) &&
index 0eb0f4ae592054f7ebf1ee9182169fc57adbc11c..99c2ab5dfcebfe230a70da60c42a8427509dd9b9 100644 (file)
@@ -545,7 +545,7 @@ static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
 
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
                          struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                         struct ieee80211_tx_info *info,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb,
                          u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -786,7 +786,8 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
        SET_TX_DESC_OWN(pdesc, 1);
 }
 
-void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val)
 {
        if (istx) {
                switch (desc_name) {
index c1b5dfb79d53ce2d2ebf843db65c14999d92196c..fb5cf0634e8d882ef9f308f631a3272fca69b5c3 100644 (file)
@@ -728,8 +728,8 @@ struct rx_desc_92d {
 } __packed;
 
 void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
-                         struct ieee80211_hdr *hdr,
-                         u8 *pdesc, struct ieee80211_tx_info *info,
+                         struct ieee80211_hdr *hdr, u8 *pdesc,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
@@ -737,7 +737,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
                           struct rtl_stats *stats,
                           struct ieee80211_rx_status *rx_status,
                           u8 *pdesc, struct sk_buff *skb);
-void rtl92de_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val);
 u32 rtl92de_get_desc(u8 *pdesc, bool istx, u8 desc_name);
 void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
 void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
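
The rtl92de descriptor helpers gain two arguments in the two files above: tx_fill_desc now takes a pbd_desc_tx buffer-descriptor pointer and set_desc takes the hw pointer, presumably to line the prototypes up with the common rtl_hal_ops callbacks shared with newer chips that do use buffer descriptors. A hypothetical call-site sketch (names and flow illustrative only, not taken from this patch):

/* rtl8192de has no TX buffer descriptors, so NULL is assumed for
 * pbd_desc_tx; set_desc now receives the hw pointer up front.
 */
static void example_queue_frame(struct ieee80211_hw *hw,
				struct ieee80211_hdr *hdr, u8 *pdesc,
				struct ieee80211_tx_info *info,
				struct ieee80211_sta *sta,
				struct sk_buff *skb, u8 hw_queue,
				struct rtl_tcb_desc *tcb)
{
	u8 own = 1;

	rtl92de_tx_fill_desc(hw, hdr, pdesc, NULL, info, sta, skb,
			     hw_queue, tcb);
	rtl92de_set_desc(hw, pdesc, true, HW_DESC_OWN, &own);
}
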
index 4f461786a7eb24abb3ae49a1dbccc4311d54ea58..9098558d916dee6ae6daf3567eb91291ad234b72 100644 (file)
@@ -251,7 +251,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        u8 e_aci = *val;
                        rtl92s_dm_init_edca_turbo(hw);
 
-                       if (rtlpci->acm_method != eAcmWay2_SW)
+                       if (rtlpci->acm_method != EACMWAY2_SW)
                                rtlpriv->cfg->ops->set_hw_reg(hw,
                                                 HW_VAR_ACM_CTRL,
                                                 &e_aci);
@@ -413,20 +413,18 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        (u8 *)(&fw_current_inps));
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_H2C_FW_PWRMODE,
-                                       (u8 *)(&ppsc->fwctrl_psmode));
+                                       &ppsc->fwctrl_psmode);
 
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                       HW_VAR_SET_RPWM,
-                                       (u8 *)(&rpwm_val));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+                                                     &rpwm_val);
                } else {
                        rpwm_val = 0x0C;        /* RF on */
                        fw_pwrmode = FW_PS_ACTIVE_MODE;
                        fw_current_inps = false;
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
-                                       (u8 *)(&rpwm_val));
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                       HW_VAR_H2C_FW_PWRMODE,
-                                       (u8 *)(&fw_pwrmode));
+                                                     &rpwm_val);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+                                                     &fw_pwrmode);
 
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_FW_PSMODE_STATUS,
@@ -955,7 +953,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        u8 tmp_byte = 0;
-
+       unsigned long flags;
        bool rtstatus = true;
        u8 tmp_u1b;
        int err = false;
@@ -967,6 +965,16 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
 
        rtlpci->being_init_adapter = true;
 
+       /* As this function can take a very long time (up to 350 ms)
+        * and can be called with irqs disabled, reenable the irqs
+        * to let the other devices continue being serviced.
+        *
+        * It is safe doing so since our own interrupts will only be enabled
+        * in a subsequent step.
+        */
+       local_save_flags(flags);
+       local_irq_enable();
+
        rtlpriv->intf_ops->disable_aspm(hw);
 
        /* 1. MAC Initialize */
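
The comment added above documents the reasoning: rtl92se_hw_init can take up to 350 ms and may be entered with interrupts disabled, so the irq flags are saved and interrupts re-enabled for the duration of the init, with the later hunks converting every early return into a jump to a new exit: label that restores the flags. A minimal sketch of that shape, with do_long_init() standing in for the MAC/BB/RF configuration steps:

static int example_long_init(struct ieee80211_hw *hw)
{
	unsigned long flags;
	int err = 0;

	local_save_flags(flags);
	local_irq_enable();	/* safe: this device's irqs are enabled later */

	if (!do_long_init(hw)) {	/* placeholder for the real init work */
		err = 1;
		goto exit;
	}

exit:
	local_irq_restore(flags);
	return err;
}
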
@@ -984,7 +992,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "Failed to download FW. Init HW without FW now... "
                         "Please copy FW into /lib/firmware/rtlwifi\n");
-               return 1;
+               err = 1;
+               goto exit;
        }
 
        /* After FW download, we have to reset MAC register */
@@ -997,7 +1006,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
        /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
        if (!rtl92s_phy_mac_config(hw)) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
-               return rtstatus;
+               err = rtstatus;
+               goto exit;
        }
 
        /* because last function modify RCR, so we update
@@ -1016,7 +1026,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
        /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
        if (!rtl92s_phy_bb_config(hw)) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
-               return rtstatus;
+               err = rtstatus;
+               goto exit;
        }
 
        /* 5. Initialize RF RADIO_A.txt RF RADIO_B.txt */
@@ -1033,7 +1044,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
 
        if (!rtl92s_phy_rf_config(hw)) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
-               return rtstatus;
+               err = rtstatus;
+               goto exit;
        }
 
        /* After read predefined TXT, we must set BB/MAC/RF
@@ -1122,8 +1134,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
 
        rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_ON);
        rtl92s_dm_init(hw);
+exit:
+       local_irq_restore(flags);
        rtlpci->being_init_adapter = false;
-
        return err;
 }
 
@@ -1135,12 +1148,13 @@ void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr)
 void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       u32 reg_rcr = rtlpci->receive_config;
+       u32 reg_rcr;
 
        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;
 
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
        if (check_bssid) {
                reg_rcr |= (RCR_CBSSID);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
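
rtl92se_set_check_bssid now reads the live RCR through get_hw_reg instead of using the receive_config value cached in rtl_pci, so toggling CBSSID filtering no longer overwrites RCR bits changed elsewhere after init. A minimal read-modify-write sketch of the same pattern (helper name illustrative, not part of the patch):

static void example_toggle_cbssid(struct ieee80211_hw *hw, bool on)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 rcr;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)&rcr);
	if (on)
		rcr |= RCR_CBSSID;
	else
		rcr &= ~RCR_CBSSID;
	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&rcr);
}
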
index 9c092e6eb3fe85d5707e7c652dc647ac8c486dcb..77c5b5f352441a3a2a2d0c26b5627266aa15d5d9 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../ps.h"
+#include "../core.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -833,18 +834,7 @@ static bool _rtl92s_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
 
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_reg_len; i = i + 2) {
-                       if (phy_reg_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_reg_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_reg_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_reg_table[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_reg_table[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_reg_table[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_reg_table[i]);
 
                        /* Add delay for ECS T20 & LG malow platform, */
                        udelay(1);
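
The open-coded delay ladders in this file collapse into the shared rtl_addr_delay() helper from ../core.h (included at the top of this diff). A sketch of what that helper presumably does, reconstructed from the ladder removed above (the real implementation in the rtlwifi core may differ in detail):

void rtl_addr_delay(u32 addr)
{
	if (addr == 0xfe)
		mdelay(50);
	else if (addr == 0xfd)
		mdelay(5);
	else if (addr == 0xfc)
		mdelay(1);
	else if (addr == 0xfb)
		udelay(50);
	else if (addr == 0xfa)
		udelay(5);
	else if (addr == 0xf9)
		udelay(1);
}
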
@@ -886,18 +876,7 @@ static bool _rtl92s_phy_set_bb_to_diff_rf(struct ieee80211_hw *hw,
 
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_regarray2xtxr_len; i = i + 3) {
-                       if (phy_regarray2xtxr_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray2xtxr_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray2xtxr_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray2xtxr_table[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray2xtxr_table[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray2xtxr_table[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray2xtxr_table[i]);
 
                        rtl92s_phy_set_bb_reg(hw, phy_regarray2xtxr_table[i],
                                phy_regarray2xtxr_table[i + 1],
@@ -920,18 +899,7 @@ static bool _rtl92s_phy_config_bb_with_pg(struct ieee80211_hw *hw,
 
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_pg_len; i = i + 3) {
-                       if (phy_table_pg[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_table_pg[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_table_pg[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_table_pg[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_table_pg[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_table_pg[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_table_pg[i]);
 
                        _rtl92s_store_pwrindex_diffrate_offset(hw,
                                        phy_table_pg[i],
@@ -1034,28 +1002,9 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
        switch (rfpath) {
        case RF90_PATH_A:
                for (i = 0; i < radio_a_tblen; i = i + 2) {
-                       if (radio_a_table[i] == 0xfe)
-                               /* Delay specific ms. Only RF configuration
-                                * requires delay. */
-                               mdelay(50);
-                       else if (radio_a_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radio_a_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radio_a_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radio_a_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radio_a_table[i] == 0xf9)
-                               udelay(1);
-                       else
-                               rtl92s_phy_set_rf_reg(hw, rfpath,
-                                                     radio_a_table[i],
-                                                     MASK20BITS,
-                                                     radio_a_table[i + 1]);
+                       rtl_rfreg_delay(hw, rfpath, radio_a_table[i],
+                                       MASK20BITS, radio_a_table[i + 1]);
 
-                       /* Add delay for ECS T20 & LG malow platform */
-                       udelay(1);
                }
 
                /* PA Bias current for inferiority IC */
@@ -1063,28 +1012,8 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
                break;
        case RF90_PATH_B:
                for (i = 0; i < radio_b_tblen; i = i + 2) {
-                       if (radio_b_table[i] == 0xfe)
-                               /* Delay specific ms. Only RF configuration
-                                * requires delay.*/
-                               mdelay(50);
-                       else if (radio_b_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radio_b_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radio_b_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radio_b_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radio_b_table[i] == 0xf9)
-                               udelay(1);
-                       else
-                               rtl92s_phy_set_rf_reg(hw, rfpath,
-                                                     radio_b_table[i],
-                                                     MASK20BITS,
-                                                     radio_b_table[i + 1]);
-
-                       /* Add delay for ECS T20 & LG malow platform */
-                       udelay(1);
+                       rtl_rfreg_delay(hw, rfpath, radio_b_table[i],
+                                       MASK20BITS, radio_b_table[i + 1]);
                }
                break;
        case RF90_PATH_C:
index c81c8359194007893412cd3d55f1da054b1bc645..e13043479b71a91fa8343f8124d3a29e4e08252c 100644 (file)
 
 #define        BTX_AGCRATECCK                          0x7f00
 
-#define        MASKBYTE0                               0xff
-#define        MASKBYTE1                               0xff00
-#define        MASKBYTE2                               0xff0000
-#define        MASKBYTE3                               0xff000000
-#define        MASKHWORD                               0xffff0000
-#define        MASKLWORD                               0x0000ffff
-#define        MASKDWORD                               0xffffffff
-
-#define        MAKS12BITS                              0xfffff
-#define        MASK20BITS                              0xfffff
-#define RFREG_OFFSET_MASK                      0xfffff
-
 #endif
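
The rtl8192se hunks above, and the rtl8723ae phy.c hunks later in this section, all collapse the same open-coded 0xf9-0xfe delay ladder into the shared rtl_addr_delay()/rtl_rfreg_delay() helpers, which is also why "../core.h" is now included. A plausible shape for those helpers, reconstructed from the ladders they replace; the real bodies live in core.c and may differ in detail, and rtl_set_rfreg() stands in here for the driver-specific writers (rtl92s_phy_set_rf_reg() with MASK20BITS in the hunks above):

void rtl_addr_delay(u32 addr)
{
        /* Magic "addresses" in the register tables encode delays rather
         * than real register writes. */
        if (addr == 0xfe)
                mdelay(50);
        else if (addr == 0xfd)
                mdelay(5);
        else if (addr == 0xfc)
                mdelay(1);
        else if (addr == 0xfb)
                udelay(50);
        else if (addr == 0xfa)
                udelay(5);
        else if (addr == 0xf9)
                udelay(1);
}

void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath,
                     u32 addr, u32 mask, u32 data)
{
        if (addr >= 0xf9 && addr <= 0xfe) {
                rtl_addr_delay(addr);
        } else {
                rtl_set_rfreg(hw, rfpath, addr, mask, data);
                udelay(1);      /* settle time kept from the old loops */
        }
}
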
index 92d38ab3c60e87861f992411e3774aeceed30f56..78a81c1e390bdd4052c64fd9cc4fe0a54b406aa8 100644 (file)
@@ -52,7 +52,7 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
        /* We only care about the path A for legacy. */
        if (rtlefuse->eeprom_version < 2) {
                pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
-       } else if (rtlefuse->eeprom_version >= 2) {
+       } else {
                legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
                                                [RF90_PATH_A][chnl - 1];
 
index 27efbcdac6a979875976a7a74d1c15e07c1afa49..36b48be8329c08dad5474f43600f2b11d8fcf279 100644 (file)
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
                        /* during testing, hdr was NULL here */
                        return false;
                }
-               if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+               if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
                        (ieee80211_has_protected(hdr->frame_control)))
                        rx_status->flag &= ~RX_FLAG_DECRYPTED;
                else
@@ -336,7 +336,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
 
 void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
                struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-               struct ieee80211_tx_info *info,
+               u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                struct ieee80211_sta *sta,
                struct sk_buff *skb,
                u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
@@ -573,7 +573,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
        }
 }
 
-void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val)
 {
        if (istx) {
                switch (desc_name) {
index 64dd66f287c182a25949126d303925124a24bd71..5a13f17e3b41c7603e92646f7205af989607c59c 100644 (file)
@@ -29,8 +29,9 @@
 #ifndef __REALTEK_PCI92SE_TRX_H__
 #define __REALTEK_PCI92SE_TRX_H__
 
-void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
-                         u8 *pdesc, struct ieee80211_tx_info *info,
+void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
+                         struct ieee80211_hdr *hdr, u8 *pdesc,
+                         u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff *skb, u8 hw_queue,
                          struct rtl_tcb_desc *ptcb_desc);
@@ -39,7 +40,8 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg,
 bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
                           struct ieee80211_rx_status *rx_status, u8 *pdesc,
                           struct sk_buff *skb);
-void rtl92se_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                     u8 desc_name, u8 *val);
 u32 rtl92se_get_desc(u8 *pdesc, bool istx, u8 desc_name);
 void rtl92se_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
 
index 4ed731f09b1ff3f0d57a99e25b80b2dd0fe516b1..9c34a85fdb89f1acf280e20722d81ccb2b1b0511 100644 (file)
@@ -10,7 +10,6 @@ rtl8723ae-objs :=             \
                led.o           \
                phy.o           \
                pwrseq.o        \
-               pwrseqcmd.o     \
                rf.o            \
                sw.o            \
                table.o         \
index 8c110356dff9824bcfc729530aa260a7643a4521..debe261a7eeb9026f2940db194ff25f1b1d0fe8b 100644 (file)
 #define E_CUT_VERSION                  BIT(14)
 #define        RF_RL_ID                        (BIT(31)|BIT(30)|BIT(29)|BIT(28))
 
-enum version_8723e {
-       VERSION_TEST_UMC_CHIP_8723 = 0x0081,
-       VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
-       VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
-};
 
 /* MASK */
 #define IC_TYPE_MASK                   (BIT(0)|BIT(1)|BIT(2))
index a36eee28f9e7f7fe092b3f443a205e67292ef52c..25cc83058b01a25be6a9ea78d44ce744a22a89a0 100644 (file)
@@ -35,6 +35,7 @@
 #include "def.h"
 #include "phy.h"
 #include "dm.h"
+#include "../rtl8723com/dm_common.h"
 #include "fw.h"
 #include "hal_btc.h"
 
@@ -483,16 +484,6 @@ static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
        rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
 }
 
-static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.dynamic_txpower_enable = false;
-
-       rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
-       rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
-}
-
 static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -585,19 +576,6 @@ void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
        }
 }
 
-static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
-{
-}
-
-void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm.current_turbo_edca = false;
-       rtlpriv->dm.is_any_nonbepkts = false;
-       rtlpriv->dm.is_cur_rdlstate = false;
-}
-
 static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -669,9 +647,8 @@ static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
        } else {
                if (rtlpriv->dm.current_turbo_edca) {
                        u8 tmp = AC0_BE;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_AC_PARAM,
-                                                     (u8 *) (&tmp));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+                                                     &tmp);
                        rtlpriv->dm.current_turbo_edca = false;
                }
        }
@@ -778,17 +755,6 @@ static void rtl8723ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
        }
 }
 
-static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
-       rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
-       rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
-       rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
-       rtlpriv->dm_pstable.rssi_val_min = 0;
-}
-
 void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -905,11 +871,11 @@ void rtl8723ae_dm_init(struct ieee80211_hw *hw)
 
        rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
        rtl8723ae_dm_diginit(hw);
-       rtl8723ae_dm_init_dynamic_txpower(hw);
-       rtl8723ae_dm_init_edca_turbo(hw);
+       rtl8723_dm_init_dynamic_txpower(hw);
+       rtl8723_dm_init_edca_turbo(hw);
        rtl8723ae_dm_init_rate_adaptive_mask(hw);
        rtl8723ae_dm_initialize_txpower_tracking(hw);
-       rtl8723ae_dm_init_dynamic_bpowersaving(hw);
+       rtl8723_dm_init_dynamic_bb_powersaving(hw);
 }
 
 void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
@@ -930,7 +896,6 @@ void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
        if ((ppsc->rfpwr_state == ERFON) &&
            ((!fw_current_inpsmode) && fw_ps_awake) &&
            (!ppsc->rfchange_inprogress)) {
-               rtl8723ae_dm_pwdmonitor(hw);
                rtl8723ae_dm_dig(hw);
                rtl8723ae_dm_false_alarm_counter_statistics(hw);
                rtl8723ae_dm_dynamic_bpowersaving(hw);
index a372b0204456bbfee8f27205338697cd9dc8cba6..d253bb53d03e2ccde5b4f646e4fff6a154158836 100644 (file)
@@ -147,7 +147,6 @@ enum dm_dig_connect_e {
 void rtl8723ae_dm_init(struct ieee80211_hw *hw);
 void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw);
 void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw);
-void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
 void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
 void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
 void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw);
index ba1502b172a6a05f3fb34aa0e97c7ef6a7260ac1..728b7563ad36a32eb5b989a3ddcf5738088dfbf6 100644 (file)
 #include "reg.h"
 #include "def.h"
 #include "fw.h"
-
-static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 tmp;
-       if (enable) {
-               tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
-
-               tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
-               rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
-
-               tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
-               rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
-       } else {
-               tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
-               rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
-
-               rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
-       }
-}
-
-static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw,
-                                     const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blockSize = sizeof(u32);
-       u8 *bufferPtr = (u8 *) buffer;
-       u32 *pu4BytePtr = (u32 *) buffer;
-       u32 i, offset, blockCount, remainSize;
-
-       blockCount = size / blockSize;
-       remainSize = size % blockSize;
-
-       for (i = 0; i < blockCount; i++) {
-               offset = i * blockSize;
-               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4BytePtr + i));
-       }
-
-       if (remainSize) {
-               offset = blockCount * blockSize;
-               bufferPtr += offset;
-               for (i = 0; i < remainSize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
-                                                offset + i), *(bufferPtr + i));
-               }
-       }
-}
-
-static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw,
-                                    u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8) (page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       _rtl8723ae_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl8723ae_write_fw(struct ieee80211_hw *hw,
-                               enum version_8723e version, u8 *buffer,
-                               u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 *bufferPtr = (u8 *) buffer;
-       u32 page_nums, remain_size;
-       u32 page, offset;
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
-
-       page_nums = size / FW_8192C_PAGE_SIZE;
-       remain_size = size % FW_8192C_PAGE_SIZE;
-
-       if (page_nums > 6) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Page numbers should not be greater then 6\n");
-       }
-
-       for (page = 0; page < page_nums; page++) {
-               offset = page * FW_8192C_PAGE_SIZE;
-               _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
-                                        FW_8192C_PAGE_SIZE);
-       }
-
-       if (remain_size) {
-               offset = page_nums * FW_8192C_PAGE_SIZE;
-               page = page_nums;
-               _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
-                                        remain_size);
-       }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
-}
-
-static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       int err = -EIO;
-       u32 counter = 0;
-       u32 value32;
-
-       do {
-               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-       } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
-                (!(value32 & FWDL_ChkSum_rpt)));
-
-       if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
-                        value32);
-               goto exit;
-       }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
-       value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-       value32 |= MCUFWDL_RDY;
-       value32 &= ~WINTINI_RDY;
-       rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
-
-       counter = 0;
-
-       do {
-               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (value32 & WINTINI_RDY) {
-                       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                                "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
-                                value32);
-                       err = 0;
-                       goto exit;
-               }
-
-               mdelay(FW_8192C_POLLING_DELAY);
-
-       } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
-
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
-
-exit:
-       return err;
-}
-
-int rtl8723ae_download_fw(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl8723ae_firmware_header *pfwheader;
-       u8 *pfwdata;
-       u32 fwsize;
-       int err;
-       enum version_8723e version = rtlhal->version;
-
-       if (!rtlhal->pfirmware)
-               return 1;
-
-       pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware;
-       pfwdata = (u8 *) rtlhal->pfirmware;
-       fwsize = rtlhal->fwsize;
-
-       if (IS_FW_HEADER_EXIST(pfwheader)) {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
-                        "Firmware Version(%d), Signature(%#x),Size(%d)\n",
-                        pfwheader->version, pfwheader->signature,
-                        (int)sizeof(struct rtl8723ae_firmware_header));
-
-               pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header);
-       }
-
-       if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
-               rtl8723ae_firmware_selfreset(hw);
-               rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
-       }
-       _rtl8723ae_enable_fw_download(hw, true);
-       _rtl8723ae_write_fw(hw, version, pfwdata, fwsize);
-       _rtl8723ae_enable_fw_download(hw, false);
-
-       err = _rtl8723ae_fw_free_to_go(hw);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is not ready to run!\n");
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                        "Firmware is ready to run!\n");
-       }
-       return 0;
-}
+#include "../rtl8723com/fw_common.h"
 
 static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
 {
@@ -463,50 +271,6 @@ void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw,
        return;
 }
 
-void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
-{
-       u8 u1tmp;
-       u8 delay = 100;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
-       u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-
-       while (u1tmp & BIT(2)) {
-               delay--;
-               if (delay == 0)
-                       break;
-               udelay(50);
-               u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-       }
-       if (delay == 0) {
-               u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
-               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
-       }
-}
-
-void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 u1_h2c_set_pwrmode[3] = { 0 };
-       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-
-       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
-
-       SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
-       SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
-                                        (rtlpriv->mac80211.p2p) ?
-                                        ppsc->smart_ps : 1);
-       SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
-                                             ppsc->reg_max_lps_awakeintvl);
-
-       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
-                     "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
-                     u1_h2c_set_pwrmode, 3);
-       rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
-
-}
-
 static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
                                       struct sk_buff *skb)
 {
@@ -812,7 +576,6 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
                        rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
 
                        p2p_ps_offload->offload_en = 1;
-
                        if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
                                p2p_ps_offload->role = 1;
                                p2p_ps_offload->allstasleep = 0;
@@ -836,3 +599,24 @@ void rtl8723ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
        }
        rtl8723ae_fill_h2c_cmd(hw, H2C_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload);
 }
+
+void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 u1_h2c_set_pwrmode[3] = { 0 };
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+       SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
+       SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(u1_h2c_set_pwrmode,
+                                            (rtlpriv->mac80211.p2p) ?
+                                            ppsc->smart_ps : 1);
+       SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
+                                             ppsc->reg_max_lps_awakeintvl);
+
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+                     "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
+                     u1_h2c_set_pwrmode, 3);
+       rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
+}
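
Everything removed from fw.c above (the download enable/disable toggling, the block and page writers, the free-to-go polling, rtl8723ae_download_fw() and rtl8723ae_firmware_selfreset()) is superseded by the shared rtl8723com code pulled in through the new #include "../rtl8723com/fw_common.h". The only change visible at the call site, taken from the hw.c hunk later in this section; the meaning of the second argument is not spelled out in this diff, it is simply false on the 8723AE path:

        /* before: driver-private download routine */
        err = rtl8723ae_download_fw(hw);

        /* after: shared rtl8723com helper */
        err = rtl8723_download_fw(hw, false);
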
index ed3b795e6980781a738f5883cb63401e28458970..d355b85dd9fe72aee073ec44c42908339b904e80 100644 (file)
@@ -34,7 +34,7 @@
 #define FW_8192C_END_ADDRESS                   0x3FFF
 #define FW_8192C_PAGE_SIZE                     4096
 #define FW_8192C_POLLING_DELAY                 5
-#define FW_8192C_POLLING_TIMEOUT_COUNT         1000
+#define FW_8192C_POLLING_TIMEOUT_COUNT         6000
 
 #define BEACON_PG                              0
 #define PSPOLL_PG                              2
@@ -65,21 +65,9 @@ struct rtl8723ae_firmware_header {
        u32 rsvd5;
 };
 
-enum rtl8192c_h2c_cmd {
-       H2C_AP_OFFLOAD = 0,
-       H2C_SETPWRMODE = 1,
-       H2C_JOINBSSRPT = 2,
-       H2C_RSVDPAGE = 3,
-       H2C_RSSI_REPORT = 4,
-       H2C_P2P_PS_CTW_CMD = 5,
-       H2C_P2P_PS_OFFLOAD = 6,
-       H2C_RA_MASK = 7,
-       MAX_H2CCMD
-};
-
 #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)                 \
        SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
-#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val)             \
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS_23A(__ph2ccmd, __val)         \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
 #define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val)        \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
@@ -92,10 +80,8 @@ enum rtl8192c_h2c_cmd {
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)            \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
 
-int rtl8723ae_download_fw(struct ieee80211_hw *hw);
 void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
                            u32 cmd_len, u8 *p_cmdbuffer);
-void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
 void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
 void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
 void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
index 3d092e4b0b7fe5fbf909d59eb3b3e3e41481000c..48fee1be78c2f54e1e839cf1cec8e05be65b9094 100644 (file)
@@ -31,6 +31,8 @@
 #include "../pci.h"
 #include "dm.h"
 #include "fw.h"
+#include "../rtl8723com/fw_common.h"
 #include "phy.h"
 #include "reg.h"
 #include "hal_btc.h"
index 68c28340f791623f9e78b98d2568a48b3668a86a..5d534df8d90ca2529db7bd3413d05dd76883c2c2 100644 (file)
@@ -30,7 +30,9 @@
 #include "hal_btc.h"
 #include "../pci.h"
 #include "phy.h"
+#include "../rtl8723com/phy_common.h"
 #include "fw.h"
+#include "../rtl8723com/fw_common.h"
 #include "reg.h"
 #include "def.h"
 
@@ -391,13 +393,13 @@ static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw,
        if (sw_dac_swing_on) {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
                         "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
-               rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000,
-                                        sw_dac_swing_lvl);
+               rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000,
+                                      sw_dac_swing_lvl);
                rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
        } else {
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
                         "[BTCoex], SwDacSwing Off!\n");
-               rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
+               rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
        }
 }
 
index c333dfd116b868a8aff3dcf780e04611860d7951..65c9e80e1f78ad23988bb962732396153bf1f137 100644 (file)
 #include "def.h"
 #include "phy.h"
 #include "dm.h"
+#include "../rtl8723com/dm_common.h"
 #include "fw.h"
+#include "../rtl8723com/fw_common.h"
 #include "led.h"
 #include "hw.h"
-#include "pwrseqcmd.h"
 #include "pwrseq.h"
 #include "btc.h"
 
@@ -206,14 +207,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
 
                for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_AC_PARAM,
-                                                     (u8 *) (&e_aci));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+                                                     &e_aci);
                }
                break; }
        case HW_VAR_ACK_PREAMBLE:{
                u8 reg_tmp;
-               u8 short_preamble = (bool) (*(u8 *) val);
+               u8 short_preamble = (bool)*val;
                reg_tmp = (mac->cur_40_prime_sc) << 5;
                if (short_preamble)
                        reg_tmp |= 0x80;
@@ -224,7 +224,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                u8 min_spacing_to_set;
                u8 sec_min_space;
 
-               min_spacing_to_set = *((u8 *) val);
+               min_spacing_to_set = *val;
                if (min_spacing_to_set <= 7) {
                        sec_min_space = 0;
 
@@ -248,7 +248,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HW_VAR_SHORTGI_DENSITY:{
                u8 density_to_set;
 
-               density_to_set = *((u8 *) val);
+               density_to_set = *val;
                mac->min_space_cfg |= (density_to_set << 3);
 
                RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -272,7 +272,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                else
                        p_regtoset = regtoset_normal;
 
-               factor_toset = *((u8 *) val);
+               factor_toset = *val;
                if (factor_toset <= 3) {
                        factor_toset = (1 << (factor_toset + 2));
                        if (factor_toset > 0xf)
@@ -303,16 +303,15 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break; }
        case HW_VAR_AC_PARAM:{
-               u8 e_aci = *((u8 *) val);
-               rtl8723ae_dm_init_edca_turbo(hw);
+               u8 e_aci = *val;
+               rtl8723_dm_init_edca_turbo(hw);
 
-               if (rtlpci->acm_method != eAcmWay2_SW)
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_ACM_CTRL,
-                                                     (u8 *) (&e_aci));
+               if (rtlpci->acm_method != EACMWAY2_SW)
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+                                                     &e_aci);
                break; }
        case HW_VAR_ACM_CTRL:{
-               u8 e_aci = *((u8 *) val);
+               u8 e_aci = *val;
                union aci_aifsn *p_aci_aifsn =
                    (union aci_aifsn *)(&(mac->ac[0].aifs));
                u8 acm = p_aci_aifsn->f.acm;
@@ -365,7 +364,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtlpci->receive_config = ((u32 *) (val))[0];
                break;
        case HW_VAR_RETRY_LIMIT:{
-               u8 retry_limit = ((u8 *) (val))[0];
+               u8 retry_limit = *val;
 
                rtl_write_word(rtlpriv, REG_RL,
                               retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -378,13 +377,13 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtlefuse->efuse_usedbytes = *((u16 *) val);
                break;
        case HW_VAR_EFUSE_USAGE:
-               rtlefuse->efuse_usedpercentage = *((u8 *) val);
+               rtlefuse->efuse_usedpercentage = *val;
                break;
        case HW_VAR_IO_CMD:
                rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
                break;
        case HW_VAR_WPA_CONFIG:
-               rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+               rtl_write_byte(rtlpriv, REG_SECCFG, *val);
                break;
        case HW_VAR_SET_RPWM:{
                u8 rpwm_val;
@@ -393,27 +392,25 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                udelay(1);
 
                if (rpwm_val & BIT(7)) {
-                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
-                                      (*(u8 *) val));
+                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
                } else {
-                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
-                                      ((*(u8 *) val) | BIT(7)));
+                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
                }
 
                break; }
        case HW_VAR_H2C_FW_PWRMODE:{
-               u8 psmode = (*(u8 *) val);
+               u8 psmode = *val;
 
                if (psmode != FW_PS_ACTIVE_MODE)
                        rtl8723ae_dm_rf_saving(hw, true);
 
-               rtl8723ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+               rtl8723ae_set_fw_pwrmode_cmd(hw, *val);
                break; }
        case HW_VAR_FW_PSMODE_STATUS:
                ppsc->fw_current_inpsmode = *((bool *) val);
                break;
        case HW_VAR_H2C_FW_JOINBSSRPT:{
-               u8 mstatus = (*(u8 *) val);
+               u8 mstatus = *val;
                u8 tmp_regcr, tmp_reg422;
                bool recover = false;
 
@@ -446,11 +443,11 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        rtl_write_byte(rtlpriv, REG_CR + 1,
                                       (tmp_regcr & ~(BIT(0))));
                }
-               rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+               rtl8723ae_set_fw_joinbss_report_cmd(hw, *val);
 
                break; }
        case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
-               rtl8723ae_set_p2p_ps_offload_cmd(hw, (*(u8 *)val));
+               rtl8723ae_set_p2p_ps_offload_cmd(hw, *val);
                break;
        case HW_VAR_AID:{
                u16 u2btmp;
@@ -460,7 +457,7 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                mac->assoc_id));
                break; }
        case HW_VAR_CORRECT_TSF:{
-               u8 btype_ibss = ((u8 *) (val))[0];
+               u8 btype_ibss = *val;
 
                if (btype_ibss == true)
                        _rtl8723ae_stop_tx_beacon(hw);
@@ -490,20 +487,18 @@ void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        (u8 *)(&fw_current_inps));
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_H2C_FW_PWRMODE,
-                                       (u8 *)(&ppsc->fwctrl_psmode));
+                                       &ppsc->fwctrl_psmode);
 
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                       HW_VAR_SET_RPWM,
-                                       (u8 *)(&rpwm_val));
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+                                                     &rpwm_val);
                } else {
                        rpwm_val = 0x0C;        /* RF on */
                        fw_pwrmode = FW_PS_ACTIVE_MODE;
                        fw_current_inps = false;
                        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
-                                       (u8 *)(&rpwm_val));
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                       HW_VAR_H2C_FW_PWRMODE,
-                                       (u8 *)(&fw_pwrmode));
+                                                     &rpwm_val);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+                                                     &fw_pwrmode);
 
                        rtlpriv->cfg->ops->set_hw_reg(hw,
                                        HW_VAR_FW_PSMODE_STATUS,
@@ -880,23 +875,33 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
        bool rtstatus = true;
        int err;
        u8 tmp_u1b;
+       unsigned long flags;
 
        rtlpriv->rtlhal.being_init_adapter = true;
+       /* As this function can take a very long time (up to 350 ms)
+        * and can be called with irqs disabled, reenable the irqs
+        * to let the other devices continue being serviced.
+        *
+        * It is safe doing so since our own interrupts will only be enabled
+        * in a subsequent step.
+        */
+       local_save_flags(flags);
+       local_irq_enable();
+
        rtlpriv->intf_ops->disable_aspm(hw);
        rtstatus = _rtl8712e_init_mac(hw);
        if (rtstatus != true) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
                err = 1;
-               return err;
+               goto exit;
        }
 
-       err = rtl8723ae_download_fw(hw);
+       err = rtl8723_download_fw(hw, false);
        if (err) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
                         "Failed to download FW. Init HW without FW now..\n");
                err = 1;
-               rtlhal->fw_ready = false;
-               return err;
+               goto exit;
        } else {
                rtlhal->fw_ready = true;
        }
@@ -971,6 +976,8 @@ int rtl8723ae_hw_init(struct ieee80211_hw *hw)
                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
        }
        rtl8723ae_dm_init(hw);
+exit:
+       local_irq_restore(flags);
        rtlpriv->rtlhal.being_init_adapter = false;
        return err;
 }
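
The comment added in the hunk above spells out the reasoning: rtl8723ae_hw_init() (like rtl92se_hw_init() at the top of this section) can run for hundreds of milliseconds and may be entered with interrupts disabled, so the patch re-enables IRQs for the duration of the init and restores the caller's state on every exit path through the new exit label. A minimal sketch of the pattern; init_mac_stage() and download_fw_stage() are hypothetical stand-ins for the real init steps, and local_save_flags()/local_irq_enable()/local_irq_restore() come from <linux/irqflags.h>:

static int example_hw_init(struct ieee80211_hw *hw)
{
        unsigned long flags;
        int err;

        /* Remember the caller's IRQ state, then re-enable interrupts so
         * other devices keep being serviced during the long init.  Safe
         * because this adapter's own interrupts are only enabled in a
         * later step. */
        local_save_flags(flags);
        local_irq_enable();

        err = init_mac_stage(hw);       /* hypothetical stand-in */
        if (err)
                goto exit;

        err = download_fw_stage(hw);    /* hypothetical stand-in */
exit:
        local_irq_restore(flags);       /* back to the caller's IRQ state */
        return err;
}
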
@@ -1112,12 +1119,13 @@ static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
 void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       u32 reg_rcr = rtlpci->receive_config;
+       u32 reg_rcr;
 
        if (rtlpriv->psc.rfpwr_state != ERFON)
                return;
 
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
        if (check_bssid == true) {
                reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
@@ -1153,7 +1161,7 @@ void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
-       rtl8723ae_dm_init_edca_turbo(hw);
+       rtl8723_dm_init_edca_turbo(hw);
        switch (aci) {
        case AC1_BK:
                rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
@@ -1614,10 +1622,10 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
        rtl8723ae_read_bt_coexist_info_from_hwpg(hw,
                        rtlefuse->autoload_failflag, hwinfo);
 
-       rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+       rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
        rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
        rtlefuse->txpwr_fromeprom = true;
-       rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+       rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1655,7 +1663,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
                                    CHK_SVID_SMID(0x10EC, 0x9185))
                                        rtlhal->oem_id = RT_CID_TOSHIBA;
                                else if (rtlefuse->eeprom_svid == 0x1025)
-                                       rtlhal->oem_id = RT_CID_819x_Acer;
+                                       rtlhal->oem_id = RT_CID_819X_ACER;
                                else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
                                         CHK_SVID_SMID(0x10EC, 0x6192) ||
                                         CHK_SVID_SMID(0x10EC, 0x6193) ||
@@ -1665,7 +1673,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
                                         CHK_SVID_SMID(0x10EC, 0x8191) ||
                                         CHK_SVID_SMID(0x10EC, 0x8192) ||
                                         CHK_SVID_SMID(0x10EC, 0x8193))
-                                       rtlhal->oem_id = RT_CID_819x_SAMSUNG;
+                                       rtlhal->oem_id = RT_CID_819X_SAMSUNG;
                                else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
                                         CHK_SVID_SMID(0x10EC, 0x9195) ||
                                         CHK_SVID_SMID(0x10EC, 0x7194) ||
@@ -1673,24 +1681,24 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
                                         CHK_SVID_SMID(0x10EC, 0x8201) ||
                                         CHK_SVID_SMID(0x10EC, 0x8202) ||
                                         CHK_SVID_SMID(0x10EC, 0x9200))
-                                       rtlhal->oem_id = RT_CID_819x_Lenovo;
+                                       rtlhal->oem_id = RT_CID_819X_LENOVO;
                                else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
                                         CHK_SVID_SMID(0x10EC, 0x9196))
-                                       rtlhal->oem_id = RT_CID_819x_CLEVO;
+                                       rtlhal->oem_id = RT_CID_819X_CLEVO;
                                else if (CHK_SVID_SMID(0x1028, 0x8194) ||
                                         CHK_SVID_SMID(0x1028, 0x8198) ||
                                         CHK_SVID_SMID(0x1028, 0x9197) ||
                                         CHK_SVID_SMID(0x1028, 0x9198))
-                                       rtlhal->oem_id = RT_CID_819x_DELL;
+                                       rtlhal->oem_id = RT_CID_819X_DELL;
                                else if (CHK_SVID_SMID(0x103C, 0x1629))
-                                       rtlhal->oem_id = RT_CID_819x_HP;
+                                       rtlhal->oem_id = RT_CID_819X_HP;
                                else if (CHK_SVID_SMID(0x1A32, 0x2315))
-                                       rtlhal->oem_id = RT_CID_819x_QMI;
+                                       rtlhal->oem_id = RT_CID_819X_QMI;
                                else if (CHK_SVID_SMID(0x10EC, 0x8203))
-                                       rtlhal->oem_id = RT_CID_819x_PRONETS;
+                                       rtlhal->oem_id = RT_CID_819X_PRONETS;
                                else if (CHK_SVID_SMID(0x1043, 0x84B5))
                                        rtlhal->oem_id =
-                                                RT_CID_819x_Edimax_ASUS;
+                                                RT_CID_819X_EDIMAX_ASUS;
                                else
                                        rtlhal->oem_id = RT_CID_DEFAULT;
                        } else if (rtlefuse->eeprom_did == 0x8178) {
@@ -1712,12 +1720,12 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
                                    CHK_SVID_SMID(0x10EC, 0x9185))
                                        rtlhal->oem_id = RT_CID_TOSHIBA;
                                else if (rtlefuse->eeprom_svid == 0x1025)
-                                       rtlhal->oem_id = RT_CID_819x_Acer;
+                                       rtlhal->oem_id = RT_CID_819X_ACER;
                                else if (CHK_SVID_SMID(0x10EC, 0x8186))
-                                       rtlhal->oem_id = RT_CID_819x_PRONETS;
+                                       rtlhal->oem_id = RT_CID_819X_PRONETS;
                                else if (CHK_SVID_SMID(0x1043, 0x8486))
                                        rtlhal->oem_id =
-                                                    RT_CID_819x_Edimax_ASUS;
+                                                    RT_CID_819X_EDIMAX_ASUS;
                                else
                                        rtlhal->oem_id = RT_CID_DEFAULT;
                        } else {
@@ -1731,7 +1739,7 @@ static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
                        rtlhal->oem_id = RT_CID_CCX;
                        break;
                case EEPROM_CID_QMI:
-                       rtlhal->oem_id = RT_CID_819x_QMI;
+                       rtlhal->oem_id = RT_CID_819X_QMI;
                        break;
                case EEPROM_CID_WHQL:
                                break;
@@ -2037,8 +2045,7 @@ void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw)
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        u16 sifs_timer;
 
-       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
-                                     (u8 *)&mac->slot_time);
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
        if (!mac->ht_enable)
                sifs_timer = 0x0a0a;
        else
index 5d318a85eda4047100eeec7c5297412c15d615fd..3ea78afdec734f6e31c9c1f5a63e8238e91ca318 100644 (file)
 #include "../wifi.h"
 #include "../pci.h"
 #include "../ps.h"
+#include "../core.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
 #include "rf.h"
 #include "dm.h"
 #include "table.h"
+#include "../rtl8723com/phy_common.h"
 
 /* static forward definitions */
 static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
@@ -43,72 +45,17 @@ static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
 static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
                                    enum radio_path rfpath,
                                    u32 offset, u32 data);
-static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
-                              enum radio_path rfpath, u32 offset);
-static void _phy_rf_serial_write(struct ieee80211_hw *hw,
-                                enum radio_path rfpath, u32 offset, u32 data);
-static u32 _phy_calculate_bit_shift(u32 bitmask);
 static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
 static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw);
 static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype);
 static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype);
-static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
-static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
-                                     u32 cmdtableidx, u32 cmdtablesz,
-                                     enum swchnlcmd_id cmdid,
-                                     u32 para1, u32 para2,
-                                     u32 msdelay);
 static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
                                      u8 *stage, u8 *step, u32 *delay);
 static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
                                enum wireless_mode wirelessmode,
                                long power_indbm);
-static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
-                                 enum wireless_mode wirelessmode, u8 txpwridx);
 static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw);
 
-u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
-                              u32 bitmask)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 returnvalue, originalvalue, bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
-                "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
-       originalvalue = rtl_read_dword(rtlpriv, regaddr);
-       bitshift = _phy_calculate_bit_shift(bitmask);
-       returnvalue = (originalvalue & bitmask) >> bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
-                "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr,
-                originalvalue);
-
-       return returnvalue;
-}
-
-void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
-                             u32 regaddr, u32 bitmask, u32 data)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 originalvalue, bitshift;
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
-                "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr,
-                bitmask, data);
-
-       if (bitmask != MASKDWORD) {
-               originalvalue = rtl_read_dword(rtlpriv, regaddr);
-               bitshift = _phy_calculate_bit_shift(bitmask);
-               data = ((originalvalue & (~bitmask)) | (data << bitshift));
-       }
-
-       rtl_write_dword(rtlpriv, regaddr, data);
-
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
-                "regaddr(%#x), bitmask(%#x), data(%#x)\n",
-                regaddr, bitmask, data);
-}
-
 u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
                               enum radio_path rfpath, u32 regaddr, u32 bitmask)
 {
@@ -124,11 +71,11 @@ u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
        spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
 
        if (rtlphy->rf_mode != RF_OP_BY_FW)
-               original_value = _phy_rf_serial_read(hw, rfpath, regaddr);
+               original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
        else
                original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr);
 
-       bitshift = _phy_calculate_bit_shift(bitmask);
+       bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
        readback_value = (original_value & bitmask) >> bitshift;
 
        spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
@@ -157,19 +104,19 @@ void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
 
        if (rtlphy->rf_mode != RF_OP_BY_FW) {
                if (bitmask != RFREG_OFFSET_MASK) {
-                       original_value = _phy_rf_serial_read(hw, rfpath,
-                                                            regaddr);
-                       bitshift = _phy_calculate_bit_shift(bitmask);
+                       original_value = rtl8723_phy_rf_serial_read(hw, rfpath,
+                                                                   regaddr);
+                       bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
                        data = ((original_value & (~bitmask)) |
                               (data << bitshift));
                }
 
-               _phy_rf_serial_write(hw, rfpath, regaddr, data);
+               rtl8723_phy_rf_serial_write(hw, rfpath, regaddr, data);
        } else {
                if (bitmask != RFREG_OFFSET_MASK) {
                        original_value = _phy_fw_rf_serial_read(hw, rfpath,
                                                                regaddr);
-                       bitshift = _phy_calculate_bit_shift(bitmask);
+                       bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
                        data = ((original_value & (~bitmask)) |
                               (data << bitshift));
                }
@@ -197,87 +144,6 @@ static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
        RT_ASSERT(false, "deprecated!\n");
 }
 
-static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
-                              enum radio_path rfpath, u32 offset)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-       u32 newoffset;
-       u32 tmplong, tmplong2;
-       u8 rfpi_enable = 0;
-       u32 retvalue;
-
-       offset &= 0x3f;
-       newoffset = offset;
-       if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
-               return 0xFFFFFFFF;
-       }
-       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
-       if (rfpath == RF90_PATH_A)
-               tmplong2 = tmplong;
-       else
-               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
-       tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
-           (newoffset << 23) | BLSSIREADEDGE;
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
-                     tmplong & (~BLSSIREADEDGE));
-       mdelay(1);
-       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
-       mdelay(1);
-       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
-                     tmplong | BLSSIREADEDGE);
-       mdelay(1);
-       if (rfpath == RF90_PATH_A)
-               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
-                                                BIT(8));
-       else if (rfpath == RF90_PATH_B)
-               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
-                                                BIT(8));
-       if (rfpi_enable)
-               retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
-                                        BLSSIREADBACKDATA);
-       else
-               retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
-                                        BLSSIREADBACKDATA);
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
-                rfpath, pphyreg->rf_rb, retvalue);
-       return retvalue;
-}
-
-static void _phy_rf_serial_write(struct ieee80211_hw *hw,
-                                enum radio_path rfpath, u32 offset, u32 data)
-{
-       u32 data_and_addr;
-       u32 newoffset;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
-       if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
-               return;
-       }
-       offset &= 0x3f;
-       newoffset = offset;
-       data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
-       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
-       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
-                rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _phy_calculate_bit_shift(u32 bitmask)
-{
-       u32 i;
-
-       for (i = 0; i <= 31; i++) {
-               if (((bitmask >> i) & 0x1) == 1)
-                       break;
-       }
-       return i;
-}
-
 static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw)
 {
        rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
@@ -307,7 +173,7 @@ bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw)
        u8 tmpu1b;
        u8 reg_hwparafile = 1;
 
-       _phy_init_bb_rf_reg_def(hw);
+       rtl8723_phy_init_bb_rf_reg_def(hw);
 
        /* 1. 0x28[1] = 1 */
        tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL);
@@ -412,18 +278,7 @@ static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype)
        phy_regarray_table = RTL8723EPHY_REG_1TARRAY;
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_reg_arraylen; i = i + 2) {
-                       if (phy_regarray_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table[i]);
                        rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
                                      phy_regarray_table[i + 1]);
                        udelay(1);
@@ -585,18 +440,7 @@ static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype)
 
        if (configtype == BASEBAND_CONFIG_PHY_REG) {
                for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
-                       if (phy_regarray_table_pg[i] == 0xfe)
-                               mdelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfd)
-                               mdelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xfc)
-                               mdelay(1);
-                       else if (phy_regarray_table_pg[i] == 0xfb)
-                               udelay(50);
-                       else if (phy_regarray_table_pg[i] == 0xfa)
-                               udelay(5);
-                       else if (phy_regarray_table_pg[i] == 0xf9)
-                               udelay(1);
+                       rtl_addr_delay(phy_regarray_table_pg[i]);
 
                        _st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i],
                                              phy_regarray_table_pg[i + 1],
@@ -623,24 +467,9 @@ bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        switch (rfpath) {
        case RF90_PATH_A:
                for (i = 0; i < radioa_arraylen; i = i + 2) {
-                       if (radioa_array_table[i] == 0xfe)
-                               mdelay(50);
-                       else if (radioa_array_table[i] == 0xfd)
-                               mdelay(5);
-                       else if (radioa_array_table[i] == 0xfc)
-                               mdelay(1);
-                       else if (radioa_array_table[i] == 0xfb)
-                               udelay(50);
-                       else if (radioa_array_table[i] == 0xfa)
-                               udelay(5);
-                       else if (radioa_array_table[i] == 0xf9)
-                               udelay(1);
-                       else {
-                               rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
-                                             RFREG_OFFSET_MASK,
-                                             radioa_array_table[i + 1]);
-                               udelay(1);
-                       }
+                       rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+                                       RFREG_OFFSET_MASK,
+                                       radioa_array_table[i + 1]);
                }
                break;
        case RF90_PATH_B:
@@ -690,92 +519,6 @@ void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
                 ROFDM0_RXDETECTOR3, rtlphy->framesync);
 }
 
-static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-       rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-       rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
-                           RFPGA0_XA_LSSIPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
-                           RFPGA0_XB_LSSIPARAMETER;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-       rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
-       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
-       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
-       rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
-       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
-       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
-       rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
-       rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
-       rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
-       rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
-       rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
-       rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
-       rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
-       rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
-
-       rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
-       rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
-}
-
 void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -785,17 +528,17 @@ void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
        long txpwr_dbm;
 
        txpwr_level = rtlphy->cur_cck_txpwridx;
-       txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
+       txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
        txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
            rtlefuse->legacy_ht_txpowerdiff;
-       if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
-               txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+       if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
+               txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
                                                  txpwr_level);
        txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
-       if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
+       if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
            txpwr_dbm)
-               txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
-                                                 txpwr_level);
+               txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+                                                        txpwr_level);
        *powerlevel = txpwr_dbm;
 }
 
@@ -912,28 +655,6 @@ static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
        return txpwridx;
 }
 
-static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
-                                 enum wireless_mode wirelessmode, u8 txpwridx)
-{
-       long offset;
-       long pwrout_dbm;
-
-       switch (wirelessmode) {
-       case WIRELESS_MODE_B:
-               offset = -7;
-               break;
-       case WIRELESS_MODE_G:
-       case WIRELESS_MODE_N_24G:
-               offset = -8;
-               break;
-       default:
-               offset = -8;
-               break;
-       }
-       pwrout_dbm = txpwridx / 2 + offset;
-       return pwrout_dbm;
-}
-
 void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1117,26 +838,26 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
        u8 num_total_rfpath = rtlphy->num_total_rfpath;
 
        precommoncmdcnt = 0;
-       _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
-                                 MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
-                                 0, 0, 0);
-       _phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
-                                 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+       rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
+                                        0, 0, 0);
+       rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
        postcommoncmdcnt = 0;
 
-       _phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
-                                 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
+       rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+                                        MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
        rfdependcmdcnt = 0;
 
        RT_ASSERT((channel >= 1 && channel <= 14),
                  "illegal channel for Zebra: %d\n", channel);
 
-       _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
-                                 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
+       rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
                                  RF_CHNLBW, channel, 10);
 
-       _phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
-                                 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
+       rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
 
        do {
                switch (*stage) {
@@ -1204,29 +925,6 @@ static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
        return false;
 }
 
-static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
-                                     u32 cmdtableidx, u32 cmdtablesz,
-                                     enum swchnlcmd_id cmdid, u32 para1,
-                                     u32 para2, u32 msdelay)
-{
-       struct swchnlcmd *pcmd;
-
-       if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL.\n");
-               return false;
-       }
-
-       if (cmdtableidx >= cmdtablesz)
-               return false;
-
-       pcmd = cmdtable + cmdtableidx;
-       pcmd->cmdid = cmdid;
-       pcmd->para1 = para1;
-       pcmd->para2 = para2;
-       pcmd->msdelay = msdelay;
-       return true;
-}
-
 static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
 {
        u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
@@ -1297,136 +995,6 @@ static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw)
        return result;
 }
 
-static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok,
-                                      long result[][8], u8 final_candidate,
-                                      bool btxonly)
-{
-       u32 oldval_0, x, tx0_a, reg;
-       long y, tx0_c;
-
-       if (final_candidate == 0xFF) {
-               return;
-       } else if (iqk_ok) {
-               oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
-                                         MASKDWORD) >> 22) & 0x3FF;
-               x = result[final_candidate][0];
-               if ((x & 0x00000200) != 0)
-                       x = x | 0xFFFFFC00;
-               tx0_a = (x * oldval_0) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
-                             ((x * oldval_0 >> 7) & 0x1));
-               y = result[final_candidate][1];
-               if ((y & 0x00000200) != 0)
-                       y = y | 0xFFFFFC00;
-               tx0_c = (y * oldval_0) >> 8;
-               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
-                             ((tx0_c & 0x3C0) >> 6));
-               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
-                             (tx0_c & 0x3F));
-               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
-                             ((y * oldval_0 >> 7) & 0x1));
-               if (btxonly)
-                       return;
-               reg = result[final_candidate][2];
-               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
-               reg = result[final_candidate][3] & 0x3F;
-               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
-               reg = (result[final_candidate][3] >> 6) & 0xF;
-               rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
-       }
-}
-
-static void phy_save_adda_regs(struct ieee80211_hw *hw,
-                                              u32 *addareg, u32 *addabackup,
-                                              u32 registernum)
-{
-       u32 i;
-
-       for (i = 0; i < registernum; i++)
-               addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
-}
-
-static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
-                             u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
-               macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
-       macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
-static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg,
-                                u32 *addabackup, u32 regiesternum)
-{
-       u32 i;
-
-       for (i = 0; i < regiesternum; i++)
-               rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
-}
-
-static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
-                               u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i;
-
-       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
-               rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
-       rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
-}
-
-static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw,
-                                       u32 *addareg, bool is_patha_on,
-                                       bool is2t)
-{
-       u32 pathOn;
-       u32 i;
-
-       pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
-       if (false == is2t) {
-               pathOn = 0x0bdb25a0;
-               rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
-       } else {
-               rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
-       }
-
-       for (i = 1; i < IQK_ADDA_REG_NUM; i++)
-               rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
-}
-
-static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw,
-                                                  u32 *macreg, u32 *macbackup)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 i = 0;
-
-       rtl_write_byte(rtlpriv, macreg[i], 0x3F);
-
-       for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
-               rtl_write_byte(rtlpriv, macreg[i],
-                              (u8) (macbackup[i] & (~BIT(3))));
-       rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
-static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw)
-{
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
-       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
-       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
-}
-
-static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
-{
-       u32 mode;
-
-       mode = pi_mode ? 0x01000100 : 0x01000000;
-       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
-       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
-}
-
 static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8],
                                u8 c1, u8 c2)
 {
@@ -1498,10 +1066,12 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
        const u32 retrycount = 2;
 
        if (t == 0) {
-               phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
-               phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+               rtl8723_save_adda_registers(hw, adda_reg, rtlphy->adda_backup,
+                                           16);
+               rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
+                                              rtlphy->iqk_mac_backup);
        }
-       _rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t);
+       rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
        if (t == 0) {
                rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
                                                 RFPGA0_XA_HSSIPARAMETER1,
@@ -1509,7 +1079,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
        }
 
        if (!rtlphy->rfpi_enable)
-               _rtl8723ae_phy_pi_mode_switch(hw, true);
+               rtl8723_phy_pi_mode_switch(hw, true);
        if (t == 0) {
                rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
                rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
@@ -1522,7 +1092,7 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
                rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
                rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
        }
-       _rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg,
+       rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
                                            rtlphy->iqk_mac_backup);
        rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
        if (is2t)
@@ -1552,8 +1122,8 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
        }
 
        if (is2t) {
-               _rtl8723ae_phy_path_a_standby(hw);
-               _rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t);
+               rtl8723_phy_path_a_standby(hw);
+               rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
                for (i = 0; i < retrycount; i++) {
                        pathb_ok = _rtl8723ae_phy_path_b_iqk(hw);
                        if (pathb_ok == 0x03) {
@@ -1588,9 +1158,11 @@ static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
                rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
        if (t != 0) {
                if (!rtlphy->rfpi_enable)
-                       _rtl8723ae_phy_pi_mode_switch(hw, false);
-               phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
-               phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
+                       rtl8723_phy_pi_mode_switch(hw, false);
+               rtl8723_phy_reload_adda_registers(hw, adda_reg,
+                                                 rtlphy->adda_backup, 16);
+               rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
+                                                rtlphy->iqk_mac_backup);
        }
 }
 
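The IQ-calibration hunks above now take the ADDA/MAC save, reload, path-on, PI-mode and standby helpers from rtl8723com/phy_common instead of the file-local copies deleted earlier. A minimal sketch of the save/reload pair, assuming the shared versions keep the semantics of the removed phy_save_adda_regs()/phy_reload_adda_regs():

/* Sketch only: bodies mirror the deleted file-local helpers, not the
 * exact rtl8723com/phy_common.c source. */
void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
				 u32 *addabackup, u32 registernum)
{
	u32 i;

	/* Snapshot each baseband register so calibration can be undone. */
	for (i = 0; i < registernum; i++)
		addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
}

void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
				       u32 *addabackup, u32 registernum)
{
	u32 i;

	/* Restore the snapshot taken before IQ calibration started. */
	for (i = 0; i < registernum; i++)
		rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
}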
@@ -1691,7 +1263,8 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
        };
 
        if (recovery) {
-               phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+               rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+                                                 rtlphy->iqk_bb_backup, 10);
                return;
        }
        if (start_conttx || singletone)
@@ -1756,9 +1329,10 @@ void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
                rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
        }
        if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
-               phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
-                                          final_candidate, (reg_ea4 == 0));
-       phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
+               rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+                                                  final_candidate,
+                                                  (reg_ea4 == 0));
+       rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
 }
 
 void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw)
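The tx-power reporting hunk earlier in this file likewise switches to rtl8723_phy_txpwr_idx_to_dbm() from rtl8723com. A minimal sketch of that conversion, assuming it keeps the arithmetic of the deleted _phy_txpwr_idx_to_dbm() (each index step is 0.5 dB, offset by -7 dB for 802.11b and -8 dB otherwise):

/* Sketch only: mirrors the per-driver conversion removed above. */
long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
				  enum wireless_mode wirelessmode,
				  u8 txpwridx)
{
	long offset = (wirelessmode == WIRELESS_MODE_B) ? -7 : -8;

	return txpwridx / 2 + offset;
}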
index 007ebdbbe108623f88c163e77d3e548eabc00eec..cd43139ed332abc85f656e8c766f5b680e5adeb1 100644 (file)
 
 #define RTL92C_MAX_PATH_NUM                    2
 
-enum swchnlcmd_id {
-       CMDID_END,
-       CMDID_SET_TXPOWEROWER_LEVEL,
-       CMDID_BBREGWRITE10,
-       CMDID_WRITEPORT_ULONG,
-       CMDID_WRITEPORT_USHORT,
-       CMDID_WRITEPORT_UCHAR,
-       CMDID_RF_WRITEREG,
-};
-
-struct swchnlcmd {
-       enum swchnlcmd_id cmdid;
-       u32 para1;
-       u32 para2;
-       u32 msdelay;
-};
-
 enum hw90_block_e {
        HW90_BLOCK_MAC = 0,
        HW90_BLOCK_PHY0 = 1,
@@ -183,10 +166,6 @@ struct tx_power_struct {
        u32 mcs_original_offset[4][16];
 };
 
-u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
-                              u32 regaddr, u32 bitmask);
-void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
-                             u32 regaddr, u32 bitmask, u32 data);
 u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
                               enum radio_path rfpath, u32 regaddr,
                               u32 bitmask);
index 7a46f9fdf558b08b5c60f2ddc758b2d9f7adfc4e..a418acb4d0ca1e56be786cc4122e4d8024dca33c 100644 (file)
@@ -30,7 +30,6 @@
 #ifndef __RTL8723E_PWRSEQ_H__
 #define __RTL8723E_PWRSEQ_H__
 
-#include "pwrseqcmd.h"
 /*
        Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
        There are 6 HW Power States:
index 199da366c6da1a6b5a00dd03a14c8179f15026e9..64376b38708bd2e216f4b23a18da87247b4059b9 100644 (file)
 #define        BWORD1                                  0xc
 #define        BWORD                                   0xf
 
-#define        MASKBYTE0                               0xff
-#define        MASKBYTE1                               0xff00
-#define        MASKBYTE2                               0xff0000
-#define        MASKBYTE3                               0xff000000
-#define        MASKHWORD                               0xffff0000
-#define        MASKLWORD                               0x0000ffff
-#define        MASKDWORD                               0xffffffff
-#define        MASK12BITS                              0xfff
-#define        MASKH4BITS                              0xf0000000
-#define MASKOFDM_D                             0xffc00000
-#define        MASKCCK                                 0x3f3f3f3f
-
-#define        MASK4BITS                               0x0f
-#define        MASK20BITS                              0xfffff
-#define RFREG_OFFSET_MASK                      0xfffff
-
 #define        BENABLE                                 0x1
 #define        BDISABLE                                0x0
 
index 62b204faf773f74b1137aa2ada315375357268e8..1087a3bd07fa2dbd51f9983540ac44351374c58d 100644 (file)
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
+#include "../rtl8723com/phy_common.h"
 #include "dm.h"
 #include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
 #include "sw.h"
 #include "trx.h"
 #include "led.h"
@@ -193,6 +196,11 @@ void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw)
        }
 }
 
+static bool is_fw_header(struct rtl92c_firmware_header *hdr)
+{
+       return (hdr->signature & 0xfff0) == 0x2300;
+}
+
 static struct rtl_hal_ops rtl8723ae_hal_ops = {
        .init_sw_vars = rtl8723ae_init_sw_vars,
        .deinit_sw_vars = rtl8723ae_deinit_sw_vars,
@@ -231,13 +239,14 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
        .set_key = rtl8723ae_set_key,
        .init_sw_leds = rtl8723ae_init_sw_leds,
        .allow_all_destaddr = rtl8723ae_allow_all_destaddr,
-       .get_bbreg = rtl8723ae_phy_query_bb_reg,
-       .set_bbreg = rtl8723ae_phy_set_bb_reg,
+       .get_bbreg = rtl8723_phy_query_bb_reg,
+       .set_bbreg = rtl8723_phy_set_bb_reg,
        .get_rfreg = rtl8723ae_phy_query_rf_reg,
        .set_rfreg = rtl8723ae_phy_set_rf_reg,
        .c2h_command_handle = rtl_8723e_c2h_command_handle,
        .bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify,
        .bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps,
+       .is_fw_header = is_fw_header,
 };
 
 static struct rtl_mod_params rtl8723ae_mod_params = {
index 50b7be3f3a605673756e543e456324993c66980d..10b7577b6ae534906102ec29edff266dbdee12d4 100644 (file)
@@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
                        /* during testing, hdr could be NULL here */
                        return false;
                }
-               if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+               if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
                        (ieee80211_has_protected(hdr->frame_control)))
                        rx_status->flag &= ~RX_FLAG_DECRYPTED;
                else
@@ -365,7 +365,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
 
 void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
                            struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                           struct ieee80211_tx_info *info,
+                           u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta,
                            struct sk_buff *skb, u8 hw_queue,
                            struct rtl_tcb_desc *ptcdesc)
@@ -375,7 +375,7 @@ void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        bool defaultadapter = true;
-       u8 *pdesc = (u8 *) pdesc_tx;
+       u8 *pdesc = pdesc_tx;
        u16 seq_number;
        __le16 fc = hdr->frame_control;
        u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -577,7 +577,7 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
 
        SET_TX_DESC_OWN(pdesc, 1);
 
-       SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+       SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
 
        SET_TX_DESC_FIRST_SEG(pdesc, 1);
        SET_TX_DESC_LAST_SEG(pdesc, 1);
@@ -597,7 +597,8 @@ void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
                      pdesc, TX_DESC_SIZE);
 }
 
-void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
+void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                       u8 desc_name, u8 *val)
 {
        if (istx == true) {
                switch (desc_name) {
index ad05b54bc0f1b50d3f03d5b39cb4f6eff351717d..4380b7d3a91ac39c05e192816314bfaa4d4a6bc8 100644 (file)
@@ -521,12 +521,6 @@ do {                                                       \
                memset(__pdesc, 0, _size);              \
 } while (0)
 
-#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs)             \
-       ((rxmcs) == DESC92_RATE1M ||                    \
-        (rxmcs) == DESC92_RATE2M ||                    \
-        (rxmcs) == DESC92_RATE5_5M ||                  \
-        (rxmcs) == DESC92_RATE11M)
-
 struct rx_fwinfo_8723e {
        u8 gain_trsw[4];
        u8 pwdb_all;
@@ -706,8 +700,8 @@ struct rx_desc_8723e {
 } __packed;
 
 void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
-                           struct ieee80211_hdr *hdr, u8 *pdesc_tx,
-                           struct ieee80211_tx_info *info,
+                           struct ieee80211_hdr *hdr, u8 *pdesc,
+                           u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta,
                            struct sk_buff *skb, u8 hw_queue,
                            struct rtl_tcb_desc *ptcb_desc);
@@ -715,7 +709,8 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
                             struct rtl_stats *status,
                             struct ieee80211_rx_status *rx_status,
                             u8 *pdesc, struct sk_buff *skb);
-void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+void rtl8723ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                       u8 desc_name, u8 *val);
 u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
 void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
 void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/Makefile b/drivers/net/wireless/rtlwifi/rtl8723be/Makefile
new file mode 100644 (file)
index 0000000..59e416a
--- /dev/null
@@ -0,0 +1,19 @@
+obj-m := rtl8723be.o
+
+
+rtl8723be-objs :=              \
+               dm.o            \
+               fw.o            \
+               hw.o            \
+               led.o           \
+               phy.o           \
+               pwrseq.o        \
+               rf.o            \
+               sw.o            \
+               table.o         \
+               trx.o           \
+
+
+obj-$(CONFIG_RTL8723BE) += rtl8723be.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/rtlwifi/rtl8723be/def.h
new file mode 100644 (file)
index 0000000..3c30b74
--- /dev/null
@@ -0,0 +1,248 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_DEF_H__
+#define __RTL8723BE_DEF_H__
+
+#define HAL_RETRY_LIMIT_INFRA                          48
+#define HAL_RETRY_LIMIT_AP_ADHOC                       7
+
+#define RESET_DELAY_8185                               20
+
+#define RT_IBSS_INT_MASKS      (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
+#define RT_AC_INT_MASKS                (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
+
+#define NUM_OF_FIRMWARE_QUEUE                  10
+#define NUM_OF_PAGES_IN_FW                     0x100
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK             0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE             0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI             0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO             0x07
+#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA           0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_CMD            0x0
+#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT           0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH           0x02
+#define NUM_OF_PAGE_IN_FW_QUEUE_BCN            0x2
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB            0xA1
+
+#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM         0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM         0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM         0x048
+#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM         0x026
+#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM                0x00
+
+#define MAX_LINES_HWCONFIG_TXT                 1000
+#define MAX_BYTES_LINE_HWCONFIG_TXT            256
+
+#define SW_THREE_WIRE                          0
+#define HW_THREE_WIRE                          2
+
+#define BT_DEMO_BOARD                          0
+#define BT_QA_BOARD                            1
+#define BT_FPGA                                        2
+
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE                0
+#define HAL_PRIME_CHNL_OFFSET_LOWER            1
+#define HAL_PRIME_CHNL_OFFSET_UPPER            2
+
+#define MAX_H2C_QUEUE_NUM                      10
+
+#define RX_MPDU_QUEUE                          0
+#define RX_CMD_QUEUE                           1
+#define RX_MAX_QUEUE                           2
+#define AC2QUEUEID(_AC)                                (_AC)
+
+#define        C2H_RX_CMD_HDR_LEN                      8
+#define        GET_C2H_CMD_CMD_LEN(__prxhdr)           \
+       LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
+#define        GET_C2H_CMD_ELEMENT_ID(__prxhdr)        \
+       LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
+#define        GET_C2H_CMD_CMD_SEQ(__prxhdr)           \
+       LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
+#define        GET_C2H_CMD_CONTINUE(__prxhdr)          \
+       LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
+#define        GET_C2H_CMD_CONTENT(__prxhdr)           \
+       ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
+
+#define        GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr)    \
+       LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
+#define        GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr)       \
+       LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
+#define        GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr)   \
+       LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
+#define        GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr)    \
+       LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
+#define        GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr)     \
+       LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
+#define        GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
+       LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
+#define        GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr)       \
+       LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
+#define        GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr)      \
+       LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
+#define        GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr)       \
+       LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
+
+#define CHIP_BONDING_IDENTIFIER(_value)        (((_value)>>22)&0x3)
+#define        CHIP_BONDING_92C_1T2R           0x1
+
+#define CHIP_8723                      BIT(0)
+#define CHIP_8723B                     (BIT(1) | BIT(2))
+#define NORMAL_CHIP                    BIT(3)
+#define RF_TYPE_1T1R                   (~(BIT(4) | BIT(5) | BIT(6)))
+#define RF_TYPE_1T2R                   BIT(4)
+#define RF_TYPE_2T2R                   BIT(5)
+#define CHIP_VENDOR_UMC                        BIT(7)
+#define B_CUT_VERSION                  BIT(12)
+#define C_CUT_VERSION                  BIT(13)
+#define D_CUT_VERSION                  ((BIT(12) | BIT(13)))
+#define E_CUT_VERSION                  BIT(14)
+#define        RF_RL_ID                        (BIT(31) | BIT(30) | BIT(29) | BIT(28))
+
+/* MASK */
+#define IC_TYPE_MASK                   (BIT(0) | BIT(1) | BIT(2))
+#define CHIP_TYPE_MASK                 BIT(3)
+#define RF_TYPE_MASK                   (BIT(4) | BIT(5) | BIT(6))
+#define MANUFACTUER_MASK               BIT(7)
+#define ROM_VERSION_MASK               (BIT(11) | BIT(10) | BIT(9) | BIT(8))
+#define CUT_VERSION_MASK               (BIT(15) | BIT(14) | BIT(13) | BIT(12))
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version)      ((version) & IC_TYPE_MASK)
+#define GET_CVID_CHIP_TYPE(version)    ((version) & CHIP_TYPE_MASK)
+#define GET_CVID_RF_TYPE(version)      ((version) & RF_TYPE_MASK)
+#define GET_CVID_MANUFACTUER(version)  ((version) & MANUFACTUER_MASK)
+#define GET_CVID_ROM_VERSION(version)  ((version) & ROM_VERSION_MASK)
+#define GET_CVID_CUT_VERSION(version)  ((version) & CUT_VERSION_MASK)
+
+#define IS_92C_SERIAL(version)   ((IS_81XXC(version) && IS_2T2R(version)) ?\
+                                                               true : false)
+#define IS_81XXC(version)      ((GET_CVID_IC_TYPE(version) == 0) ?\
+                                                       true : false)
+#define IS_8723_SERIES(version)        ((GET_CVID_IC_TYPE(version) == CHIP_8723) ?\
+                                                       true : false)
+#define IS_1T1R(version)       ((GET_CVID_RF_TYPE(version)) ? false : true)
+#define IS_1T2R(version)       ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R)\
+                                                       ? true : false)
+#define IS_2T2R(version)       ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R)\
+                                                       ? true : false)
+enum rf_optype {
+       RF_OP_BY_SW_3WIRE = 0,
+       RF_OP_BY_FW,
+       RF_OP_MAX
+};
+
+enum rf_power_state {
+       RF_ON,
+       RF_OFF,
+       RF_SLEEP,
+       RF_SHUT_DOWN,
+};
+
+enum power_save_mode {
+       POWER_SAVE_MODE_ACTIVE,
+       POWER_SAVE_MODE_SAVE,
+};
+
+enum power_polocy_config {
+       POWERCFG_MAX_POWER_SAVINGS,
+       POWERCFG_GLOBAL_POWER_SAVINGS,
+       POWERCFG_LOCAL_POWER_SAVINGS,
+       POWERCFG_LENOVO,
+};
+
+enum interface_select_pci {
+       INTF_SEL1_MINICARD = 0,
+       INTF_SEL0_PCIE = 1,
+       INTF_SEL2_RSV = 2,
+       INTF_SEL3_RSV = 3,
+};
+
+enum rtl_desc_qsel {
+       QSLT_BK = 0x2,
+       QSLT_BE = 0x0,
+       QSLT_VI = 0x5,
+       QSLT_VO = 0x7,
+       QSLT_BEACON = 0x10,
+       QSLT_HIGH = 0x11,
+       QSLT_MGNT = 0x12,
+       QSLT_CMD = 0x13,
+};
+
+enum rtl_desc8723e_rate {
+       DESC92C_RATE1M = 0x00,
+       DESC92C_RATE2M = 0x01,
+       DESC92C_RATE5_5M = 0x02,
+       DESC92C_RATE11M = 0x03,
+
+       DESC92C_RATE6M = 0x04,
+       DESC92C_RATE9M = 0x05,
+       DESC92C_RATE12M = 0x06,
+       DESC92C_RATE18M = 0x07,
+       DESC92C_RATE24M = 0x08,
+       DESC92C_RATE36M = 0x09,
+       DESC92C_RATE48M = 0x0a,
+       DESC92C_RATE54M = 0x0b,
+
+       DESC92C_RATEMCS0 = 0x0c,
+       DESC92C_RATEMCS1 = 0x0d,
+       DESC92C_RATEMCS2 = 0x0e,
+       DESC92C_RATEMCS3 = 0x0f,
+       DESC92C_RATEMCS4 = 0x10,
+       DESC92C_RATEMCS5 = 0x11,
+       DESC92C_RATEMCS6 = 0x12,
+       DESC92C_RATEMCS7 = 0x13,
+       DESC92C_RATEMCS8 = 0x14,
+       DESC92C_RATEMCS9 = 0x15,
+       DESC92C_RATEMCS10 = 0x16,
+       DESC92C_RATEMCS11 = 0x17,
+       DESC92C_RATEMCS12 = 0x18,
+       DESC92C_RATEMCS13 = 0x19,
+       DESC92C_RATEMCS14 = 0x1a,
+       DESC92C_RATEMCS15 = 0x1b,
+       DESC92C_RATEMCS15_SG = 0x1c,
+       DESC92C_RATEMCS32 = 0x20,
+};
+
+enum rx_packet_type {
+       NORMAL_RX,
+       TX_REPORT1,
+       TX_REPORT2,
+       HIS_REPORT,
+};
+
+struct phy_sts_cck_8723e_t {
+       u8 adc_pwdb_X[4];
+       u8 sq_rpt;
+       u8 cck_agc_rpt;
+};
+
+struct h2c_cmd_8723e {
+       u8 element_id;
+       u32 cmd_len;
+       u8 *p_cmdbuffer;
+};
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/rtlwifi/rtl8723be/dm.c
new file mode 100644 (file)
index 0000000..13d53a1
--- /dev/null
@@ -0,0 +1,1325 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "../rtl8723com/dm_common.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "trx.h"
+#include "../btcoexist/rtl_btc.h"
+
+static const u32 ofdmswing_table[] = {
+       0x0b40002d, /* 0,  -15.0dB */
+       0x0c000030, /* 1,  -14.5dB */
+       0x0cc00033, /* 2,  -14.0dB */
+       0x0d800036, /* 3,  -13.5dB */
+       0x0e400039, /* 4,  -13.0dB */
+       0x0f00003c, /* 5,  -12.5dB */
+       0x10000040, /* 6,  -12.0dB */
+       0x11000044, /* 7,  -11.5dB */
+       0x12000048, /* 8,  -11.0dB */
+       0x1300004c, /* 9,  -10.5dB */
+       0x14400051, /* 10, -10.0dB */
+       0x15800056, /* 11, -9.5dB */
+       0x16c0005b, /* 12, -9.0dB */
+       0x18000060, /* 13, -8.5dB */
+       0x19800066, /* 14, -8.0dB */
+       0x1b00006c, /* 15, -7.5dB */
+       0x1c800072, /* 16, -7.0dB */
+       0x1e400079, /* 17, -6.5dB */
+       0x20000080, /* 18, -6.0dB */
+       0x22000088, /* 19, -5.5dB */
+       0x24000090, /* 20, -5.0dB */
+       0x26000098, /* 21, -4.5dB */
+       0x288000a2, /* 22, -4.0dB */
+       0x2ac000ab, /* 23, -3.5dB */
+       0x2d4000b5, /* 24, -3.0dB */
+       0x300000c0, /* 25, -2.5dB */
+       0x32c000cb, /* 26, -2.0dB */
+       0x35c000d7, /* 27, -1.5dB */
+       0x390000e4, /* 28, -1.0dB */
+       0x3c8000f2, /* 29, -0.5dB */
+       0x40000100, /* 30, +0dB */
+       0x43c0010f, /* 31, +0.5dB */
+       0x47c0011f, /* 32, +1.0dB */
+       0x4c000130, /* 33, +1.5dB */
+       0x50800142, /* 34, +2.0dB */
+       0x55400155, /* 35, +2.5dB */
+       0x5a400169, /* 36, +3.0dB */
+       0x5fc0017f, /* 37, +3.5dB */
+       0x65400195, /* 38, +4.0dB */
+       0x6b8001ae, /* 39, +4.5dB */
+       0x71c001c7, /* 40, +5.0dB */
+       0x788001e2, /* 41, +5.5dB */
+       0x7f8001fe  /* 42, +6.0dB */
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+       {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}, /*  0, -16.0dB */
+       {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /*  1, -15.5dB */
+       {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /*  2, -15.0dB */
+       {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /*  3, -14.5dB */
+       {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /*  4, -14.0dB */
+       {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /*  5, -13.5dB */
+       {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /*  6, -13.0dB */
+       {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /*  7, -12.5dB */
+       {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /*  8, -12.0dB */
+       {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /*  9, -11.5dB */
+       {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 10, -11.0dB */
+       {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 11, -10.5dB */
+       {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 12, -10.0dB */
+       {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 13, -9.5dB */
+       {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 14, -9.0dB */
+       {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 15, -8.5dB */
+       {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+       {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 17, -7.5dB */
+       {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 18, -7.0dB */
+       {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 19, -6.5dB */
+       {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 20, -6.0dB */
+       {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 21, -5.5dB */
+       {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 22, -5.0dB */
+       {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 23, -4.5dB */
+       {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 24, -4.0dB */
+       {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 25, -3.5dB */
+       {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 26, -3.0dB */
+       {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 27, -2.5dB */
+       {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 28, -2.0dB */
+       {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 29, -1.5dB */
+       {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 30, -1.0dB */
+       {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 31, -0.5dB */
+       {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}  /* 32, +0dB */
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+       {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}, /*  0, -16.0dB */
+       {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /*  1, -15.5dB */
+       {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /*  2, -15.0dB */
+       {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /*  3, -14.5dB */
+       {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /*  4, -14.0dB */
+       {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /*  5, -13.5dB */
+       {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /*  6, -13.0dB */
+       {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /*  7, -12.5dB */
+       {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /*  8, -12.0dB */
+       {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /*  9, -11.5dB */
+       {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 10, -11.0dB */
+       {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 11, -10.5dB */
+       {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 12, -10.0dB */
+       {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 13, -9.5dB */
+       {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 14, -9.0dB */
+       {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 15, -8.5dB */
+       {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+       {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 17, -7.5dB */
+       {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 18, -7.0dB */
+       {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 19, -6.5dB */
+       {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 20, -6.0dB */
+       {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 21, -5.5dB */
+       {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 22, -5.0dB */
+       {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 23, -4.5dB */
+       {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 24, -4.0dB */
+       {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 25, -3.5dB */
+       {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 26, -3.0dB */
+       {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 27, -2.5dB */
+       {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 28, -2.0dB */
+       {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 29, -1.5dB */
+       {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 30, -1.0dB */
+       {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 31, -0.5dB */
+       {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}  /* 32, +0dB */
+};
+
+static const u32 edca_setting_dl[PEER_MAX] = {
+       0xa44f,         /* 0 UNKNOWN */
+       0x5ea44f,       /* 1 REALTEK_90 */
+       0x5e4322,       /* 2 REALTEK_92SE */
+       0x5ea42b,       /* 3 BROAD */
+       0xa44f,         /* 4 RAL */
+       0xa630,         /* 5 ATH */
+       0x5ea630,       /* 6 CISCO */
+       0x5ea42b,       /* 7 MARVELL */
+};
+
+static const u32 edca_setting_ul[PEER_MAX] = {
+       0x5e4322,       /* 0 UNKNOWN */
+       0xa44f,         /* 1 REALTEK_90 */
+       0x5ea44f,       /* 2 REALTEK_92SE */
+       0x5ea32b,       /* 3 BROAD */
+       0x5ea422,       /* 4 RAL */
+       0x5ea322,       /* 5 ATH */
+       0x3ea430,       /* 6 CISCO */
+       0x5ea44f,       /* 7 MARV */
+};
+
+void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
+                                      u8 *pdirection, u32 *poutwrite_val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+       u8 pwr_val = 0;
+       u8 ofdm_base = rtlpriv->dm.swing_idx_ofdm_base[RF90_PATH_A];
+       u8 ofdm_val = rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A];
+       u8 cck_base = rtldm->swing_idx_cck_base;
+       u8 cck_val = rtldm->swing_idx_cck;
+
+       if (type == 0) {
+               if (ofdm_val <= ofdm_base) {
+                       *pdirection = 1;
+                       pwr_val = ofdm_base - ofdm_val;
+               } else {
+                       *pdirection = 2;
+                       pwr_val = ofdm_val - ofdm_base;
+               }
+       } else if (type == 1) {
+               if (cck_val <= cck_base) {
+                       *pdirection = 1;
+                       pwr_val = cck_base - cck_val;
+               } else {
+                       *pdirection = 2;
+                       pwr_val = cck_val - cck_base;
+               }
+       }
+
+       if (pwr_val >= TXPWRTRACK_MAX_IDX && (*pdirection == 1))
+               pwr_val = TXPWRTRACK_MAX_IDX;
+
+       *poutwrite_val = pwr_val | (pwr_val << 8) |
+                       (pwr_val << 16) | (pwr_val << 24);
+}
+
+static void rtl8723be_dm_diginit(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
+
+       dm_digtable->dig_enable_flag = true;
+       dm_digtable->cur_igvalue = rtl_get_bbreg(hw,
+               ROFDM0_XAAGCCORE1, 0x7f);
+       dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
+       dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
+       dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
+       dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
+       dm_digtable->rx_gain_max = DM_DIG_MAX;
+       dm_digtable->rx_gain_min = DM_DIG_MIN;
+       dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
+       dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
+       dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
+       dm_digtable->pre_cck_cca_thres = 0xff;
+       dm_digtable->cur_cck_cca_thres = 0x83;
+       dm_digtable->forbidden_igi = DM_DIG_MIN;
+       dm_digtable->large_fa_hit = 0;
+       dm_digtable->recover_cnt = 0;
+       dm_digtable->dig_min_0 = DM_DIG_MIN;
+       dm_digtable->dig_min_1 = DM_DIG_MIN;
+       dm_digtable->media_connect_0 = false;
+       dm_digtable->media_connect_1 = false;
+       rtlpriv->dm.dm_initialgain_enable = true;
+       dm_digtable->bt30_cur_igi = 0x32;
+}
+
+void rtl8723be_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rate_adaptive *ra = &(rtlpriv->ra);
+
+       ra->ratr_state = DM_RATR_STA_INIT;
+       ra->pre_ratr_state = DM_RATR_STA_INIT;
+
+       if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+               rtlpriv->dm.useramask = true;
+       else
+               rtlpriv->dm.useramask = false;
+
+       ra->high_rssi_thresh_for_ra = 50;
+       ra->low_rssi_thresh_for_ra40m = 20;
+}
+
+static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.txpower_tracking = true;
+       rtlpriv->dm.txpower_track_control = true;
+       rtlpriv->dm.thermalvalue = 0;
+
+       rtlpriv->dm.ofdm_index[0] = 30;
+       rtlpriv->dm.cck_index = 20;
+
+       rtlpriv->dm.swing_idx_cck_base = rtlpriv->dm.cck_index;
+
+       rtlpriv->dm.swing_idx_ofdm_base[0] = rtlpriv->dm.ofdm_index[0];
+       rtlpriv->dm.delta_power_index[RF90_PATH_A] = 0;
+       rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0;
+       rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0;
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                "  rtlpriv->dm.txpower_tracking = %d\n",
+                rtlpriv->dm.txpower_tracking);
+}
+
+static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.crystal_cap = rtlpriv->efuse.crystalcap;
+       rtlpriv->dm.atc_status = rtl_get_bbreg(hw, ROFDM1_CFOTRACKING, 0x800);
+       rtlpriv->dm.cfo_threshold = CFO_THRESHOLD_XTAL;
+}
+
+void rtl8723be_dm_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+       rtl8723be_dm_diginit(hw);
+       rtl8723be_dm_init_rate_adaptive_mask(hw);
+       rtl8723_dm_init_edca_turbo(hw);
+       rtl8723_dm_init_dynamic_bb_powersaving(hw);
+       rtl8723_dm_init_dynamic_txpower(hw);
+       rtl8723be_dm_init_txpower_tracking(hw);
+       rtl8723be_dm_init_dynamic_atc_switch(hw);
+}
+
+static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct dig_t *rtl_dm_dig = &(rtlpriv->dm_digtable);
+       struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+       /* Determine the minimum RSSI  */
+       if ((mac->link_state < MAC80211_LINKED) &&
+           (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
+               rtl_dm_dig->min_undec_pwdb_for_dm = 0;
+               RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                        "Not connected to any\n");
+       }
+       if (mac->link_state >= MAC80211_LINKED) {
+               if (mac->opmode == NL80211_IFTYPE_AP ||
+                   mac->opmode == NL80211_IFTYPE_ADHOC) {
+                       rtl_dm_dig->min_undec_pwdb_for_dm =
+                           rtlpriv->dm.entry_min_undec_sm_pwdb;
+                       RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                                "AP Client PWDB = 0x%lx\n",
+                                rtlpriv->dm.entry_min_undec_sm_pwdb);
+               } else {
+                       rtl_dm_dig->min_undec_pwdb_for_dm =
+                           rtlpriv->dm.undec_sm_pwdb;
+                       RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                                "STA Default Port PWDB = 0x%x\n",
+                                rtl_dm_dig->min_undec_pwdb_for_dm);
+               }
+       } else {
+               rtl_dm_dig->min_undec_pwdb_for_dm =
+                               rtlpriv->dm.entry_min_undec_sm_pwdb;
+               RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+                        "AP Ext Port or disconnet PWDB = 0x%x\n",
+                        rtl_dm_dig->min_undec_pwdb_for_dm);
+       }
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+                rtl_dm_dig->min_undec_pwdb_for_dm);
+}
+
+static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_sta_info *drv_priv;
+       u8 h2c_parameter[3] = { 0 };
+       long tmp_entry_max_pwdb = 0, tmp_entry_min_pwdb = 0xff;
+
+       /* AP & ADHOC & MESH */
+       spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+       list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+               if (drv_priv->rssi_stat.undec_sm_pwdb <
+                                               tmp_entry_min_pwdb)
+                       tmp_entry_min_pwdb =
+                               drv_priv->rssi_stat.undec_sm_pwdb;
+               if (drv_priv->rssi_stat.undec_sm_pwdb >
+                                               tmp_entry_max_pwdb)
+                       tmp_entry_max_pwdb =
+                               drv_priv->rssi_stat.undec_sm_pwdb;
+       }
+       spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+       /* If associated entry is found */
+       if (tmp_entry_max_pwdb != 0) {
+               rtlpriv->dm.entry_max_undec_sm_pwdb = tmp_entry_max_pwdb;
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "EntryMaxPWDB = 0x%lx(%ld)\n",
+                        tmp_entry_max_pwdb, tmp_entry_max_pwdb);
+       } else {
+               rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
+       }
+       /* If associated entry is found */
+       if (tmp_entry_min_pwdb != 0xff) {
+               rtlpriv->dm.entry_min_undec_sm_pwdb = tmp_entry_min_pwdb;
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "EntryMinPWDB = 0x%lx(%ld)\n",
+                        tmp_entry_min_pwdb, tmp_entry_min_pwdb);
+       } else {
+               rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
+       }
+       /* Indicate Rx signal strength to FW. */
+       if (rtlpriv->dm.useramask) {
+               h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
+               h2c_parameter[1] = 0x20;
+               h2c_parameter[0] = 0;
+               rtl8723be_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
+       } else {
+               rtl_write_byte(rtlpriv, 0x4fe, rtlpriv->dm.undec_sm_pwdb);
+       }
+       rtl8723be_dm_find_minimum_rssi(hw);
+       rtlpriv->dm_digtable.rssi_val_min =
+               rtlpriv->dm_digtable.min_undec_pwdb_for_dm;
+}
+
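The RSSI monitor above reports the smoothed PWDB to the firmware as a three-byte H2C payload (or falls back to writing register 0x4fe when the driver does not control the rate mask). Below is a minimal userspace sketch of that payload layout; the struct name and the values are purely illustrative assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout of the 3-byte buffer filled into h2c_parameter[]
 * above; struct name and values are made up for demonstration only. */
struct rssi_report_h2c {
        uint8_t macid;  /* h2c_parameter[0] = 0                    */
        uint8_t info;   /* h2c_parameter[1] = 0x20                 */
        uint8_t pwdb;   /* h2c_parameter[2] = undec_sm_pwdb & 0xff */
};

int main(void)
{
        struct rssi_report_h2c rpt = { .macid = 0, .info = 0x20, .pwdb = 0x3a };

        printf("H2C_RSSI_REPORT: %02x %02x %02x\n",
               rpt.macid, rpt.info, rpt.pwdb);
        return 0;
}
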
+void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->dm_digtable.cur_igvalue != current_igi) {
+               rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, current_igi);
+               if (rtlpriv->phy.rf_type != RF_1T1R)
+                       rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, current_igi);
+       }
+       rtlpriv->dm_digtable.pre_igvalue = rtlpriv->dm_digtable.cur_igvalue;
+       rtlpriv->dm_digtable.cur_igvalue = current_igi;
+}
+
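rtl8723be_dm_write_dig() relies on a masked baseband write (rtl_set_bbreg() with mask 0x7f) so only the IGI field of the AGC core register is touched. The sketch below shows what such a read-modify-write amounts to in general; it is an illustration under that assumption, not the rtlwifi implementation.

#include <stdint.h>
#include <stdio.h>

/* Sketch of a read-modify-write with a bitmask, in the spirit of
 * rtl_set_bbreg(hw, reg, 0x7f, igi): bits outside the mask keep their
 * previous value.  Purely illustrative. */
static uint32_t masked_write(uint32_t reg_val, uint32_t mask, uint32_t data)
{
        uint32_t shift = 0;

        while (!((mask >> shift) & 1))  /* align data with the mask LSB */
                shift++;
        return (reg_val & ~mask) | ((data << shift) & mask);
}

int main(void)
{
        /* Replace the low 7 bits (IGI) and leave the rest untouched. */
        printf("0x%08x\n", (unsigned)masked_write(0xdeadbe00, 0x7f, 0x2a));
        return 0;
}
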
+static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct dig_t *dm_digtable = &(rtlpriv->dm_digtable);
+       u8 dig_dynamic_min, dig_maxofmin;
+       bool firstconnect, firstdisconnect;
+       u8 dm_dig_max, dm_dig_min;
+       u8 current_igi = dm_digtable->cur_igvalue;
+       u8 offset;
+
+       /* AP, BT */
+       if (mac->act_scanning)
+               return;
+
+       dig_dynamic_min = dm_digtable->dig_min_0;
+       firstconnect = (mac->link_state >= MAC80211_LINKED) &&
+                       !dm_digtable->media_connect_0;
+       firstdisconnect = (mac->link_state < MAC80211_LINKED) &&
+                          dm_digtable->media_connect_0;
+
+       dm_dig_max = 0x5a;
+       dm_dig_min = DM_DIG_MIN;
+       dig_maxofmin = DM_DIG_MAX_AP;
+
+       if (mac->link_state >= MAC80211_LINKED) {
+               if ((dm_digtable->rssi_val_min + 10) > dm_dig_max)
+                       dm_digtable->rx_gain_max = dm_dig_max;
+               else if ((dm_digtable->rssi_val_min + 10) < dm_dig_min)
+                       dm_digtable->rx_gain_max = dm_dig_min;
+               else
+                       dm_digtable->rx_gain_max =
+                               dm_digtable->rssi_val_min + 10;
+
+               if (rtlpriv->dm.one_entry_only) {
+                       offset = 12;
+                       if (dm_digtable->rssi_val_min - offset < dm_dig_min)
+                               dig_dynamic_min = dm_dig_min;
+                       else if (dm_digtable->rssi_val_min - offset >
+                                                       dig_maxofmin)
+                               dig_dynamic_min = dig_maxofmin;
+                       else
+                               dig_dynamic_min =
+                                       dm_digtable->rssi_val_min - offset;
+               } else {
+                       dig_dynamic_min = dm_dig_min;
+               }
+       } else {
+               dm_digtable->rx_gain_max = dm_dig_max;
+               dig_dynamic_min = dm_dig_min;
+               RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+       }
+
+       if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
+               if (dm_digtable->large_fa_hit != 3)
+                       dm_digtable->large_fa_hit++;
+               if (dm_digtable->forbidden_igi < current_igi) {
+                       dm_digtable->forbidden_igi = current_igi;
+                       dm_digtable->large_fa_hit = 1;
+               }
+
+               if (dm_digtable->large_fa_hit >= 3) {
+                       if ((dm_digtable->forbidden_igi + 1) >
+                            dm_digtable->rx_gain_max)
+                               dm_digtable->rx_gain_min =
+                                               dm_digtable->rx_gain_max;
+                       else
+                               dm_digtable->rx_gain_min =
+                                               dm_digtable->forbidden_igi + 1;
+                       dm_digtable->recover_cnt = 3600;
+               }
+       } else {
+               if (dm_digtable->recover_cnt != 0) {
+                       dm_digtable->recover_cnt--;
+               } else {
+                       if (dm_digtable->large_fa_hit < 3) {
+                               if ((dm_digtable->forbidden_igi - 1) <
+                                    dig_dynamic_min) {
+                                       dm_digtable->forbidden_igi =
+                                                       dig_dynamic_min;
+                                       dm_digtable->rx_gain_min =
+                                                       dig_dynamic_min;
+                               } else {
+                                       dm_digtable->forbidden_igi--;
+                                       dm_digtable->rx_gain_min =
+                                               dm_digtable->forbidden_igi + 1;
+                               }
+                       } else {
+                               dm_digtable->large_fa_hit = 0;
+                       }
+               }
+       }
+       if (dm_digtable->rx_gain_min > dm_digtable->rx_gain_max)
+               dm_digtable->rx_gain_min = dm_digtable->rx_gain_max;
+
+       if (mac->link_state >= MAC80211_LINKED) {
+               if (firstconnect) {
+                       if (dm_digtable->rssi_val_min <= dig_maxofmin)
+                               current_igi = dm_digtable->rssi_val_min;
+                       else
+                               current_igi = dig_maxofmin;
+
+                       dm_digtable->large_fa_hit = 0;
+               } else {
+                       if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
+                               current_igi += 4;
+                       else if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH1)
+                               current_igi += 2;
+                       else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
+                               current_igi -= 2;
+               }
+       } else {
+               if (firstdisconnect) {
+                       current_igi = dm_digtable->rx_gain_min;
+               } else {
+                       if (rtlpriv->falsealm_cnt.cnt_all > 10000)
+                               current_igi += 4;
+                       else if (rtlpriv->falsealm_cnt.cnt_all > 8000)
+                               current_igi += 2;
+                       else if (rtlpriv->falsealm_cnt.cnt_all < 500)
+                               current_igi -= 2;
+               }
+       }
+
+       if (current_igi > dm_digtable->rx_gain_max)
+               current_igi = dm_digtable->rx_gain_max;
+       else if (current_igi < dm_digtable->rx_gain_min)
+               current_igi = dm_digtable->rx_gain_min;
+
+       rtl8723be_dm_write_dig(hw, current_igi);
+       dm_digtable->media_connect_0 =
+               (mac->link_state >= MAC80211_LINKED);
+       dm_digtable->dig_min_0 = dig_dynamic_min;
+}
+
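Stripped of the connection bookkeeping, the connected-state part of rtl8723be_dm_dig() is a small feedback loop: the IGI is nudged up or down by the false-alarm count and then clamped to the current rx gain window. A standalone sketch of that core step follows; the thresholds mirror DM_DIG_FA_TH0/TH1/TH2 from dm.h, while the inputs in main() are made up.

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the IGI update in rtl8723be_dm_dig(); only the
 * false-alarm nudge and the clamp are kept. */
static uint8_t dig_step(uint8_t igi, uint32_t fa_cnt,
                        uint8_t gain_min, uint8_t gain_max)
{
        if (fa_cnt > 0x400)             /* DM_DIG_FA_TH2 */
                igi += 4;
        else if (fa_cnt > 0x300)        /* DM_DIG_FA_TH1 */
                igi += 2;
        else if (fa_cnt < 0x200)        /* DM_DIG_FA_TH0 */
                igi -= 2;

        if (igi > gain_max)             /* clamp to the rx gain window */
                igi = gain_max;
        else if (igi < gain_min)
                igi = gain_min;
        return igi;
}

int main(void)
{
        printf("noisy: 0x%x\n", dig_step(0x30, 0x500, 0x1e, 0x5a)); /* 0x34 */
        printf("quiet: 0x%x\n", dig_step(0x30, 0x100, 0x1e, 0x5a)); /* 0x2e */
        return 0;
}
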
+static void rtl8723be_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+       u32 ret_value;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
+
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1);
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 1);
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE1_11N, MASKDWORD);
+       falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff;
+       falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16;
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE2_11N, MASKDWORD);
+       falsealm_cnt->cnt_ofdm_cca = ret_value & 0xffff;
+       falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16;
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE3_11N, MASKDWORD);
+       falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff;
+       falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16;
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_OFDM_FA_TYPE4_11N, MASKDWORD);
+       falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff;
+
+       falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+                                     falsealm_cnt->cnt_rate_illegal +
+                                     falsealm_cnt->cnt_crc8_fail +
+                                     falsealm_cnt->cnt_mcs_fail +
+                                     falsealm_cnt->cnt_fast_fsync_fail +
+                                     falsealm_cnt->cnt_sb_search_fail;
+
+       rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(12), 1);
+       rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(14), 1);
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_RST_11N, MASKBYTE0);
+       falsealm_cnt->cnt_cck_fail = ret_value;
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_CCK_FA_MSB_11N, MASKBYTE3);
+       falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+
+       ret_value = rtl_get_bbreg(hw, DM_REG_CCK_CCA_CNT_11N, MASKDWORD);
+       falsealm_cnt->cnt_cck_cca = ((ret_value & 0xff) << 8) |
+                                   ((ret_value & 0xff00) >> 8);
+
+       falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
+                               falsealm_cnt->cnt_sb_search_fail +
+                               falsealm_cnt->cnt_parity_fail +
+                               falsealm_cnt->cnt_rate_illegal +
+                               falsealm_cnt->cnt_crc8_fail +
+                               falsealm_cnt->cnt_mcs_fail +
+                               falsealm_cnt->cnt_cck_fail;
+
+       falsealm_cnt->cnt_cca_all = falsealm_cnt->cnt_ofdm_cca +
+                                   falsealm_cnt->cnt_cck_cca;
+
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 1);
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTC_11N, BIT(31), 0);
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 1);
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(27), 0);
+
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0);
+       rtl_set_bbreg(hw, DM_REG_OFDM_FA_RSTD_11N, BIT(31), 0);
+
+       rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0);
+       rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2);
+
+       rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
+       rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
+                "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+                falsealm_cnt->cnt_parity_fail,
+                falsealm_cnt->cnt_rate_illegal,
+                falsealm_cnt->cnt_crc8_fail,
+                falsealm_cnt->cnt_mcs_fail);
+
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                "cnt_ofdm_fail = %x, cnt_cck_fail = %x,"
+                " cnt_all = %x\n",
+                falsealm_cnt->cnt_ofdm_fail,
+                falsealm_cnt->cnt_cck_fail,
+                falsealm_cnt->cnt_all);
+}
+
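The CCK counters read back above are split across registers: the false-alarm count arrives as separate low and high bytes, and the CCA count comes back with its two bytes swapped. A small sketch of that assembly arithmetic, using invented register snapshots:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Invented register readbacks, for illustration only. */
        uint32_t cck_fa_lsb = 0x34;     /* MASKBYTE0 of DM_REG_CCK_FA_RST_11N */
        uint32_t cck_fa_msb = 0x12;     /* MASKBYTE3 of DM_REG_CCK_FA_MSB_11N */
        uint32_t cck_cca_raw = 0xbeef;  /* low 16 bits of DM_REG_CCK_CCA_CNT_11N */

        /* High byte of the false-alarm count lives in a second register. */
        uint32_t cnt_cck_fail = cck_fa_lsb + ((cck_fa_msb & 0xff) << 8);

        /* The CCA count is stored byte-swapped, so exchange the two bytes. */
        uint32_t cnt_cck_cca = ((cck_cca_raw & 0xff) << 8) |
                               ((cck_cca_raw & 0xff00) >> 8);

        printf("cck_fail=0x%x cck_cca=0x%x\n",
               (unsigned)cnt_cck_fail, (unsigned)cnt_cck_cca);
        return 0;
}
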
+static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw)
+{
+       /* 8723BE does not support ODM_BB_DYNAMIC_TXPWR*/
+       return;
+}
+
+static void rtl8723be_set_iqk_matrix(struct ieee80211_hw *hw, u8 ofdm_index,
+                                    u8 rfpath, long iqk_result_x,
+                                    long iqk_result_y)
+{
+       long ele_a = 0, ele_d, ele_c = 0, value32;
+
+       if (ofdm_index >= 43)
+               ofdm_index = 43 - 1;
+
+       ele_d = (ofdmswing_table[ofdm_index] & 0xFFC00000) >> 22;
+
+       if (iqk_result_x != 0) {
+               if ((iqk_result_x & 0x00000200) != 0)
+                       iqk_result_x = iqk_result_x | 0xFFFFFC00;
+               ele_a = ((iqk_result_x * ele_d) >> 8) & 0x000003FF;
+
+               if ((iqk_result_y & 0x00000200) != 0)
+                       iqk_result_y = iqk_result_y | 0xFFFFFC00;
+               ele_c = ((iqk_result_y * ele_d) >> 8) & 0x000003FF;
+
+               switch (rfpath) {
+               case RF90_PATH_A:
+                       value32 = (ele_d << 22) |
+                               ((ele_c & 0x3F) << 16) | ele_a;
+                       rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+                                     value32);
+                       value32 = (ele_c & 0x000003C0) >> 6;
+                       rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
+                       value32 = ((iqk_result_x * ele_d) >> 7) & 0x01;
+                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+                                     value32);
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               switch (rfpath) {
+               case RF90_PATH_A:
+                       rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+                                     ofdmswing_table[ofdm_index]);
+                       rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
+                       rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00);
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
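The `iqk_result_x | 0xFFFFFC00` step above is a sign extension: IQK results are 10-bit two's-complement values, so bit 9 decides whether the upper bits are filled with ones before the value is scaled by ele_d. A tiny sketch of the same idea, with an illustrative helper that is not part of the driver:

#include <stdio.h>

/* Sign-extend a 10-bit two's-complement value; mirrors the intent of the
 * "| 0xFFFFFC00" step above (the driver then masks the scaled result back
 * down to 10 bits). */
static long sign_extend_10bit(long v)
{
        if (v & 0x200)                  /* bit 9 set -> negative value */
                v |= ~0x3FFL;
        return v;
}

int main(void)
{
        printf("%ld\n", sign_extend_10bit(0x3FF));      /* -1  */
        printf("%ld\n", sign_extend_10bit(0x1FF));      /* 511 */
        return 0;
}
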
+static void rtl8723be_dm_tx_power_track_set_power(struct ieee80211_hw *hw,
+                                       enum pwr_track_control_method method,
+                                       u8 rfpath, u8 idx)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+       u8 swing_idx_ofdm_limit = 36;
+
+       if (method == TXAGC) {
+               rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
+       } else if (method == BBSWING) {
+               if (rtldm->swing_idx_cck >= CCK_TABLE_SIZE)
+                       rtldm->swing_idx_cck = CCK_TABLE_SIZE - 1;
+
+               if (!rtldm->cck_inch14) {
+                       rtl_write_byte(rtlpriv, 0xa22,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][0]);
+                       rtl_write_byte(rtlpriv, 0xa23,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][1]);
+                       rtl_write_byte(rtlpriv, 0xa24,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][2]);
+                       rtl_write_byte(rtlpriv, 0xa25,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][3]);
+                       rtl_write_byte(rtlpriv, 0xa26,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][4]);
+                       rtl_write_byte(rtlpriv, 0xa27,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][5]);
+                       rtl_write_byte(rtlpriv, 0xa28,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][6]);
+                       rtl_write_byte(rtlpriv, 0xa29,
+                           cckswing_table_ch1ch13[rtldm->swing_idx_cck][7]);
+               } else {
+                       rtl_write_byte(rtlpriv, 0xa22,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][0]);
+                       rtl_write_byte(rtlpriv, 0xa23,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][1]);
+                       rtl_write_byte(rtlpriv, 0xa24,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][2]);
+                       rtl_write_byte(rtlpriv, 0xa25,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][3]);
+                       rtl_write_byte(rtlpriv, 0xa26,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][4]);
+                       rtl_write_byte(rtlpriv, 0xa27,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][5]);
+                       rtl_write_byte(rtlpriv, 0xa28,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][6]);
+                       rtl_write_byte(rtlpriv, 0xa29,
+                           cckswing_table_ch14[rtldm->swing_idx_cck][7]);
+               }
+
+               if (rfpath == RF90_PATH_A) {
+                       if (rtldm->swing_idx_ofdm[RF90_PATH_A] <
+                           swing_idx_ofdm_limit)
+                               swing_idx_ofdm_limit =
+                                       rtldm->swing_idx_ofdm[RF90_PATH_A];
+
+                       rtl8723be_set_iqk_matrix(hw,
+                               rtldm->swing_idx_ofdm[rfpath], rfpath,
+                               rtlphy->iqk_matrix[idx].value[0][0],
+                               rtlphy->iqk_matrix[idx].value[0][1]);
+               } else if (rfpath == RF90_PATH_B) {
+                       if (rtldm->swing_idx_ofdm[RF90_PATH_B] <
+                           swing_idx_ofdm_limit)
+                               swing_idx_ofdm_limit =
+                                       rtldm->swing_idx_ofdm[RF90_PATH_B];
+
+                       rtl8723be_set_iqk_matrix(hw,
+                               rtldm->swing_idx_ofdm[rfpath], rfpath,
+                               rtlphy->iqk_matrix[idx].value[0][4],
+                               rtlphy->iqk_matrix[idx].value[0][5]);
+               }
+       } else {
+               return;
+       }
+}
+
+static void txpwr_track_cb_therm(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_dm   *rtldm = rtl_dm(rtl_priv(hw));
+       u8 thermalvalue = 0, delta, delta_lck, delta_iqk;
+       u8 thermalvalue_avg_count = 0;
+       u32 thermalvalue_avg = 0;
+       int i = 0;
+
+       u8 ofdm_min_index = 6;
+       u8 index = 0;
+
+       char delta_swing_table_idx_tup_a[] = {
+               0, 0, 1, 2, 2, 2, 3, 3, 3, 4,  5,
+               5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
+               10, 11, 11, 12, 12, 13, 14, 15};
+       char delta_swing_table_idx_tdown_a[] = {
+               0, 0, 1, 2, 2, 2, 3, 3, 3, 4,  5,
+               5, 6, 6, 6, 6, 7, 7, 7, 8, 8,  9,
+               9, 10, 10, 11, 12, 13, 14, 15};
+
+       /* Initialization (7 steps in total) */
+       rtlpriv->dm.txpower_trackinginit = true;
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                "rtl8723be_dm_txpower_tracking"
+                "_callback_thermalmeter\n");
+
+       thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xfc00);
+       if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 ||
+           rtlefuse->eeprom_thermalmeter == 0xFF)
+               return;
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+                "eeprom_thermalmeter 0x%x\n",
+                thermalvalue, rtldm->thermalvalue,
+                rtlefuse->eeprom_thermalmeter);
+       /*3 Initialize ThermalValues of RFCalibrateInfo*/
+       if (!rtldm->thermalvalue) {
+               rtlpriv->dm.thermalvalue_lck = thermalvalue;
+               rtlpriv->dm.thermalvalue_iqk = thermalvalue;
+       }
+
+       /*4 Calculate average thermal meter*/
+       rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermalvalue;
+       rtldm->thermalvalue_avg_index++;
+       if (rtldm->thermalvalue_avg_index == AVG_THERMAL_NUM_8723BE)
+               rtldm->thermalvalue_avg_index = 0;
+
+       for (i = 0; i < AVG_THERMAL_NUM_8723BE; i++) {
+               if (rtldm->thermalvalue_avg[i]) {
+                       thermalvalue_avg += rtldm->thermalvalue_avg[i];
+                       thermalvalue_avg_count++;
+               }
+       }
+
+       if (thermalvalue_avg_count)
+               thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
+
+       /* 5 Calculate delta, delta_LCK, delta_IQK.*/
+       delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
+               (thermalvalue - rtlpriv->dm.thermalvalue) :
+               (rtlpriv->dm.thermalvalue - thermalvalue);
+       delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
+                   (rtlpriv->dm.thermalvalue_lck - thermalvalue);
+       delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
+                   (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
+                   (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                "Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
+                "eeprom_thermalmeter 0x%x delta 0x%x "
+                "delta_lck 0x%x delta_iqk 0x%x\n",
+                thermalvalue, rtlpriv->dm.thermalvalue,
+                rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
+       /* 6 If necessary, do LCK.*/
+       if (delta_lck >= IQK_THRESHOLD) {
+               rtlpriv->dm.thermalvalue_lck = thermalvalue;
+               rtl8723be_phy_lc_calibrate(hw);
+       }
+
+       /* 7 If necessary, move the index of
+        * swing table to adjust Tx power.
+        */
+       if (delta > 0 && rtlpriv->dm.txpower_track_control) {
+               delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
+                       (thermalvalue - rtlefuse->eeprom_thermalmeter) :
+                       (rtlefuse->eeprom_thermalmeter - thermalvalue);
+
+               if (delta >= TXSCALE_TABLE_SIZE)
+                       delta = TXSCALE_TABLE_SIZE - 1;
+               /* 7.1 Get the final CCK_index and
+                * OFDM_index for each swing table.
+                */
+               if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+                       rtldm->delta_power_index_last[RF90_PATH_A] =
+                                       rtldm->delta_power_index[RF90_PATH_A];
+                       rtldm->delta_power_index[RF90_PATH_A] =
+                                       delta_swing_table_idx_tup_a[delta];
+               } else {
+                       rtldm->delta_power_index_last[RF90_PATH_A] =
+                                       rtldm->delta_power_index[RF90_PATH_A];
+                       rtldm->delta_power_index[RF90_PATH_A] =
+                               -1 * delta_swing_table_idx_tdown_a[delta];
+               }
+
+               /* 7.2 Handle boundary conditions of index.*/
+               if (rtldm->delta_power_index[RF90_PATH_A] ==
+                   rtldm->delta_power_index_last[RF90_PATH_A])
+                       rtldm->power_index_offset[RF90_PATH_A] = 0;
+               else
+                       rtldm->power_index_offset[RF90_PATH_A] =
+                               rtldm->delta_power_index[RF90_PATH_A] -
+                               rtldm->delta_power_index_last[RF90_PATH_A];
+
+               rtldm->ofdm_index[0] =
+                       rtldm->swing_idx_ofdm_base[RF90_PATH_A] +
+                       rtldm->power_index_offset[RF90_PATH_A];
+               rtldm->cck_index = rtldm->swing_idx_cck_base +
+                                  rtldm->power_index_offset[RF90_PATH_A];
+
+               rtldm->swing_idx_cck = rtldm->cck_index;
+               rtldm->swing_idx_ofdm[0] = rtldm->ofdm_index[0];
+
+               if (rtldm->ofdm_index[0] > OFDM_TABLE_SIZE - 1)
+                       rtldm->ofdm_index[0] = OFDM_TABLE_SIZE - 1;
+               else if (rtldm->ofdm_index[0] < ofdm_min_index)
+                       rtldm->ofdm_index[0] = ofdm_min_index;
+
+               if (rtldm->cck_index > CCK_TABLE_SIZE - 1)
+                       rtldm->cck_index = CCK_TABLE_SIZE - 1;
+               else if (rtldm->cck_index < 0)
+                       rtldm->cck_index = 0;
+       } else {
+               rtldm->power_index_offset[RF90_PATH_A] = 0;
+       }
+
+       if ((rtldm->power_index_offset[RF90_PATH_A] != 0) &&
+           (rtldm->txpower_track_control)) {
+               rtldm->done_txpower = true;
+               /* Both thermal drift directions are compensated through
+                * the BB swing tables, so a single call covers them.
+                */
+               rtl8723be_dm_tx_power_track_set_power(hw, BBSWING, 0,
+                                                     index);
+
+               rtldm->swing_idx_cck_base = rtldm->swing_idx_cck;
+               rtldm->swing_idx_ofdm_base[RF90_PATH_A] =
+                                               rtldm->swing_idx_ofdm[0];
+               rtldm->thermalvalue = thermalvalue;
+       }
+
+       if (delta_iqk >= IQK_THRESHOLD) {
+               rtldm->thermalvalue_iqk = thermalvalue;
+               rtl8723be_phy_iq_calibrate(hw, false);
+       }
+
+       rtldm->txpowercount = 0;
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+}
+
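Step 7 of the thermal tracking above boils down to: take the absolute drift between the current thermal reading and the efuse calibration value, clamp it to the table size, and look it up in an "up" or "down" swing table depending on the drift direction. Below is a standalone sketch of that lookup; the tables are the ones from txpwr_track_cb_therm(), the thermal readings are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define TXSCALE_TABLE_SIZE 30

static const int8_t tup[TXSCALE_TABLE_SIZE] = {
        0, 0, 1, 2, 2, 2, 3, 3, 3, 4,  5,
        5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
        10, 11, 11, 12, 12, 13, 14, 15};
static const int8_t tdown[TXSCALE_TABLE_SIZE] = {
        0, 0, 1, 2, 2, 2, 3, 3, 3, 4,  5,
        5, 6, 6, 6, 6, 7, 7, 7, 8, 8,  9,
        9, 10, 10, 11, 12, 13, 14, 15};

/* Map a thermal drift to a signed power-index delta, as in step 7.1. */
static int delta_power_index(uint8_t thermal, uint8_t eeprom_thermal)
{
        uint8_t delta = thermal > eeprom_thermal ? thermal - eeprom_thermal
                                                 : eeprom_thermal - thermal;

        if (delta >= TXSCALE_TABLE_SIZE)
                delta = TXSCALE_TABLE_SIZE - 1;
        return thermal > eeprom_thermal ? tup[delta] : -tdown[delta];
}

int main(void)
{
        printf("hot: %d  cool: %d\n",
               delta_power_index(0x20, 0x18),   /* chip ran hotter  ->  3 */
               delta_power_index(0x10, 0x18));  /* chip ran cooler  -> -3 */
        return 0;
}
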
+void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       static u8 tm_trigger;
+
+       if (!rtlpriv->dm.txpower_tracking)
+               return;
+
+       if (!tm_trigger) {
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
+                             0x03);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "Trigger 8723be Thermal Meter!!\n");
+               tm_trigger = 1;
+               return;
+       }
+
+       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                "Schedule TxPowerTracking !!\n");
+       txpwr_track_cb_therm(hw);
+       tm_trigger = 0;
+}
+
+static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rate_adaptive *ra = &(rtlpriv->ra);
+       struct ieee80211_sta *sta = NULL;
+       u32 low_rssithresh_for_ra = ra->low2high_rssi_thresh_for_ra40m;
+       u32 high_rssithresh_for_ra = ra->high_rssi_thresh_for_ra;
+       u8 go_up_gap = 5;
+
+       if (is_hal_stop(rtlhal)) {
+               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                        "driver is going to unload\n");
+               return;
+       }
+
+       if (!rtlpriv->dm.useramask) {
+               RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                        "driver does not control rate adaptive mask\n");
+               return;
+       }
+
+       if (mac->link_state == MAC80211_LINKED &&
+           mac->opmode == NL80211_IFTYPE_STATION) {
+               switch (ra->pre_ratr_state) {
+               case DM_RATR_STA_MIDDLE:
+                       high_rssithresh_for_ra += go_up_gap;
+                       break;
+               case DM_RATR_STA_LOW:
+                       high_rssithresh_for_ra += go_up_gap;
+                       low_rssithresh_for_ra += go_up_gap;
+                       break;
+               default:
+                       break;
+               }
+
+               if (rtlpriv->dm.undec_sm_pwdb >
+                   (long)high_rssithresh_for_ra)
+                       ra->ratr_state = DM_RATR_STA_HIGH;
+               else if (rtlpriv->dm.undec_sm_pwdb >
+                        (long)low_rssithresh_for_ra)
+                       ra->ratr_state = DM_RATR_STA_MIDDLE;
+               else
+                       ra->ratr_state = DM_RATR_STA_LOW;
+
+               if (ra->pre_ratr_state != ra->ratr_state) {
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                "RSSI = %ld\n",
+                                rtlpriv->dm.undec_sm_pwdb);
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                "RSSI_LEVEL = %d\n", ra->ratr_state);
+                       RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
+                                "PreState = %d, CurState = %d\n",
+                                ra->pre_ratr_state, ra->ratr_state);
+
+                       rcu_read_lock();
+                       sta = rtl_find_sta(hw, mac->bssid);
+                       if (sta)
+                               rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
+                                                          ra->ratr_state);
+                       rcu_read_unlock();
+
+                       ra->pre_ratr_state = ra->ratr_state;
+               }
+       }
+}
+
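The rate-adaptive mask refresh above adds hysteresis by raising the promotion thresholds with go_up_gap when the station currently sits in a lower state, so a borderline RSSI does not flip the rate mask on every watchdog tick. A simplified sketch of that decision; the threshold numbers are made up.

#include <stdio.h>

enum ratr_state { RATR_HIGH = 1, RATR_MIDDLE = 2, RATR_LOW = 3 };

/* Promotion thresholds are lifted by go_up_gap for stations currently
 * in a lower state, mirroring the switch on pre_ratr_state above. */
static enum ratr_state ra_state(long rssi, enum ratr_state prev)
{
        long high = 50, low = 20, go_up_gap = 5;

        if (prev == RATR_MIDDLE) {
                high += go_up_gap;
        } else if (prev == RATR_LOW) {
                high += go_up_gap;
                low += go_up_gap;
        }

        if (rssi > high)
                return RATR_HIGH;
        if (rssi > low)
                return RATR_MIDDLE;
        return RATR_LOW;
}

int main(void)
{
        /* An RSSI of 52 keeps a HIGH station at HIGH (1) but is not
         * enough to promote a MIDDLE one (2). */
        printf("%d %d\n", ra_state(52, RATR_HIGH), ra_state(52, RATR_MIDDLE));
        return 0;
}
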
+static bool rtl8723be_dm_is_edca_turbo_disable(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->cfg->ops->get_btc_status()) {
+               if (rtlpriv->btcoexist.btc_ops->btc_is_disable_edca_turbo(rtlpriv))
+                       return true;
+       }
+       if (rtlpriv->mac80211.mode == WIRELESS_MODE_B)
+               return true;
+
+       return false;
+}
+
+static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       static u64 last_txok_cnt;
+       static u64 last_rxok_cnt;
+       u64 cur_txok_cnt = 0;
+       u64 cur_rxok_cnt = 0;
+       u32 edca_be_ul = 0x6ea42b;
+       u32 edca_be_dl = 0x6ea42b;/*not sure*/
+       u32 edca_be = 0x5ea42b;
+       u32 iot_peer = 0;
+       bool is_cur_rdlstate;
+       bool last_is_cur_rdlstate = false;
+       bool bias_on_rx = false;
+       bool edca_turbo_on = false;
+
+       last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate;
+
+       cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+       cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+
+       iot_peer = rtlpriv->mac80211.vendor;
+       bias_on_rx = (iot_peer == PEER_RAL || iot_peer == PEER_ATH);
+       edca_turbo_on = (!rtlpriv->dm.is_any_nonbepkts) &&
+                       (!rtlpriv->dm.disable_framebursting);
+
+       if ((iot_peer == PEER_CISCO) &&
+           (mac->mode == WIRELESS_MODE_N_24G)) {
+               edca_be_dl = edca_setting_dl[iot_peer];
+               edca_be_ul = edca_setting_ul[iot_peer];
+       }
+       if (rtl8723be_dm_is_edca_turbo_disable(hw))
+               goto exit;
+
+       if (edca_turbo_on) {
+               if (bias_on_rx)
+                       is_cur_rdlstate = !(cur_txok_cnt > cur_rxok_cnt * 4);
+               else
+                       is_cur_rdlstate = cur_rxok_cnt > cur_txok_cnt * 4;
+
+               edca_be = (is_cur_rdlstate) ? edca_be_dl : edca_be_ul;
+               rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, edca_be);
+               rtlpriv->dm.is_cur_rdlstate = is_cur_rdlstate;
+               rtlpriv->dm.current_turbo_edca = true;
+       } else {
+               if (rtlpriv->dm.current_turbo_edca) {
+                       u8 tmp = AC0_BE;
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+                                                     &tmp);
+               }
+               rtlpriv->dm.current_turbo_edca = false;
+       }
+
+exit:
+       rtlpriv->dm.is_any_nonbepkts = false;
+       last_txok_cnt = rtlpriv->stats.txbytesunicast;
+       last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+
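The EDCA turbo logic above chooses between the uplink and downlink parameter sets by comparing the unicast TX and RX byte deltas with a 4:1 ratio, flipping the comparison for peers the driver biases toward RX. A compact sketch of that direction test with arbitrary byte counts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Traffic counts as download-dominant when RX bytes exceed TX bytes by
 * 4x; with bias_on_rx the test is inverted, matching the code above. */
static bool is_downlink(uint64_t tx_bytes, uint64_t rx_bytes, bool bias_on_rx)
{
        if (bias_on_rx)
                return !(tx_bytes > rx_bytes * 4);
        return rx_bytes > tx_bytes * 4;
}

int main(void)
{
        printf("%d\n", is_downlink(1000, 5000, false)); /* 1: clearly downlink */
        printf("%d\n", is_downlink(1000, 3000, false)); /* 0: treated as uplink */
        return 0;
}
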
+static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 cur_cck_cca_thresh;
+
+       if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+               if (rtlpriv->dm_digtable.rssi_val_min > 25) {
+                       cur_cck_cca_thresh = 0xcd;
+               } else if ((rtlpriv->dm_digtable.rssi_val_min <= 25) &&
+                          (rtlpriv->dm_digtable.rssi_val_min > 10)) {
+                       cur_cck_cca_thresh = 0x83;
+               } else {
+                       if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+                               cur_cck_cca_thresh = 0x83;
+                       else
+                               cur_cck_cca_thresh = 0x40;
+               }
+       } else {
+               if (rtlpriv->falsealm_cnt.cnt_cck_fail > 1000)
+                       cur_cck_cca_thresh = 0x83;
+               else
+                       cur_cck_cca_thresh = 0x40;
+       }
+
+       if (rtlpriv->dm_digtable.cur_cck_cca_thres != cur_cck_cca_thresh)
+               rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, cur_cck_cca_thresh);
+
+       rtlpriv->dm_digtable.pre_cck_cca_thres = rtlpriv->dm_digtable.cur_cck_cca_thres;
+       rtlpriv->dm_digtable.cur_cck_cca_thres = cur_cck_cca_thresh;
+       RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
+                "CCK cca threshold = %x\n",
+                rtlpriv->dm_digtable.cur_cck_cca_thres);
+}
+
+static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 reg_c50, reg_c58;
+       bool fw_current_in_ps_mode = false;
+
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                     (u8 *)(&fw_current_in_ps_mode));
+       if (fw_current_in_ps_mode)
+               return;
+
+       reg_c50 = rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+       reg_c58 = rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+
+       if (reg_c50 > 0x28 && reg_c58 > 0x28) {
+               if (!rtlpriv->rtlhal.pre_edcca_enable) {
+                       rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x03);
+                       rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x00);
+               }
+       } else if (reg_c50 < 0x25 && reg_c58 < 0x25) {
+               if (rtlpriv->rtlhal.pre_edcca_enable) {
+                       rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD, 0x7f);
+                       rtl_write_byte(rtlpriv, ROFDM0_ECCATHRESHOLD + 2, 0x7f);
+               }
+       }
+}
+
+static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+       u8 crystal_cap;
+       u32 packet_count;
+       int cfo_khz_a, cfo_khz_b, cfo_ave = 0, adjust_xtal = 0;
+       int cfo_ave_diff;
+
+       if (rtlpriv->mac80211.link_state < MAC80211_LINKED) {
+               if (rtldm->atc_status == ATC_STATUS_OFF) {
+                       rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+                                     ATC_STATUS_ON);
+                       rtldm->atc_status = ATC_STATUS_ON;
+               }
+               if (rtlpriv->cfg->ops->get_btc_status()) {
+                       if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) {
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "odm_DynamicATCSwitch(): Disable"
+                                        " CFO tracking for BT!!\n");
+                               return;
+                       }
+               }
+
+               if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) {
+                       rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
+                       crystal_cap = rtldm->crystal_cap & 0x3f;
+                       rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+                                     (crystal_cap | (crystal_cap << 6)));
+               }
+       } else {
+               cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
+               cfo_khz_b = (int)(rtldm->cfo_tail[1] * 3125) / 1280;
+               packet_count = rtldm->packet_count;
+
+               if (packet_count == rtldm->packet_count_pre)
+                       return;
+
+               rtldm->packet_count_pre = packet_count;
+
+               if (rtlpriv->phy.rf_type == RF_1T1R)
+                       cfo_ave = cfo_khz_a;
+               else
+                       cfo_ave = (int)(cfo_khz_a + cfo_khz_b) >> 1;
+
+               cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ?
+                              (rtldm->cfo_ave_pre - cfo_ave) :
+                              (cfo_ave - rtldm->cfo_ave_pre);
+
+               if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
+                       rtldm->large_cfo_hit = 1;
+                       return;
+               } else {
+                       rtldm->large_cfo_hit = 0;
+               }
+
+               rtldm->cfo_ave_pre = cfo_ave;
+
+               if (cfo_ave >= -rtldm->cfo_threshold &&
+                   cfo_ave <= rtldm->cfo_threshold && rtldm->is_freeze == 0) {
+                       if (rtldm->cfo_threshold == CFO_THRESHOLD_XTAL) {
+                               rtldm->cfo_threshold = CFO_THRESHOLD_XTAL + 10;
+                               rtldm->is_freeze = 1;
+                       } else {
+                               rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
+                       }
+               }
+
+               if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
+                       adjust_xtal = ((cfo_ave - CFO_THRESHOLD_XTAL) >> 1) + 1;
+               else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) &&
+                                       rtlpriv->dm.crystal_cap > 0)
+                       adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 1) - 1;
+
+               if (adjust_xtal != 0) {
+                       rtldm->is_freeze = 0;
+                       rtldm->crystal_cap += adjust_xtal;
+
+                       if (rtldm->crystal_cap > 0x3f)
+                               rtldm->crystal_cap = 0x3f;
+                       else if (rtldm->crystal_cap < 0)
+                               rtldm->crystal_cap = 0;
+
+                       crystal_cap = rtldm->crystal_cap & 0x3f;
+                       rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+                                     (crystal_cap | (crystal_cap << 6)));
+               }
+
+               if (cfo_ave < CFO_THRESHOLD_ATC &&
+                   cfo_ave > -CFO_THRESHOLD_ATC) {
+                       if (rtldm->atc_status == ATC_STATUS_ON) {
+                               rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+                                             ATC_STATUS_OFF);
+                               rtldm->atc_status = ATC_STATUS_OFF;
+                       }
+               } else {
+                       if (rtldm->atc_status == ATC_STATUS_OFF) {
+                               rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(11),
+                                             ATC_STATUS_ON);
+                               rtldm->atc_status = ATC_STATUS_ON;
+                       }
+               }
+       }
+}
+
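In the ATC/crystal-cap path above, the reported CFO tail is scaled to kHz with tail * 3125 / 1280, and the 6-bit crystal cap value is duplicated into both halves of the 12-bit 0xFFF000 field of REG_MAC_PHY_CTRL. A minimal sketch of both conversions, with invented inputs:

#include <stdint.h>
#include <stdio.h>

/* Scale a raw CFO tail report to kHz, as in the code above. */
static int cfo_tail_to_khz(int tail)
{
        return tail * 3125 / 1280;
}

/* Duplicate the 6-bit crystal cap into the low and high halves of the
 * 12-bit value written into the 0xFFF000 field. */
static uint32_t crystal_cap_field(uint8_t cap)
{
        cap &= 0x3f;
        return cap | (cap << 6);
}

int main(void)
{
        printf("%d kHz, field=0x%03x\n",
               cfo_tail_to_khz(41), (unsigned)crystal_cap_field(0x20));
        return 0;
}
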
+static void rtl8723be_dm_common_info_self_update(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_sta_info *drv_priv;
+       u8 cnt = 0;
+
+       rtlpriv->dm.one_entry_only = false;
+
+       if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_STATION &&
+           rtlpriv->mac80211.link_state >= MAC80211_LINKED) {
+               rtlpriv->dm.one_entry_only = true;
+               return;
+       }
+
+       if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP ||
+           rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC ||
+           rtlpriv->mac80211.opmode == NL80211_IFTYPE_MESH_POINT) {
+               spin_lock_bh(&rtlpriv->locks.entry_list_lock);
+               list_for_each_entry(drv_priv, &rtlpriv->entry_list, list) {
+                       cnt++;
+               }
+               spin_unlock_bh(&rtlpriv->locks.entry_list_lock);
+
+               if (cnt == 1)
+                       rtlpriv->dm.one_entry_only = true;
+       }
+}
+
+void rtl8723be_dm_watchdog(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool fw_current_inpsmode = false;
+       bool fw_ps_awake = true;
+
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                     (u8 *)(&fw_current_inpsmode));
+
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
+                                     (u8 *)(&fw_ps_awake));
+
+       if (ppsc->p2p_ps_info.p2p_ps_mode)
+               fw_ps_awake = false;
+
+       if ((ppsc->rfpwr_state == ERFON) &&
+           ((!fw_current_inpsmode) && fw_ps_awake) &&
+           (!ppsc->rfchange_inprogress)) {
+               rtl8723be_dm_common_info_self_update(hw);
+               rtl8723be_dm_false_alarm_counter_statistics(hw);
+               rtl8723be_dm_check_rssi_monitor(hw);
+               rtl8723be_dm_dig(hw);
+               rtl8723be_dm_dynamic_edcca(hw);
+               rtl8723be_dm_cck_packet_detection_thresh(hw);
+               rtl8723be_dm_refresh_rate_adaptive_mask(hw);
+               rtl8723be_dm_check_edca_turbo(hw);
+               rtl8723be_dm_dynamic_atc_switch(hw);
+               rtl8723be_dm_check_txpower_tracking(hw);
+               rtl8723be_dm_dynamic_txpower(hw);
+               if (rtlpriv->cfg->ops->get_btc_status())
+                       rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv);
+       }
+       rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/rtlwifi/rtl8723be/dm.h
new file mode 100644 (file)
index 0000000..c6c2f2a
--- /dev/null
@@ -0,0 +1,310 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef        __RTL8723BE_DM_H__
+#define __RTL8723BE_DM_H__
+
+#define        MAIN_ANT                0
+#define        AUX_ANT                 1
+#define        MAIN_ANT_CG_TRX         1
+#define        AUX_ANT_CG_TRX          0
+#define        MAIN_ANT_CGCS_RX        0
+#define        AUX_ANT_CGCS_RX         1
+
+#define        TXSCALE_TABLE_SIZE      30
+
+/*RF REG LIST*/
+#define        DM_REG_RF_MODE_11N                      0x00
+#define        DM_REG_RF_0B_11N                        0x0B
+#define        DM_REG_CHNBW_11N                        0x18
+#define        DM_REG_T_METER_11N                      0x24
+#define        DM_REG_RF_25_11N                        0x25
+#define        DM_REG_RF_26_11N                        0x26
+#define        DM_REG_RF_27_11N                        0x27
+#define        DM_REG_RF_2B_11N                        0x2B
+#define        DM_REG_RF_2C_11N                        0x2C
+#define        DM_REG_RXRF_A3_11N                      0x3C
+#define        DM_REG_T_METER_92D_11N                  0x42
+#define        DM_REG_T_METER_88E_11N                  0x42
+
+/*BB REG LIST*/
+/*PAGE 8 */
+#define        DM_REG_BB_CTRL_11N                      0x800
+#define        DM_REG_RF_PIN_11N                       0x804
+#define        DM_REG_PSD_CTRL_11N                     0x808
+#define        DM_REG_TX_ANT_CTRL_11N                  0x80C
+#define        DM_REG_BB_PWR_SAV5_11N                  0x818
+#define        DM_REG_CCK_RPT_FORMAT_11N               0x824
+#define        DM_REG_RX_DEFUALT_A_11N                 0x858
+#define        DM_REG_RX_DEFUALT_B_11N                 0x85A
+#define        DM_REG_BB_PWR_SAV3_11N                  0x85C
+#define        DM_REG_ANTSEL_CTRL_11N                  0x860
+#define        DM_REG_RX_ANT_CTRL_11N                  0x864
+#define        DM_REG_PIN_CTRL_11N                     0x870
+#define        DM_REG_BB_PWR_SAV1_11N                  0x874
+#define        DM_REG_ANTSEL_PATH_11N                  0x878
+#define        DM_REG_BB_3WIRE_11N                     0x88C
+#define        DM_REG_SC_CNT_11N                       0x8C4
+#define        DM_REG_PSD_DATA_11N                     0x8B4
+/*PAGE 9*/
+#define        DM_REG_ANT_MAPPING1_11N                 0x914
+#define        DM_REG_ANT_MAPPING2_11N                 0x918
+/*PAGE A*/
+#define        DM_REG_CCK_ANTDIV_PARA1_11N             0xA00
+#define        DM_REG_CCK_CCA_11N                      0xA0A
+#define        DM_REG_CCK_ANTDIV_PARA2_11N             0xA0C
+#define        DM_REG_CCK_ANTDIV_PARA3_11N             0xA10
+#define        DM_REG_CCK_ANTDIV_PARA4_11N             0xA14
+#define        DM_REG_CCK_FILTER_PARA1_11N             0xA22
+#define        DM_REG_CCK_FILTER_PARA2_11N             0xA23
+#define        DM_REG_CCK_FILTER_PARA3_11N             0xA24
+#define        DM_REG_CCK_FILTER_PARA4_11N             0xA25
+#define        DM_REG_CCK_FILTER_PARA5_11N             0xA26
+#define        DM_REG_CCK_FILTER_PARA6_11N             0xA27
+#define        DM_REG_CCK_FILTER_PARA7_11N             0xA28
+#define        DM_REG_CCK_FILTER_PARA8_11N             0xA29
+#define        DM_REG_CCK_FA_RST_11N                   0xA2C
+#define        DM_REG_CCK_FA_MSB_11N                   0xA58
+#define        DM_REG_CCK_FA_LSB_11N                   0xA5C
+#define        DM_REG_CCK_CCA_CNT_11N                  0xA60
+#define        DM_REG_BB_PWR_SAV4_11N                  0xA74
+/*PAGE B */
+#define        DM_REG_LNA_SWITCH_11N                   0xB2C
+#define        DM_REG_PATH_SWITCH_11N                  0xB30
+#define        DM_REG_RSSI_CTRL_11N                    0xB38
+#define        DM_REG_CONFIG_ANTA_11N                  0xB68
+#define        DM_REG_RSSI_BT_11N                      0xB9C
+/*PAGE C */
+#define        DM_REG_OFDM_FA_HOLDC_11N                0xC00
+#define        DM_REG_RX_PATH_11N                      0xC04
+#define        DM_REG_TRMUX_11N                        0xC08
+#define        DM_REG_OFDM_FA_RSTC_11N                 0xC0C
+#define        DM_REG_RXIQI_MATRIX_11N                 0xC14
+#define        DM_REG_TXIQK_MATRIX_LSB1_11N            0xC4C
+#define        DM_REG_IGI_A_11N                        0xC50
+#define        DM_REG_ANTDIV_PARA2_11N                 0xC54
+#define        DM_REG_IGI_B_11N                        0xC58
+#define        DM_REG_ANTDIV_PARA3_11N                 0xC5C
+#define        DM_REG_BB_PWR_SAV2_11N                  0xC70
+#define        DM_REG_RX_OFF_11N                       0xC7C
+#define        DM_REG_TXIQK_MATRIXA_11N                0xC80
+#define        DM_REG_TXIQK_MATRIXB_11N                0xC88
+#define        DM_REG_TXIQK_MATRIXA_LSB2_11N           0xC94
+#define        DM_REG_TXIQK_MATRIXB_LSB2_11N           0xC9C
+#define        DM_REG_RXIQK_MATRIX_LSB_11N             0xCA0
+#define        DM_REG_ANTDIV_PARA1_11N                 0xCA4
+#define        DM_REG_OFDM_FA_TYPE1_11N                0xCF0
+/*PAGE D */
+#define        DM_REG_OFDM_FA_RSTD_11N                 0xD00
+#define        DM_REG_OFDM_FA_TYPE2_11N                0xDA0
+#define        DM_REG_OFDM_FA_TYPE3_11N                0xDA4
+#define        DM_REG_OFDM_FA_TYPE4_11N                0xDA8
+/*PAGE E */
+#define        DM_REG_TXAGC_A_6_18_11N                 0xE00
+#define        DM_REG_TXAGC_A_24_54_11N                0xE04
+#define        DM_REG_TXAGC_A_1_MCS32_11N              0xE08
+#define        DM_REG_TXAGC_A_MCS0_3_11N               0xE10
+#define        DM_REG_TXAGC_A_MCS4_7_11N               0xE14
+#define        DM_REG_TXAGC_A_MCS8_11_11N              0xE18
+#define        DM_REG_TXAGC_A_MCS12_15_11N             0xE1C
+#define        DM_REG_FPGA0_IQK_11N                    0xE28
+#define        DM_REG_TXIQK_TONE_A_11N                 0xE30
+#define        DM_REG_RXIQK_TONE_A_11N                 0xE34
+#define        DM_REG_TXIQK_PI_A_11N                   0xE38
+#define        DM_REG_RXIQK_PI_A_11N                   0xE3C
+#define        DM_REG_TXIQK_11N                        0xE40
+#define        DM_REG_RXIQK_11N                        0xE44
+#define        DM_REG_IQK_AGC_PTS_11N                  0xE48
+#define        DM_REG_IQK_AGC_RSP_11N                  0xE4C
+#define        DM_REG_BLUETOOTH_11N                    0xE6C
+#define        DM_REG_RX_WAIT_CCA_11N                  0xE70
+#define        DM_REG_TX_CCK_RFON_11N                  0xE74
+#define        DM_REG_TX_CCK_BBON_11N                  0xE78
+#define        DM_REG_OFDM_RFON_11N                    0xE7C
+#define        DM_REG_OFDM_BBON_11N                    0xE80
+#define DM_REG_TX2RX_11N                       0xE84
+#define        DM_REG_TX2TX_11N                        0xE88
+#define        DM_REG_RX_CCK_11N                       0xE8C
+#define        DM_REG_RX_OFDM_11N                      0xED0
+#define        DM_REG_RX_WAIT_RIFS_11N                 0xED4
+#define        DM_REG_RX2RX_11N                        0xED8
+#define        DM_REG_STANDBY_11N                      0xEDC
+#define        DM_REG_SLEEP_11N                        0xEE0
+#define        DM_REG_PMPD_ANAEN_11N                   0xEEC
+
+/*MAC REG LIST*/
+#define        DM_REG_BB_RST_11N                       0x02
+#define        DM_REG_ANTSEL_PIN_11N                   0x4C
+#define        DM_REG_EARLY_MODE_11N                   0x4D0
+#define        DM_REG_RSSI_MONITOR_11N                 0x4FE
+#define        DM_REG_EDCA_VO_11N                      0x500
+#define        DM_REG_EDCA_VI_11N                      0x504
+#define        DM_REG_EDCA_BE_11N                      0x508
+#define        DM_REG_EDCA_BK_11N                      0x50C
+#define        DM_REG_TXPAUSE_11N                      0x522
+#define        DM_REG_RESP_TX_11N                      0x6D8
+#define        DM_REG_ANT_TRAIN_PARA1_11N              0x7b0
+#define        DM_REG_ANT_TRAIN_PARA2_11N              0x7b4
+
+/*DIG Related*/
+#define        DM_BIT_IGI_11N                          0x0000007F
+
+#define HAL_DM_DIG_DISABLE                     BIT(0)
+#define HAL_DM_HIPWR_DISABLE                   BIT(1)
+
+#define OFDM_TABLE_LENGTH                      43
+#define CCK_TABLE_LENGTH                       33
+
+#define OFDM_TABLE_SIZE                                37
+#define CCK_TABLE_SIZE                         33
+
+#define BW_AUTO_SWITCH_HIGH_LOW                        25
+#define BW_AUTO_SWITCH_LOW_HIGH                        30
+
+#define DM_DIG_THRESH_HIGH                     40
+#define DM_DIG_THRESH_LOW                      35
+
+#define DM_FALSEALARM_THRESH_LOW               400
+#define DM_FALSEALARM_THRESH_HIGH              1000
+
+#define DM_DIG_MAX                             0x3e
+#define DM_DIG_MIN                             0x1e
+
+#define DM_DIG_MAX_AP                          0x32
+#define DM_DIG_MIN_AP                          0x20
+
+#define DM_DIG_FA_UPPER                                0x3e
+#define DM_DIG_FA_LOWER                                0x1e
+#define DM_DIG_FA_TH0                          0x200
+#define DM_DIG_FA_TH1                          0x300
+#define DM_DIG_FA_TH2                          0x400
+
+#define DM_DIG_BACKOFF_MAX                     12
+#define DM_DIG_BACKOFF_MIN                     -4
+#define DM_DIG_BACKOFF_DEFAULT                 10
+
+#define RXPATHSELECTION_DIFF_TH                        18
+
+#define DM_RATR_STA_INIT                       0
+#define DM_RATR_STA_HIGH                       1
+#define DM_RATR_STA_MIDDLE                     2
+#define DM_RATR_STA_LOW                                3
+
+#define CTS2SELF_THVAL                         30
+#define REGC38_TH                              20
+
+#define TXHIGHPWRLEVEL_NORMAL                  0
+#define TXHIGHPWRLEVEL_LEVEL1                  1
+#define TXHIGHPWRLEVEL_LEVEL2                  2
+#define TXHIGHPWRLEVEL_BT1                     3
+#define TXHIGHPWRLEVEL_BT2                     4
+
+#define DM_TYPE_BYFW                           0
+#define DM_TYPE_BYDRIVER                       1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2                74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1                67
+#define TXPWRTRACK_MAX_IDX                     6
+
+/* Dynamic ATC switch */
+#define ATC_STATUS_OFF                         0x0 /* enable */
+#define        ATC_STATUS_ON                           0x1 /* disable */
+#define        CFO_THRESHOLD_XTAL                      10 /* kHz */
+#define        CFO_THRESHOLD_ATC                       80 /* kHz */
+
+enum FAT_STATE {
+       FAT_NORMAL_STATE        = 0,
+       FAT_TRAINING_STATE      = 1,
+};
+
+enum tag_dynamic_init_gain_operation_type_definition {
+       DIG_TYPE_THRESH_HIGH    = 0,
+       DIG_TYPE_THRESH_LOW     = 1,
+       DIG_TYPE_BACKOFF        = 2,
+       DIG_TYPE_RX_GAIN_MIN    = 3,
+       DIG_TYPE_RX_GAIN_MAX    = 4,
+       DIG_TYPE_ENABLE         = 5,
+       DIG_TYPE_DISABLE        = 6,
+       DIG_OP_TYPE_MAX
+};
+
+enum dm_1r_cca_e {
+       CCA_1R          = 0,
+       CCA_2R          = 1,
+       CCA_MAX         = 2,
+};
+
+enum dm_rf_e {
+       RF_SAVE         = 0,
+       RF_NORMAL       = 1,
+       RF_MAX          = 2,
+};
+
+enum dm_sw_ant_switch_e {
+       ANS_ANTENNA_B   = 1,
+       ANS_ANTENNA_A   = 2,
+       ANS_ANTENNA_MAX = 3,
+};
+
+enum dm_dig_ext_port_alg_e {
+       DIG_EXT_PORT_STAGE_0    = 0,
+       DIG_EXT_PORT_STAGE_1    = 1,
+       DIG_EXT_PORT_STAGE_2    = 2,
+       DIG_EXT_PORT_STAGE_3    = 3,
+       DIG_EXT_PORT_STAGE_MAX  = 4,
+};
+
+enum dm_dig_connect_e {
+       DIG_STA_DISCONNECT      = 0,
+       DIG_STA_CONNECT         = 1,
+       DIG_STA_BEFORE_CONNECT  = 2,
+       DIG_MULTISTA_DISCONNECT = 3,
+       DIG_MULTISTA_CONNECT    = 4,
+       DIG_CONNECT_MAX
+};
+
+enum pwr_track_control_method {
+       BBSWING,
+       TXAGC
+};
+
+#define BT_RSSI_STATE_NORMAL_POWER      BIT_OFFSET_LEN_MASK_32(0, 1)
+#define BT_RSSI_STATE_AMDPU_OFF         BIT_OFFSET_LEN_MASK_32(1, 1)
+#define BT_RSSI_STATE_SPECIAL_LOW       BIT_OFFSET_LEN_MASK_32(2, 1)
+#define BT_RSSI_STATE_BG_EDCA_LOW       BIT_OFFSET_LEN_MASK_32(3, 1)
+#define BT_RSSI_STATE_TXPOWER_LOW       BIT_OFFSET_LEN_MASK_32(4, 1)
+
+void rtl8723be_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw, u8 *pdesc,
+                                       u32 mac_id);
+void rtl8723be_dm_ant_sel_statistics(struct ieee80211_hw *hw, u8 antsel_tr_mux,
+                                    u32 mac_id, u32 rx_pwdb_all);
+void rtl8723be_dm_fast_antenna_trainning_callback(unsigned long data);
+void rtl8723be_dm_init(struct ieee80211_hw *hw);
+void rtl8723be_dm_watchdog(struct ieee80211_hw *hw);
+void rtl8723be_dm_write_dig(struct ieee80211_hw *hw, u8 current_igi);
+void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw);
+       /* 8723BE does not support ODM_BB_DYNAMIC_TXPWR */
+void rtl8723be_dm_txpower_track_adjust(struct ieee80211_hw *hw, u8 type,
+                                      u8 *pdirection, u32 *poutwrite_val);
+void rtl8723be_dm_init_edca_turbo(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/rtlwifi/rtl8723be/fw.c
new file mode 100644 (file)
index 0000000..f856be6
--- /dev/null
@@ -0,0 +1,620 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "reg.h"
+#include "def.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+
+static bool _rtl8723be_check_fw_read_last_h2c(struct ieee80211_hw *hw,
+                                             u8 boxnum)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 val_hmetfr;
+       bool result = false;
+
+       val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+       if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+               result = true;
+       return result;
+}
+
+static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
+                                       u32 cmd_len, u8 *p_cmdbuffer)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 boxnum;
+       u16 box_reg = 0, box_extreg = 0;
+       u8 u1b_tmp;
+       bool isfw_read = false;
+       u8 buf_index = 0;
+       bool bwrite_success = false;
+       u8 wait_h2c_limit = 100;
+       u8 wait_writeh2c_limit = 100;
+       u8 boxcontent[4], boxextcontent[4];
+       u32 h2c_waitcounter = 0;
+       unsigned long flag;
+       u8 idx;
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+
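+       /* Claim the H2C mailbox: if another writer is in progress,
+        * poll every 100 us and give up after roughly 100 ms.
+        */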
+       while (true) {
+               spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+               if (rtlhal->h2c_setinprogress) {
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                "H2C set in progress! Wait to set "
+                                "element_id(%d).\n", element_id);
+
+                       while (rtlhal->h2c_setinprogress) {
+                               spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+                                                      flag);
+                               h2c_waitcounter++;
+                               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                        "Wait 100 us (%d times)...\n",
+                                        h2c_waitcounter);
+                               udelay(100);
+
+                               if (h2c_waitcounter > 1000)
+                                       return;
+                               spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+                                                 flag);
+                       }
+                       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+               } else {
+                       rtlhal->h2c_setinprogress = true;
+                       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+                       break;
+               }
+       }
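+       /* Write the command into the next HMEBOX, but only after the
+        * firmware has read out the previous content of that box.
+        */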
+       while (!bwrite_success) {
+               wait_writeh2c_limit--;
+               if (wait_writeh2c_limit == 0) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "Write H2C failed: no trigger "
+                                "for FW INT!\n");
+                       break;
+               }
+               boxnum = rtlhal->last_hmeboxnum;
+               switch (boxnum) {
+               case 0:
+                       box_reg = REG_HMEBOX_0;
+                       box_extreg = REG_HMEBOX_EXT_0;
+                       break;
+               case 1:
+                       box_reg = REG_HMEBOX_1;
+                       box_extreg = REG_HMEBOX_EXT_1;
+                       break;
+               case 2:
+                       box_reg = REG_HMEBOX_2;
+                       box_extreg = REG_HMEBOX_EXT_2;
+                       break;
+               case 3:
+                       box_reg = REG_HMEBOX_3;
+                       box_extreg = REG_HMEBOX_EXT_3;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "switch case not processed\n");
+                       break;
+               }
+               isfw_read = _rtl8723be_check_fw_read_last_h2c(hw, boxnum);
+               while (!isfw_read) {
+                       wait_h2c_limit--;
+                       if (wait_h2c_limit == 0) {
+                               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                        "Waiting too long for FW to read "
+                                        "clear HMEBox(%d)!\n", boxnum);
+                               break;
+                       }
+                       udelay(10);
+
+                       isfw_read = _rtl8723be_check_fw_read_last_h2c(hw,
+                                                               boxnum);
+                       u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                "Waiting for FW to read clear HMEBox(%d)!!! 0x130 = %2x\n",
+                                boxnum, u1b_tmp);
+               }
+               if (!isfw_read) {
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                                "Write H2C register BOX[%d] failed! "
+                                "FW did not read it.\n", boxnum);
+                       break;
+               }
+               memset(boxcontent, 0, sizeof(boxcontent));
+               memset(boxextcontent, 0, sizeof(boxextcontent));
+               boxcontent[0] = element_id;
+               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                        "Write element_id box_reg(%4x) = %2x\n",
+                        box_reg, element_id);
+
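+               /* Commands of up to 3 bytes fit after the element id in
+                * the 4-byte box register; longer commands put the extra
+                * bytes in the extension register, which is written first.
+                */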
+               switch (cmd_len) {
+               case 1:
+               case 2:
+               case 3:
+                       /*boxcontent[0] &= ~(BIT(7));*/
+                       memcpy((u8 *)(boxcontent) + 1,
+                              p_cmdbuffer + buf_index, cmd_len);
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               case 4:
+               case 5:
+               case 6:
+               case 7:
+                       /*boxcontent[0] |= (BIT(7));*/
+                       memcpy((u8 *)(boxextcontent),
+                              p_cmdbuffer + buf_index+3, cmd_len-3);
+                       memcpy((u8 *)(boxcontent) + 1,
+                              p_cmdbuffer + buf_index, 3);
+
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_extreg + idx,
+                                              boxextcontent[idx]);
+                       }
+                       for (idx = 0; idx < 4; idx++) {
+                               rtl_write_byte(rtlpriv, box_reg + idx,
+                                              boxcontent[idx]);
+                       }
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "switch case not processed\n");
+                       break;
+               }
+               bwrite_success = true;
+
+               rtlhal->last_hmeboxnum = boxnum + 1;
+               if (rtlhal->last_hmeboxnum == 4)
+                       rtlhal->last_hmeboxnum = 0;
+
+               RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
+                        "pHalData->last_hmeboxnum  = %d\n",
+                        rtlhal->last_hmeboxnum);
+       }
+       spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+       rtlhal->h2c_setinprogress = false;
+       spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+}
+
+void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+                           u32 cmd_len, u8 *p_cmdbuffer)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u32 tmp_cmdbuf[2];
+
+       if (!rtlhal->fw_ready) {
+               RT_ASSERT(false,
+                         "return H2C cmd because of Fw download fail!!!\n");
+               return;
+       }
+       memset(tmp_cmdbuf, 0, sizeof(tmp_cmdbuf));
+       memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
+       _rtl8723be_fill_h2c_command(hw, element_id, cmd_len,
+                                   (u8 *)&tmp_cmdbuf);
+}
+
+void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 u1_h2c_set_pwrmode[H2C_8723BE_PWEMODE_LENGTH] = { 0 };
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       u8 rlbm, power_state = 0;
+       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+
+       SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
+       rlbm = 0; /* YJ, temp, 120316. FW does not support RLBM = 2 yet. */
+       SET_H2CCMD_PWRMODE_PARM_RLBM(u1_h2c_set_pwrmode, rlbm);
+       SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
+                                        (rtlpriv->mac80211.p2p) ?
+                                        ppsc->smart_ps : 1);
+       SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(u1_h2c_set_pwrmode,
+                                              ppsc->reg_max_lps_awakeintvl);
+       SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(u1_h2c_set_pwrmode, 0);
+       if (mode == FW_PS_ACTIVE_MODE)
+               power_state |= FW_PWR_STATE_ACTIVE;
+       else
+               power_state |= FW_PWR_STATE_RF_OFF;
+       SET_H2CCMD_PWRMODE_PARM_PWR_STATE(u1_h2c_set_pwrmode, power_state);
+
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+                     "rtl8723be_set_fw_pwrmode_cmd(): u1_h2c_set_pwrmode\n",
+                     u1_h2c_set_pwrmode, H2C_8723BE_PWEMODE_LENGTH);
+       rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_SETPWRMODE,
+                              H2C_8723BE_PWEMODE_LENGTH,
+                              u1_h2c_set_pwrmode);
+}
+
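+/* Send a firmware command packet (e.g. the reserved pages) through the
+ * beacon queue, dropping whatever skb is currently queued there.
+ */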
+static bool _rtl8723be_cmd_send_packet(struct ieee80211_hw *hw,
+                                      struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring;
+       struct rtl_tx_desc *pdesc;
+       struct sk_buff *pskb = NULL;
+       u8 own;
+       unsigned long flags;
+
+       ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+       pskb = __skb_dequeue(&ring->queue);
+       if (pskb)
+               kfree_skb(pskb);
+
+       spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+       pdesc = &ring->desc[0];
+       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+
+       rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
+
+       __skb_queue_tail(&ring->queue, skb);
+
+       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+       rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+       return true;
+}
+#define BEACON_PG              0 /* ->1 */
+#define PSPOLL_PG              2
+#define NULL_PG                        3
+#define PROBERSP_PG            4 /* ->5 */
+
+#define TOTAL_RESERVED_PKT_LEN 768
+
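+/* Six reserved 128-byte pages: beacon (2 pages), ps-poll, null data and
+ * probe response (2 pages), as laid out by the page defines above.
+ */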
+static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
+       /* page 0 beacon */
+       0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
+       0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+       0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x20, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x64, 0x00, 0x10, 0x04, 0x00, 0x05, 0x54, 0x65,
+       0x73, 0x74, 0x32, 0x01, 0x08, 0x82, 0x84, 0x0B,
+       0x16, 0x24, 0x30, 0x48, 0x6C, 0x03, 0x01, 0x06,
+       0x06, 0x02, 0x00, 0x00, 0x2A, 0x01, 0x02, 0x32,
+       0x04, 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C,
+       0x09, 0x03, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x3D, 0x00, 0xDD, 0x07, 0x00, 0xE0, 0x4C,
+       0x02, 0x02, 0x00, 0x00, 0xDD, 0x18, 0x00, 0x50,
+       0xF2, 0x01, 0x01, 0x00, 0x00, 0x50, 0xF2, 0x04,
+       0x01, 0x00, 0x00, 0x50, 0xF2, 0x04, 0x01, 0x00,
+
+       /* page 1 beacon */
+       0x00, 0x50, 0xF2, 0x02, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x10, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 2  ps-poll */
+       0xA4, 0x10, 0x01, 0xC0, 0xEC, 0x1A, 0x59, 0x0B,
+       0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x18, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 3  null */
+       0x48, 0x01, 0x00, 0x00, 0xEC, 0x1A, 0x59, 0x0B,
+       0xAD, 0xD4, 0x00, 0xE0, 0x4C, 0x02, 0xB1, 0x78,
+       0xEC, 0x1A, 0x59, 0x0B, 0xAD, 0xD4, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x72, 0x00, 0x28, 0x8C, 0x00, 0x12, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 4  probe_resp */
+       0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
+       0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
+       0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
+       0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
+       0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
+       0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
+       0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
+       0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
+       0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
+       0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
+       0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
+       0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+       /* page 5  probe_resp */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+                                 bool dl_finished)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct sk_buff *skb = NULL;
+
+       u32 totalpacketlen;
+       bool rtstatus;
+       u8 u1rsvdpageloc[5] = { 0 };
+       bool dlok = false;
+
+       u8 *beacon;
+       u8 *p_pspoll;
+       u8 *nullfunc;
+       u8 *p_probersp;
+       /*---------------------------------------------------------
+        *                      (1) beacon
+        *---------------------------------------------------------
+        */
+       beacon = &reserved_page_packet[BEACON_PG * 128];
+       SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
+
+       /*-------------------------------------------------------
+        *                      (2) ps-poll
+        *-------------------------------------------------------
+        */
+       p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
+       SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
+       SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
+       SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
+
+       SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
+
+       /*--------------------------------------------------------
+        *                      (3) null data
+        *--------------------------------------------------------
+        */
+       nullfunc = &reserved_page_packet[NULL_PG * 128];
+       SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
+       SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
+
+       /*---------------------------------------------------------
+        *                      (4) probe response
+        *---------------------------------------------------------
+        */
+       p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
+       SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
+       SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
+       SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
+
+       SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
+
+       totalpacketlen = TOTAL_RESERVED_PKT_LEN;
+
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
+                     "rtl8723be_set_fw_rsvdpagepkt(): "
+                     "HW_VAR_SET_TX_CMD: ALL\n",
+                     &reserved_page_packet[0], totalpacketlen);
+       RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
+                     "rtl8723be_set_fw_rsvdpagepkt(): "
+                     "HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3);
+
+
+       skb = dev_alloc_skb(totalpacketlen);
+       if (!skb)
+               return;
+       memcpy((u8 *)skb_put(skb, totalpacketlen),
+              &reserved_page_packet, totalpacketlen);
+
+       rtstatus = _rtl8723be_cmd_send_packet(hw, skb);
+
+       if (rtstatus)
+               dlok = true;
+
+       if (dlok) {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                        "Set RSVD page location to Fw.\n");
+               RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
+                             u1rsvdpageloc, 3);
+               rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RSVDPAGE,
+                                      sizeof(u1rsvdpageloc), u1rsvdpageloc);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        "Set RSVD page location to Fw failed!\n");
+       }
+}
+
+/* Should check whether the FW supports P2P or not. */
+static void rtl8723be_set_p2p_ctw_period_cmd(struct ieee80211_hw *hw,
+                                            u8 ctwindow)
+{
+       u8 u1_ctwindow_period[1] = {ctwindow};
+
+       rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_CTW_CMD, 1,
+                              u1_ctwindow_period);
+}
+
+void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
+                                     u8 p2p_ps_state)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_p2p_ps_info *p2pinfo = &(rtlps->p2p_ps_info);
+       struct p2p_ps_offload_t *p2p_ps_offload = &rtlhal->p2p_ps_offload;
+       u8 i;
+       u16 ctwindow;
+       u32 start_time, tsf_low;
+
+       switch (p2p_ps_state) {
+       case P2P_PS_DISABLE:
+               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+               memset(p2p_ps_offload, 0, sizeof(struct p2p_ps_offload_t));
+               break;
+       case P2P_PS_ENABLE:
+               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+               /* update CTWindow value. */
+               if (p2pinfo->ctwindow > 0) {
+                       p2p_ps_offload->ctwindow_en = 1;
+                       ctwindow = p2pinfo->ctwindow;
+                       rtl8723be_set_p2p_ctw_period_cmd(hw, ctwindow);
+               }
+               /* hw only supports 2 sets of NoA */
+               for (i = 0; i < p2pinfo->noa_num; i++) {
+                       /* Select which NoA set the register
+                        * writes below configure.
+                        */
+                       rtl_write_byte(rtlpriv, 0x5cf, (i << 4));
+                       if (i == 0)
+                               p2p_ps_offload->noa0_en = 1;
+                       else
+                               p2p_ps_offload->noa1_en = 1;
+
+                       /* config P2P NoA Descriptor Register */
+                       rtl_write_dword(rtlpriv, 0x5E0,
+                                       p2pinfo->noa_duration[i]);
+                       rtl_write_dword(rtlpriv, 0x5E4,
+                                       p2pinfo->noa_interval[i]);
+
+                       /*Get Current TSF value */
+                       tsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
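+                       /* For NoA with count != 1, push the start time
+                        * past TSF + 50 TU; finite counts (!= 255) are
+                        * decremented for each skipped interval.
+                        */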
+                       start_time = p2pinfo->noa_start_time[i];
+                       if (p2pinfo->noa_count_type[i] != 1) {
+                               while (start_time <= (tsf_low + (50 * 1024))) {
+                                       start_time += p2pinfo->noa_interval[i];
+                                       if (p2pinfo->noa_count_type[i] != 255)
+                                               p2pinfo->noa_count_type[i]--;
+                               }
+                       }
+                       rtl_write_dword(rtlpriv, 0x5E8, start_time);
+                       rtl_write_dword(rtlpriv, 0x5EC,
+                                       p2pinfo->noa_count_type[i]);
+               }
+               if ((p2pinfo->opp_ps == 1) ||
+                   (p2pinfo->noa_num > 0)) {
+                       /* rst p2p circuit */
+                       rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, BIT(4));
+
+                       p2p_ps_offload->offload_en = 1;
+
+                       if (P2P_ROLE_GO == rtlpriv->mac80211.p2p) {
+                               p2p_ps_offload->role = 1;
+                               p2p_ps_offload->allstasleep = 0;
+                       } else {
+                               p2p_ps_offload->role = 0;
+                       }
+                       p2p_ps_offload->discovery = 0;
+               }
+               break;
+       case P2P_PS_SCAN:
+               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+               p2p_ps_offload->discovery = 1;
+               break;
+       case P2P_PS_SCAN_DONE:
+               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+               p2p_ps_offload->discovery = 0;
+               p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
+               break;
+       default:
+               break;
+       }
+       rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_P2P_PS_OFFLOAD, 1,
+                              (u8 *)p2p_ps_offload);
+}
+
+void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+       u8 u1_joinbssrpt_parm[1] = { 0 };
+
+       SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
+
+       rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_JOINBSSRPT, 1,
+                              u1_joinbssrpt_parm);
+}
+
+void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
+                                     u8 ap_offload_enable)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u8 u1_apoffload_parm[H2C_8723BE_AP_OFFLOAD_LENGTH] = { 0 };
+
+       SET_H2CCMD_AP_OFFLOAD_ON(u1_apoffload_parm, ap_offload_enable);
+       SET_H2CCMD_AP_OFFLOAD_HIDDEN(u1_apoffload_parm, mac->hiddenssid);
+       SET_H2CCMD_AP_OFFLOAD_DENYANY(u1_apoffload_parm, 0);
+
+       rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_AP_OFFLOAD,
+                              H2C_8723BE_AP_OFFLOAD_LENGTH, u1_apoffload_parm);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/rtlwifi/rtl8723be/fw.h
new file mode 100644 (file)
index 0000000..31eec28
--- /dev/null
@@ -0,0 +1,248 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE__FW__H__
+#define __RTL8723BE__FW__H__
+
+#define FW_8192C_SIZE                          0x8000
+#define FW_8192C_START_ADDRESS                 0x1000
+#define FW_8192C_END_ADDRESS                   0x5FFF
+#define FW_8192C_PAGE_SIZE                     4096
+#define FW_8192C_POLLING_DELAY                 5
+#define FW_8192C_POLLING_TIMEOUT_COUNT         6000
+
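+/* The firmware image carries a header when the signature field reads 0x530x. */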
+#define IS_FW_HEADER_EXIST(_pfwhdr)    \
+       ((_pfwhdr->signature&0xFFF0) == 0x5300)
+#define USE_OLD_WOWLAN_DEBUG_FW                        0
+
+#define H2C_8723BE_RSVDPAGE_LOC_LEN            5
+#define H2C_8723BE_PWEMODE_LENGTH              5
+#define H2C_8723BE_JOINBSSRPT_LENGTH           1
+#define H2C_8723BE_AP_OFFLOAD_LENGTH           3
+#define H2C_8723BE_WOWLAN_LENGTH               3
+#define H2C_8723BE_KEEP_ALIVE_CTRL_LENGTH      3
+#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN                1
+#else
+#define H2C_8723BE_REMOTE_WAKE_CTRL_LEN                3
+#endif
+#define H2C_8723BE_AOAC_GLOBAL_INFO_LEN                2
+#define H2C_8723BE_AOAC_RSVDPAGE_LOC_LEN       7
+
+
+/* FW PS state for RPWM.
+ * BIT[2:0] = HW state
+ * BIT[3] = Protocol PS state, 1: register active state, 0: register sleep state
+ * BIT[4] = sub-state
+ */
+#define        FW_PS_GO_ON             BIT(0)
+#define        FW_PS_TX_NULL           BIT(1)
+#define        FW_PS_RF_ON             BIT(2)
+#define        FW_PS_REGISTER_ACTIVE   BIT(3)
+
+#define        FW_PS_DPS               BIT(0)
+#define        FW_PS_LCLK              (FW_PS_DPS)
+#define        FW_PS_RF_OFF            BIT(1)
+#define        FW_PS_ALL_ON            BIT(2)
+#define        FW_PS_ST_ACTIVE BIT(3)
+#define        FW_PS_ISR_ENABLE        BIT(4)
+#define        FW_PS_IMR_ENABLE        BIT(5)
+
+
+#define        FW_PS_ACK               BIT(6)
+#define        FW_PS_TOGGLE            BIT(7)
+
+/* 88E RPWM value */
+/* BIT[0] = 1: 32k, 0: 40M */
+#define        FW_PS_CLOCK_OFF         BIT(0)          /* 32k*/
+#define        FW_PS_CLOCK_ON          0               /*40M*/
+
+#define        FW_PS_STATE_MASK        (0x0F)
+#define        FW_PS_STATE_HW_MASK     (0x07)
+/*ISR_ENABLE, IMR_ENABLE, and PS mode should be inherited.*/
+#define        FW_PS_STATE_INT_MASK    (0x3F)
+
+#define        FW_PS_STATE(x)  (FW_PS_STATE_MASK & (x))
+#define        FW_PS_STATE_HW(x)       (FW_PS_STATE_HW_MASK & (x))
+#define        FW_PS_STATE_INT(x)      (FW_PS_STATE_INT_MASK & (x))
+#define        FW_PS_ISR_VAL(x)        ((x) & 0x70)
+#define        FW_PS_IMR_MASK(x)       ((x) & 0xDF)
+#define        FW_PS_KEEP_IMR(x)       ((x) & 0x20)
+
+
+#define        FW_PS_STATE_S0          (FW_PS_DPS)
+#define        FW_PS_STATE_S1          (FW_PS_LCLK)
+#define        FW_PS_STATE_S2          (FW_PS_RF_OFF)
+#define        FW_PS_STATE_S3          (FW_PS_ALL_ON)
+#define        FW_PS_STATE_S4          ((FW_PS_ST_ACTIVE) | (FW_PS_ALL_ON))
+
+/* ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))*/
+#define        FW_PS_STATE_ALL_ON_88E  (FW_PS_CLOCK_ON)
+/* (FW_PS_RF_ON)*/
+#define        FW_PS_STATE_RF_ON_88E   (FW_PS_CLOCK_ON)
+/* 0x0*/
+#define        FW_PS_STATE_RF_OFF_88E  (FW_PS_CLOCK_ON)
+/* (FW_PS_STATE_RF_OFF)*/
+#define        FW_PS_STATE_RF_OFF_LOW_PWR_88E  (FW_PS_CLOCK_OFF)
+
+#define        FW_PS_STATE_ALL_ON_92C  (FW_PS_STATE_S4)
+#define        FW_PS_STATE_RF_ON_92C           (FW_PS_STATE_S3)
+#define        FW_PS_STATE_RF_OFF_92C  (FW_PS_STATE_S2)
+#define        FW_PS_STATE_RF_OFF_LOW_PWR_92C  (FW_PS_STATE_S1)
+
+
+/* For 88E H2C PwrMode Cmd ID 5.*/
+#define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
+#define        FW_PWR_STATE_RF_OFF     0
+
+#define        FW_PS_IS_ACK(x) ((x) & FW_PS_ACK)
+#define        FW_PS_IS_CLK_ON(x)      ((x) & (FW_PS_RF_OFF | FW_PS_ALL_ON))
+#define        FW_PS_IS_RF_ON(x)       ((x) & (FW_PS_ALL_ON))
+#define        FW_PS_IS_ACTIVE(x)      ((x) & (FW_PS_ST_ACTIVE))
+#define        FW_PS_IS_CPWM_INT(x)    ((x) & 0x40)
+
+#define        FW_CLR_PS_STATE(x)      ((x) = ((x) & (0xF0)))
+
+#define        IS_IN_LOW_POWER_STATE_88E(fwpsstate)            \
+                       (FW_PS_STATE(fwpsstate) == FW_PS_CLOCK_OFF)
+
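+/* Number of 128-byte pages needed to hold _len bytes (rounded up). */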
+#define pagenum_128(_len)      (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
+
+#define SET_88E_H2CCMD_WOWLAN_FUNC_ENABLE(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_PATTERN_MATCH_ENABLE(__ph2ccmd, __val)   \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_MAGIC_PKT_ENABLE(__ph2ccmd, __val)       \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_UNICAST_PKT_ENABLE(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_ALL_PKT_DROP(__ph2ccmd, __val)           \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 4, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIO_ACTIVE(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 5, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_REKEY_WAKE_UP(__ph2ccmd, __val)          \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 6, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_DISCONNECT_WAKE_UP(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 7, 1, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIONUM(__ph2ccmd, __val)                        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_WOWLAN_GPIO_DURATION(__ph2ccmd, __val)          \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+
+#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)                 \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_RLBM(__ph2ccmd, __val)                 \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 4, __val)
+#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val)             \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 4, 4, __val)
+#define SET_H2CCMD_PWRMODE_PARM_AWAKE_INTERVAL(__ph2ccmd, __val)       \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_ALL_QUEUE_UAPSD(__ph2ccmd, __val)      \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_H2CCMD_PWRMODE_PARM_PWR_STATE(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+#define GET_88E_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd)                    \
+       LE_BITS_TO_1BYTE(__ph2ccmd, 0, 8)
+
+#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val)               \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)            \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+
+/* AP_OFFLOAD */
+#define SET_H2CCMD_AP_OFFLOAD_ON(__ph2ccmd, __val)                     \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_HIDDEN(__ph2ccmd, __val)                 \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_DENYANY(__ph2ccmd, __val)                        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_H2CCMD_AP_OFFLOAD_WAKEUP_EVT_RPT(__ph2ccmd, __val)         \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+
+/* Keep Alive Control*/
+#define SET_88E_H2CCMD_KEEP_ALIVE_ENABLE(__ph2ccmd, __val)             \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#define SET_88E_H2CCMD_KEEP_ALIVE_ACCPEPT_USER_DEFINED(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_KEEP_ALIVE_PERIOD(__ph2ccmd, __val)             \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+
+/*REMOTE_WAKE_CTRL */
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_EN(__ph2ccmd, __val)           \
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 1, __val)
+#if (USE_OLD_WOWLAN_DEBUG_FW == 0)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 1, 1, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_NDP_OFFLOAD_EN(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 2, 1, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GTK_OFFLOAD_EN(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 3, 1, __val)
+#else
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_REMOTE_WAKE_CTRL_GROUP_ENC_ALG(__ph2ccmd, __val)        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#endif
+
+/* GTK_OFFLOAD */
+#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(__ph2ccmd, __val)        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+
+/* AOAC_RSVDPAGE_LOC */
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_REM_WAKE_CTRL_INFO(__ph2ccmd, __val)\
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd), 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_NEIGHBOR_ADV(__ph2ccmd, __val)        \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_RSP(__ph2ccmd, __val)     \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+3, 0, 8, __val)
+#define SET_88E_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_INFO(__ph2ccmd, __val)    \
+       SET_BITS_TO_LE_1BYTE((__ph2ccmd)+4, 0, 8, __val)
+
+void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
+void rtl8723be_set_fw_ap_off_load_cmd(struct ieee80211_hw *hw,
+                                     u8 ap_offload_enable);
+void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+                           u32 cmd_len, u8 *p_cmdbuffer);
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
+                                 bool dl_finished);
+void rtl8723be_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+int rtl8723be_download_fw(struct ieee80211_hw *hw,
+                         bool buse_wake_on_wlan_fw);
+void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
+                                     u8 p2p_ps_state);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
new file mode 100644 (file)
index 0000000..0fdf090
--- /dev/null
@@ -0,0 +1,2523 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../efuse.h"
+#include "../base.h"
+#include "../regd.h"
+#include "../cam.h"
+#include "../ps.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "dm.h"
+#include "../rtl8723com/dm_common.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "led.h"
+#include "hw.h"
+#include "pwrseq.h"
+#include "../btcoexist/rtl_btc.h"
+
+#define LLT_CONFIG     5
+
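+/* Unmap and free every skb still sitting on the beacon TX ring. */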
+static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+       while (skb_queue_len(&ring->queue)) {
+               struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+               struct sk_buff *skb = __skb_dequeue(&ring->queue);
+
+               pci_unmap_single(rtlpci->pdev,
+                                rtlpriv->cfg->ops->get_desc(
+                                (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
+                                skb->len, PCI_DMA_TODEVICE);
+               kfree_skb(skb);
+               ring->idx = (ring->idx + 1) % ring->entries;
+       }
+}
+
+static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+                                       u8 set_bits, u8 clear_bits)
+{
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpci->reg_bcn_ctrl_val |= set_bits;
+       rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
+
+       rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
+}
+
+static void _rtl8723be_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 tmp1byte;
+
+       tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+       rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+       rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+       tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+       tmp1byte &= ~(BIT(0));
+       rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723be_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 tmp1byte;
+
+       tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+       rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+       rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+       tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+       tmp1byte |= BIT(1);
+       rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+
+static void _rtl8723be_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+       _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl8723be_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+       _rtl8723be_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
+                                      bool need_turn_off_clk)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool support_remote_wake_up;
+       u32 count = 0, isr_regaddr, content;
+       bool schedule_timer = need_turn_off_clk;
+       rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
+                                     (u8 *)(&support_remote_wake_up));
+
+       if (!rtlhal->fw_ready)
+               return;
+       if (!rtlpriv->psc.fw_current_inpsmode)
+               return;
+
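+       /* Serialize firmware clock changes: wait (100 us per poll, up to
+        * ~100 ms) for any change already in progress before proceeding.
+        */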
+       while (1) {
+               spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+               if (rtlhal->fw_clk_change_in_progress) {
+                       while (rtlhal->fw_clk_change_in_progress) {
+                               spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+                               count++;
+                               udelay(100);
+                               if (count > 1000)
+                                       return;
+                               spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+                       }
+                       spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+               } else {
+                       rtlhal->fw_clk_change_in_progress = false;
+                       spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+                       break;
+               }
+       }
+       if (IS_IN_LOW_POWER_STATE_88E(rtlhal->fw_ps_state)) {
+               rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_SET_RPWM,
+                                             &rpwm_val);
+               if (FW_PS_IS_ACK(rpwm_val)) {
+                       isr_regaddr = REG_HISR;
+                       content = rtl_read_dword(rtlpriv, isr_regaddr);
+                       while (!(content & IMR_CPWM) && (count < 500)) {
+                               udelay(50);
+                               count++;
+                               content = rtl_read_dword(rtlpriv, isr_regaddr);
+                       }
+
+                       if (content & IMR_CPWM) {
+                               rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
+                               rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
+                               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                                        "Received CPWM INT! Set "
+                                        "pHalData->FwPSState = %X\n",
+                                        rtlhal->fw_ps_state);
+                       }
+               }
+               spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+               rtlhal->fw_clk_change_in_progress = false;
+               spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+               if (schedule_timer) {
+                       mod_timer(&rtlpriv->works.fw_clockoff_timer,
+                                 jiffies + MSECS(10));
+               }
+       } else  {
+               spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+               rtlhal->fw_clk_change_in_progress = false;
+               spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+       }
+}
+
+static void _rtl8723be_set_fw_clock_off(struct ieee80211_hw *hw, u8 rpwm_val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring;
+       enum rf_pwrstate rtstate;
+       bool schedule_timer = false;
+       u8 queue;
+
+       if (!rtlhal->fw_ready)
+               return;
+       if (!rtlpriv->psc.fw_current_inpsmode)
+               return;
+       if (!rtlhal->allow_sw_to_change_hwclc)
+               return;
+       rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, (u8 *)(&rtstate));
+       if (rtstate == ERFOFF || rtlpriv->psc.inactive_pwrstate == ERFOFF)
+               return;
+
+       for (queue = 0; queue < RTL_PCI_MAX_TX_QUEUE_COUNT; queue++) {
+               ring = &rtlpci->tx_ring[queue];
+               if (skb_queue_len(&ring->queue)) {
+                       schedule_timer = true;
+                       break;
+               }
+       }
+       if (schedule_timer) {
+               mod_timer(&rtlpriv->works.fw_clockoff_timer,
+                         jiffies + MSECS(10));
+               return;
+       }
+       if (FW_PS_STATE(rtlhal->fw_ps_state) !=
+           FW_PS_STATE_RF_OFF_LOW_PWR_88E) {
+               spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+               if (!rtlhal->fw_clk_change_in_progress) {
+                       rtlhal->fw_clk_change_in_progress = true;
+                       spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+                       rtlhal->fw_ps_state = FW_PS_STATE(rpwm_val);
+                       rtl_write_word(rtlpriv, REG_HISR, 0x0100);
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
+                                                     &rpwm_val);
+                       spin_lock_bh(&rtlpriv->locks.fw_ps_lock);
+                       rtlhal->fw_clk_change_in_progress = false;
+                       spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+               } else {
+                       spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+                       mod_timer(&rtlpriv->works.fw_clockoff_timer,
+                                 jiffies + MSECS(10));
+               }
+       }
+}
+
+static void _rtl8723be_set_fw_ps_rf_on(struct ieee80211_hw *hw)
+{
+       u8 rpwm_val = 0;
+       rpwm_val |= (FW_PS_STATE_RF_OFF_88E | FW_PS_ACK);
+       _rtl8723be_set_fw_clock_on(hw, rpwm_val, true);
+}
+
+static void _rtl8723be_fwlps_leave(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool fw_current_inps = false;
+       u8 rpwm_val = 0, fw_pwrmode = FW_PS_ACTIVE_MODE;
+
+       if (ppsc->low_power_enable) {
+               rpwm_val = (FW_PS_STATE_ALL_ON_88E | FW_PS_ACK);/* RF on */
+               _rtl8723be_set_fw_clock_on(hw, rpwm_val, false);
+               rtlhal->allow_sw_to_change_hwclc = false;
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+                                             &fw_pwrmode);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                             (u8 *)(&fw_current_inps));
+       } else {
+               rpwm_val = FW_PS_STATE_ALL_ON_88E;      /* RF on */
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+                                             &fw_pwrmode);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                             (u8 *)(&fw_current_inps));
+       }
+}
+
+static void _rtl8723be_fwlps_enter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       bool fw_current_inps = true;
+       u8 rpwm_val;
+
+       if (ppsc->low_power_enable) {
+               rpwm_val = FW_PS_STATE_RF_OFF_LOW_PWR_88E;      /* RF off */
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                             (u8 *)(&fw_current_inps));
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+                                             &ppsc->fwctrl_psmode);
+               rtlhal->allow_sw_to_change_hwclc = true;
+               _rtl8723be_set_fw_clock_off(hw, rpwm_val);
+
+       } else {
+               rpwm_val = FW_PS_STATE_RF_OFF_88E;      /* RF off */
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
+                                             (u8 *)(&fw_current_inps));
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE,
+                                             &ppsc->fwctrl_psmode);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM, &rpwm_val);
+       }
+}
+
+void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       switch (variable) {
+       case HW_VAR_RCR:
+               *((u32 *)(val)) = rtlpci->receive_config;
+               break;
+       case HW_VAR_RF_STATE:
+               *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+               break;
+       case HW_VAR_FWLPS_RF_ON: {
+               enum rf_pwrstate rfstate;
+               u32 val_rcr;
+
+               rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+                                             (u8 *)(&rfstate));
+               if (rfstate == ERFOFF) {
+                       *((bool *)(val)) = true;
+               } else {
+                       val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+                       val_rcr &= 0x00070000;
+                       if (val_rcr)
+                               *((bool *)(val)) = false;
+                       else
+                               *((bool *)(val)) = true;
+               }
+               break; }
+       case HW_VAR_FW_PSMODE_STATUS:
+               *((bool *)(val)) = ppsc->fw_current_inpsmode;
+               break;
+       case HW_VAR_CORRECT_TSF: {
+               u64 tsf;
+               u32 *ptsf_low = (u32 *)&tsf;
+               u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+               *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+               *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+
+               *((u64 *)(val)) = tsf;
+
+               break; }
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not processed %x\n", variable);
+               break;
+       }
+}
+
+void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       u8 idx;
+
+       switch (variable) {
+       case HW_VAR_ETHER_ADDR:
+               for (idx = 0; idx < ETH_ALEN; idx++)
+                       rtl_write_byte(rtlpriv, (REG_MACID + idx), val[idx]);
+               break;
+       case HW_VAR_BASIC_RATE: {
+               u16 rate_cfg = ((u16 *)val)[0];
+               u8 rate_index = 0;
+               rate_cfg = rate_cfg & 0x15f;
+               rate_cfg |= 0x01;
+               rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+               rtl_write_byte(rtlpriv, REG_RRSR + 1, (rate_cfg >> 8) & 0xff);
+               while (rate_cfg > 0x1) {
+                       rate_cfg = (rate_cfg >> 1);
+                       rate_index++;
+               }
+               rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, rate_index);
+               break; }
+       case HW_VAR_BSSID:
+               for (idx = 0; idx < ETH_ALEN; idx++)
+                       rtl_write_byte(rtlpriv, (REG_BSSID + idx), val[idx]);
+               break;
+       case HW_VAR_SIFS:
+               rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+               rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+
+               rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+               rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+
+               if (!mac->ht_enable)
+                       rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM, 0x0e0e);
+               else
+                       rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+                                      *((u16 *)val));
+               break;
+       case HW_VAR_SLOT_TIME: {
+               u8 e_aci;
+
+               RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                        "HW_VAR_SLOT_TIME %x\n", val[0]);
+
+               rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+
+               for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+                                                     &e_aci);
+               }
+               break; }
+       case HW_VAR_ACK_PREAMBLE: {
+               u8 reg_tmp;
+               u8 short_preamble = (bool)*val;
+               reg_tmp = rtl_read_byte(rtlpriv, REG_TRXPTCL_CTL + 2);
+               if (short_preamble) {
+                       reg_tmp |= 0x02;
+                       rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+               } else {
+                       reg_tmp &= 0xFD;
+                       rtl_write_byte(rtlpriv, REG_TRXPTCL_CTL + 2, reg_tmp);
+               }
+               break; }
+       case HW_VAR_WPA_CONFIG:
+               rtl_write_byte(rtlpriv, REG_SECCFG, *val);
+               break;
+       case HW_VAR_AMPDU_MIN_SPACE: {
+               u8 min_spacing_to_set;
+               u8 sec_min_space;
+
+               min_spacing_to_set = *val;
+               if (min_spacing_to_set <= 7) {
+                       sec_min_space = 0;
+
+                       if (min_spacing_to_set < sec_min_space)
+                               min_spacing_to_set = sec_min_space;
+
+                       mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
+                                             min_spacing_to_set);
+
+                       *val = min_spacing_to_set;
+
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+                                mac->min_space_cfg);
+
+                       rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+                                      mac->min_space_cfg);
+               }
+               break; }
+       case HW_VAR_SHORTGI_DENSITY: {
+               u8 density_to_set;
+
+               density_to_set = *val;
+               mac->min_space_cfg |= (density_to_set << 3);
+
+               RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                        "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+                        mac->min_space_cfg);
+
+               rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+                              mac->min_space_cfg);
+               break; }
+       case HW_VAR_AMPDU_FACTOR: {
+               u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+               u8 factor_toset;
+               u8 *p_regtoset = NULL;
+               u8 index = 0;
+
+               p_regtoset = regtoset_normal;
+
+               factor_toset = *val;
+               if (factor_toset <= 3) {
+                       factor_toset = (1 << (factor_toset + 2));
+                       if (factor_toset > 0xf)
+                               factor_toset = 0xf;
+
+                       for (index = 0; index < 4; index++) {
+                               if ((p_regtoset[index] & 0xf0) >
+                                   (factor_toset << 4))
+                                       p_regtoset[index] =
+                                               (p_regtoset[index] & 0x0f) |
+                                               (factor_toset << 4);
+
+                               if ((p_regtoset[index] & 0x0f) > factor_toset)
+                                       p_regtoset[index] =
+                                               (p_regtoset[index] & 0xf0) |
+                                               (factor_toset);
+
+                               rtl_write_byte(rtlpriv,
+                                              (REG_AGGLEN_LMT + index),
+                                              p_regtoset[index]);
+                       }
+                       RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
+                                "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+                                factor_toset);
+               }
+               break; }
+       case HW_VAR_AC_PARAM: {
+               u8 e_aci = *val;
+               rtl8723_dm_init_edca_turbo(hw);
+
+               if (rtlpci->acm_method != EACMWAY2_SW)
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
+                                                     &e_aci);
+               break; }
+       case HW_VAR_ACM_CTRL: {
+               u8 e_aci = *val;
+               union aci_aifsn *p_aci_aifsn =
+                               (union aci_aifsn *)(&(mac->ac[0].aifs));
+               u8 acm = p_aci_aifsn->f.acm;
+               u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+               acm_ctrl |= (rtlpci->acm_method == EACMWAY2_SW) ? 0x0 : 0x1;
+
+               if (acm) {
+                       switch (e_aci) {
+                       case AC0_BE:
+                               acm_ctrl |= ACMHW_BEQEN;
+                               break;
+                       case AC2_VI:
+                               acm_ctrl |= ACMHW_VIQEN;
+                               break;
+                       case AC3_VO:
+                               acm_ctrl |= ACMHW_VOQEN;
+                               break;
+                       default:
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+                                        e_aci);
+                               break;
+                       }
+               } else {
+                       switch (e_aci) {
+                       case AC0_BE:
+                               acm_ctrl &= (~ACMHW_BEQEN);
+                               break;
+                       case AC2_VI:
+                               acm_ctrl &= (~ACMHW_VIQEN);
+                               break;
+                       case AC3_VO:
+                               acm_ctrl &= (~ACMHW_VOQEN);
+                               break;
+                       default:
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                        "switch case not processed\n");
+                               break;
+                       }
+               }
+               RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
+                        "HW_VAR_ACM_CTRL: write 0x%X\n", acm_ctrl);
+               rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+               break; }
+       case HW_VAR_RCR:
+               rtl_write_dword(rtlpriv, REG_RCR, ((u32 *)(val))[0]);
+               rtlpci->receive_config = ((u32 *)(val))[0];
+               break;
+       case HW_VAR_RETRY_LIMIT: {
+               u8 retry_limit = *val;
+
+               rtl_write_word(rtlpriv, REG_RL,
+                              retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+                              retry_limit << RETRY_LIMIT_LONG_SHIFT);
+               break; }
+       case HW_VAR_DUAL_TSF_RST:
+               rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+               break;
+       case HW_VAR_EFUSE_BYTES:
+               rtlefuse->efuse_usedbytes = *((u16 *)val);
+               break;
+       case HW_VAR_EFUSE_USAGE:
+               rtlefuse->efuse_usedpercentage = *val;
+               break;
+       case HW_VAR_IO_CMD:
+               rtl8723be_phy_set_io_cmd(hw, (*(enum io_type *)val));
+               break;
+       case HW_VAR_SET_RPWM: {
+               u8 rpwm_val;
+
+               rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
+               udelay(1);
+
+               if (rpwm_val & BIT(7)) {
+                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
+               } else {
+                       rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val | BIT(7));
+               }
+               break; }
+       case HW_VAR_H2C_FW_PWRMODE:
+               rtl8723be_set_fw_pwrmode_cmd(hw, *val);
+               break;
+       case HW_VAR_FW_PSMODE_STATUS:
+               ppsc->fw_current_inpsmode = *((bool *)val);
+               break;
+       case HW_VAR_RESUME_CLK_ON:
+               _rtl8723be_set_fw_ps_rf_on(hw);
+               break;
+       case HW_VAR_FW_LPS_ACTION: {
+               bool enter_fwlps = *((bool *)val);
+
+               if (enter_fwlps)
+                       _rtl8723be_fwlps_enter(hw);
+               else
+                       _rtl8723be_fwlps_leave(hw);
+
+               break; }
+       case HW_VAR_H2C_FW_JOINBSSRPT: {
+               u8 mstatus = *val;
+               u8 tmp_regcr, tmp_reg422, bcnvalid_reg;
+               u8 count = 0, dlbcn_count = 0;
+               bool recover = false;
+
+               if (mstatus == RT_MEDIA_CONNECT) {
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
+
+                       tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+                       rtl_write_byte(rtlpriv, REG_CR + 1,
+                                      (tmp_regcr | BIT(0)));
+
+                       _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
+                       _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
+
+                       tmp_reg422 = rtl_read_byte(rtlpriv,
+                                                  REG_FWHW_TXQ_CTRL + 2);
+                       rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                                      tmp_reg422 & (~BIT(6)));
+                       if (tmp_reg422 & BIT(6))
+                               recover = true;
+
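+                       /* Download the reserved (beacon) page into the TX
+                        * packet buffer and poll BIT(0) of REG_TDECTRL + 2
+                        * (beacon valid); retry the download up to 5 times.
+                        */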
+                       do {
+                               bcnvalid_reg = rtl_read_byte(rtlpriv,
+                                                            REG_TDECTRL + 2);
+                               rtl_write_byte(rtlpriv, REG_TDECTRL + 2,
+                                              (bcnvalid_reg | BIT(0)));
+                               _rtl8723be_return_beacon_queue_skb(hw);
+
+                               rtl8723be_set_fw_rsvdpagepkt(hw, 0);
+                               bcnvalid_reg = rtl_read_byte(rtlpriv,
+                                                            REG_TDECTRL + 2);
+                               count = 0;
+                               while (!(bcnvalid_reg & BIT(0)) && count < 20) {
+                                       count++;
+                                       udelay(10);
+                                       bcnvalid_reg = rtl_read_byte(rtlpriv,
+                                                              REG_TDECTRL + 2);
+                               }
+                               dlbcn_count++;
+                       } while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
+
+                       if (bcnvalid_reg & BIT(0))
+                               rtl_write_byte(rtlpriv, REG_TDECTRL+2, BIT(0));
+
+                       _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+                       _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
+
+                       if (recover) {
+                               rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+                                              tmp_reg422);
+                       }
+                       rtl_write_byte(rtlpriv, REG_CR + 1,
+                                      (tmp_regcr & ~(BIT(0))));
+               }
+               rtl8723be_set_fw_joinbss_report_cmd(hw, *val);
+               break; }
+       case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
+               rtl8723be_set_p2p_ps_offload_cmd(hw, *val);
+               break;
+       case HW_VAR_AID: {
+               u16 u2btmp;
+               u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+               u2btmp &= 0xC000;
+               rtl_write_word(rtlpriv, REG_BCN_PSR_RPT,
+                              (u2btmp | mac->assoc_id));
+               break; }
+       case HW_VAR_CORRECT_TSF: {
+               u8 btype_ibss = *val;
+
+               if (btype_ibss)
+                       _rtl8723be_stop_tx_beacon(hw);
+
+               _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(3));
+
+               rtl_write_dword(rtlpriv, REG_TSFTR,
+                               (u32) (mac->tsf & 0xffffffff));
+               rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+                               (u32) ((mac->tsf >> 32) & 0xffffffff));
+
+               _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+
+               if (btype_ibss)
+                       _rtl8723be_resume_tx_beacon(hw);
+               break; }
+       case HW_VAR_KEEP_ALIVE: {
+               u8 array[2];
+               array[0] = 0xff;
+               array[1] = *val;
+               rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_KEEP_ALIVE_CTRL,
+                                      2, array);
+               break; }
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case %#x not processed\n", variable);
+               break;
+       }
+}
+
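+/* Write one Link List Table (LLT) entry and poll until the hardware reports
+ * the write access as finished, bounded by POLLING_LLT_THRESHOLD iterations.
+ */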
+static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       bool status = true;
+       int count = 0;
+       u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
+                   _LLT_OP(_LLT_WRITE_ACCESS);
+
+       rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+
+       do {
+               value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+               if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
+                       break;
+
+               if (count > POLLING_LLT_THRESHOLD) {
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "Failed to poll LLT write done at address %d!\n",
+                                address);
+                       status = false;
+                       break;
+               }
+       } while (++count);
+
+       return status;
+}
+
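+/* Set up the LLT that chains the TX packet buffer pages: pages
+ * 0 .. txpktbuf_bndy - 2 are linked linearly and terminated with 0xFF,
+ * and the remaining pages up to maxpage form a ring that loops back to
+ * txpktbuf_bndy.
+ */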
+static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       unsigned short i;
+       u8 txpktbuf_bndy;
+       u8 maxpage;
+       bool status;
+
+       maxpage = 255;
+       txpktbuf_bndy = 245;
+
+       rtl_write_dword(rtlpriv, REG_TRXFF_BNDY,
+                       (0x27FF0000 | txpktbuf_bndy));
+       rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+       rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
+       rtl_write_byte(rtlpriv, REG_PBP, 0x31);
+       rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+       for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+               status = _rtl8723be_llt_write(hw, i, i + 1);
+               if (!status)
+                       return status;
+       }
+       status = _rtl8723be_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+
+       if (!status)
+               return status;
+
+       for (i = txpktbuf_bndy; i < maxpage; i++) {
+               status = _rtl8723be_llt_write(hw, i, (i + 1));
+               if (!status)
+                       return status;
+       }
+       status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy);
+       if (!status)
+               return status;
+
+       rtl_write_dword(rtlpriv, REG_RQPN, 0x80e40808);
+       rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x00);
+
+       return true;
+}
+
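+/* Refresh the software LED according to why the RF was last turned off. */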
+static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+
+       if (rtlpriv->rtlhal.up_first_time)
+               return;
+
+       if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+               rtl8723be_sw_led_on(hw, pled0);
+       else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
+               rtl8723be_sw_led_on(hw, pled0);
+       else
+               rtl8723be_sw_led_off(hw, pled0);
+}
+
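+/* Power the MAC on: run the NIC-enable power sequence, initialize the LLT
+ * if the MAC was not already running, then program the TRX DMA control,
+ * RCR/TCR and the descriptor base addresses of all TX/RX queues.
+ */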
+static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       unsigned char bytetmp;
+       unsigned short wordtmp;
+       u16 retry = 0;
+       bool mac_func_enable;
+
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+       /*Auto Power Down to CHIP-off State*/
+       bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) & (~BIT(7));
+       rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+       if (bytetmp == 0xFF)
+               mac_func_enable = true;
+       else
+               mac_func_enable = false;
+
+       /* HW Power on sequence */
+       if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
+                                     PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
+                                     RTL8723_NIC_ENABLE_FLOW)) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "init MAC Fail as power on failure\n");
+               return false;
+       }
+       bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO) | BIT(4);
+       rtl_write_byte(rtlpriv, REG_APS_FSMCO, bytetmp);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_CR);
+       bytetmp = 0xff;
+       rtl_write_byte(rtlpriv, REG_CR, bytetmp);
+       mdelay(2);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_HWSEQ_CTRL);
+       bytetmp |= 0x7f;
+       rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, bytetmp);
+       mdelay(2);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CFG + 3);
+       if (bytetmp & BIT(0)) {
+               bytetmp = rtl_read_byte(rtlpriv, 0x7c);
+               bytetmp |= BIT(6);
+               rtl_write_byte(rtlpriv, 0x7c, bytetmp);
+       }
+       bytetmp = rtl_read_byte(rtlpriv, REG_SYS_CLKR);
+       bytetmp |= BIT(3);
+       rtl_write_byte(rtlpriv, REG_SYS_CLKR, bytetmp);
+       bytetmp = rtl_read_byte(rtlpriv, REG_GPIO_MUXCFG + 1);
+       bytetmp &= ~BIT(4);
+       rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG + 1, bytetmp);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+3);
+       rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+3, bytetmp | 0x77);
+
+       rtl_write_word(rtlpriv, REG_CR, 0x2ff);
+
+       if (!mac_func_enable) {
+               if (!_rtl8723be_llt_table_init(hw))
+                       return false;
+       }
+       rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+       rtl_write_dword(rtlpriv, REG_HISRE, 0xffffffff);
+
+       /* Enable FW Beamformer Interrupt */
+       bytetmp = rtl_read_byte(rtlpriv, REG_FWIMR + 3);
+       rtl_write_byte(rtlpriv, REG_FWIMR + 3, bytetmp | BIT(6));
+
+       wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
+       wordtmp &= 0xf;
+       wordtmp |= 0xF5B1;
+       rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
+
+       rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
+       rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+       rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
+       rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
+
+       rtl_write_byte(rtlpriv, 0x4d0, 0x0);
+
+       rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
+                       ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
+                       DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_MGQ_DESA,
+                       (u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
+                       DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_VOQ_DESA,
+                       (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_VIQ_DESA,
+                       (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_BEQ_DESA,
+                       (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_BKQ_DESA,
+                       (u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_HQ_DESA,
+                       (u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
+                       DMA_BIT_MASK(32));
+       rtl_write_dword(rtlpriv, REG_RX_DESA,
+                       (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
+                       DMA_BIT_MASK(32));
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG + 3);
+       rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, bytetmp | 0x77);
+
+       rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+       rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
+
+       rtl_write_byte(rtlpriv, REG_SECONDARY_CCA_CTRL, 0x3);
+
+       do {
+               retry++;
+               bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
+       } while ((retry < 200) && (bytetmp & BIT(7)));
+
+       _rtl8723be_gen_refresh_led_state(hw);
+
+       rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
+
+       bytetmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
+       rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, bytetmp & ~BIT(2));
+
+       return true;
+}
+
+static void _rtl8723be_hw_configure(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 reg_bw_opmode;
+       u32 reg_ratr, reg_prsr;
+
+       reg_bw_opmode = BW_OPMODE_20MHZ;
+       reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
+                  RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
+       reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
+
+       rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
+       rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+}
+
+static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       rtl_write_byte(rtlpriv, 0x34b, 0x93);
+       rtl_write_word(rtlpriv, 0x350, 0x870c);
+       rtl_write_byte(rtlpriv, 0x352, 0x1);
+
+       if (ppsc->support_backdoor)
+               rtl_write_byte(rtlpriv, 0x349, 0x1b);
+       else
+               rtl_write_byte(rtlpriv, 0x349, 0x03);
+
+       rtl_write_word(rtlpriv, 0x350, 0x2718);
+       rtl_write_byte(rtlpriv, 0x352, 0x1);
+}
+
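+/* Enable hardware TX encryption / RX decryption (unless software crypto is
+ * requested) and push the resulting SCR value via HW_VAR_WPA_CONFIG.
+ */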
+void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 sec_reg_value;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+                rtlpriv->sec.pairwise_enc_algorithm,
+                rtlpriv->sec.group_enc_algorithm);
+
+       if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                        "not enabling hw encryption\n");
+               return;
+       }
+       sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
+
+       if (rtlpriv->sec.use_defaultkey) {
+               sec_reg_value |= SCR_TXUSEDK;
+               sec_reg_value |= SCR_RXUSEDK;
+       }
+       sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+
+       rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+
+       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "The SECR value is %x\n",
+                sec_reg_value);
+
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+
+int rtl8723be_hw_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       bool rtstatus = true;
+       int err;
+       u8 tmp_u1b;
+       unsigned long flags;
+
+       /* reenable interrupts to not interfere with other devices */
+       local_save_flags(flags);
+       local_irq_enable();
+
+       rtlpriv->rtlhal.being_init_adapter = true;
+       rtlpriv->intf_ops->disable_aspm(hw);
+       rtstatus = _rtl8723be_init_mac(hw);
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               err = 1;
+               goto exit;
+       }
+       tmp_u1b = rtl_read_byte(rtlpriv, REG_SYS_CFG);
+       tmp_u1b &= 0x7F;
+       rtl_write_byte(rtlpriv, REG_SYS_CFG, tmp_u1b);
+
+       err = rtl8723_download_fw(hw, true);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        "Failed to download FW, aborting HW init\n");
+               err = 1;
+               rtlhal->fw_ready = false;
+               goto exit;
+       } else {
+               rtlhal->fw_ready = true;
+       }
+       rtlhal->last_hmeboxnum = 0;
+       rtl8723be_phy_mac_config(hw);
+       /* The previous call may have modified RCR, so refresh the cached
+        * receive_config here; otherwise throughput becomes unstable.
+        * RCR_ACRC32 makes RX throughput unstable and RCR_APP_ICV causes
+        * mac80211 to disassociate from Cisco 1252 APs.
+        */
+       rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
+       rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
+       rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+       rtl8723be_phy_bb_config(hw);
+       rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+       rtl8723be_phy_rf_config(hw);
+
+       rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
+                                                RF_CHNLBW, RFREG_OFFSET_MASK);
+       rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
+                                                RF_CHNLBW, RFREG_OFFSET_MASK);
+       rtlphy->rfreg_chnlval[0] &= 0xFFF03FF;
+       rtlphy->rfreg_chnlval[0] |= (BIT(10) | BIT(11));
+
+       rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+       rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+       rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
+       _rtl8723be_hw_configure(hw);
+       rtl_cam_reset_all_entry(hw);
+       rtl8723be_enable_hw_security_config(hw);
+
+       ppsc->rfpwr_state = ERFON;
+
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
+       _rtl8723be_enable_aspm_back_door(hw);
+       rtlpriv->intf_ops->enable_aspm(hw);
+
+       rtl8723be_bt_hw_init(hw);
+
+       rtl_set_bbreg(hw, 0x64, BIT(20), 0);
+       rtl_set_bbreg(hw, 0x64, BIT(24), 0);
+
+       rtl_set_bbreg(hw, 0x40, BIT(4), 0);
+       rtl_set_bbreg(hw, 0x40, BIT(3), 1);
+
+       rtl_set_bbreg(hw, 0x944, BIT(0)|BIT(1), 0x3);
+       rtl_set_bbreg(hw, 0x930, 0xff, 0x77);
+
+       rtl_set_bbreg(hw, 0x38, BIT(11), 0x1);
+
+       rtl_set_bbreg(hw, 0xb2c, 0xffffffff, 0x80000000);
+
+       if (ppsc->rfpwr_state == ERFON) {
+               rtl8723be_dm_check_txpower_tracking(hw);
+               rtl8723be_phy_lc_calibrate(hw);
+       }
+       tmp_u1b = efuse_read_1byte(hw, 0x1FA);
+       if (!(tmp_u1b & BIT(0))) {
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+       }
+       if (!(tmp_u1b & BIT(4))) {
+               tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
+               tmp_u1b &= 0x0F;
+               rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
+               udelay(10);
+               rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+       }
+       rtl8723be_dm_init(hw);
+exit:
+       local_irq_restore(flags);
+       rtlpriv->rtlhal.being_init_adapter = false;
+       return err;
+}
+
+static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       enum version_8723e version = VERSION_UNKNOWN;
+       u8 count = 0;
+       u8 value8;
+       u32 value32;
+
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0);
+
+       value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 2);
+       rtl_write_byte(rtlpriv, REG_APS_FSMCO + 2, value8 | BIT(0));
+
+       value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+       rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, value8 | BIT(0));
+
+       value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+       while (((value8 & BIT(0))) && (count++ < 100)) {
+               udelay(10);
+               value8 = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
+       }
+       count = 0;
+       value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+       while ((value8 == 0) && (count++ < 50)) {
+               value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+               mdelay(1);
+       }
+       value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
+       if ((value32 & (CHIP_8723B)) != CHIP_8723B)
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "unknown chip version\n");
+       else
+               version = (enum version_8723e)VERSION_TEST_CHIP_1T1R_8723B;
+
+       rtlphy->rf_type = RF_1T1R;
+
+       value8 = rtl_read_byte(rtlpriv, REG_ROM_VERSION);
+       if (value8 >= 0x02)
+               version |= BIT(3);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+                "RF_2T2R" : "RF_1T1R");
+
+       return version;
+}
+
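+/* Program the MSR network-type bits and the beacon-related functions for
+ * the requested interface type; returns 1 for unsupported types.
+ */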
+static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
+                                      enum nl80211_iftype type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
+       enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+
+       rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
+       RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+                "clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
+
+       if (type == NL80211_IFTYPE_UNSPECIFIED ||
+           type == NL80211_IFTYPE_STATION) {
+               _rtl8723be_stop_tx_beacon(hw);
+               _rtl8723be_enable_bcn_sub_func(hw);
+       } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
+               _rtl8723be_resume_tx_beacon(hw);
+               _rtl8723be_disable_bcn_sub_func(hw);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+                        type);
+       }
+       switch (type) {
+       case NL80211_IFTYPE_UNSPECIFIED:
+               bt_msr |= MSR_NOLINK;
+               ledaction = LED_CTL_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        "Set Network type to NO LINK!\n");
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               bt_msr |= MSR_ADHOC;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        "Set Network type to Ad Hoc!\n");
+               break;
+       case NL80211_IFTYPE_STATION:
+               bt_msr |= MSR_INFRA;
+               ledaction = LED_CTL_LINK;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        "Set Network type to STA!\n");
+               break;
+       case NL80211_IFTYPE_AP:
+               bt_msr |= MSR_AP;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        "Set Network type to AP!\n");
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Network type %d not supported!\n", type);
+               return 1;
+       }
+       rtl_write_byte(rtlpriv, (MSR), bt_msr);
+       rtlpriv->cfg->ops->led_control(hw, ledaction);
+       if ((bt_msr & 0x03) == MSR_AP)
+               rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+       else
+               rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+       return 0;
+}
+
+void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       u32 reg_rcr = rtlpci->receive_config;
+
+       if (rtlpriv->psc.rfpwr_state != ERFON)
+               return;
+
+       if (check_bssid) {
+               reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                             (u8 *)(&reg_rcr));
+               _rtl8723be_set_bcn_ctrl_reg(hw, 0, BIT(4));
+       } else if (!check_bssid) {
+               reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+               _rtl8723be_set_bcn_ctrl_reg(hw, BIT(4), 0);
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                             (u8 *)(&reg_rcr));
+       }
+}
+
+int rtl8723be_set_network_type(struct ieee80211_hw *hw,
+                              enum nl80211_iftype type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (_rtl8723be_set_media_status(hw, type))
+               return -EOPNOTSUPP;
+
+       if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+               if (type != NL80211_IFTYPE_AP)
+                       rtl8723be_set_check_bssid(hw, true);
+       } else {
+               rtl8723be_set_check_bssid(hw, false);
+       }
+       return 0;
+}
+
+/* Do not set REG_EDCA_BE_PARAM here because mac80211 still sends
+ * packets while scanning.
+ */
+void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       rtl8723_dm_init_edca_turbo(hw);
+       switch (aci) {
+       case AC1_BK:
+               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
+               break;
+       case AC0_BE:
+               break;
+       case AC2_VI:
+               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
+               break;
+       case AC3_VO:
+               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
+               break;
+       default:
+               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               break;
+       }
+}
+
+void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+       rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+       rtlpci->irq_enabled = true;
+       /* Some C2H commands (e.g. C2H, CPWM) may already have been sent
+        * by the FW before the system interrupt was enabled, so clear all
+        * pending C2H events here; otherwise the FW will not schedule any
+        * further commands.
+        */
+       rtl_write_byte(rtlpriv, REG_C2HEVT_CLEAR, 0);
+       /*enable system interrupt*/
+       rtl_write_dword(rtlpriv, REG_HSIMR, rtlpci->sys_irq_mask & 0xFFFFFFFF);
+}
+
+void rtl8723be_disable_interrupt(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       rtl_write_dword(rtlpriv, REG_HIMR, IMR_DISABLED);
+       rtl_write_dword(rtlpriv, REG_HIMRE, IMR_DISABLED);
+       rtlpci->irq_enabled = false;
+       synchronize_irq(rtlpci->pdev->irq);
+}
+
+static void _rtl8723be_poweroff_adapter(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 u1b_tmp;
+
+       /* Combo (PCIe + USB) Card and PCIe-MF Card */
+       /* 1. Run LPS WL RFOFF flow */
+       rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+                                PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW);
+
+       /* 2. 0x1F[7:0] = 0 */
+       /* turn off RF */
+       rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+       if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) &&
+           rtlhal->fw_ready)
+               rtl8723be_firmware_selfreset(hw);
+
+       /* Reset MCU. Suggested by Filen. */
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
+
+       /* g.   MCUFWDL 0x80[1:0]= 0     */
+       /* reset MCU ready status */
+       rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+       /* HW card disable configuration. */
+       rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
+                                PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW);
+
+       /* Reset MCU IO Wrapper */
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1b_tmp | BIT(0));
+
+       /* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
+       /* lock ISO/CLK/Power control register */
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+}
+
+void rtl8723be_card_disable(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       enum nl80211_iftype opmode;
+
+       mac->link_state = MAC80211_NOLINK;
+       opmode = NL80211_IFTYPE_UNSPECIFIED;
+       _rtl8723be_set_media_status(hw, opmode);
+       if (rtlpriv->rtlhal.driver_is_goingto_unload ||
+           ppsc->rfoff_reason > RF_CHANGE_BY_PS)
+               rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+       _rtl8723be_poweroff_adapter(hw);
+
+       /* after power off we should do iqk again */
+       rtlpriv->phy.iqk_initialized = false;
+}
+
+void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
+                                   u32 *p_inta, u32 *p_intb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
+       rtl_write_dword(rtlpriv, ISR, *p_inta);
+
+       *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) &
+                                       rtlpci->irq_mask[1];
+       rtl_write_dword(rtlpriv, REG_HISRE, *p_intb);
+}
+
+void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 bcn_interval, atim_window;
+
+       bcn_interval = mac->beacon_interval;
+       atim_window = 2;        /*FIX MERGE */
+       rtl8723be_disable_interrupt(hw);
+       rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+       rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+       rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+       rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
+       rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
+       rtl_write_byte(rtlpriv, 0x606, 0x30);
+       rtl8723be_enable_interrupt(hw);
+}
+
+void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 bcn_interval = mac->beacon_interval;
+
+       RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
+                "beacon_interval:%d\n", bcn_interval);
+       rtl8723be_disable_interrupt(hw);
+       rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+       rtl8723be_enable_interrupt(hw);
+}
+
+void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
+                                  u32 add_msr, u32 rm_msr)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
+                "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+
+       if (add_msr)
+               rtlpci->irq_mask[0] |= add_msr;
+       if (rm_msr)
+               rtlpci->irq_mask[0] &= (~rm_msr);
+       rtl8723be_disable_interrupt(hw);
+       rtl8723be_enable_interrupt(hw);
+}
+
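+/* Map a 2.4 GHz channel number to its TX power calibration group:
+ * channels 1-2 -> group 0, 3-8 -> group 1, 9-14 -> group 2.
+ */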
+static u8 _rtl8723be_get_chnl_group(u8 chnl)
+{
+       u8 group;
+
+       if (chnl < 3)
+               group = 0;
+       else if (chnl < 9)
+               group = 1;
+       else
+               group = 2;
+       return group;
+}
+
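+/* Parse the per-path 2.4G/5G TX power calibration bytes from the efuse
+ * shadow map; 0xFF entries fall back to defaults and the 4-bit power
+ * differences are sign-extended to 8 bits.
+ */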
+static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
+                                       struct txpower_info_2g *pw2g,
+                                       struct txpower_info_5g *pw5g,
+                                       bool autoload_fail, u8 *hwinfo)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "hal_ReadPowerValueFromPROM8723BE(): PROMContent[0x%x] = 0x%x\n",
+                (addr + 1), hwinfo[addr + 1]);
+       if (hwinfo[addr + 1] == 0xFF)  /* YJ, add, 120316 */
+               autoload_fail = true;
+
+       if (autoload_fail) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "autoload failed, using default values!\n");
+               for (path = 0; path < MAX_RF_PATH; path++) {
+                       /* 2.4G default value */
+                       for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+                               pw2g->index_cck_base[path][group] = 0x2D;
+                               pw2g->index_bw40_base[path][group] = 0x2D;
+                       }
+                       for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+                               if (cnt == 0) {
+                                       pw2g->bw20_diff[path][0] = 0x02;
+                                       pw2g->ofdm_diff[path][0] = 0x04;
+                               } else {
+                                       pw2g->bw20_diff[path][cnt] = 0xFE;
+                                       pw2g->bw40_diff[path][cnt] = 0xFE;
+                                       pw2g->cck_diff[path][cnt] = 0xFE;
+                                       pw2g->ofdm_diff[path][cnt] = 0xFE;
+                               }
+                       }
+               }
+               return;
+       }
+       for (path = 0; path < MAX_RF_PATH; path++) {
+               /*2.4G default value*/
+               for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+                       pw2g->index_cck_base[path][group] = hwinfo[addr++];
+                       if (pw2g->index_cck_base[path][group] == 0xFF)
+                               pw2g->index_cck_base[path][group] = 0x2D;
+               }
+               for (group = 0; group < MAX_CHNL_GROUP_24G - 1; group++) {
+                       pw2g->index_bw40_base[path][group] = hwinfo[addr++];
+                       if (pw2g->index_bw40_base[path][group] == 0xFF)
+                               pw2g->index_bw40_base[path][group] = 0x2D;
+               }
+               for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+                       if (cnt == 0) {
+                               pw2g->bw40_diff[path][cnt] = 0;
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw2g->bw20_diff[path][cnt] = 0x02;
+                               } else {
+                                       pw2g->bw20_diff[path][cnt] =
+                                               (hwinfo[addr] & 0xf0) >> 4;
+                                       /*bit sign number to 8 bit sign number*/
+                                       if (pw2g->bw20_diff[path][cnt] & BIT(3))
+                                               pw2g->bw20_diff[path][cnt] |= 0xF0;
+                               }
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw2g->ofdm_diff[path][cnt] = 0x04;
+                               } else {
+                                       pw2g->ofdm_diff[path][cnt] =
+                                                       (hwinfo[addr] & 0x0f);
+                                       /*bit sign number to 8 bit sign number*/
+                                       if (pw2g->ofdm_diff[path][cnt] & BIT(3))
+                                               pw2g->ofdm_diff[path][cnt] |=
+                                                                         0xF0;
+                               }
+                               pw2g->cck_diff[path][cnt] = 0;
+                               addr++;
+                       } else {
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw2g->bw40_diff[path][cnt] = 0xFE;
+                               } else {
+                                       pw2g->bw40_diff[path][cnt] =
+                                               (hwinfo[addr] & 0xf0) >> 4;
+                                       if (pw2g->bw40_diff[path][cnt] & BIT(3))
+                                               pw2g->bw40_diff[path][cnt] |=
+                                                                         0xF0;
+                               }
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw2g->bw20_diff[path][cnt] = 0xFE;
+                               } else {
+                                       pw2g->bw20_diff[path][cnt] =
+                                                       (hwinfo[addr] & 0x0f);
+                                       if (pw2g->bw20_diff[path][cnt] & BIT(3))
+                                               pw2g->bw20_diff[path][cnt] |=
+                                                                         0xF0;
+                               }
+                               addr++;
+
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw2g->ofdm_diff[path][cnt] = 0xFE;
+                               } else {
+                                       pw2g->ofdm_diff[path][cnt] =
+                                               (hwinfo[addr] & 0xf0) >> 4;
+                                       if (pw2g->ofdm_diff[path][cnt] & BIT(3))
+                                               pw2g->ofdm_diff[path][cnt] |=
+                                                                         0xF0;
+                               }
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw2g->cck_diff[path][cnt] = 0xFE;
+                               } else {
+                                       pw2g->cck_diff[path][cnt] =
+                                                       (hwinfo[addr] & 0x0f);
+                                       if (pw2g->cck_diff[path][cnt] & BIT(3))
+                                               pw2g->cck_diff[path][cnt] |=
+                                                                        0xF0;
+                               }
+                               addr++;
+                       }
+               }
+               /*5G default value*/
+               for (group = 0; group < MAX_CHNL_GROUP_5G; group++) {
+                       pw5g->index_bw40_base[path][group] = hwinfo[addr++];
+                       if (pw5g->index_bw40_base[path][group] == 0xFF)
+                               pw5g->index_bw40_base[path][group] = 0xFE;
+               }
+               for (cnt = 0; cnt < MAX_TX_COUNT; cnt++) {
+                       if (cnt == 0) {
+                               pw5g->bw40_diff[path][cnt] = 0;
+
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw5g->bw20_diff[path][cnt] = 0;
+                               } else {
+                                       pw5g->bw20_diff[path][0] =
+                                               (hwinfo[addr] & 0xf0) >> 4;
+                                       if (pw5g->bw20_diff[path][cnt] & BIT(3))
+                                               pw5g->bw20_diff[path][cnt] |=
+                                                                         0xF0;
+                               }
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw5g->ofdm_diff[path][cnt] = 0x04;
+                               } else {
+                                       pw5g->ofdm_diff[path][0] =
+                                                       (hwinfo[addr] & 0x0f);
+                                       if (pw5g->ofdm_diff[path][cnt] & BIT(3))
+                                               pw5g->ofdm_diff[path][cnt] |=
+                                                                         0xF0;
+                               }
+                               addr++;
+                       } else {
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw5g->bw40_diff[path][cnt] = 0xFE;
+                               } else {
+                                       pw5g->bw40_diff[path][cnt] =
+                                               (hwinfo[addr] & 0xf0) >> 4;
+                                       if (pw5g->bw40_diff[path][cnt] & BIT(3))
+                                               pw5g->bw40_diff[path][cnt] |= 0xF0;
+                               }
+                               if (hwinfo[addr] == 0xFF) {
+                                       pw5g->bw20_diff[path][cnt] = 0xFE;
+                               } else {
+                                       pw5g->bw20_diff[path][cnt] =
+                                                       (hwinfo[addr] & 0x0f);
+                                       if (pw5g->bw20_diff[path][cnt] & BIT(3))
+                                               pw5g->bw20_diff[path][cnt] |= 0xF0;
+                               }
+                               addr++;
+                       }
+               }
+               if (hwinfo[addr] == 0xFF) {
+                       pw5g->ofdm_diff[path][1] = 0xFE;
+                       pw5g->ofdm_diff[path][2] = 0xFE;
+               } else {
+                       pw5g->ofdm_diff[path][1] = (hwinfo[addr] & 0xf0) >> 4;
+                       pw5g->ofdm_diff[path][2] = (hwinfo[addr] & 0x0f);
+               }
+               addr++;
+
+               if (hwinfo[addr] == 0xFF)
+                       pw5g->ofdm_diff[path][3] = 0xFE;
+               else
+                       pw5g->ofdm_diff[path][3] = (hwinfo[addr] & 0x0f);
+               addr++;
+
+               for (cnt = 1; cnt < MAX_TX_COUNT; cnt++) {
+                       if (pw5g->ofdm_diff[path][cnt] == 0xFF)
+                               pw5g->ofdm_diff[path][cnt] = 0xFE;
+                       else if (pw5g->ofdm_diff[path][cnt] & BIT(3))
+                               pw5g->ofdm_diff[path][cnt] |= 0xF0;
+               }
+       }
+}
+
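+/* Translate the raw efuse power data into the per-channel CCK/HT40 base
+ * levels and diffs used by rtlefuse, and read the thermal meter and
+ * regulatory settings.
+ */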
+static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
+                                                  bool autoload_fail,
+                                                  u8 *hwinfo)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct txpower_info_2g pw2g;
+       struct txpower_info_5g pw5g;
+       u8 rf_path, index;
+       u8 i;
+
+       _rtl8723be_read_power_value_fromprom(hw, &pw2g, &pw5g, autoload_fail,
+                                            hwinfo);
+
+       for (rf_path = 0; rf_path < 2; rf_path++) {
+               for (i = 0; i < 14; i++) {
+                       index = _rtl8723be_get_chnl_group(i+1);
+
+                       rtlefuse->txpwrlevel_cck[rf_path][i] =
+                                       pw2g.index_cck_base[rf_path][index];
+                       rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
+                                       pw2g.index_bw40_base[rf_path][index];
+               }
+               for (i = 0; i < MAX_TX_COUNT; i++) {
+                       rtlefuse->txpwr_ht20diff[rf_path][i] =
+                                               pw2g.bw20_diff[rf_path][i];
+                       rtlefuse->txpwr_ht40diff[rf_path][i] =
+                                               pw2g.bw40_diff[rf_path][i];
+                       rtlefuse->txpwr_legacyhtdiff[rf_path][i] =
+                                               pw2g.ofdm_diff[rf_path][i];
+               }
+               for (i = 0; i < 14; i++) {
+                       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+                               "RF(%d)-Ch(%d) [CCK / HT40_1S] = [0x%x / 0x%x]\n",
+                               rf_path, i,
+                               rtlefuse->txpwrlevel_cck[rf_path][i],
+                               rtlefuse->txpwrlevel_ht40_1s[rf_path][i]);
+               }
+       }
+       if (!autoload_fail)
+               rtlefuse->eeprom_thermalmeter =
+                                       hwinfo[EEPROM_THERMAL_METER_88E];
+       else
+               rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+
+       if (rtlefuse->eeprom_thermalmeter == 0xff || autoload_fail) {
+               rtlefuse->apk_thermalmeterignore = true;
+               rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+       }
+       rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+               "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+
+       if (!autoload_fail) {
+               rtlefuse->eeprom_regulatory =
+                       hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0x07;/*bit0~2*/
+               if (hwinfo[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+                       rtlefuse->eeprom_regulatory = 0;
+       } else {
+               rtlefuse->eeprom_regulatory = 0;
+       }
+       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+               "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+}
+
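+/* Read the efuse shadow map and extract the adapter identity (MAC address,
+ * EEPROM IDs, crystal cap, TX power and BT coexist info), then derive the
+ * OEM ID from the EEPROM SVID/SMID values.
+ */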
+static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
+                                        bool pseudo_test)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u16 i, usvalue;
+       u8 hwinfo[HWSET_MAX_SIZE];
+       u16 eeprom_id;
+       bool is_toshiba_smid1 = false;
+       bool is_toshiba_smid2 = false;
+       bool is_samsung_smid = false;
+       bool is_lenovo_smid = false;
+       u16 toshiba_smid1[] = {
+               0x6151, 0x6152, 0x6154, 0x6155, 0x6177, 0x6178, 0x6179, 0x6180,
+               0x7151, 0x7152, 0x7154, 0x7155, 0x7177, 0x7178, 0x7179, 0x7180,
+               0x8151, 0x8152, 0x8154, 0x8155, 0x8181, 0x8182, 0x8184, 0x8185,
+               0x9151, 0x9152, 0x9154, 0x9155, 0x9181, 0x9182, 0x9184, 0x9185
+       };
+       u16 toshiba_smid2[] = {
+               0x6181, 0x6184, 0x6185, 0x7181, 0x7182, 0x7184, 0x7185, 0x8181,
+               0x8182, 0x8184, 0x8185, 0x9181, 0x9182, 0x9184, 0x9185
+       };
+       u16 samsung_smid[] = {
+               0x6191, 0x6192, 0x6193, 0x7191, 0x7192, 0x7193, 0x8191, 0x8192,
+               0x8193, 0x9191, 0x9192, 0x9193
+       };
+       u16 lenovo_smid[] = {
+               0x8195, 0x9195, 0x7194, 0x8200, 0x8201, 0x8202, 0x9199, 0x9200
+       };
+
+       if (pseudo_test) {
+               /* needs to be added */
+               return;
+       }
+       if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
+               rtl_efuse_shadow_map_update(hw);
+
+               memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
+                      HWSET_MAX_SIZE);
+       } else if (rtlefuse->epromtype == EEPROM_93C46) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "RTL819X did not boot from EEPROM, check it!\n");
+       }
+       RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
+                     hwinfo, HWSET_MAX_SIZE);
+
+       eeprom_id = *((u16 *)&hwinfo[0]);
+       if (eeprom_id != RTL8723BE_EEPROM_ID) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+               rtlefuse->autoload_failflag = true;
+       } else {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+               rtlefuse->autoload_failflag = false;
+       }
+       if (rtlefuse->autoload_failflag)
+               return;
+
+       rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
+       rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+       rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID];
+       rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "EEPROMId = 0x%4x\n", eeprom_id);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+
+       for (i = 0; i < 6; i += 2) {
+               usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
+               *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n",
+                rtlefuse->dev_addr);
+
+       /*parse xtal*/
+       rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE];
+       if (rtlefuse->crystalcap == 0xFF)
+               rtlefuse->crystalcap = 0x20;
+
+       _rtl8723be_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
+                                              hwinfo);
+
+       rtl8723be_read_bt_coexist_info_from_hwpg(hw,
+                                                rtlefuse->autoload_failflag,
+                                                hwinfo);
+
+       rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
+       rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+       rtlefuse->txpwr_fromeprom = true;
+       rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+
+       /* set channel plan to world wide 13 */
+       rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+
+       if (rtlhal->oem_id == RT_CID_DEFAULT) {
+               /* Does this one have a Toshiba SMID from group 1? */
+               for (i = 0; i < ARRAY_SIZE(toshiba_smid1); i++) {
+                       if (rtlefuse->eeprom_smid == toshiba_smid1[i]) {
+                               is_toshiba_smid1 = true;
+                               break;
+                       }
+               }
+               /* Does this one have a Toshiba SMID from group 2? */
+               for (i = 0; i < ARRAY_SIZE(toshiba_smid2); i++) {
+                       if (rtlefuse->eeprom_smid == toshiba_smid2[i]) {
+                               is_toshiba_smid2 = true;
+                               break;
+                       }
+               }
+               /* Does this one have a Samsung SMID? */
+               for (i = 0; i < ARRAY_SIZE(samsung_smid); i++) {
+                       if (rtlefuse->eeprom_smid == samsung_smid[i]) {
+                               is_samsung_smid = true;
+                               break;
+                       }
+               }
+               /* Does this one have a Lenovo SMID? */
+               for (i = 0; i < ARRAY_SIZE(lenovo_smid); i++) {
+                       if (rtlefuse->eeprom_smid == lenovo_smid[i]) {
+                               is_lenovo_smid = true;
+                               break;
+                       }
+               }
+               switch (rtlefuse->eeprom_oemid) {
+               case EEPROM_CID_DEFAULT:
+                       if (rtlefuse->eeprom_did == 0x8176) {
+                               if (rtlefuse->eeprom_svid == 0x10EC &&
+                                   is_toshiba_smid1) {
+                                       rtlhal->oem_id = RT_CID_TOSHIBA;
+                               } else if (rtlefuse->eeprom_svid == 0x1025) {
+                                       rtlhal->oem_id = RT_CID_819X_ACER;
+                               } else if (rtlefuse->eeprom_svid == 0x10EC &&
+                                          is_samsung_smid) {
+                                       rtlhal->oem_id = RT_CID_819X_SAMSUNG;
+                               } else if (rtlefuse->eeprom_svid == 0x10EC &&
+                                          is_lenovo_smid) {
+                                       rtlhal->oem_id = RT_CID_819X_LENOVO;
+                               } else if ((rtlefuse->eeprom_svid == 0x10EC &&
+                                           rtlefuse->eeprom_smid == 0x8197) ||
+                                          (rtlefuse->eeprom_svid == 0x10EC &&
+                                           rtlefuse->eeprom_smid == 0x9196)) {
+                                       rtlhal->oem_id = RT_CID_819X_CLEVO;
+                               } else if ((rtlefuse->eeprom_svid == 0x1028 &&
+                                           rtlefuse->eeprom_smid == 0x8194) ||
+                                          (rtlefuse->eeprom_svid == 0x1028 &&
+                                           rtlefuse->eeprom_smid == 0x8198) ||
+                                          (rtlefuse->eeprom_svid == 0x1028 &&
+                                           rtlefuse->eeprom_smid == 0x9197) ||
+                                          (rtlefuse->eeprom_svid == 0x1028 &&
+                                           rtlefuse->eeprom_smid == 0x9198)) {
+                                       rtlhal->oem_id = RT_CID_819X_DELL;
+                               } else if ((rtlefuse->eeprom_svid == 0x103C &&
+                                           rtlefuse->eeprom_smid == 0x1629)) {
+                                       rtlhal->oem_id = RT_CID_819X_HP;
+                               } else if ((rtlefuse->eeprom_svid == 0x1A32 &&
+                                          rtlefuse->eeprom_smid == 0x2315)) {
+                                       rtlhal->oem_id = RT_CID_819X_QMI;
+                               } else if ((rtlefuse->eeprom_svid == 0x10EC &&
+                                          rtlefuse->eeprom_smid == 0x8203)) {
+                                       rtlhal->oem_id = RT_CID_819X_PRONETS;
+                               } else if ((rtlefuse->eeprom_svid == 0x1043 &&
+                                          rtlefuse->eeprom_smid == 0x84B5)) {
+                                       rtlhal->oem_id = RT_CID_819X_EDIMAX_ASUS;
+                               } else {
+                                       rtlhal->oem_id = RT_CID_DEFAULT;
+                               }
+                       } else if (rtlefuse->eeprom_did == 0x8178) {
+                               if (rtlefuse->eeprom_svid == 0x10EC &&
+                                   is_toshiba_smid2)
+                                       rtlhal->oem_id = RT_CID_TOSHIBA;
+                               else if (rtlefuse->eeprom_svid == 0x1025)
+                                       rtlhal->oem_id = RT_CID_819X_ACER;
+                               else if ((rtlefuse->eeprom_svid == 0x10EC &&
+                                         rtlefuse->eeprom_smid == 0x8186))
+                                       rtlhal->oem_id = RT_CID_819X_PRONETS;
+                               else if ((rtlefuse->eeprom_svid == 0x1043 &&
+                                         rtlefuse->eeprom_smid == 0x84B6))
+                                       rtlhal->oem_id =
+                                                       RT_CID_819X_EDIMAX_ASUS;
+                               else
+                                       rtlhal->oem_id = RT_CID_DEFAULT;
+                       } else {
+                               rtlhal->oem_id = RT_CID_DEFAULT;
+                       }
+                       break;
+               case EEPROM_CID_TOSHIBA:
+                       rtlhal->oem_id = RT_CID_TOSHIBA;
+                       break;
+               case EEPROM_CID_CCX:
+                       rtlhal->oem_id = RT_CID_CCX;
+                       break;
+               case EEPROM_CID_QMI:
+                       rtlhal->oem_id = RT_CID_819X_QMI;
+                       break;
+               case EEPROM_CID_WHQL:
+                       break;
+               default:
+                       rtlhal->oem_id = RT_CID_DEFAULT;
+                       break;
+               }
+       }
+}
+
+static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       pcipriv->ledctl.led_opendrain = true;
+       switch (rtlhal->oem_id) {
+       case RT_CID_819X_HP:
+               pcipriv->ledctl.led_opendrain = true;
+               break;
+       case RT_CID_819X_LENOVO:
+       case RT_CID_DEFAULT:
+       case RT_CID_TOSHIBA:
+       case RT_CID_CCX:
+       case RT_CID_819X_ACER:
+       case RT_CID_WHQL:
+       default:
+               break;
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+                "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+}
+
+void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 tmp_u1b;
+
+       rtlhal->version = _rtl8723be_read_chip_version(hw);
+       if (get_rf_type(rtlphy) == RF_1T1R)
+               rtlpriv->dm.rfpath_rxenable[0] = true;
+       else
+               rtlpriv->dm.rfpath_rxenable[0] =
+                   rtlpriv->dm.rfpath_rxenable[1] = true;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+                rtlhal->version);
+       tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+       if (tmp_u1b & BIT(4)) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+               rtlefuse->epromtype = EEPROM_93C46;
+       } else {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+               rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+       }
+       if (tmp_u1b & BIT(5)) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+               rtlefuse->autoload_failflag = false;
+               _rtl8723be_read_adapter_info(hw, false);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+       }
+       _rtl8723be_hal_customized_behavior(hw);
+}
+
+static void rtl8723be_update_hal_rate_table(struct ieee80211_hw *hw,
+                                           struct ieee80211_sta *sta)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u32 ratr_value;
+       u8 ratr_index = 0;
+       u8 nmode = mac->ht_enable;
+       u8 mimo_ps = IEEE80211_SMPS_OFF;
+       u16 shortgi_rate;
+       u32 tmp_ratr_value;
+       u8 curtxbw_40mhz = mac->bw_40;
+       u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+                              1 : 0;
+       u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+                              1 : 0;
+       enum wireless_mode wirelessmode = mac->mode;
+
+       if (rtlhal->current_bandtype == BAND_ON_5G)
+               ratr_value = sta->supp_rates[1] << 4;
+       else
+               ratr_value = sta->supp_rates[0];
+       if (mac->opmode == NL80211_IFTYPE_ADHOC)
+               ratr_value = 0xfff;
+       ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+                      sta->ht_cap.mcs.rx_mask[0] << 12);
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               if (ratr_value & 0x0000000c)
+                       ratr_value &= 0x0000000d;
+               else
+                       ratr_value &= 0x0000000f;
+               break;
+       case WIRELESS_MODE_G:
+               ratr_value &= 0x00000FF5;
+               break;
+       case WIRELESS_MODE_N_24G:
+       case WIRELESS_MODE_N_5G:
+               nmode = 1;
+               if (mimo_ps == IEEE80211_SMPS_STATIC) {
+                       ratr_value &= 0x0007F005;
+               } else {
+                       u32 ratr_mask;
+
+                       if (get_rf_type(rtlphy) == RF_1T2R ||
+                           get_rf_type(rtlphy) == RF_1T1R)
+                               ratr_mask = 0x000ff005;
+                       else
+                               ratr_mask = 0x0f0ff005;
+                       ratr_value &= ratr_mask;
+               }
+               break;
+       default:
+               if (rtlphy->rf_type == RF_1T2R)
+                       ratr_value &= 0x000ff0ff;
+               else
+                       ratr_value &= 0x0f0ff0ff;
+               break;
+       }
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+           (rtlpriv->btcoexist.bt_cur_state) &&
+           (rtlpriv->btcoexist.bt_ant_isolation) &&
+           ((rtlpriv->btcoexist.bt_service == BT_SCO) ||
+            (rtlpriv->btcoexist.bt_service == BT_BUSY)))
+               ratr_value &= 0x0fffcfc0;
+       else
+               ratr_value &= 0x0FFFFFFF;
+
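+       /* When short GI applies for the current bandwidth, set bit 28 of the
+        * RATR value and fold the highest enabled rate index into each
+        * nibble of shortgi_rate.
+        */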
+       if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+                     (!curtxbw_40mhz && curshortgi_20mhz))) {
+               ratr_value |= 0x10000000;
+               tmp_ratr_value = (ratr_value >> 12);
+
+               for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+                       if ((1 << shortgi_rate) & tmp_ratr_value)
+                               break;
+               }
+               shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+                              (shortgi_rate << 4) | (shortgi_rate);
+       }
+       rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+                "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+}
+
+static u8 _rtl8723be_mrate_idx_to_arfr_id(struct ieee80211_hw *hw,
+                                         u8 rate_index)
+{
+       u8 ret = 0;
+
+       switch (rate_index) {
+       case RATR_INX_WIRELESS_NGB:
+               ret = 1;
+               break;
+       case RATR_INX_WIRELESS_N:
+       case RATR_INX_WIRELESS_NG:
+               ret = 5;
+               break;
+       case RATR_INX_WIRELESS_NB:
+               ret = 3;
+               break;
+       case RATR_INX_WIRELESS_GB:
+               ret = 6;
+               break;
+       case RATR_INX_WIRELESS_G:
+               ret = 7;
+               break;
+       case RATR_INX_WIRELESS_B:
+               ret = 8;
+               break;
+       default:
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
+                                          struct ieee80211_sta *sta,
+                                          u8 rssi_level)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_sta_info *sta_entry = NULL;
+       u32 ratr_bitmap;
+       u8 ratr_index;
+       u8 curtxbw_40mhz = (sta->ht_cap.cap &
+                           IEEE80211_HT_CAP_SUP_WIDTH_20_40) ? 1 : 0;
+       u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+                              1 : 0;
+       u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+                              1 : 0;
+       enum wireless_mode wirelessmode = 0;
+       bool shortgi = false;
+       u8 rate_mask[7];
+       u8 macid = 0;
+       u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+       sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+       wirelessmode = sta_entry->wireless_mode;
+       if (mac->opmode == NL80211_IFTYPE_STATION ||
+           mac->opmode == NL80211_IFTYPE_MESH_POINT)
+               curtxbw_40mhz = mac->bw_40;
+       else if (mac->opmode == NL80211_IFTYPE_AP ||
+                mac->opmode == NL80211_IFTYPE_ADHOC)
+               macid = sta->aid + 1;
+
+       ratr_bitmap = sta->supp_rates[0];
+
+       if (mac->opmode == NL80211_IFTYPE_ADHOC)
+               ratr_bitmap = 0xfff;
+
+       ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+                       sta->ht_cap.mcs.rx_mask[0] << 12);
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               ratr_index = RATR_INX_WIRELESS_B;
+               if (ratr_bitmap & 0x0000000c)
+                       ratr_bitmap &= 0x0000000d;
+               else
+                       ratr_bitmap &= 0x0000000f;
+               break;
+       case WIRELESS_MODE_G:
+               ratr_index = RATR_INX_WIRELESS_GB;
+
+               if (rssi_level == 1)
+                       ratr_bitmap &= 0x00000f00;
+               else if (rssi_level == 2)
+                       ratr_bitmap &= 0x00000ff0;
+               else
+                       ratr_bitmap &= 0x00000ff5;
+               break;
+       case WIRELESS_MODE_A:
+               ratr_index = RATR_INX_WIRELESS_A;
+               ratr_bitmap &= 0x00000ff0;
+               break;
+       case WIRELESS_MODE_N_24G:
+       case WIRELESS_MODE_N_5G:
+               ratr_index = RATR_INX_WIRELESS_NGB;
+
+               if (mimo_ps == IEEE80211_SMPS_STATIC  ||
+                   mimo_ps == IEEE80211_SMPS_DYNAMIC) {
+                       if (rssi_level == 1)
+                               ratr_bitmap &= 0x00070000;
+                       else if (rssi_level == 2)
+                               ratr_bitmap &= 0x0007f000;
+                       else
+                               ratr_bitmap &= 0x0007f005;
+               } else {
+                       if (rtlphy->rf_type == RF_1T1R) {
+                               if (curtxbw_40mhz) {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x000f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x000ff000;
+                                       else
+                                               ratr_bitmap &= 0x000ff015;
+                               } else {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x000f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x000ff000;
+                                       else
+                                               ratr_bitmap &= 0x000ff005;
+                               }
+                       } else {
+                               if (curtxbw_40mhz) {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x0f8f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x0f8ff000;
+                                       else
+                                               ratr_bitmap &= 0x0f8ff015;
+                               } else {
+                                       if (rssi_level == 1)
+                                               ratr_bitmap &= 0x0f8f0000;
+                                       else if (rssi_level == 2)
+                                               ratr_bitmap &= 0x0f8ff000;
+                                       else
+                                               ratr_bitmap &= 0x0f8ff005;
+                               }
+                       }
+               }
+               if ((curtxbw_40mhz && curshortgi_40mhz) ||
+                   (!curtxbw_40mhz && curshortgi_20mhz)) {
+                       if (macid == 0)
+                               shortgi = true;
+                       else if (macid == 1)
+                               shortgi = false;
+               }
+               break;
+       default:
+               ratr_index = RATR_INX_WIRELESS_NGB;
+
+               if (rtlphy->rf_type == RF_1T2R)
+                       ratr_bitmap &= 0x000ff0ff;
+               else
+                       ratr_bitmap &= 0x0f0ff0ff;
+               break;
+       }
+       sta_entry->ratr_index = ratr_index;
+
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+                "ratr_bitmap :%x\n", ratr_bitmap);
+       *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
+       rate_mask[0] = macid;
+       rate_mask[1] = _rtl8723be_mrate_idx_to_arfr_id(hw, ratr_index) |
+                                                      (shortgi ? 0x80 : 0x00);
+       rate_mask[2] = curtxbw_40mhz;
+       /* if (prox_priv->proxim_modeinfo->power_output > 0)
+        *      rate_mask[2] |= BIT(6);
+        */
+
+       rate_mask[3] = (u8)(ratr_bitmap & 0x000000ff);
+       rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
+       rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
+       rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
+
+       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+                "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+                ratr_index, ratr_bitmap,
+                rate_mask[0], rate_mask[1],
+                rate_mask[2], rate_mask[3],
+                rate_mask[4], rate_mask[5],
+                rate_mask[6]);
+       rtl8723be_fill_h2c_cmd(hw, H2C_8723BE_RA_MASK, 7, rate_mask);
+       _rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
+}
+
+void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  u8 rssi_level)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       if (rtlpriv->dm.useramask)
+               rtl8723be_update_hal_rate_mask(hw, sta, rssi_level);
+       else
+               rtl8723be_update_hal_rate_table(hw, sta);
+}
+
+void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u16 sifs_timer;
+
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, &mac->slot_time);
+       if (!mac->ht_enable)
+               sifs_timer = 0x0a0a;
+       else
+               sifs_timer = 0x0e0e;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+
+bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
+       u8 u1tmp;
+       bool actuallyset = false;
+
+       if (rtlpriv->rtlhal.being_init_adapter)
+               return false;
+
+       if (ppsc->swrf_processing)
+               return false;
+
+       spin_lock(&rtlpriv->locks.rf_ps_lock);
+       if (ppsc->rfchange_inprogress) {
+               spin_unlock(&rtlpriv->locks.rf_ps_lock);
+               return false;
+       } else {
+               ppsc->rfchange_inprogress = true;
+               spin_unlock(&rtlpriv->locks.rf_ps_lock);
+       }
+       cur_rfstate = ppsc->rfpwr_state;
+
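+       /* Sample the radio on/off GPIO: clear its IO-select bit, read
+        * REG_GPIO_PIN_CTRL_2 and translate BIT(1), adjusted for
+        * polarity_ctl, into the requested RF power state.
+        */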
+       rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
+                      rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2) & ~(BIT(1)));
+
+       u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
+
+       if (rtlphy->polarity_ctl)
+               e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
+       else
+               e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
+
+       if (ppsc->hwradiooff &&
+           (e_rfpowerstate_toset == ERFON)) {
+               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                        "GPIOChangeRF  - HW Radio ON, RF ON\n");
+
+               e_rfpowerstate_toset = ERFON;
+               ppsc->hwradiooff = false;
+               actuallyset = true;
+       } else if (!ppsc->hwradiooff &&
+                  (e_rfpowerstate_toset == ERFOFF)) {
+               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                        "GPIOChangeRF  - HW Radio OFF, RF OFF\n");
+
+               e_rfpowerstate_toset = ERFOFF;
+               ppsc->hwradiooff = true;
+               actuallyset = true;
+       }
+       if (actuallyset) {
+               spin_lock(&rtlpriv->locks.rf_ps_lock);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock(&rtlpriv->locks.rf_ps_lock);
+       } else {
+               if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+
+               spin_lock(&rtlpriv->locks.rf_ps_lock);
+               ppsc->rfchange_inprogress = false;
+               spin_unlock(&rtlpriv->locks.rf_ps_lock);
+       }
+       *valid = 1;
+       return !ppsc->hwradiooff;
+}
+
+void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
+                      u8 *p_macaddr, bool is_group, u8 enc_algo,
+                      bool is_wepkey, bool clear_all)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 *macaddr = p_macaddr;
+       u32 entry_id = 0;
+       bool is_pairwise = false;
+
+       static u8 cam_const_addr[4][6] = {
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+               {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+       };
+       static u8 cam_const_broad[] = {
+               0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+       };
+
+       if (clear_all) {
+               u8 idx = 0;
+               u8 cam_offset = 0;
+               u8 clear_number = 5;
+
+               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+
+               for (idx = 0; idx < clear_number; idx++) {
+                       rtl_cam_mark_invalid(hw, cam_offset + idx);
+                       rtl_cam_empty_entry(hw, cam_offset + idx);
+
+                       if (idx < 5) {
+                               memset(rtlpriv->sec.key_buf[idx], 0,
+                                      MAX_KEY_LEN);
+                               rtlpriv->sec.key_len[idx] = 0;
+                       }
+               }
+       } else {
+               switch (enc_algo) {
+               case WEP40_ENCRYPTION:
+                       enc_algo = CAM_WEP40;
+                       break;
+               case WEP104_ENCRYPTION:
+                       enc_algo = CAM_WEP104;
+                       break;
+               case TKIP_ENCRYPTION:
+                       enc_algo = CAM_TKIP;
+                       break;
+               case AESCCMP_ENCRYPTION:
+                       enc_algo = CAM_AES;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "switch case not processed\n");
+                       enc_algo = CAM_TKIP;
+                       break;
+               }
+
+               if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+                       macaddr = cam_const_addr[key_index];
+                       entry_id = key_index;
+               } else {
+                       if (is_group) {
+                               macaddr = cam_const_broad;
+                               entry_id = key_index;
+                       } else {
+                               if (mac->opmode == NL80211_IFTYPE_AP) {
+                                       entry_id = rtl_cam_get_free_entry(hw,
+                                                               p_macaddr);
+                                       if (entry_id >= TOTAL_CAM_ENTRY) {
+                                               RT_TRACE(rtlpriv, COMP_SEC,
+                                                        DBG_EMERG,
+                                                        "Can not find free hw security cam entry\n");
+                                               return;
+                                       }
+                               } else {
+                                       entry_id = CAM_PAIRWISE_KEY_POSITION;
+                               }
+                               key_index = PAIRWISE_KEYIDX;
+                               is_pairwise = true;
+                       }
+               }
+               if (rtlpriv->sec.key_len[key_index] == 0) {
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                "delete one entry, entry_id is %d\n",
+                                entry_id);
+                       if (mac->opmode == NL80211_IFTYPE_AP)
+                               rtl_cam_del_entry(hw, p_macaddr);
+                       rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+               } else {
+                       RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                "add one entry\n");
+                       if (is_pairwise) {
+                               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                        "set Pairwise key\n");
+
+                               rtl_cam_add_one_entry(hw, macaddr, key_index,
+                                                     entry_id, enc_algo,
+                                                     CAM_CONFIG_NO_USEDK,
+                                                     rtlpriv->sec.key_buf[key_index]);
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                                        "set group key\n");
+
+                               if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+                                       rtl_cam_add_one_entry(hw,
+                                               rtlefuse->dev_addr,
+                                               PAIRWISE_KEYIDX,
+                                               CAM_PAIRWISE_KEY_POSITION,
+                                               enc_algo,
+                                               CAM_CONFIG_NO_USEDK,
+                                               rtlpriv->sec.key_buf
+                                               [entry_id]);
+                               }
+                               rtl_cam_add_one_entry(hw, macaddr, key_index,
+                                                     entry_id, enc_algo,
+                                                     CAM_CONFIG_NO_USEDK,
+                                                     rtlpriv->sec.key_buf[entry_id]);
+                       }
+               }
+       }
+}
+
+void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+                                             bool auto_load_fail, u8 *hwinfo)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value;
+       u32 tmpu_32;
+
+       if (!auto_load_fail) {
+               tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
+               if (tmpu_32 & BIT(18))
+                       rtlpriv->btcoexist.btc_info.btcoexist = 1;
+               else
+                       rtlpriv->btcoexist.btc_info.btcoexist = 0;
+               value = hwinfo[RF_OPTION4];
+               rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+               rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+       } else {
+               rtlpriv->btcoexist.btc_info.btcoexist = 0;
+               rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
+               rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+       }
+}
+
+void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       /* 0:Low, 1:High, 2:From Efuse. */
+       rtlpriv->btcoexist.reg_bt_iso = 2;
+       /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
+       rtlpriv->btcoexist.reg_bt_sco = 3;
+       /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
+       rtlpriv->btcoexist.reg_bt_sco = 0;
+}
+
+void rtl8723be_bt_hw_init(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->cfg->ops->get_btc_status())
+               rtlpriv->btcoexist.btc_ops->btc_init_hw_config(rtlpriv);
+}
+
+void rtl8723be_suspend(struct ieee80211_hw *hw)
+{
+}
+
+void rtl8723be_resume(struct ieee80211_hw *hw)
+{
+}
+
+/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
+void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
+                                 bool write_into_reg)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       if (allow_all_da) /* Set BIT0 */
+               rtlpci->receive_config |= RCR_AAP;
+       else /* Clear BIT0 */
+               rtlpci->receive_config &= ~RCR_AAP;
+
+       if (write_into_reg)
+               rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
+
+       RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+                "receive_config = 0x%08X, write_into_reg =%d\n",
+                rtlpci->receive_config, write_into_reg);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/rtlwifi/rtl8723be/hw.h
new file mode 100644 (file)
index 0000000..b7449a9
--- /dev/null
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_HW_H__
+#define __RTL8723BE_HW_H__
+
+void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw);
+
+void rtl8723be_interrupt_recognized(struct ieee80211_hw *hw,
+                                   u32 *p_inta, u32 *p_intb);
+int rtl8723be_hw_init(struct ieee80211_hw *hw);
+void rtl8723be_card_disable(struct ieee80211_hw *hw);
+void rtl8723be_enable_interrupt(struct ieee80211_hw *hw);
+void rtl8723be_disable_interrupt(struct ieee80211_hw *hw);
+int rtl8723be_set_network_type(struct ieee80211_hw *hw,
+                              enum nl80211_iftype type);
+void rtl8723be_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl8723be_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
+                                    u32 add_msr, u32 rm_msr);
+void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl8723be_update_hal_rate_tbl(struct ieee80211_hw *hw,
+                                  struct ieee80211_sta *sta,
+                                  u8 rssi_level);
+void rtl8723be_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
+                      u8 *p_macaddr, bool is_group, u8 enc_algo,
+                      bool is_wepkey, bool clear_all);
+void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+                                             bool autoload_fail, u8 *hwinfo);
+void rtl8723be_bt_reg_init(struct ieee80211_hw *hw);
+void rtl8723be_bt_hw_init(struct ieee80211_hw *hw);
+void rtl8723be_suspend(struct ieee80211_hw *hw);
+void rtl8723be_resume(struct ieee80211_hw *hw);
+void rtl8723be_allow_all_destaddr(struct ieee80211_hw *hw, bool allow_all_da,
+                                 bool write_into_reg);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/rtlwifi/rtl8723be/led.c
new file mode 100644 (file)
index 0000000..cb931a3
--- /dev/null
@@ -0,0 +1,153 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "reg.h"
+#include "led.h"
+
+static void _rtl8723be_init_led(struct ieee80211_hw *hw,  struct rtl_led *pled,
+                               enum rtl_led_pin ledpin)
+{
+       pled->hw = hw;
+       pled->ledpin = ledpin;
+       pled->ledon = false;
+}
+
+void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+       u8 ledcfg;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+                "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
+
+       switch (pled->ledpin) {
+       case LED_PIN_GPIO0:
+               break;
+       case LED_PIN_LED0:
+               ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+               ledcfg &= ~BIT(6);
+               rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5));
+               break;
+       case LED_PIN_LED1:
+               ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+               rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not processed\n");
+               break;
+       }
+       pled->ledon = true;
+}
+
+void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       u8 ledcfg;
+
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
+                "LedAddr:%X ledpin =%d\n", REG_LEDCFG2, pled->ledpin);
+
+       ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
+
+       switch (pled->ledpin) {
+       case LED_PIN_GPIO0:
+               break;
+       case LED_PIN_LED0:
+               ledcfg &= 0xf0;
+               if (pcipriv->ledctl.led_opendrain) {
+                       ledcfg &= 0x90; /* Set to software control. */
+                       rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
+                       ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
+                       ledcfg &= 0xFE;
+                       rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, ledcfg);
+               } else {
+                       ledcfg &= ~BIT(6);
+                       rtl_write_byte(rtlpriv, REG_LEDCFG2,
+                                      (ledcfg | BIT(3) | BIT(5)));
+               }
+               break;
+       case LED_PIN_LED1:
+               ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG1);
+               ledcfg &= 0x10; /* Set to software control. */
+               rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
+
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not processed\n");
+               break;
+       }
+       pled->ledon = false;
+}
+
+void rtl8723be_init_sw_leds(struct ieee80211_hw *hw)
+{
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
+       _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+}
+
+static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw,
+                                     enum led_ctl_mode ledaction)
+{
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+       switch (ledaction) {
+       case LED_CTL_POWER_ON:
+       case LED_CTL_LINK:
+       case LED_CTL_NO_LINK:
+               rtl8723be_sw_led_on(hw, pled0);
+               break;
+       case LED_CTL_POWER_OFF:
+               rtl8723be_sw_led_off(hw, pled0);
+               break;
+       default:
+               break;
+       }
+}
+
+void rtl8723be_led_control(struct ieee80211_hw *hw,
+                          enum led_ctl_mode ledaction)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
+           (ledaction == LED_CTL_TX ||
+            ledaction == LED_CTL_RX ||
+            ledaction == LED_CTL_SITE_SURVEY ||
+            ledaction == LED_CTL_LINK ||
+            ledaction == LED_CTL_NO_LINK ||
+            ledaction == LED_CTL_START_TO_LINK ||
+            ledaction == LED_CTL_POWER_ON)) {
+               return;
+       }
+       RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
+       _rtl8723be_sw_led_control(hw, ledaction);
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/rtlwifi/rtl8723be/led.h
new file mode 100644 (file)
index 0000000..c57de37
--- /dev/null
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_LED_H__
+#define __RTL8723BE_LED_H__
+
+void rtl8723be_init_sw_leds(struct ieee80211_hw *hw);
+void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
+void rtl8723be_led_control(struct ieee80211_hw *hw,
+                          enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/rtlwifi/rtl8723be/phy.c
new file mode 100644 (file)
index 0000000..1575ef9
--- /dev/null
@@ -0,0 +1,2156 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../ps.h"
+#include "../core.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "rf.h"
+#include "dm.h"
+#include "table.h"
+#include "trx.h"
+
+static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw);
+static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                      u8 configtype);
+static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
+                                             u8 channel, u8 *stage,
+                                             u8 *step, u32 *delay);
+static bool _rtl8723be_check_condition(struct ieee80211_hw *hw,
+                                      const u32  condition)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u32 _board = rtlefuse->board_type; /*need efuse define*/
+       u32 _interface = rtlhal->interface;
+       u32 _platform = 0x08;/*SupportPlatform */
+       u32 cond = condition;
+
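+       /* A condition of 0xCDCDCDCD always matches; otherwise bits 0-7 carry
+        * a board mask, bits 8-15 an interface mask and bits 16-23 a platform
+        * mask that must match this adapter (0x1F, 0x07 and 0x0F are
+        * wildcards).
+        */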
+       if (condition == 0xCDCDCDCD)
+               return true;
+
+       cond = condition & 0xFF;
+       if ((_board & cond) == 0 && cond != 0x1F)
+               return false;
+
+       cond = condition & 0xFF00;
+       cond = cond >> 8;
+       if ((_interface & cond) == 0 && cond != 0x07)
+               return false;
+
+       cond = condition & 0xFF0000;
+       cond = cond >> 16;
+       if ((_platform & cond) == 0 && cond != 0x0F)
+               return false;
+       return true;
+}
+
+static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+       u32 arraylength;
+       u32 *ptrarray;
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
+       arraylength = RTL8723BEMAC_1T_ARRAYLEN;
+       ptrarray = RTL8723BEMAC_1T_ARRAY;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength);
+       for (i = 0; i < arraylength; i = i + 2)
+               rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
+       return true;
+}
+
+static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
+                                                    u8 configtype)
+{
+       #define READ_NEXT_PAIR(v1, v2, i) \
+               do { \
+                       i += 2; \
+                       v1 = array_table[i];\
+                       v2 = array_table[i+1]; \
+               } while (0)
+
+       int i;
+       u32 *array_table;
+       u16 arraylen;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 v1 = 0, v2 = 0;
+
+       if (configtype == BASEBAND_CONFIG_PHY_REG) {
+               arraylen = RTL8723BEPHY_REG_1TARRAYLEN;
+               array_table = RTL8723BEPHY_REG_1TARRAY;
+
+               for (i = 0; i < arraylen; i = i + 2) {
+                       v1 = array_table[i];
+                       v2 = array_table[i+1];
+                       if (v1 < 0xcdcdcdcd) {
+                               rtl_bb_delay(hw, v1, v2);
+                       } else {/*This line is the start line of branch.*/
+                               if (!_rtl8723be_check_condition(hw, array_table[i])) {
+                                       /*Discard the following (offset, data) pairs*/
+                                       READ_NEXT_PAIR(v1, v2, i);
+                                       while (v2 != 0xDEAD &&
+                                              v2 != 0xCDEF &&
+                                              v2 != 0xCDCD &&
+                                              i < arraylen - 2) {
+                                               READ_NEXT_PAIR(v1, v2, i);
+                                       }
+                                       i -= 2; /* prevent from for-loop += 2*/
+                               /* Configure matched pairs and
+                                * skip to end of if-else.
+                                */
+                               } else {
+                                       READ_NEXT_PAIR(v1, v2, i);
+                                       while (v2 != 0xDEAD &&
+                                              v2 != 0xCDEF &&
+                                              v2 != 0xCDCD &&
+                                              i < arraylen - 2) {
+                                               rtl_bb_delay(hw, v1, v2);
+                                               READ_NEXT_PAIR(v1, v2, i);
+                                       }
+
+                                       while (v2 != 0xDEAD && i < arraylen - 2)
+                                               READ_NEXT_PAIR(v1, v2, i);
+                               }
+                       }
+               }
+       } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+               arraylen = RTL8723BEAGCTAB_1TARRAYLEN;
+               array_table = RTL8723BEAGCTAB_1TARRAY;
+
+               for (i = 0; i < arraylen; i = i + 2) {
+                       v1 = array_table[i];
+                       v2 = array_table[i+1];
+                       if (v1 < 0xCDCDCDCD) {
+                               rtl_set_bbreg(hw, array_table[i],
+                                             MASKDWORD,
+                                             array_table[i + 1]);
+                               udelay(1);
+                               continue;
+                       } else {/*This line is the start line of branch.*/
+                               if (!_rtl8723be_check_condition(hw, array_table[i])) {
+                                       /* Discard the following
+                                        * (offset, data) pairs
+                                        */
+                                       READ_NEXT_PAIR(v1, v2, i);
+                                       while (v2 != 0xDEAD &&
+                                              v2 != 0xCDEF &&
+                                              v2 != 0xCDCD &&
+                                              i < arraylen - 2) {
+                                               READ_NEXT_PAIR(v1, v2, i);
+                                       }
+                                       i -= 2; /* prevent from for-loop += 2*/
+                               /*Configure matched pairs and
+                                *skip to end of if-else.
+                                */
+                               } else {
+                                       READ_NEXT_PAIR(v1, v2, i);
+                                       while (v2 != 0xDEAD &&
+                                              v2 != 0xCDEF &&
+                                              v2 != 0xCDCD &&
+                                              i < arraylen - 2) {
+                                               rtl_set_bbreg(hw, array_table[i],
+                                                             MASKDWORD,
+                                                             array_table[i + 1]);
+                                               udelay(1);
+                                               READ_NEXT_PAIR(v1, v2, i);
+                                       }
+
+                                       while (v2 != 0xDEAD && i < arraylen - 2)
+                                               READ_NEXT_PAIR(v1, v2, i);
+                               }
+                       }
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                "agctab_array_table[%d] is %x, agctab_array_table[%d] is %x\n",
+                                i, array_table[i], i + 1, array_table[i + 1]);
+               }
+       }
+       return true;
+}
+
+static u8 _rtl8723be_get_rate_section_index(u32 regaddr)
+{
+       u8 index = 0;
+
+       switch (regaddr) {
+       case RTXAGC_A_RATE18_06:
+       case RTXAGC_B_RATE18_06:
+               index = 0;
+               break;
+       case RTXAGC_A_RATE54_24:
+       case RTXAGC_B_RATE54_24:
+               index = 1;
+               break;
+       case RTXAGC_A_CCK1_MCS32:
+       case RTXAGC_B_CCK1_55_MCS32:
+               index = 2;
+               break;
+       case RTXAGC_B_CCK11_A_CCK2_11:
+               index = 3;
+               break;
+       case RTXAGC_A_MCS03_MCS00:
+       case RTXAGC_B_MCS03_MCS00:
+               index = 4;
+               break;
+       case RTXAGC_A_MCS07_MCS04:
+       case RTXAGC_B_MCS07_MCS04:
+               index = 5;
+               break;
+       case RTXAGC_A_MCS11_MCS08:
+       case RTXAGC_B_MCS11_MCS08:
+               index = 6;
+               break;
+       case RTXAGC_A_MCS15_MCS12:
+       case RTXAGC_B_MCS15_MCS12:
+               index = 7;
+               break;
+       default:
+               regaddr &= 0xFFF;
+               if (regaddr >= 0xC20 && regaddr <= 0xC4C)
+                       index = (u8) ((regaddr - 0xC20) / 4);
+               else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
+                       index = (u8) ((regaddr - 0xE20) / 4);
+               break;
+       }
+       return index;
+}
+
+u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                              u32 regaddr, u32 bitmask)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 original_value, readback_value, bitshift;
+       unsigned long flags;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+                 regaddr, rfpath, bitmask);
+
+       spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+       original_value = rtl8723_phy_rf_serial_read(hw, rfpath, regaddr);
+       bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+       readback_value = (original_value & bitmask) >> bitshift;
+
+       spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+                regaddr, rfpath, bitmask, original_value);
+
+       return readback_value;
+}
+
+void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
+                             u32 regaddr, u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 original_value, bitshift;
+       unsigned long flags;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+                 regaddr, bitmask, data, path);
+
+       spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
+
+       if (bitmask != RFREG_OFFSET_MASK) {
+               original_value = rtl8723_phy_rf_serial_read(hw, path,
+                                                           regaddr);
+               bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+               data = ((original_value & (~bitmask)) |
+                       (data << bitshift));
+       }
+
+       rtl8723_phy_rf_serial_write(hw, path, regaddr, data);
+
+       spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+                 regaddr, bitmask, data, path);
+}
+
+bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       bool rtstatus = _rtl8723be_phy_config_mac_with_headerfile(hw);
+
+       rtl_write_byte(rtlpriv, 0x04CA, 0x0B);
+       return rtstatus;
+}
+
+bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw)
+{
+       bool rtstatus = true;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u16 regval;
+       u8 reg_hwparafile = 1;
+       u32 tmp;
+       u8 crystalcap = rtlpriv->efuse.crystalcap;
+       rtl8723_phy_init_bb_rf_reg_def(hw);
+       regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+       rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+                      regval | BIT(13) | BIT(0) | BIT(1));
+
+       rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
+                      FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
+                      FEN_BB_GLB_RSTN | FEN_BBRSTB);
+       tmp = rtl_read_dword(rtlpriv, 0x4c);
+       rtl_write_dword(rtlpriv, 0x4c, tmp | BIT(23));
+
+       rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+
+       if (reg_hwparafile == 1)
+               rtstatus = _rtl8723be_phy_bb8723b_config_parafile(hw);
+
+       crystalcap = crystalcap & 0x3F;
+       rtl_set_bbreg(hw, REG_MAC_PHY_CTRL, 0xFFF000,
+                     (crystalcap | crystalcap << 6));
+
+       return rtstatus;
+}
+
+bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw)
+{
+       return rtl8723be_phy_rf6052_config(hw);
+}
+
+static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr,
+                                    u32 data, enum radio_path rfpath,
+                                    u32 regaddr)
+{
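+       /* Addresses 0xfe and 0xffe in the radio init table are delay markers
+        * rather than real RF registers.
+        */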
+       if (addr == 0xfe || addr == 0xffe) {
+               mdelay(50);
+       } else {
+               rtl_set_rfreg(hw, rfpath, regaddr, RFREG_OFFSET_MASK, data);
+               udelay(1);
+       }
+}
+
+static void _rtl8723be_config_rf_radio_a(struct ieee80211_hw *hw,
+                                        u32 addr, u32 data)
+{
+       u32 content = 0x1000; /*RF Content: radio_a_txt*/
+       u32 maskforphyset = (u32)(content & 0xE000);
+
+       _rtl8723be_config_rf_reg(hw, addr, data, RF90_PATH_A,
+                                addr | maskforphyset);
+}
+
+static void _rtl8723be_phy_init_tx_power_by_rate(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       u8 band, path, txnum, section;
+
+       for (band = BAND_ON_2_4G; band <= BAND_ON_5G; ++band)
+               for (path = 0; path < TX_PWR_BY_RATE_NUM_RF; ++path)
+                       for (txnum = 0; txnum < TX_PWR_BY_RATE_NUM_RF; ++txnum)
+                               for (section = 0;
+                                    section < TX_PWR_BY_RATE_NUM_SECTION;
+                                    ++section)
+                                       rtlphy->tx_power_by_rate_offset[band]
+                                               [path][txnum][section] = 0;
+}
+
+static void phy_set_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band,
+                                      u8 path, u8 rate_section,
+                                      u8 txnum, u8 value)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (path > RF90_PATH_D) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "Invalid Rf Path %d in phy_SetTxPowerByRateBase()\n",
+                        path);
+               return;
+       }
+
+       if (band == BAND_ON_2_4G) {
+               switch (rate_section) {
+               case CCK:
+                       rtlphy->txpwr_by_rate_base_24g[path][txnum][0] = value;
+                       break;
+               case OFDM:
+                       rtlphy->txpwr_by_rate_base_24g[path][txnum][1] = value;
+                       break;
+               case HT_MCS0_MCS7:
+                       rtlphy->txpwr_by_rate_base_24g[path][txnum][2] = value;
+                       break;
+               case HT_MCS8_MCS15:
+                       rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "Invalid RateSection %d in Band 2.4G, Rf Path"
+                                 " %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+                                 rate_section, path, txnum);
+                       break;
+               }
+       } else {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
+                         band);
+       }
+}
+
+static u8 phy_get_txpwr_by_rate_base(struct ieee80211_hw *hw, u8 band, u8 path,
+                                    u8 txnum, u8 rate_section)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 value = 0;
+       if (path > RF90_PATH_D) {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+                         path);
+               return 0;
+       }
+
+       if (band == BAND_ON_2_4G) {
+               switch (rate_section) {
+               case CCK:
+                       value = rtlphy->txpwr_by_rate_base_24g[path][txnum][0];
+                       break;
+               case OFDM:
+                       value = rtlphy->txpwr_by_rate_base_24g[path][txnum][1];
+                       break;
+               case HT_MCS0_MCS7:
+                       value = rtlphy->txpwr_by_rate_base_24g[path][txnum][2];
+                       break;
+               case HT_MCS8_MCS15:
+                       value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "Invalid RateSection %d in Band 2.4G, Rf Path"
+                                 " %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+                                 rate_section, path, txnum);
+                       break;
+               }
+       } else {
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
+                         band);
+       }
+
+       return value;
+}
+
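+/* Pull the BCD-coded CCK, OFDM, MCS0-7 and MCS8-15 base values out of
+ * the raw 2.4G tx_power_by_rate_offset table and cache them per path
+ * and TX number through phy_set_txpwr_by_rate_base().
+ */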
+static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u16 raw_value = 0;
+       u8 base = 0, path = 0;
+
+       for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) {
+               if (path == RF90_PATH_A) {
+                       raw_value = (u16) (rtlphy->tx_power_by_rate_offset
+                               [BAND_ON_2_4G][path][RF_1TX][3] >> 24) & 0xFF;
+                       base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+                       phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, CCK,
+                                                  RF_1TX, base);
+               } else if (path == RF90_PATH_B) {
+                       raw_value = (u16) (rtlphy->tx_power_by_rate_offset
+                               [BAND_ON_2_4G][path][RF_1TX][3] >> 0) & 0xFF;
+                       base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+                       phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
+                                                  CCK, RF_1TX, base);
+               }
+               raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                                         [path][RF_1TX][1] >> 24) & 0xFF;
+               base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+               phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX,
+                                          base);
+
+               raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                                         [path][RF_1TX][5] >> 24) & 0xFF;
+               base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+               phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7,
+                                          RF_1TX, base);
+
+               raw_value = (u16) (rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                                         [path][RF_2TX][7] >> 24) & 0xFF;
+               base = (raw_value >> 4) * 10 + (raw_value & 0xF);
+               phy_set_txpwr_by_rate_base(hw, BAND_ON_2_4G, path,
+                                          HT_MCS8_MCS15, RF_2TX, base);
+       }
+}
+
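+/* Convert the BCD-coded bytes start..end packed in *data from absolute
+ * values into their unsigned difference from base_val; bytes outside
+ * that range are passed through unchanged.
+ */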
+static void phy_conv_dbm_to_rel(u32 *data, u8 start, u8 end, u8 base_val)
+{
+       s8 i; /* must be signed: the loop below exits when i drops below 0 */
+       u8 temp_value = 0;
+       u32 temp_data = 0;
+
+       for (i = 3; i >= 0; --i) {
+               if (i >= start && i <= end) {
+                       /* Get the exact value */
+                       temp_value = (u8) (*data >> (i * 8)) & 0xF;
+                       temp_value += ((u8) ((*data >> (i*8 + 4)) & 0xF)) * 10;
+
+                       /* Change the value to a relative value */
+                       temp_value = (temp_value > base_val) ?
+                                    temp_value - base_val :
+                                    base_val - temp_value;
+               } else {
+                       temp_value = (u8) (*data >> (i * 8)) & 0xFF;
+               }
+               temp_data <<= 8;
+               temp_data |= temp_value;
+       }
+       *data = temp_data;
+}
+
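+/* Rewrite the whole 2.4G by-rate table from absolute values into
+ * differences from the per-section base cached above: CCK and OFDM
+ * and MCS0-7 with 1TX, MCS8-15 with 2TX.
+ */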
+static void conv_dbm_to_rel(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 base = 0, rfpath = RF90_PATH_A;
+
+       base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+                                         RF_1TX, CCK);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_1TX][2]), 1, 1, base);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_1TX][3]), 1, 3, base);
+
+       base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+                                         RF_1TX, OFDM);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_1TX][0]), 0, 3, base);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_1TX][1]), 0, 3, base);
+
+       base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+                                         RF_1TX, HT_MCS0_MCS7);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_1TX][4]), 0, 3, base);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_1TX][5]), 0, 3, base);
+
+       base = phy_get_txpwr_by_rate_base(hw, BAND_ON_2_4G, rfpath,
+                                         RF_2TX, HT_MCS8_MCS15);
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_2TX][6]), 0, 3, base);
+
+       phy_conv_dbm_to_rel(&(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G]
+                           [rfpath][RF_2TX][7]), 0, 3, base);
+
+       RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+                "<=== conv_dbm_to_rel()\n");
+}
+
+static void _rtl8723be_phy_txpower_by_rate_configuration(
+                                                       struct ieee80211_hw *hw)
+{
+       _rtl8723be_phy_store_txpower_by_rate_base(hw);
+       conv_dbm_to_rel(hw);
+}
+
+static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       bool rtstatus;
+
+       rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
+                                               BASEBAND_CONFIG_PHY_REG);
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
+               return false;
+       }
+       _rtl8723be_phy_init_tx_power_by_rate(hw);
+       if (!rtlefuse->autoload_failflag) {
+               rtlphy->pwrgroup_cnt = 0;
+               rtstatus = _rtl8723be_phy_config_bb_with_pgheaderfile(hw,
+                                               BASEBAND_CONFIG_PHY_REG);
+       }
+       _rtl8723be_phy_txpower_by_rate_configuration(hw);
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
+               return false;
+       }
+       rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
+                                               BASEBAND_CONFIG_AGC_TAB);
+       if (!rtstatus) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               return false;
+       }
+       rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
+                                                      RFPGA0_XA_HSSIPARAMETER2,
+                                                      0x200));
+       return true;
+}
+
+static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw,
+                                             u32 band, u32 rfpath,
+                                             u32 txnum, u32 regaddr,
+                                             u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 rate_section = _rtl8723be_get_rate_section_index(regaddr);
+
+       if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
+               RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+                        "Invalid Band %d\n", band);
+               return;
+       }
+
+       if (rfpath >= TX_PWR_BY_RATE_NUM_RF) {
+               RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+                        "Invalid RfPath %d\n", rfpath);
+               return;
+       }
+       if (txnum >= TX_PWR_BY_RATE_NUM_RF) {
+               RT_TRACE(rtlpriv, COMP_POWER, PHY_TXPWR,
+                        "Invalid TxNum %d\n", txnum);
+               return;
+       }
+       rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] =
+                                                                       data;
+}
+
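+/* Walk the PHY_REG_PG table six words at a time (band, path, txnum,
+ * regaddr, bitmask, data).  Entries whose first word is below
+ * 0xcdcdcdcd are stored as per-rate power data (0xfe/0xffe again mean
+ * "delay"); anything at or above 0xcdcdcdcd opens a conditional block
+ * which is skipped past its 0xDEAD terminator when
+ * _rtl8723be_check_condition() rejects it.
+ */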
+static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
+                                                      u8 configtype)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int i;
+       u32 *phy_regarray_table_pg;
+       u16 phy_regarray_pg_len;
+       u32 v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;
+
+       phy_regarray_pg_len = RTL8723BEPHY_REG_ARRAY_PGLEN;
+       phy_regarray_table_pg = RTL8723BEPHY_REG_ARRAY_PG;
+
+       if (configtype == BASEBAND_CONFIG_PHY_REG) {
+               for (i = 0; i < phy_regarray_pg_len; i = i + 6) {
+                       v1 = phy_regarray_table_pg[i];
+                       v2 = phy_regarray_table_pg[i+1];
+                       v3 = phy_regarray_table_pg[i+2];
+                       v4 = phy_regarray_table_pg[i+3];
+                       v5 = phy_regarray_table_pg[i+4];
+                       v6 = phy_regarray_table_pg[i+5];
+
+                       if (v1 < 0xcdcdcdcd) {
+                               if (phy_regarray_table_pg[i] == 0xfe ||
+                                   phy_regarray_table_pg[i] == 0xffe)
+                                       mdelay(50);
+                               else
+                                       _rtl8723be_store_tx_power_by_rate(hw,
+                                                       v1, v2, v3, v4, v5, v6);
+                               continue;
+                       } else {
+                               /*don't need the hw_body*/
+                               if (!_rtl8723be_check_condition(hw,
+                                               phy_regarray_table_pg[i])) {
+                                       i += 2; /* skip the pair of expression*/
+                                       v1 = phy_regarray_table_pg[i];
+                                       v2 = phy_regarray_table_pg[i+1];
+                                       v3 = phy_regarray_table_pg[i+2];
+                                       while (v2 != 0xDEAD) {
+                                               i += 3;
+                                               v1 = phy_regarray_table_pg[i];
+                                               v2 = phy_regarray_table_pg[i+1];
+                                               v3 = phy_regarray_table_pg[i+2];
+                                       }
+                               }
+                       }
+               }
+       } else {
+               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                        "configtype != BaseBand_Config_PHY_REG\n");
+       }
+       return true;
+}
+
+bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                            enum radio_path rfpath)
+{
+       #define READ_NEXT_RF_PAIR(v1, v2, i) \
+               do { \
+                       i += 2; \
+                       v1 = radioa_array_table[i]; \
+                       v2 = radioa_array_table[i+1]; \
+               } while (0)
+
+       int i;
+       u32 *radioa_array_table;
+       u16 radioa_arraylen;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u32 v1 = 0, v2 = 0;
+
+       radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN;
+       radioa_array_table = RTL8723BE_RADIOA_1TARRAY;
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen);
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+       switch (rfpath) {
+       case RF90_PATH_A:
+               for (i = 0; i < radioa_arraylen; i = i + 2) {
+                       v1 = radioa_array_table[i];
+                       v2 = radioa_array_table[i+1];
+                       if (v1 < 0xcdcdcdcd) {
+                               _rtl8723be_config_rf_radio_a(hw, v1, v2);
+                       } else { /*This line is the start line of branch.*/
+                               if (!_rtl8723be_check_condition(hw,
+                                               radioa_array_table[i])) {
+                                       /* Discard the following
+                                        * (offset, data) pairs
+                                        */
+                                       READ_NEXT_RF_PAIR(v1, v2, i);
+                                       while (v2 != 0xDEAD &&
+                                              v2 != 0xCDEF &&
+                                              v2 != 0xCDCD &&
+                                              i < radioa_arraylen - 2)
+                                               READ_NEXT_RF_PAIR(v1, v2, i);
+                                       i -= 2; /* prevent from for-loop += 2*/
+                               } else {
+                                       /* Configure matched pairs
+                                        * and skip to end of if-else.
+                                        */
+                                       READ_NEXT_RF_PAIR(v1, v2, i);
+                                       while (v2 != 0xDEAD &&
+                                              v2 != 0xCDEF &&
+                                              v2 != 0xCDCD &&
+                                              i < radioa_arraylen - 2) {
+                                               _rtl8723be_config_rf_radio_a(hw,
+                                                                       v1, v2);
+                                               READ_NEXT_RF_PAIR(v1, v2, i);
+                                       }
+
+                                       while (v2 != 0xDEAD &&
+                                              i < radioa_arraylen - 2) {
+                                               READ_NEXT_RF_PAIR(v1, v2, i);
+                                       }
+                               }
+                       }
+               }
+
+               if (rtlhal->oem_id == RT_CID_819X_HP)
+                       _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD);
+
+               break;
+       case RF90_PATH_B:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not process\n");
+               break;
+       case RF90_PATH_C:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not process\n");
+               break;
+       case RF90_PATH_D:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not process\n");
+               break;
+       }
+       return true;
+}
+
+void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->default_initialgain[0] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[1] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[2] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+       rtlphy->default_initialgain[3] =
+           (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                "Default initial gain (c50 = 0x%x, "
+                 "c58 = 0x%x, c60 = 0x%x, c68 = 0x%x)\n",
+                 rtlphy->default_initialgain[0],
+                 rtlphy->default_initialgain[1],
+                 rtlphy->default_initialgain[2],
+                 rtlphy->default_initialgain[3]);
+
+       rtlphy->framesync = (u8) rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
+                                              MASKBYTE0);
+       rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
+                                             MASKDWORD);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                "Default framesync (0x%x) = 0x%x\n",
+                 ROFDM0_RXDETECTOR3, rtlphy->framesync);
+}
+
+void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 txpwr_level;
+       long txpwr_dbm;
+
+       txpwr_level = rtlphy->cur_cck_txpwridx;
+       txpwr_dbm = rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B,
+                                                txpwr_level);
+       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+       if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) >
+           txpwr_dbm)
+               txpwr_dbm =
+                   rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
+                                                txpwr_level);
+       txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
+       if (rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+                                        txpwr_level) > txpwr_dbm)
+               txpwr_dbm =
+                   rtl8723_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
+                                                txpwr_level);
+       *powerlevel = txpwr_dbm;
+}
+
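+/* Map a descriptor rate to the section index used as the last
+ * dimension of tx_power_by_rate_offset[]: 0/1 for the low/high OFDM
+ * rates, 2/3 for the CCK rates (split differently on path A and B),
+ * and 4..7 for MCS0-15 in groups of four.
+ */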
+static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
+                                                         u8 rate)
+{
+       u8 rate_section = 0;
+
+       switch (rate) {
+       case DESC92C_RATE1M:
+               rate_section = 2;
+               break;
+       case DESC92C_RATE2M:
+       case DESC92C_RATE5_5M:
+               if (path == RF90_PATH_A)
+                       rate_section = 3;
+               else if (path == RF90_PATH_B)
+                       rate_section = 2;
+               break;
+       case DESC92C_RATE11M:
+               rate_section = 3;
+               break;
+       case DESC92C_RATE6M:
+       case DESC92C_RATE9M:
+       case DESC92C_RATE12M:
+       case DESC92C_RATE18M:
+               rate_section = 0;
+               break;
+       case DESC92C_RATE24M:
+       case DESC92C_RATE36M:
+       case DESC92C_RATE48M:
+       case DESC92C_RATE54M:
+               rate_section = 1;
+               break;
+       case DESC92C_RATEMCS0:
+       case DESC92C_RATEMCS1:
+       case DESC92C_RATEMCS2:
+       case DESC92C_RATEMCS3:
+               rate_section = 4;
+               break;
+       case DESC92C_RATEMCS4:
+       case DESC92C_RATEMCS5:
+       case DESC92C_RATEMCS6:
+       case DESC92C_RATEMCS7:
+               rate_section = 5;
+               break;
+       case DESC92C_RATEMCS8:
+       case DESC92C_RATEMCS9:
+       case DESC92C_RATEMCS10:
+       case DESC92C_RATEMCS11:
+               rate_section = 6;
+               break;
+       case DESC92C_RATEMCS12:
+       case DESC92C_RATEMCS13:
+       case DESC92C_RATEMCS14:
+       case DESC92C_RATEMCS15:
+               rate_section = 7;
+               break;
+       default:
+               RT_ASSERT(false, "Rate_Section is Illegal\n");
+               break;
+       }
+       return rate_section;
+}
+
+static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
+                                        enum band_type band,
+                                        enum radio_path rfpath, u8 rate)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u8 shift = 0, rate_section, tx_num;
+       s8 tx_pwr_diff = 0;
+
+       rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath,
+                                                                      rate);
+       tx_num = RF_TX_NUM_NONIMPLEMENT;
+
+       if (tx_num == RF_TX_NUM_NONIMPLEMENT) {
+               if (rate >= DESC92C_RATEMCS8 && rate <= DESC92C_RATEMCS15)
+                       tx_num = RF_2TX;
+               else
+                       tx_num = RF_1TX;
+       }
+
+       switch (rate) {
+       case DESC92C_RATE6M:
+       case DESC92C_RATE24M:
+       case DESC92C_RATEMCS0:
+       case DESC92C_RATEMCS4:
+       case DESC92C_RATEMCS8:
+       case DESC92C_RATEMCS12:
+               shift = 0;
+               break;
+       case DESC92C_RATE1M:
+       case DESC92C_RATE2M:
+       case DESC92C_RATE9M:
+       case DESC92C_RATE36M:
+       case DESC92C_RATEMCS1:
+       case DESC92C_RATEMCS5:
+       case DESC92C_RATEMCS9:
+       case DESC92C_RATEMCS13:
+               shift = 8;
+               break;
+       case DESC92C_RATE5_5M:
+       case DESC92C_RATE12M:
+       case DESC92C_RATE48M:
+       case DESC92C_RATEMCS2:
+       case DESC92C_RATEMCS6:
+       case DESC92C_RATEMCS10:
+       case DESC92C_RATEMCS14:
+               shift = 16;
+               break;
+       case DESC92C_RATE11M:
+       case DESC92C_RATE18M:
+       case DESC92C_RATE54M:
+       case DESC92C_RATEMCS3:
+       case DESC92C_RATEMCS7:
+       case DESC92C_RATEMCS11:
+       case DESC92C_RATEMCS15:
+               shift = 24;
+               break;
+       default:
+               RT_ASSERT(false, "Rate_Section is Illegal\n");
+               break;
+       }
+       tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num]
+                                         [rate_section] >> shift) & 0xff;
+
+       return  tx_pwr_diff;
+}
+
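+/* Compute the final power index for one rate on one path: start from
+ * the per-channel CCK or HT40-1S efuse level, add the legacy-OFDM and
+ * HT20/HT40 diffs, then the per-rate diff (unless eeprom_regulatory
+ * is 2), and clamp the result to MAX_POWER_INDEX.
+ */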
+static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
+                                      u8 rate, u8 bandwidth, u8 channel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 index = (channel - 1);
+       u8 txpower = 0;
+       u8 power_diff_byrate = 0;
+
+       if (channel > 14 || channel < 1) {
+               index = 0;
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "Illegal channel!\n");
+       }
+       if (RTL8723E_RX_HAL_IS_CCK_RATE(rate))
+               txpower = rtlefuse->txpwrlevel_cck[path][index];
+       else if (DESC92C_RATE6M <= rate)
+               txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
+       else
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "invalid rate\n");
+
+       if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
+           !RTL8723E_RX_HAL_IS_CCK_RATE(rate))
+               txpower += rtlefuse->txpwr_legacyhtdiff[0][TX_1S];
+
+       if (bandwidth == HT_CHANNEL_WIDTH_20) {
+               if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
+                       txpower += rtlefuse->txpwr_ht20diff[0][TX_1S];
+               if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
+                       txpower += rtlefuse->txpwr_ht20diff[0][TX_2S];
+       } else if (bandwidth == HT_CHANNEL_WIDTH_20_40) {
+               if (DESC92C_RATEMCS0 <= rate && rate <= DESC92C_RATEMCS15)
+                       txpower += rtlefuse->txpwr_ht40diff[0][TX_1S];
+               if (DESC92C_RATEMCS8 <= rate && rate <= DESC92C_RATEMCS15)
+                       txpower += rtlefuse->txpwr_ht40diff[0][TX_2S];
+       }
+       if (rtlefuse->eeprom_regulatory != 2)
+               power_diff_byrate = _rtl8723be_get_txpower_by_rate(hw,
+                                                                  BAND_ON_2_4G,
+                                                                  path, rate);
+
+       txpower += power_diff_byrate;
+
+       if (txpower > MAX_POWER_INDEX)
+               txpower = MAX_POWER_INDEX;
+
+       return txpower;
+}
+
+static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
+                                            u8 power_index, u8 path, u8 rate)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       if (path == RF90_PATH_A) {
+               switch (rate) {
+               case DESC92C_RATE1M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_CCK1_MCS32,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATE2M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATE5_5M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+                                              MASKBYTE2, power_index);
+                       break;
+               case DESC92C_RATE11M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_B_CCK11_A_CCK2_11,
+                                              MASKBYTE3, power_index);
+                       break;
+               case DESC92C_RATE6M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+                                              MASKBYTE0, power_index);
+                       break;
+               case DESC92C_RATE9M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATE12M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+                                              MASKBYTE2, power_index);
+                       break;
+               case DESC92C_RATE18M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE18_06,
+                                              MASKBYTE3, power_index);
+                       break;
+               case DESC92C_RATE24M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+                                              MASKBYTE0, power_index);
+                       break;
+               case DESC92C_RATE36M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATE48M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+                                              MASKBYTE2, power_index);
+                       break;
+               case DESC92C_RATE54M:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_RATE54_24,
+                                              MASKBYTE3, power_index);
+                       break;
+               case DESC92C_RATEMCS0:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+                                              MASKBYTE0, power_index);
+                       break;
+               case DESC92C_RATEMCS1:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATEMCS2:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+                                              MASKBYTE2, power_index);
+                       break;
+               case DESC92C_RATEMCS3:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS03_MCS00,
+                                              MASKBYTE3, power_index);
+                       break;
+               case DESC92C_RATEMCS4:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+                                              MASKBYTE0, power_index);
+                       break;
+               case DESC92C_RATEMCS5:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATEMCS6:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+                                              MASKBYTE2, power_index);
+                       break;
+               case DESC92C_RATEMCS7:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS07_MCS04,
+                                              MASKBYTE3, power_index);
+                       break;
+               case DESC92C_RATEMCS8:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+                                              MASKBYTE0, power_index);
+                       break;
+               case DESC92C_RATEMCS9:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+                                              MASKBYTE1, power_index);
+                       break;
+               case DESC92C_RATEMCS10:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+                                              MASKBYTE2, power_index);
+                       break;
+               case DESC92C_RATEMCS11:
+                       rtl8723_phy_set_bb_reg(hw, RTXAGC_A_MCS11_MCS08,
+                                              MASKBYTE3, power_index);
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+                                "Invalid Rate!!\n");
+                       break;
+               }
+       } else {
+               RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+       }
+}
+
+void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 cck_rates[]  = {DESC92C_RATE1M, DESC92C_RATE2M,
+                          DESC92C_RATE5_5M, DESC92C_RATE11M};
+       u8 ofdm_rates[]  = {DESC92C_RATE6M, DESC92C_RATE9M,
+                           DESC92C_RATE12M, DESC92C_RATE18M,
+                           DESC92C_RATE24M, DESC92C_RATE36M,
+                           DESC92C_RATE48M, DESC92C_RATE54M};
+       u8 ht_rates_1t[]  = {DESC92C_RATEMCS0, DESC92C_RATEMCS1,
+                            DESC92C_RATEMCS2, DESC92C_RATEMCS3,
+                            DESC92C_RATEMCS4, DESC92C_RATEMCS5,
+                            DESC92C_RATEMCS6, DESC92C_RATEMCS7};
+       u8 i, size;
+       u8 power_index;
+
+       if (!rtlefuse->txpwr_fromeprom)
+               return;
+
+       size = ARRAY_SIZE(cck_rates);
+       for (i = 0; i < size; i++) {
+               power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+                                       cck_rates[i],
+                                       rtl_priv(hw)->phy.current_chan_bw,
+                                       channel);
+               _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+                                                cck_rates[i]);
+       }
+       size = ARRAY_SIZE(ofdm_rates);
+       for (i = 0; i < size; i++) {
+               power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+                                       ofdm_rates[i],
+                                       rtl_priv(hw)->phy.current_chan_bw,
+                                       channel);
+               _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+                                                ofdm_rates[i]);
+       }
+       size = ARRAY_SIZE(ht_rates_1t);
+       for (i = 0; i < size; i++) {
+               power_index = _rtl8723be_get_txpower_index(hw, RF90_PATH_A,
+                                       ht_rates_1t[i],
+                                       rtl_priv(hw)->phy.current_chan_bw,
+                                       channel);
+               _rtl8723be_phy_set_txpower_index(hw, power_index, RF90_PATH_A,
+                                                ht_rates_1t[i]);
+       }
+}
+
+void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       enum io_type iotype;
+
+       if (!is_hal_stop(rtlhal)) {
+               switch (operation) {
+               case SCAN_OPT_BACKUP:
+                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+                       break;
+               case SCAN_OPT_RESTORE:
+                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "Unknown Scan Backup operation.\n");
+                       break;
+               }
+       }
+}
+
+void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       u8 reg_bw_opmode;
+       u8 reg_prsr_rsc;
+
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+                "Switch to %s bandwidth\n",
+                rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+                "20MHz" : "40MHz");
+
+       if (is_hal_stop(rtlhal)) {
+               rtlphy->set_bwmode_inprogress = false;
+               return;
+       }
+
+       reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+       reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
+       switch (rtlphy->current_chan_bw) {
+       case HT_CHANNEL_WIDTH_20:
+               reg_bw_opmode |= BW_OPMODE_20MHZ;
+               rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+               rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+               reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+                              (mac->cur_40_prime_sc << 5);
+               rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               break;
+       }
+
+       switch (rtlphy->current_chan_bw) {
+       case HT_CHANNEL_WIDTH_20:
+               rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+               rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+               rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+               rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
+                             (mac->cur_40_prime_sc >> 1));
+               rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+               rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
+                             (mac->cur_40_prime_sc ==
+                              HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               break;
+       }
+       rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+       rtlphy->set_bwmode_inprogress = false;
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+}
+
+void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       u8 tmp_bw = rtlphy->current_chan_bw;
+
+       if (rtlphy->set_bwmode_inprogress)
+               return;
+       rtlphy->set_bwmode_inprogress = true;
+       if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+               rtl8723be_phy_set_bw_mode_callback(hw);
+       } else {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                        "false driver sleep or unload\n");
+               rtlphy->set_bwmode_inprogress = false;
+               rtlphy->current_chan_bw = tmp_bw;
+       }
+}
+
+void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 delay;
+
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
+                "switch to channel%d\n", rtlphy->current_channel);
+       if (is_hal_stop(rtlhal))
+               return;
+       do {
+               if (!rtlphy->sw_chnl_inprogress)
+                       break;
+               if (!rtl8723be_phy_sw_chn_step_by_step(hw,
+                                                      rtlphy->current_channel,
+                                                      &rtlphy->sw_chnl_stage,
+                                                      &rtlphy->sw_chnl_step,
+                                                      &delay)) {
+                       if (delay > 0)
+                               mdelay(delay);
+                       else
+                               continue;
+               } else {
+                       rtlphy->sw_chnl_inprogress = false;
+               }
+               break;
+       } while (true);
+       RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+}
+
+u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       if (rtlphy->sw_chnl_inprogress)
+               return 0;
+       if (rtlphy->set_bwmode_inprogress)
+               return 0;
+       RT_ASSERT((rtlphy->current_channel <= 14),
+                 "WIRELESS_MODE_G but channel>14");
+       rtlphy->sw_chnl_inprogress = true;
+       rtlphy->sw_chnl_stage = 0;
+       rtlphy->sw_chnl_step = 0;
+       if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
+               rtl8723be_phy_sw_chnl_callback(hw);
+               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+                        "sw_chnl_inprogress false schedule "
+                         "workitem current channel %d\n",
+                         rtlphy->current_channel);
+               rtlphy->sw_chnl_inprogress = false;
+       } else {
+               RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
+                        "sw_chnl_inprogress false driver sleep or"
+                         " unload\n");
+               rtlphy->sw_chnl_inprogress = false;
+       }
+       return 1;
+}
+
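+/* One step of the software channel-switch state machine: build the
+ * pre-common / RF-dependent / post-common command arrays, execute the
+ * command selected by *stage and *step, report the delay it needs and
+ * advance *step.  Returns true once the CMDID_END of stage 2 is hit.
+ */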
+static bool rtl8723be_phy_sw_chn_step_by_step(struct ieee80211_hw *hw,
+                                             u8 channel, u8 *stage,
+                                             u8 *step, u32 *delay)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
+       u32 precommoncmdcnt;
+       struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
+       u32 postcommoncmdcnt;
+       struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
+       u32 rfdependcmdcnt;
+       struct swchnlcmd *currentcmd = NULL;
+       u8 rfpath;
+       u8 num_total_rfpath = rtlphy->num_total_rfpath;
+
+       precommoncmdcnt = 0;
+       rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT,
+                                        CMDID_SET_TXPOWEROWER_LEVEL,
+                                        0, 0, 0);
+       rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
+                                        MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
+       postcommoncmdcnt = 0;
+       rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
+                                        MAX_POSTCMD_CNT, CMDID_END,
+                                        0, 0, 0);
+       rfdependcmdcnt = 0;
+
+       RT_ASSERT((channel >= 1 && channel <= 14),
+                 "illegal channel for Zebra: %d\n", channel);
+
+       rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT,
+                                        CMDID_RF_WRITEREG,
+                                        RF_CHNLBW, channel, 10);
+
+       rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
+                                        MAX_RFDEPENDCMD_CNT,
+                                        CMDID_END, 0, 0, 0);
+
+       do {
+               switch (*stage) {
+               case 0:
+                       currentcmd = &precommoncmd[*step];
+                       break;
+               case 1:
+                       currentcmd = &rfdependcmd[*step];
+                       break;
+               case 2:
+                       currentcmd = &postcommoncmd[*step];
+                       break;
+               }
+
+               if (currentcmd->cmdid == CMDID_END) {
+                       if ((*stage) == 2) {
+                               return true;
+                       } else {
+                               (*stage)++;
+                               (*step) = 0;
+                               continue;
+                       }
+               }
+
+               switch (currentcmd->cmdid) {
+               case CMDID_SET_TXPOWEROWER_LEVEL:
+                       rtl8723be_phy_set_txpower_level(hw, channel);
+                       break;
+               case CMDID_WRITEPORT_ULONG:
+                       rtl_write_dword(rtlpriv, currentcmd->para1,
+                                       currentcmd->para2);
+                       break;
+               case CMDID_WRITEPORT_USHORT:
+                       rtl_write_word(rtlpriv, currentcmd->para1,
+                                      (u16) currentcmd->para2);
+                       break;
+               case CMDID_WRITEPORT_UCHAR:
+                       rtl_write_byte(rtlpriv, currentcmd->para1,
+                                      (u8) currentcmd->para2);
+                       break;
+               case CMDID_RF_WRITEREG:
+                       for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+                               rtlphy->rfreg_chnlval[rfpath] =
+                                   ((rtlphy->rfreg_chnlval[rfpath] &
+                                     0xfffffc00) | currentcmd->para2);
+
+                               rtl_set_rfreg(hw, (enum radio_path)rfpath,
+                                             currentcmd->para1,
+                                             RFREG_OFFSET_MASK,
+                                             rtlphy->rfreg_chnlval[rfpath]);
+                       }
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "switch case not process\n");
+                       break;
+               }
+
+               break;
+       } while (true);
+
+       (*delay) = currentcmd->msdelay;
+       (*step)++;
+       return false;
+}
+
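+/* Run one path A TX IQK pass: load the IQK tone and trigger registers,
+ * pulse 0xe48 to start the one-shot calibration, wait, then check the
+ * 0xeac/0xe94/0xe9c result registers; bit 0 of the return value
+ * reports a successful TX IQK.
+ */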
+static u8 _rtl8723be_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
+{
+       u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
+       u8 result = 0x00;
+
+       rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1c);
+       rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x30008c1c);
+       rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x8214032a);
+       rtl_set_bbreg(hw, 0xe3c, MASKDWORD, 0x28160000);
+
+       rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x00462911);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
+       rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
+
+       mdelay(IQK_DELAY_TIME);
+
+       reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
+       reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
+       reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
+       reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
+
+       if (!(reg_eac & BIT(28)) &&
+           (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
+           (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
+               result |= 0x01;
+       return result;
+}
+
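+/* Compare two IQK result sets entry by entry.  Differences larger than
+ * MAX_TOLERANCE set bits in a similarity bitmap; a zeroed TX result
+ * pair (index 2 or 6) instead lets the other candidate win, and the
+ * usable values are merged into result[3].  Returns true only when the
+ * two runs fully agree.
+ */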
+static bool phy_similarity_cmp(struct ieee80211_hw *hw, long result[][8],
+                              u8 c1, u8 c2)
+{
+       u32 i, j, diff, simularity_bitmap, bound;
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+
+       u8 final_candidate[2] = { 0xFF, 0xFF };
+       bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
+
+       if (is2t)
+               bound = 8;
+       else
+               bound = 4;
+
+       simularity_bitmap = 0;
+
+       for (i = 0; i < bound; i++) {
+               diff = (result[c1][i] > result[c2][i]) ?
+                   (result[c1][i] - result[c2][i]) :
+                   (result[c2][i] - result[c1][i]);
+
+               if (diff > MAX_TOLERANCE) {
+                       if ((i == 2 || i == 6) && !simularity_bitmap) {
+                               if (result[c1][i] + result[c1][i + 1] == 0)
+                                       final_candidate[(i / 4)] = c2;
+                               else if (result[c2][i] + result[c2][i + 1] == 0)
+                                       final_candidate[(i / 4)] = c1;
+                               else
+                                       simularity_bitmap |= (1 << i);
+                       } else {
+                               simularity_bitmap |= (1 << i);
+                       }
+               }
+       }
+
+       if (simularity_bitmap == 0) {
+               for (i = 0; i < (bound / 4); i++) {
+                       if (final_candidate[i] != 0xFF) {
+                               for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+                                       result[3][j] =
+                                               result[final_candidate[i]][j];
+                               bresult = false;
+                       }
+               }
+               return bresult;
+       } else if (!(simularity_bitmap & 0x0F)) {
+               for (i = 0; i < 4; i++)
+                       result[3][i] = result[c1][i];
+               return false;
+       } else if (!(simularity_bitmap & 0xF0) && is2t) {
+               for (i = 4; i < 8; i++)
+                       result[3][i] = result[c1][i];
+               return false;
+       } else {
+               return false;
+       }
+}
+
+static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
+                                       long result[][8], u8 t, bool is2t)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 i;
+       u8 patha_ok;
+       u32 adda_reg[IQK_ADDA_REG_NUM] = {
+               0x85c, 0xe6c, 0xe70, 0xe74,
+               0xe78, 0xe7c, 0xe80, 0xe84,
+               0xe88, 0xe8c, 0xed0, 0xed4,
+               0xed8, 0xedc, 0xee0, 0xeec
+       };
+
+       u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+               0x522, 0x550, 0x551, 0x040
+       };
+       u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+               ROFDM0_TRXPATHENABLE, ROFDM0_TRMUXPAR,
+               RFPGA0_XCD_RFINTERFACESW, 0xb68, 0xb6c,
+               0x870, 0x860,
+               0x864, 0x800
+       };
+       const u32 retrycount = 2;
+       u32 path_sel_bb, path_sel_rf;
+       u8 tmp_reg_c50, tmp_reg_c58;
+
+       tmp_reg_c50 = rtl_get_bbreg(hw, 0xc50, MASKBYTE0);
+       tmp_reg_c58 = rtl_get_bbreg(hw, 0xc58, MASKBYTE0);
+
+       if (t == 0) {
+               rtl8723_save_adda_registers(hw, adda_reg,
+                                           rtlphy->adda_backup, 16);
+               rtl8723_phy_save_mac_registers(hw, iqk_mac_reg,
+                                              rtlphy->iqk_mac_backup);
+               rtl8723_save_adda_registers(hw, iqk_bb_reg,
+                                           rtlphy->iqk_bb_backup,
+                                           IQK_BB_REG_NUM);
+       }
+       rtl8723_phy_path_adda_on(hw, adda_reg, true, is2t);
+       if (t == 0) {
+               rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
+                                               RFPGA0_XA_HSSIPARAMETER1,
+                                               BIT(8));
+       }
+       if (!rtlphy->rfpi_enable)
+               rtl8723_phy_pi_mode_switch(hw, true);
+
+       path_sel_bb = rtl_get_bbreg(hw, 0x948, MASKDWORD);
+       path_sel_rf = rtl_get_rfreg(hw, RF90_PATH_A, 0xb0, 0xfffff);
+
+       /*BB Setting*/
+       rtl_set_bbreg(hw, 0x800, BIT(24), 0x00);
+       rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
+       rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
+       rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
+
+       rtl_set_bbreg(hw, 0x870, BIT(10), 0x01);
+       rtl_set_bbreg(hw, 0x870, BIT(26), 0x01);
+       rtl_set_bbreg(hw, 0x860, BIT(10), 0x00);
+       rtl_set_bbreg(hw, 0x864, BIT(10), 0x00);
+
+       if (is2t)
+               rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASKDWORD, 0x10000);
+       rtl8723_phy_mac_setting_calibration(hw, iqk_mac_reg,
+                                           rtlphy->iqk_mac_backup);
+       rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
+
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+       rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
+       rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x81004800);
+       for (i = 0; i < retrycount; i++) {
+               patha_ok = _rtl8723be_phy_path_a_iqk(hw, is2t);
+               if (patha_ok == 0x01) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "Path A Tx IQK Success!!\n");
+                       result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
+                                       0x3FF0000) >> 16;
+                       break;
+               }
+       }
+
+       if (patha_ok == 0x00)
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "Path A IQK Failed!!\n");
+       if (is2t) {
+               rtl8723_phy_path_a_standby(hw);
+               rtl8723_phy_path_adda_on(hw, adda_reg, false, is2t);
+       }
+
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
+
+       if (t != 0) {
+               if (!rtlphy->rfpi_enable)
+                       rtl8723_phy_pi_mode_switch(hw, false);
+               rtl8723_phy_reload_adda_registers(hw, adda_reg,
+                                                 rtlphy->adda_backup, 16);
+               rtl8723_phy_reload_mac_registers(hw, iqk_mac_reg,
+                                                rtlphy->iqk_mac_backup);
+               rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+                                                 rtlphy->iqk_bb_backup,
+                                                 IQK_BB_REG_NUM);
+
+               rtl_set_bbreg(hw, 0x948, MASKDWORD, path_sel_bb);
+               rtl_set_rfreg(hw, RF90_PATH_B, 0xb0, 0xfffff, path_sel_rf);
+
+               rtl_set_bbreg(hw, 0xc50, MASKBYTE0, 0x50);
+               rtl_set_bbreg(hw, 0xc50, MASKBYTE0, tmp_reg_c50);
+               if (is2t) {
+                       rtl_set_bbreg(hw, 0xc58, MASKBYTE0, 0x50);
+                       rtl_set_bbreg(hw, 0xc58, MASKBYTE0, tmp_reg_c58);
+               }
+               rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
+               rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
+}
+
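+/* LC calibration: halt transmission (via 0xd03 or REG_TXPAUSE), put
+ * the RF chain(s) into the mode the calibration needs, kick the LC cal
+ * through RF registers 0xb0/0x18, give it 100 ms to finish, then
+ * restore the saved TX and RF state.
+ */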
+static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 tmpreg;
+       u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
+
+       tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+
+       if ((tmpreg & 0x70) != 0)
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+       else
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+       if ((tmpreg & 0x70) != 0) {
+               rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
+
+               if (is2t)
+                       rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
+                                                 MASK12BITS);
+
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
+                             (rf_a_mode & 0x8FFFF) | 0x10000);
+
+               if (is2t)
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
+                                     (rf_b_mode & 0x8FFFF) | 0x10000);
+       }
+       lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
+
+       rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdfbe0);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, 0x8c0a);
+
+       mdelay(100);
+
+       rtl_set_rfreg(hw, RF90_PATH_A, 0xb0, RFREG_OFFSET_MASK, 0xdffe0);
+
+       if ((tmpreg & 0x70) != 0) {
+               rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+               rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
+
+               if (is2t)
+                       rtl_set_rfreg(hw, RF90_PATH_B, 0x00,
+                                     MASK12BITS, rf_b_mode);
+       } else {
+               rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+       }
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+}
+
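+/* Select the MAIN or AUX antenna.  With the HAL halted the LED and RF
+ * parameter bits are forced first; 2T parts switch through
+ * RFPGA0_XB_RFINTERFACEOE, while 1T parts program the interface-OE
+ * bits (and the HW antenna-diversity RAM bit) for the chosen side.
+ */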
+static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
+                                            bool bmain, bool is2t)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+
+       if (is_hal_stop(rtlhal)) {
+               u8 u1btmp;
+               u1btmp = rtl_read_byte(rtlpriv, REG_LEDCFG0);
+               rtl_write_byte(rtlpriv, REG_LEDCFG0, u1btmp | BIT(7));
+               rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
+       }
+       if (is2t) {
+               if (bmain)
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(6), 0x1);
+               else
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(6), 0x2);
+       } else {
+               rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, BIT(8) | BIT(9), 0);
+               rtl_set_bbreg(hw, 0x914, MASKLWORD, 0x0201);
+
+               /* We use the RF definition of MAIN and AUX,
+                * left antenna and right antenna, respectively.
+                * Default output at AUX.
+                */
+               if (bmain) {
+                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+                                     BIT(14) | BIT(13) | BIT(12), 0);
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(4) | BIT(3), 0);
+                       if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
+                               rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 0);
+               } else {
+                       rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+                                     BIT(14) | BIT(13) | BIT(12), 1);
+                       rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+                                     BIT(5) | BIT(4) | BIT(3), 1);
+                       if (rtlefuse->antenna_div_type == CGCS_RX_HW_ANTDIV)
+                               rtl_set_bbreg(hw, CONFIG_RAM64X16, BIT(31), 1);
+               }
+       }
+}
+
+#undef IQK_ADDA_REG_NUM
+#undef IQK_DELAY_TIME
+
+void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       long result[4][8];
+       u8 i, final_candidate;
+       bool patha_ok, pathb_ok;
+       long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
+           reg_ecc, reg_tmp = 0;
+       bool is12simular, is13simular, is23simular;
+       u32 iqk_bb_reg[9] = {
+               ROFDM0_XARXIQIMBALANCE,
+               ROFDM0_XBRXIQIMBALANCE,
+               ROFDM0_ECCATHRESHOLD,
+               ROFDM0_AGCRSSITABLE,
+               ROFDM0_XATXIQIMBALANCE,
+               ROFDM0_XBTXIQIMBALANCE,
+               ROFDM0_XCTXAFE,
+               ROFDM0_XDTXAFE,
+               ROFDM0_RXIQEXTANTA
+       };
+
+       if (recovery) {
+               rtl8723_phy_reload_adda_registers(hw, iqk_bb_reg,
+                                                 rtlphy->iqk_bb_backup, 9);
+               return;
+       }
+
+       for (i = 0; i < 8; i++) {
+               result[0][i] = 0;
+               result[1][i] = 0;
+               result[2][i] = 0;
+               result[3][i] = 0;
+       }
+       final_candidate = 0xff;
+       patha_ok = false;
+       pathb_ok = false;
+       is12simular = false;
+       is23simular = false;
+       is13simular = false;
+       for (i = 0; i < 3; i++) {
+               if (get_rf_type(rtlphy) == RF_2T2R)
+                       _rtl8723be_phy_iq_calibrate(hw, result, i, true);
+               else
+                       _rtl8723be_phy_iq_calibrate(hw, result, i, false);
+               if (i == 1) {
+                       is12simular = phy_similarity_cmp(hw, result, 0, 1);
+                       if (is12simular) {
+                               final_candidate = 0;
+                               break;
+                       }
+               }
+               if (i == 2) {
+                       is13simular = phy_similarity_cmp(hw, result, 0, 2);
+                       if (is13simular) {
+                               final_candidate = 0;
+                               break;
+                       }
+                       is23simular = phy_similarity_cmp(hw, result, 1, 2);
+                       if (is23simular) {
+                               final_candidate = 1;
+                       } else {
+                               for (i = 0; i < 8; i++)
+                                       reg_tmp += result[3][i];
+
+                               if (reg_tmp != 0)
+                                       final_candidate = 3;
+                               else
+                                       final_candidate = 0xFF;
+                       }
+               }
+       }
+       for (i = 0; i < 4; i++) {
+               reg_e94 = result[i][0];
+               reg_e9c = result[i][1];
+               reg_ea4 = result[i][2];
+               reg_eac = result[i][3];
+               reg_eb4 = result[i][4];
+               reg_ebc = result[i][5];
+               reg_ec4 = result[i][6];
+               reg_ecc = result[i][7];
+       }
+       if (final_candidate != 0xff) {
+               reg_e94 = result[final_candidate][0];
+               rtlphy->reg_e94 = reg_e94;
+               reg_e9c = result[final_candidate][1];
+               rtlphy->reg_e9c = reg_e9c;
+               reg_ea4 = result[final_candidate][2];
+               reg_eac = result[final_candidate][3];
+               reg_eb4 = result[final_candidate][4];
+               rtlphy->reg_eb4 = reg_eb4;
+               reg_ebc = result[final_candidate][5];
+               rtlphy->reg_ebc = reg_ebc;
+               reg_ec4 = result[final_candidate][6];
+               reg_ecc = result[final_candidate][7];
+               patha_ok = true;
+               pathb_ok = true;
+       } else {
+               rtlphy->reg_e94 = 0x100;
+               rtlphy->reg_eb4 = 0x100;
+               rtlphy->reg_e9c = 0x0;
+               rtlphy->reg_ebc = 0x0;
+       }
+       if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
+               rtl8723_phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
+                                                  final_candidate,
+                                                  (reg_ea4 == 0));
+       if (final_candidate != 0xFF) {
+               for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
+                       rtlphy->iqk_matrix[0].value[0][i] =
+                                               result[final_candidate][i];
+               rtlphy->iqk_matrix[0].iqk_done = true;
+       }
+       rtl8723_save_adda_registers(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 9);
+}
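
Editor's note: the final_candidate selection inside rtl8723be_phy_iq_calibrate() above can be hard to follow among the register bookkeeping. The standalone sketch below (an illustration, not part of this patch) restates just that rule: up to three IQK runs are compared pairwise, the first pair that agrees picks the candidate, and otherwise the merged fourth row is used if it is non-zero. results_similar() is a stand-in for the driver's phy_similarity_cmp(); exact equality is assumed here only for illustration.

/* Editor's sketch: condensed restatement of the candidate-selection rule. */
#include <stdbool.h>
#include <stddef.h>

static bool results_similar(const long result[4][8], int c1, int c2)
{
	for (size_t i = 0; i < 8; i++)
		if (result[c1][i] != result[c2][i])
			return false;
	return true;
}

static int pick_final_candidate(const long result[4][8])
{
	if (results_similar(result, 0, 1))
		return 0;		/* runs 0 and 1 agree */
	if (results_similar(result, 0, 2))
		return 0;		/* runs 0 and 2 agree */
	if (results_similar(result, 1, 2))
		return 1;		/* runs 1 and 2 agree */
	for (size_t i = 0; i < 8; i++)	/* otherwise use the merged row, if any */
		if (result[3][i] != 0)
			return 3;
	return 0xFF;			/* no usable calibration result */
}
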
+
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
+       u32 timeout = 2000, timecount = 0;
+
+       while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+               udelay(50);
+               timecount += 50;
+       }
+
+       rtlphy->lck_inprogress = true;
+       RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
+               "LCK:Start!!! currentband %x delay %d ms\n",
+               rtlhal->current_bandtype, timecount);
+
+       _rtl8723be_phy_lc_calibrate(hw, false);
+
+       rtlphy->lck_inprogress = false;
+}
+
+void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (rtlphy->apk_done)
+               return;
+
+       return;
+}
+
+void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
+{
+       _rtl8723be_phy_set_rfpath_switch(hw, bmain, false);
+}
+
+static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                "--->Cmd(%#x), set_io_inprogress(%d)\n",
+                 rtlphy->current_io_type, rtlphy->set_io_inprogress);
+       switch (rtlphy->current_io_type) {
+       case IO_CMD_RESUME_DM_BY_SCAN:
+               rtlpriv->dm_digtable.cur_igvalue =
+                                rtlphy->initgain_backup.xaagccore1;
+               /*rtl92c_dm_write_dig(hw);*/
+               rtl8723be_phy_set_txpower_level(hw, rtlphy->current_channel);
+               rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x83);
+               break;
+       case IO_CMD_PAUSE_DM_BY_SCAN:
+               rtlphy->initgain_backup.xaagccore1 =
+                                rtlpriv->dm_digtable.cur_igvalue;
+               rtlpriv->dm_digtable.cur_igvalue = 0x17;
+               rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not process\n");
+               break;
+       }
+       rtlphy->set_io_inprogress = false;
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                "(%#x)\n", rtlphy->current_io_type);
+}
+
+bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       bool postprocessing = false;
+
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+                 iotype, rtlphy->set_io_inprogress);
+       do {
+               switch (iotype) {
+               case IO_CMD_RESUME_DM_BY_SCAN:
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                                "[IO CMD] Resume DM after scan.\n");
+                       postprocessing = true;
+                       break;
+               case IO_CMD_PAUSE_DM_BY_SCAN:
+                       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
+                                "[IO CMD] Pause DM before scan.\n");
+                       postprocessing = true;
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "switch case not process\n");
+                       break;
+               }
+       } while (false);
+       if (postprocessing && !rtlphy->set_io_inprogress) {
+               rtlphy->set_io_inprogress = true;
+               rtlphy->current_io_type = iotype;
+       } else {
+               return false;
+       }
+       rtl8723be_phy_set_io(hw);
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+       return true;
+}
+
+static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl8723be_phy_set_rf_sleep(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+       rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+       rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+
+static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                             enum rf_pwrstate rfpwr_state)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       bool bresult = true;
+       u8 i, queue_id;
+       struct rtl8192_tx_ring *ring = NULL;
+
+       switch (rfpwr_state) {
+       case ERFON:
+               if ((ppsc->rfpwr_state == ERFOFF) &&
+                   RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+                       bool rtstatus;
+                       u32 initialize_count = 0;
+                       do {
+                               initialize_count++;
+                               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                        "IPS Set eRf nic enable\n");
+                               rtstatus = rtl_ps_enable_nic(hw);
+                       } while (!rtstatus && (initialize_count < 10));
+                       RT_CLEAR_PS_LEVEL(ppsc,
+                                         RT_RF_OFF_LEVL_HALT_NIC);
+               } else {
+                       RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                "Set ERFON sleeped:%d ms\n",
+                                 jiffies_to_msecs(jiffies -
+                                                  ppsc->last_sleep_jiffies));
+                       ppsc->last_awake_jiffies = jiffies;
+                       rtl8723be_phy_set_rf_on(hw);
+               }
+               if (mac->link_state == MAC80211_LINKED)
+                       rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
+               else
+                       rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+               break;
+       case ERFOFF:
+               for (queue_id = 0, i = 0;
+                    queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+                       ring = &pcipriv->dev.tx_ring[queue_id];
+                       if (skb_queue_len(&ring->queue) == 0) {
+                               queue_id++;
+                               continue;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        "eRf Off/Sleep: %d times "
+                                         "TcbBusyQueue[%d] =%d before "
+                                         "doze!\n", (i + 1), queue_id,
+                                         skb_queue_len(&ring->queue));
+
+                               udelay(10);
+                               i++;
+                       }
+                       if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        "\n ERFSLEEP: %d times "
+                                         "TcbBusyQueue[%d] = %d !\n",
+                                         MAX_DOZE_WAITING_TIMES_9x,
+                                         queue_id,
+                                         skb_queue_len(&ring->queue));
+                               break;
+                       }
+               }
+
+               if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+                       RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                                "IPS Set eRf nic disable\n");
+                       rtl_ps_disable_nic(hw);
+                       RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+               } else {
+                       if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
+                               rtlpriv->cfg->ops->led_control(hw,
+                                                              LED_CTL_NO_LINK);
+                       } else {
+                               rtlpriv->cfg->ops->led_control(hw,
+                                                            LED_CTL_POWER_OFF);
+                       }
+               }
+               break;
+       case ERFSLEEP:
+               if (ppsc->rfpwr_state == ERFOFF)
+                       break;
+               for (queue_id = 0, i = 0;
+                    queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
+                       ring = &pcipriv->dev.tx_ring[queue_id];
+                       if (skb_queue_len(&ring->queue) == 0) {
+                               queue_id++;
+                               continue;
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        "eRf Off/Sleep: %d times "
+                                         "TcbBusyQueue[%d] =%d before "
+                                         "doze!\n", (i + 1), queue_id,
+                                         skb_queue_len(&ring->queue));
+
+                               udelay(10);
+                               i++;
+                       }
+                       if (i >= MAX_DOZE_WAITING_TIMES_9x) {
+                               RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
+                                        "\n ERFSLEEP: %d times "
+                                         "TcbBusyQueue[%d] = %d !\n",
+                                         MAX_DOZE_WAITING_TIMES_9x,
+                                         queue_id,
+                                         skb_queue_len(&ring->queue));
+                               break;
+                       }
+               }
+               RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
+                        "Set ERFSLEEP awaked:%d ms\n",
+                         jiffies_to_msecs(jiffies -
+                                          ppsc->last_awake_jiffies));
+               ppsc->last_sleep_jiffies = jiffies;
+               _rtl8723be_phy_set_rf_sleep(hw);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "switch case not process\n");
+               bresult = false;
+               break;
+       }
+       if (bresult)
+               ppsc->rfpwr_state = rfpwr_state;
+       return bresult;
+}
+
+bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                     enum rf_pwrstate rfpwr_state)
+{
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+       bool bresult = false;
+
+       if (rfpwr_state == ppsc->rfpwr_state)
+               return bresult;
+       bresult = _rtl8723be_phy_set_rf_power_state(hw, rfpwr_state);
+       return bresult;
+}
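
Editor's note: the ERFOFF and ERFSLEEP branches in _rtl8723be_phy_set_rf_power_state() above share an identical busy-wait that polls every TX ring until it drains, or until MAX_DOZE_WAITING_TIMES_9x 10 us waits have been spent. The hypothetical helper below (an editor's sketch, not in the patch) captures that pattern in isolation; queue_len() and wait_us() stand in for skb_queue_len(&ring->queue) and udelay().

/* Editor's sketch of the TX-queue drain wait shared by the ERFOFF and
 * ERFSLEEP cases; not driver code.  Returns true if every queue emptied
 * before the retry budget ran out. */
#include <stdbool.h>

#define DEMO_MAX_DOZE_WAITS	64	/* mirrors MAX_DOZE_WAITING_TIMES_9x */

static bool wait_for_tx_queues_empty(unsigned int nr_queues,
				     unsigned int (*queue_len)(unsigned int q),
				     void (*wait_us)(unsigned int us))
{
	unsigned int waits = 0;

	for (unsigned int q = 0; q < nr_queues; ) {
		if (queue_len(q) == 0) {
			q++;			/* this queue drained; next one */
			continue;
		}
		if (++waits >= DEMO_MAX_DOZE_WAITS)
			return false;		/* still busy; give up waiting */
		wait_us(10);			/* same 10 us back-off as above */
	}
	return true;
}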
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/rtlwifi/rtl8723be/phy.h
new file mode 100644 (file)
index 0000000..444ef95
--- /dev/null
@@ -0,0 +1,217 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PHY_H__
+#define __RTL8723BE_PHY_H__
+
+/* It must always be 4, or the efuse table read sequence will be wrong. */
+#define MAX_TX_COUNT           4
+#define        TX_1S                   0
+#define        TX_2S                   1
+
+#define        MAX_POWER_INDEX         0x3F
+
+#define MAX_PRECMD_CNT                 16
+#define MAX_RFDEPENDCMD_CNT            16
+#define MAX_POSTCMD_CNT                16
+
+#define MAX_DOZE_WAITING_TIMES_9x      64
+
+#define RT_CANNOT_IO(hw)               false
+#define HIGHPOWER_RADIOA_ARRAYLEN      22
+
+#define IQK_ADDA_REG_NUM               16
+#define IQK_BB_REG_NUM                 9
+#define MAX_TOLERANCE                  5
+#define        IQK_DELAY_TIME                  10
+#define        index_mapping_NUM               15
+
+#define        APK_BB_REG_NUM                  5
+#define        APK_AFE_REG_NUM                 16
+#define        APK_CURVE_REG_NUM               4
+#define        PATH_NUM                        1
+
+#define LOOP_LIMIT                     5
+#define MAX_STALL_TIME                 50
+#define ANTENNADIVERSITYVALUE          0x80
+#define MAX_TXPWR_IDX_NMODE_92S                63
+#define RESET_CNT_LIMIT                        3
+
+#define IQK_ADDA_REG_NUM               16
+#define IQK_MAC_REG_NUM                        4
+
+#define RF6052_MAX_PATH                        2
+
+#define CT_OFFSET_MAC_ADDR             0X16
+
+#define CT_OFFSET_CCK_TX_PWR_IDX               0x5A
+#define CT_OFFSET_HT401S_TX_PWR_IDX            0x60
+#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF       0x66
+#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF         0x69
+#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF         0x6C
+
+#define CT_OFFSET_HT40_MAX_PWR_OFFSET          0x6F
+#define CT_OFFSET_HT20_MAX_PWR_OFFSET          0x72
+
+#define CT_OFFSET_CHANNEL_PLAH                 0x75
+#define CT_OFFSET_THERMAL_METER                        0x78
+#define CT_OFFSET_RF_OPTION                    0x79
+#define CT_OFFSET_VERSION                      0x7E
+#define CT_OFFSET_CUSTOMER_ID                  0x7F
+
+#define RTL92C_MAX_PATH_NUM                    2
+
+enum hw90_block_e {
+       HW90_BLOCK_MAC = 0,
+       HW90_BLOCK_PHY0 = 1,
+       HW90_BLOCK_PHY1 = 2,
+       HW90_BLOCK_RF = 3,
+       HW90_BLOCK_MAXIMUM = 4,
+};
+
+enum baseband_config_type {
+       BASEBAND_CONFIG_PHY_REG = 0,
+       BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum ra_offset_area {
+       RA_OFFSET_LEGACY_OFDM1,
+       RA_OFFSET_LEGACY_OFDM2,
+       RA_OFFSET_HT_OFDM1,
+       RA_OFFSET_HT_OFDM2,
+       RA_OFFSET_HT_OFDM3,
+       RA_OFFSET_HT_OFDM4,
+       RA_OFFSET_HT_CCK,
+};
+
+enum antenna_path {
+       ANTENNA_NONE,
+       ANTENNA_D,
+       ANTENNA_C,
+       ANTENNA_CD,
+       ANTENNA_B,
+       ANTENNA_BD,
+       ANTENNA_BC,
+       ANTENNA_BCD,
+       ANTENNA_A,
+       ANTENNA_AD,
+       ANTENNA_AC,
+       ANTENNA_ACD,
+       ANTENNA_AB,
+       ANTENNA_ABD,
+       ANTENNA_ABC,
+       ANTENNA_ABCD
+};
+
+struct r_antenna_select_ofdm {
+       u32 r_tx_antenna:4;
+       u32 r_ant_l:4;
+       u32 r_ant_non_ht:4;
+       u32 r_ant_ht1:4;
+       u32 r_ant_ht2:4;
+       u32 r_ant_ht_s1:4;
+       u32 r_ant_non_ht_s1:4;
+       u32 ofdm_txsc:2;
+       u32 reserved:2;
+};
+
+struct r_antenna_select_cck {
+       u8 r_cckrx_enable_2:2;
+       u8 r_cckrx_enable:2;
+       u8 r_ccktx_enable:4;
+};
+
+
+struct efuse_contents {
+       u8 mac_addr[ETH_ALEN];
+       u8 cck_tx_power_idx[6];
+       u8 ht40_1s_tx_power_idx[6];
+       u8 ht40_2s_tx_power_idx_diff[3];
+       u8 ht20_tx_power_idx_diff[3];
+       u8 ofdm_tx_power_idx_diff[3];
+       u8 ht40_max_power_offset[3];
+       u8 ht20_max_power_offset[3];
+       u8 channel_plan;
+       u8 thermal_meter;
+       u8 rf_option[5];
+       u8 version;
+       u8 oem_id;
+       u8 regulatory;
+};
+
+struct tx_power_struct {
+       u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 legacy_ht_txpowerdiff;
+       u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
+       u8 pwrgroup_cnt;
+       u32 mcs_original_offset[4][16];
+};
+
+enum _ANT_DIV_TYPE {
+       NO_ANTDIV                               = 0xFF,
+       CG_TRX_HW_ANTDIV                = 0x01,
+       CGCS_RX_HW_ANTDIV               = 0x02,
+       FIXED_HW_ANTDIV         = 0x03,
+       CG_TRX_SMART_ANTDIV             = 0x04,
+       CGCS_RX_SW_ANTDIV               = 0x05,
+};
+
+u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw,
+                              enum radio_path rfpath,
+                              u32 regaddr, u32 bitmask);
+void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw,
+                             enum radio_path rfpath,
+                             u32 regaddr, u32 bitmask, u32 data);
+bool rtl8723be_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723be_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw);
+void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723be_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                    long *powerlevel);
+void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw,
+                                    u8 channel);
+void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw,
+                                        u8 operation);
+void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
+                              enum nl80211_channel_type ch_type);
+void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
+                               bool b_recovery);
+void rtl23b_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
+void rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
+bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+                                            enum radio_path rfpath);
+bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+bool rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                     enum rf_pwrstate rfpwr_state);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.c
new file mode 100644 (file)
index 0000000..b5167e7
--- /dev/null
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseqcmd.h"
+#include "pwrseq.h"
+
+
+/* Drivers should parse the arrays below and do the corresponding actions. */
+/*3 Power on  Array*/
+struct wlan_pwr_cfg rtl8723B_power_on_flow[RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+                                          RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_CARDEMU_TO_ACT
+       RTL8723B_TRANS_END
+};
+
+/*3Radio off GPIO Array */
+struct wlan_pwr_cfg rtl8723B_radio_off_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS
+                                           + RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_ACT_TO_CARDEMU
+       RTL8723B_TRANS_END
+};
+
+/*3Card Disable Array*/
+struct wlan_pwr_cfg rtl8723B_card_disable_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+                                RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_ACT_TO_CARDEMU
+       RTL8723B_TRANS_CARDEMU_TO_CARDDIS
+       RTL8723B_TRANS_END
+};
+
+/*3 Card Enable Array*/
+struct wlan_pwr_cfg rtl8723B_card_enable_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+                                RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_CARDDIS_TO_CARDEMU
+       RTL8723B_TRANS_CARDEMU_TO_ACT
+       RTL8723B_TRANS_END
+};
+
+/*3Suspend Array*/
+struct wlan_pwr_cfg rtl8723B_suspend_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                         RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+                                         RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_ACT_TO_CARDEMU
+       RTL8723B_TRANS_CARDEMU_TO_SUS
+       RTL8723B_TRANS_END
+};
+
+/*3 Resume Array*/
+struct wlan_pwr_cfg rtl8723B_resume_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                        RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+                                        RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_SUS_TO_CARDEMU
+       RTL8723B_TRANS_CARDEMU_TO_ACT
+       RTL8723B_TRANS_END
+};
+
+/*3HWPDN Array*/
+struct wlan_pwr_cfg rtl8723B_hwpdn_flow[RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                       RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+                                       RTL8723B_TRANS_END_STEPS] = {
+       RTL8723B_TRANS_ACT_TO_CARDEMU
+       RTL8723B_TRANS_CARDEMU_TO_PDN
+       RTL8723B_TRANS_END
+};
+
+/*3 Enter LPS */
+struct wlan_pwr_cfg rtl8723B_enter_lps_flow[RTL8723B_TRANS_ACT_TO_LPS_STEPS +
+                                           RTL8723B_TRANS_END_STEPS] = {
+       /*FW behavior*/
+       RTL8723B_TRANS_ACT_TO_LPS
+       RTL8723B_TRANS_END
+};
+
+/*3 Leave LPS */
+struct wlan_pwr_cfg rtl8723B_leave_lps_flow[RTL8723B_TRANS_LPS_TO_ACT_STEPS +
+                                           RTL8723B_TRANS_END_STEPS] = {
+       /*FW behavior*/
+       RTL8723B_TRANS_LPS_TO_ACT
+       RTL8723B_TRANS_END
+};
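
Editor's note: each flow table above is assembled by pasting RTL8723B_TRANS_* step macros back to back inside a single initializer list and closing it with the RTL8723B_TRANS_END sentinel. The self-contained toy below (an editor's illustration; the struct and command values are invented and unrelated to the driver's real step format) shows why that works: each macro expands to a comma-terminated run of initializers, so the result is one flat array a parser can walk until it reaches the end marker.

/* Editor's toy example of the macro-pasted flow-table pattern. */
#include <stdio.h>

struct demo_step { unsigned short offset; unsigned char cmd; };

#define DEMO_CMD_WRITE	0x01
#define DEMO_CMD_END	0x04

#define DEMO_TRANS_A	{0x0020, DEMO_CMD_WRITE}, {0x0067, DEMO_CMD_WRITE},
#define DEMO_TRANS_END	{0xFFFF, DEMO_CMD_END},

static const struct demo_step demo_power_on_flow[] = {
	DEMO_TRANS_A
	DEMO_TRANS_END
};

int main(void)
{
	for (unsigned int i = 0; demo_power_on_flow[i].cmd != DEMO_CMD_END; i++)
		printf("step %u: offset 0x%04x\n", i, demo_power_on_flow[i].offset);
	return 0;
}
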
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseq.h
new file mode 100644 (file)
index 0000000..a62f43e
--- /dev/null
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PWRSEQ_H__
+#define __RTL8723BE_PWRSEQ_H__
+
+/*     Check document WM-20130425-JackieLau-RTL8723B_Power_Architecture v05.vsd
+ *     There are 6 HW Power States:
+ *     0: POFF--Power Off
+ *     1: PDN--Power Down
+ *     2: CARDEMU--Card Emulation
+ *     3: ACT--Active Mode
+ *     4: LPS--Low Power State
+ *     5: SUS--Suspend
+ *
+ *     The transitions between these states are defined below:
+ *     TRANS_CARDEMU_TO_ACT
+ *     TRANS_ACT_TO_CARDEMU
+ *     TRANS_CARDEMU_TO_SUS
+ *     TRANS_SUS_TO_CARDEMU
+ *     TRANS_CARDEMU_TO_PDN
+ *     TRANS_ACT_TO_LPS
+ *     TRANS_LPS_TO_ACT
+ *
+ *     TRANS_END
+ */
+#define        RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS     23
+#define        RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS     15
+#define        RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS     15
+#define        RTL8723B_TRANS_SUS_TO_CARDEMU_STEPS     15
+#define        RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS     15
+#define        RTL8723B_TRANS_PDN_TO_CARDEMU_STEPS     15
+#define        RTL8723B_TRANS_ACT_TO_LPS_STEPS         15
+#define        RTL8723B_TRANS_LPS_TO_ACT_STEPS         15
+#define        RTL8723B_TRANS_END_STEPS                1
+
+#define RTL8723B_TRANS_CARDEMU_TO_ACT                                  \
+       {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK,                          \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},              \
+       {0x0067, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK,                          \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0},                   \
+       {0x0001, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK,                          \
+        PWR_BASEADDR_MAC, PWR_CMD_DELAY, 1, PWRSEQ_DELAY_MS},          \
+       {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK,                          \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), 0},                   \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)|BIT(2)), 0},   \
+       {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , 0},                  \
+       {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},            \
+       {0x0075, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0) , BIT(0)},             \
+       {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},              \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},                   \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0},          \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},              \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0},                 \
+       {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)},              \
+       {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},              \
+       {0x0063, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},              \
+       {0x0062, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},                   \
+       {0x0058, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},              \
+       {0x005A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},              \
+       {0x0068, PWR_CUT_TESTCHIP_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,\
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3), BIT(3)},              \
+       {0x0069, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), BIT(6)},
+
+#define RTL8723B_TRANS_ACT_TO_CARDEMU                                  \
+       {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},                     \
+       {0x004F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},                   \
+       {0x0049, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},                   \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},              \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0},                 \
+       {0x0010, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6), 0},                   \
+       {0x0000, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC,        \
+        PWR_CMD_WRITE, BIT(5), BIT(5)},                                \
+       {0x0020, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC,        \
+        PWR_CMD_WRITE, BIT(0), 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_SUS                                  \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4) | BIT(3), (BIT(4) | BIT(3))}, \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC,        \
+        PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},                       \
+       {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)},              \
+       {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20},                  \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) | BIT(4)},\
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)},             \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
+
+#define RTL8723B_TRANS_SUS_TO_CARDEMU                                  \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0},          \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},                  \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},           \
+       {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0},                   \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_CARDDIS                              \
+       {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x20},                  \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_USB_MSK | PWR_INTF_SDIO_MSK,                          \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},       \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)},              \
+       {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 1},                   \
+       {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)},              \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)},             \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0},
+
+#define RTL8723B_TRANS_CARDDIS_TO_CARDEMU                              \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3) | BIT(7), 0},          \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},                  \
+       {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},           \
+       {0x004A, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},                   \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},            \
+       {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0},                   \
+       {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
+
+#define RTL8723B_TRANS_CARDEMU_TO_PDN                                  \
+       {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)},              \
+       {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,                      \
+        PWR_INTF_SDIO_MSK | PWR_INTF_USB_MSK, PWR_BASEADDR_MAC,        \
+        PWR_CMD_WRITE, 0xFF, 0x20},                                    \
+       {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},                   \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},
+
+#define RTL8723B_TRANS_PDN_TO_CARDEMU                                  \
+       {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},
+
+#define RTL8723B_TRANS_ACT_TO_LPS                                      \
+       {0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},                  \
+       {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},                  \
+       {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},                   \
+       {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},                   \
+       {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},                   \
+       {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},                   \
+       {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},                   \
+       {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},          \
+       {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},                   \
+       {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x03},                  \
+       {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},                   \
+       {0x0093, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x00},                  \
+       {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},
+
+#define RTL8723B_TRANS_LPS_TO_ACT                                      \
+       {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,   \
+        PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84},         \
+       {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84},                  \
+       {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84},                  \
+       {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS},          \
+       {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0},                   \
+       {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0},         \
+       {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0},            \
+       {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},              \
+       {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},                  \
+       {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1) | BIT(0), BIT(1) | BIT(0)}, \
+       {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,    \
+        PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},
+
+#define RTL8723B_TRANS_END                                             \
+       {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, \
+        PWR_CMD_END, 0, 0},
+
+extern struct wlan_pwr_cfg rtl8723B_power_on_flow
+                               [RTL8723B_TRANS_CARDEMU_TO_ACT_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_radio_off_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_card_disable_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_card_enable_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_suspend_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_resume_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_SUS_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_hwpdn_flow
+                               [RTL8723B_TRANS_ACT_TO_CARDEMU_STEPS +
+                                RTL8723B_TRANS_CARDEMU_TO_PDN_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_enter_lps_flow
+                               [RTL8723B_TRANS_ACT_TO_LPS_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+extern struct wlan_pwr_cfg rtl8723B_leave_lps_flow
+                               [RTL8723B_TRANS_LPS_TO_ACT_STEPS +
+                                RTL8723B_TRANS_END_STEPS];
+
+/* RTL8723 Power Configuration CMDs for PCIe interface */
+#define RTL8723_NIC_PWR_ON_FLOW                rtl8723B_power_on_flow
+#define RTL8723_NIC_RF_OFF_FLOW                rtl8723B_radio_off_flow
+#define RTL8723_NIC_DISABLE_FLOW       rtl8723B_card_disable_flow
+#define RTL8723_NIC_ENABLE_FLOW                rtl8723B_card_enable_flow
+#define RTL8723_NIC_SUSPEND_FLOW       rtl8723B_suspend_flow
+#define RTL8723_NIC_RESUME_FLOW                rtl8723B_resume_flow
+#define RTL8723_NIC_PDN_FLOW           rtl8723B_hwpdn_flow
+#define RTL8723_NIC_LPS_ENTER_FLOW     rtl8723B_enter_lps_flow
+#define RTL8723_NIC_LPS_LEAVE_FLOW     rtl8723B_leave_lps_flow
+
+#endif
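
Editor's note: the comment at the top of this header enumerates six hardware power states and the transitions that the step tables implement. For readers who prefer it written out as data, the sketch below lists those same seven transitions; it is purely an editor's illustration (the driver declares no such enum or table), and the header additionally defines CARDEMU<->CARDDIS and PDN->CARDEMU step macros that the comment does not list.

/* Editor's sketch: the power states and transitions named in the comment. */
#include <stdio.h>

enum demo_pwr_state { POFF, PDN, CARDEMU, ACT, LPS, SUS };

static const struct {
	enum demo_pwr_state from, to;
	const char *name;
} demo_trans[] = {
	{ CARDEMU, ACT,     "TRANS_CARDEMU_TO_ACT" },
	{ ACT,     CARDEMU, "TRANS_ACT_TO_CARDEMU" },
	{ CARDEMU, SUS,     "TRANS_CARDEMU_TO_SUS" },
	{ SUS,     CARDEMU, "TRANS_SUS_TO_CARDEMU" },
	{ CARDEMU, PDN,     "TRANS_CARDEMU_TO_PDN" },
	{ ACT,     LPS,     "TRANS_ACT_TO_LPS"     },
	{ LPS,     ACT,     "TRANS_LPS_TO_ACT"     },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(demo_trans) / sizeof(demo_trans[0]); i++)
		printf("%s: %d -> %d\n", demo_trans[i].name,
		       demo_trans[i].from, demo_trans[i].to);
	return 0;
}
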
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
new file mode 100644 (file)
index 0000000..e4a507a
--- /dev/null
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "pwrseq.h"
+
+/*     Description:
+ *             This routine deals with parsing the Power Configuration CMDs
+ *             for the RTL8723/RTL8188E series ICs.
+ *     Assumption:
+ *             We should follow the specific format released by HW SD.
+ *
+ *     2011.07.07, added by Roger.
+ */
+bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+                                u8 fab_version, u8 interface_type,
+                                struct wlan_pwr_cfg pwrcfgcmd[])
+
+{
+       struct wlan_pwr_cfg pwr_cfg_cmd = {0};
+       bool b_polling_bit = false;
+       u32 ary_idx = 0;
+       u8 value = 0;
+       u32 offset = 0;
+       u32 polling_count = 0;
+       u32 max_polling_cnt = 5000;
+
+       do {
+               pwr_cfg_cmd = pwrcfgcmd[ary_idx];
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                        "rtlbe_hal_pwrseqcmdparsing(): "
+                        "offset(%#x),cut_msk(%#x), fab_msk(%#x),"
+                        "interface_msk(%#x), base(%#x), "
+                        "cmd(%#x), msk(%#x), value(%#x)\n",
+                        GET_PWR_CFG_OFFSET(pwr_cfg_cmd),
+                        GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd),
+                        GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd),
+                        GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd),
+                        GET_PWR_CFG_BASE(pwr_cfg_cmd),
+                        GET_PWR_CFG_CMD(pwr_cfg_cmd),
+                        GET_PWR_CFG_MASK(pwr_cfg_cmd),
+                        GET_PWR_CFG_VALUE(pwr_cfg_cmd));
+
+               if ((GET_PWR_CFG_FAB_MASK(pwr_cfg_cmd)&fab_version) &&
+                   (GET_PWR_CFG_CUT_MASK(pwr_cfg_cmd)&cut_version) &&
+                   (GET_PWR_CFG_INTF_MASK(pwr_cfg_cmd)&interface_type)) {
+                       switch (GET_PWR_CFG_CMD(pwr_cfg_cmd)) {
+                       case PWR_CMD_READ:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                        "rtlbe_hal_pwrseqcmdparsing(): "
+                                         "PWR_CMD_READ\n");
+                               break;
+                       case PWR_CMD_WRITE:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                        "rtlbe_hal_pwrseqcmdparsing(): "
+                                         "PWR_CMD_WRITE\n");
+                               offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+                               /*Read the value from system register*/
+                               value = rtl_read_byte(rtlpriv, offset);
+                               value &= (~(GET_PWR_CFG_MASK(pwr_cfg_cmd)));
+                               value = value | (GET_PWR_CFG_VALUE(pwr_cfg_cmd)
+                                       & GET_PWR_CFG_MASK(pwr_cfg_cmd));
+
+                               /* Write the value back to system register */
+                               rtl_write_byte(rtlpriv, offset, value);
+                               break;
+                       case PWR_CMD_POLLING:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                        "rtlbe_hal_pwrseqcmdparsing(): "
+                                         "PWR_CMD_POLLING\n");
+                               b_polling_bit = false;
+                               offset = GET_PWR_CFG_OFFSET(pwr_cfg_cmd);
+
+                               do {
+                                       value = rtl_read_byte(rtlpriv, offset);
+
+                                       value &= GET_PWR_CFG_MASK(pwr_cfg_cmd);
+                                       if (value ==
+                                           (GET_PWR_CFG_VALUE(pwr_cfg_cmd) &
+                                            GET_PWR_CFG_MASK(pwr_cfg_cmd)))
+                                               b_polling_bit = true;
+                                       else
+                                               udelay(10);
+
+                                       if (polling_count++ > max_polling_cnt)
+                                               return false;
+
+                               } while (!b_polling_bit);
+                               break;
+                       case PWR_CMD_DELAY:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                        "rtlbe_hal_pwrseqcmdparsing(): "
+                                        "PWR_CMD_DELAY\n");
+                               if (GET_PWR_CFG_VALUE(pwr_cfg_cmd) ==
+                                   PWRSEQ_DELAY_US)
+                                       udelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+                               else
+                                       mdelay(GET_PWR_CFG_OFFSET(pwr_cfg_cmd));
+                               break;
+                       case PWR_CMD_END:
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                        "rtlbe_hal_pwrseqcmdparsing(): "
+                                        "PWR_CMD_END\n");
+                               return true;
+                               break;
+                       default:
+                               RT_ASSERT(false,
+                                         "rtlbe_hal_pwrseqcmdparsing(): "
+                                         "Unknown CMD!!\n");
+                               break;
+                       }
+               }
+
+               ary_idx++;
+       } while (1);
+
+       return true;
+}
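
Editor's note: the PWR_CMD_WRITE case above performs a masked read-modify-write: the register is read, the bits selected by the entry's mask are cleared, and the entry's value (also restricted to the mask) is OR-ed back in. The tiny standalone example below (an editor's illustration; the starting register value is made up) walks one concrete entry, the {0x0005, ..., PWR_CMD_WRITE, BIT(7), BIT(7)} step from RTL8723B_TRANS_CARDEMU_TO_PDN, through that arithmetic.

/* Editor's worked example of the PWR_CMD_WRITE read-modify-write. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t reg = 0x06;	/* hypothetical current value of offset 0x0005 */
	uint8_t msk = 0x80;	/* GET_PWR_CFG_MASK():  BIT(7) */
	uint8_t val = 0x80;	/* GET_PWR_CFG_VALUE(): BIT(7) */

	reg = (uint8_t)((reg & ~msk) | (val & msk));	/* same RMW as the parser */
	assert(reg == 0x86);	/* BIT(7) is now set, the low bits are untouched */
	return 0;
}
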
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.h
new file mode 100644 (file)
index 0000000..ce14a3b
--- /dev/null
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_PWRSEQCMD_H__
+#define __RTL8723BE_PWRSEQCMD_H__
+
+#include "../wifi.h"
+/*---------------------------------------------*/
+/*The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ           0x00
+#define PWR_CMD_WRITE          0x01
+#define PWR_CMD_POLLING                0x02
+#define PWR_CMD_DELAY          0x03
+#define PWR_CMD_END            0x04
+
+/* define the base address of each block */
+#define PWR_BASEADDR_MAC       0x00
+#define PWR_BASEADDR_USB       0x01
+#define PWR_BASEADDR_PCIE      0x02
+#define PWR_BASEADDR_SDIO      0x03
+
+#define        PWR_INTF_SDIO_MSK       BIT(0)
+#define        PWR_INTF_USB_MSK        BIT(1)
+#define        PWR_INTF_PCI_MSK        BIT(2)
+#define        PWR_INTF_ALL_MSK        (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define        PWR_FAB_TSMC_MSK        BIT(0)
+#define        PWR_FAB_UMC_MSK         BIT(1)
+#define        PWR_FAB_ALL_MSK         (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define        PWR_CUT_TESTCHIP_MSK    BIT(0)
+#define        PWR_CUT_A_MSK           BIT(1)
+#define        PWR_CUT_B_MSK           BIT(2)
+#define        PWR_CUT_C_MSK           BIT(3)
+#define        PWR_CUT_D_MSK           BIT(4)
+#define        PWR_CUT_E_MSK           BIT(5)
+#define        PWR_CUT_F_MSK           BIT(6)
+#define        PWR_CUT_G_MSK           BIT(7)
+#define        PWR_CUT_ALL_MSK         0xFF
+
+
+enum pwrseq_delay_unit {
+       PWRSEQ_DELAY_US,
+       PWRSEQ_DELAY_MS,
+};
+
+struct wlan_pwr_cfg {
+       u16 offset;
+       u8 cut_msk;
+       u8 fab_msk:4;
+       u8 interface_msk:4;
+       u8 base:4;
+       u8 cmd:4;
+       u8 msk;
+       u8 value;
+
+};
+
+#define        GET_PWR_CFG_OFFSET(__PWR_CMD)   __PWR_CMD.offset
+#define        GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define        GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define        GET_PWR_CFG_INTF_MASK(__PWR_CMD)        __PWR_CMD.interface_msk
+#define        GET_PWR_CFG_BASE(__PWR_CMD)     __PWR_CMD.base
+#define        GET_PWR_CFG_CMD(__PWR_CMD)      __PWR_CMD.cmd
+#define        GET_PWR_CFG_MASK(__PWR_CMD)     __PWR_CMD.msk
+#define        GET_PWR_CFG_VALUE(__PWR_CMD)    __PWR_CMD.value
+
+bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
+                                u8 fab_version, u8 interface_type,
+                                struct wlan_pwr_cfg pwrcfgcmd[]);
+
+#endif
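
The GET_PWR_CFG_* helpers above expand to plain member accesses on the entry itself (note the '.' rather than '->'), and for PWR_CMD_DELAY entries the parser reads the delay length from offset and the unit from value. Below is a minimal, purely illustrative entry showing that convention; the field values and the name "example_delay_entry" are made up, not taken from this patch:

/* Illustrative only: a 5 ms delay step.  offset carries the delay
 * length and value selects the unit (PWRSEQ_DELAY_US -> udelay(),
 * PWRSEQ_DELAY_MS -> mdelay()), as handled by the parser above. */
static const struct wlan_pwr_cfg example_delay_entry = {
	.offset        = 5,
	.cut_msk       = PWR_CUT_ALL_MSK,
	.fab_msk       = PWR_FAB_ALL_MSK,
	.interface_msk = PWR_INTF_ALL_MSK,
	.base          = PWR_BASEADDR_MAC,
	.cmd           = PWR_CMD_DELAY,
	.msk           = 0,
	.value         = PWRSEQ_DELAY_MS,
};

/*
 * GET_PWR_CFG_CMD(example_delay_entry)    == PWR_CMD_DELAY
 * GET_PWR_CFG_OFFSET(example_delay_entry) == 5   (milliseconds here)
 * GET_PWR_CFG_VALUE(example_delay_entry)  == PWRSEQ_DELAY_MS
 */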
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/rtlwifi/rtl8723be/reg.h
new file mode 100644 (file)
index 0000000..4c653fa
--- /dev/null
@@ -0,0 +1,2277 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_REG_H__
+#define __RTL8723BE_REG_H__
+
+#define TXPKT_BUF_SELECT                       0x69
+#define RXPKT_BUF_SELECT                       0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS              0x0
+
+#define REG_SYS_ISO_CTRL                       0x0000
+#define REG_SYS_FUNC_EN                                0x0002
+#define REG_APS_FSMCO                          0x0004
+#define REG_SYS_CLKR                           0x0008
+#define REG_9346CR                             0x000A
+#define REG_EE_VPD                             0x000C
+#define REG_AFE_MISC                           0x0010
+#define REG_SPS0_CTRL                          0x0011
+#define REG_SPS_OCP_CFG                                0x0018
+#define REG_RSV_CTRL                           0x001C
+#define REG_RF_CTRL                            0x001F
+#define REG_LDOA15_CTRL                                0x0020
+#define REG_LDOV12D_CTRL                       0x0021
+#define REG_LDOHCI12_CTRL                      0x0022
+#define REG_LPLDO_CTRL                         0x0023
+#define REG_AFE_XTAL_CTRL                      0x0024
+/* 1.5v for 8188EE test chip, 1.4v for MP chip */
+#define REG_AFE_LDO_CTRL                       0x0027
+#define REG_AFE_PLL_CTRL                       0x0028
+#define REG_MAC_PHY_CTRL                       0x002c
+#define REG_EFUSE_CTRL                         0x0030
+#define REG_EFUSE_TEST                         0x0034
+#define REG_PWR_DATA                           0x0038
+#define REG_CAL_TIMER                          0x003C
+#define REG_ACLK_MON                           0x003E
+#define REG_GPIO_MUXCFG                                0x0040
+#define REG_GPIO_IO_SEL                                0x0042
+#define REG_MAC_PINMUX_CFG                     0x0043
+#define REG_GPIO_PIN_CTRL                      0x0044
+#define REG_GPIO_INTM                          0x0048
+#define REG_LEDCFG0                            0x004C
+#define REG_LEDCFG1                            0x004D
+#define REG_LEDCFG2                            0x004E
+#define REG_LEDCFG3                            0x004F
+#define REG_FSIMR                              0x0050
+#define REG_FSISR                              0x0054
+#define REG_HSIMR                              0x0058
+#define REG_HSISR                              0x005c
+#define REG_GPIO_PIN_CTRL_2                    0x0060
+#define REG_GPIO_IO_SEL_2                      0x0062
+#define REG_MULTI_FUNC_CTRL                    0x0068
+#define REG_GPIO_OUTPUT                                0x006c
+#define REG_AFE_XTAL_CTRL_EXT                  0x0078
+#define REG_XCK_OUT_CTRL                       0x007c
+#define REG_MCUFWDL                            0x0080
+#define REG_WOL_EVENT                          0x0081
+#define REG_MCUTSTCFG                          0x0084
+
+
+#define REG_HIMR                               0x00B0
+#define REG_HISR                               0x00B4
+#define REG_HIMRE                              0x00B8
+#define REG_HISRE                              0x00BC
+
+#define REG_EFUSE_ACCESS                       0x00CF
+
+#define REG_BIST_SCAN                          0x00D0
+#define REG_BIST_RPT                           0x00D4
+#define REG_BIST_ROM_RPT                       0x00D8
+#define REG_USB_SIE_INTF                       0x00E0
+#define REG_PCIE_MIO_INTF                      0x00E4
+#define REG_PCIE_MIO_INTD                      0x00E8
+#define REG_HPON_FSM                           0x00EC
+#define REG_SYS_CFG                            0x00F0
+#define REG_GPIO_OUTSTS                                0x00F4
+#define REG_SYS_CFG1                           0x00F0
+#define REG_ROM_VERSION                                0x00FD
+
+#define REG_CR                                 0x0100
+#define REG_PBP                                        0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL               0x0106
+#define REG_TRXDMA_CTRL                                0x010C
+#define REG_TRXFF_BNDY                         0x0114
+#define REG_TRXFF_STATUS                       0x0118
+#define REG_RXFF_PTR                           0x011C
+
+#define REG_CPWM                               0x012F
+#define REG_FWIMR                              0x0130
+#define REG_FWISR                              0x0134
+#define REG_PKTBUF_DBG_CTRL                    0x0140
+#define REG_PKTBUF_DBG_DATA_L                  0x0144
+#define REG_PKTBUF_DBG_DATA_H                  0x0148
+#define REG_RXPKTBUF_CTRL                      (REG_PKTBUF_DBG_CTRL + 2)
+
+#define REG_TC0_CTRL                           0x0150
+#define REG_TC1_CTRL                           0x0154
+#define REG_TC2_CTRL                           0x0158
+#define REG_TC3_CTRL                           0x015C
+#define REG_TC4_CTRL                           0x0160
+#define REG_TCUNIT_BASE                                0x0164
+#define REG_MBIST_START                                0x0174
+#define REG_MBIST_DONE                         0x0178
+#define REG_MBIST_FAIL                         0x017C
+#define REG_32K_CTRL                           0x0194
+#define REG_C2HEVT_MSG_NORMAL                  0x01A0
+#define REG_C2HEVT_CLEAR                       0x01AF
+#define REG_C2HEVT_MSG_TEST                    0x01B8
+#define REG_MCUTST_1                           0x01c0
+#define REG_FMETHR                             0x01C8
+#define REG_HMETFR                             0x01CC
+#define REG_HMEBOX_0                           0x01D0
+#define REG_HMEBOX_1                           0x01D4
+#define REG_HMEBOX_2                           0x01D8
+#define REG_HMEBOX_3                           0x01DC
+
+#define REG_LLT_INIT                           0x01E0
+#define REG_BB_ACCEESS_CTRL                    0x01E8
+#define REG_BB_ACCESS_DATA                     0x01EC
+
+#define REG_HMEBOX_EXT_0                       0x01F0
+#define REG_HMEBOX_EXT_1                       0x01F4
+#define REG_HMEBOX_EXT_2                       0x01F8
+#define REG_HMEBOX_EXT_3                       0x01FC
+
+#define REG_RQPN                               0x0200
+#define REG_FIFOPAGE                           0x0204
+#define REG_TDECTRL                            0x0208
+#define REG_TXDMA_OFFSET_CHK                   0x020C
+#define REG_TXDMA_STATUS                       0x0210
+#define REG_RQPN_NPQ                           0x0214
+
+#define REG_RXDMA_AGG_PG_TH                    0x0280
+/* FW shall update this register before FW writes RXPKT_RELEASE_POLL to 1 */
+#define REG_FW_UPD_RDPTR                       0x0284
+/* Control the RX DMA.*/
+#define REG_RXDMA_CONTROL                      0x0286
+/* The number of packets in RXPKTBUF.  */
+#define REG_RXPKT_NUM                          0x0287
+
+#define        REG_PCIE_CTRL_REG                       0x0300
+#define        REG_INT_MIG                             0x0304
+#define        REG_BCNQ_DESA                           0x0308
+#define        REG_HQ_DESA                             0x0310
+#define        REG_MGQ_DESA                            0x0318
+#define        REG_VOQ_DESA                            0x0320
+#define        REG_VIQ_DESA                            0x0328
+#define        REG_BEQ_DESA                            0x0330
+#define        REG_BKQ_DESA                            0x0338
+#define        REG_RX_DESA                             0x0340
+
+#define        REG_DBI                                 0x0348
+#define        REG_MDIO                                0x0354
+#define        REG_DBG_SEL                             0x0360
+#define        REG_PCIE_HRPWM                          0x0361
+#define        REG_PCIE_HCPWM                          0x0363
+#define        REG_UART_CTRL                           0x0364
+#define        REG_WATCH_DOG                           0x0368
+#define        REG_UART_TX_DESA                        0x0370
+#define        REG_UART_RX_DESA                        0x0378
+
+
+#define        REG_HDAQ_DESA_NODEF                     0x0000
+#define        REG_CMDQ_DESA_NODEF                     0x0000
+
+#define REG_VOQ_INFORMATION                    0x0400
+#define REG_VIQ_INFORMATION                    0x0404
+#define REG_BEQ_INFORMATION                    0x0408
+#define REG_BKQ_INFORMATION                    0x040C
+#define REG_MGQ_INFORMATION                    0x0410
+#define REG_HGQ_INFORMATION                    0x0414
+#define REG_BCNQ_INFORMATION                   0x0418
+#define REG_TXPKT_EMPTY                                0x041A
+
+
+#define REG_CPU_MGQ_INFORMATION                        0x041C
+#define REG_FWHW_TXQ_CTRL                      0x0420
+#define REG_HWSEQ_CTRL                         0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY                 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY                  0x0425
+#define REG_MULTI_BCNQ_EN                      0x0426
+#define REG_MULTI_BCNQ_OFFSET                  0x0427
+#define REG_SPEC_SIFS                          0x0428
+#define REG_RL                                 0x042A
+#define REG_DARFRC                             0x0430
+#define REG_RARFRC                             0x0438
+#define REG_RRSR                               0x0440
+#define REG_ARFR0                              0x0444
+#define REG_ARFR1                              0x0448
+#define REG_ARFR2                              0x044C
+#define REG_ARFR3                              0x0450
+#define REG_AMPDU_MAX_TIME                     0x0456
+#define REG_AGGLEN_LMT                         0x0458
+#define REG_AMPDU_MIN_SPACE                    0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD            0x045D
+#define REG_FAST_EDCA_CTRL                     0x0460
+#define REG_RD_RESP_PKT_TH                     0x0463
+#define REG_INIRTS_RATE_SEL                    0x0480
+#define REG_INIDATA_RATE_SEL                   0x0484
+#define REG_POWER_STATUS                       0x04A4
+#define REG_POWER_STAGE1                       0x04B4
+#define REG_POWER_STAGE2                       0x04B8
+#define REG_PKT_LIFE_TIME                      0x04C0
+#define REG_STBC_SETTING                       0x04C4
+#define REG_PROT_MODE_CTRL                     0x04C8
+#define REG_BAR_MODE_CTRL                      0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT                        0x04CF
+#define REG_EARLY_MODE_CONTROL                 0x04D0
+#define REG_NQOS_SEQ                           0x04DC
+#define REG_QOS_SEQ                            0x04DE
+#define REG_NEED_CPU_HANDLE                    0x04E0
+#define REG_PKT_LOSE_RPT                       0x04E1
+#define REG_PTCL_ERR_STATUS                    0x04E2
+#define REG_TX_RPT_CTRL                                0x04EC
+#define REG_TX_RPT_TIME                                0x04F0
+#define REG_DUMMY                              0x04FC
+
+#define REG_EDCA_VO_PARAM                      0x0500
+#define REG_EDCA_VI_PARAM                      0x0504
+#define REG_EDCA_BE_PARAM                      0x0508
+#define REG_EDCA_BK_PARAM                      0x050C
+#define REG_BCNTCFG                            0x0510
+#define REG_PIFS                               0x0512
+#define REG_RDG_PIFS                           0x0513
+#define REG_SIFS_CTX                           0x0514
+#define REG_SIFS_TRX                           0x0516
+#define REG_AGGR_BREAK_TIME                    0x051A
+#define REG_SLOT                               0x051B
+#define REG_TX_PTCL_CTRL                       0x0520
+#define REG_TXPAUSE                            0x0522
+#define REG_DIS_TXREQ_CLR                      0x0523
+#define REG_RD_CTRL                            0x0524
+#define REG_TBTT_PROHIBIT                      0x0540
+#define REG_RD_NAV_NXT                         0x0544
+#define REG_NAV_PROT_LEN                       0x0546
+#define REG_BCN_CTRL                           0x0550
+#define REG_USTIME_TSF                         0x0551
+#define REG_MBID_NUM                           0x0552
+#define REG_DUAL_TSF_RST                       0x0553
+#define REG_BCN_INTERVAL                       0x0554
+#define REG_MBSSID_BCN_SPACE                   0x0554
+#define REG_DRVERLYINT                         0x0558
+#define REG_BCNDMATIM                          0x0559
+#define REG_ATIMWND                            0x055A
+#define REG_BCN_MAX_ERR                                0x055D
+#define REG_RXTSF_OFFSET_CCK                   0x055E
+#define REG_RXTSF_OFFSET_OFDM                  0x055F
+#define REG_TSFTR                              0x0560
+#define REG_INIT_TSFTR                         0x0564
+#define REG_SECONDARY_CCA_CTRL                 0x0577
+#define REG_PSTIMER                            0x0580
+#define REG_TIMER0                             0x0584
+#define REG_TIMER1                             0x0588
+#define REG_ACMHWCTRL                          0x05C0
+#define REG_ACMRSTCTRL                         0x05C1
+#define REG_ACMAVG                             0x05C2
+#define REG_VO_ADMTIME                         0x05C4
+#define REG_VI_ADMTIME                         0x05C6
+#define REG_BE_ADMTIME                         0x05C8
+#define REG_EDCA_RANDOM_GEN                    0x05CC
+#define REG_SCH_TXCMD                          0x05D0
+
+#define REG_APSD_CTRL                          0x0600
+#define REG_BWOPMODE                           0x0603
+#define REG_TCR                                        0x0604
+#define REG_RCR                                        0x0608
+#define REG_RX_PKT_LIMIT                       0x060C
+#define REG_RX_DLK_TIME                                0x060D
+#define REG_RX_DRVINFO_SZ                      0x060F
+
+#define REG_MACID                              0x0610
+#define REG_BSSID                              0x0618
+#define REG_MAR                                        0x0620
+#define REG_MBIDCAMCFG                         0x0628
+
+#define REG_USTIME_EDCA                                0x0638
+#define REG_MAC_SPEC_SIFS                      0x063A
+#define REG_RESP_SIFS_CCK                      0x063C
+#define REG_RESP_SIFS_OFDM                     0x063E
+#define REG_ACKTO                              0x0640
+#define REG_CTS2TO                             0x0641
+#define REG_EIFS                               0x0642
+
+#define REG_NAV_CTRL                           0x0650
+#define REG_BACAMCMD                           0x0654
+#define REG_BACAMCONTENT                       0x0658
+#define REG_LBDLY                              0x0660
+#define REG_FWDLY                              0x0661
+#define REG_RXERR_RPT                          0x0664
+#define REG_TRXPTCL_CTL                                0x0668
+
+#define REG_CAMCMD                             0x0670
+#define REG_CAMWRITE                           0x0674
+#define REG_CAMREAD                            0x0678
+#define REG_CAMDBG                             0x067C
+#define REG_SECCFG                             0x0680
+
+#define REG_WOW_CTRL                           0x0690
+#define REG_PSSTATUS                           0x0691
+#define REG_PS_RX_INFO                         0x0692
+#define REG_UAPSD_TID                          0x0693
+#define REG_LPNAV_CTRL                         0x0694
+#define REG_WKFMCAM_NUM                                0x0698
+#define REG_WKFMCAM_RWD                                0x069C
+#define REG_RXFLTMAP0                          0x06A0
+#define REG_RXFLTMAP1                          0x06A2
+#define REG_RXFLTMAP2                          0x06A4
+#define REG_BCN_PSR_RPT                                0x06A8
+#define REG_CALB32K_CTRL                       0x06AC
+#define REG_PKT_MON_CTRL                       0x06B4
+#define REG_BT_COEX_TABLE                      0x06C0
+#define REG_WMAC_RESP_TXINFO                   0x06D8
+
+#define REG_USB_INFO                           0xFE17
+#define REG_USB_SPECIAL_OPTION                 0xFE55
+#define REG_USB_DMA_AGG_TO                     0xFE5B
+#define REG_USB_AGG_TO                         0xFE5C
+#define REG_USB_AGG_TH                         0xFE5D
+
+#define REG_TEST_USB_TXQS                      0xFE48
+#define REG_TEST_SIE_VID                       0xFE60
+#define REG_TEST_SIE_PID                       0xFE62
+#define REG_TEST_SIE_OPTIONAL                  0xFE64
+#define REG_TEST_SIE_CHIRP_K                   0xFE65
+#define REG_TEST_SIE_PHY                       0xFE66
+#define REG_TEST_SIE_MAC_ADDR                  0xFE70
+#define REG_TEST_SIE_STRING                    0xFE80
+
+#define REG_NORMAL_SIE_VID                     0xFE60
+#define REG_NORMAL_SIE_PID                     0xFE62
+#define REG_NORMAL_SIE_OPTIONAL                        0xFE64
+#define REG_NORMAL_SIE_EP                      0xFE65
+#define REG_NORMAL_SIE_PHY                     0xFE68
+#define REG_NORMAL_SIE_MAC_ADDR                        0xFE70
+#define REG_NORMAL_SIE_STRING                  0xFE80
+
+#define        CR9346                                  REG_9346CR
+#define        MSR                                     (REG_CR + 2)
+#define        ISR                                     REG_HISR
+#define        TSFR                                    REG_TSFTR
+
+#define        MACIDR0                                 REG_MACID
+#define        MACIDR4                                 (REG_MACID + 4)
+
+#define PBP                                    REG_PBP
+
+#define        IDR0                                    MACIDR0
+#define        IDR4                                    MACIDR4
+
+#define        UNUSED_REGISTER                         0x1BF
+#define        DCAM                                    UNUSED_REGISTER
+#define        PSR                                     UNUSED_REGISTER
+#define BBADDR                                 UNUSED_REGISTER
+#define        PHYDATAR                                UNUSED_REGISTER
+
+#define        INVALID_BBRF_VALUE                      0x12345678
+
+#define        MAX_MSS_DENSITY_2T                      0x13
+#define        MAX_MSS_DENSITY_1T                      0x0A
+
+#define        CMDEEPROM_EN                            BIT(5)
+#define        CMDEEPROM_SEL                           BIT(4)
+#define        CMD9346CR_9356SEL                       BIT(4)
+#define        AUTOLOAD_EEPROM                         (CMDEEPROM_EN | CMDEEPROM_SEL)
+#define        AUTOLOAD_EFUSE                          CMDEEPROM_EN
+
+#define        GPIOSEL_GPIO                            0
+#define        GPIOSEL_ENBT                            BIT(5)
+
+#define        GPIO_IN                                 REG_GPIO_PIN_CTRL
+#define        GPIO_OUT                                (REG_GPIO_PIN_CTRL + 1)
+#define        GPIO_IO_SEL                             (REG_GPIO_PIN_CTRL + 2)
+#define        GPIO_MOD                                (REG_GPIO_PIN_CTRL + 3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 bits) */
+#define        HSIMR_GPIO12_0_INT_EN                   BIT(0)
+#define        HSIMR_SPS_OCP_INT_EN                    BIT(5)
+#define        HSIMR_RON_INT_EN                        BIT(6)
+#define        HSIMR_PDN_INT_EN                        BIT(7)
+#define        HSIMR_GPIO9_INT_EN                      BIT(25)
+
+/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 bits) */
+
+#define        HSISR_GPIO12_0_INT                      BIT(0)
+#define        HSISR_SPS_OCP_INT                       BIT(5)
+#define        HSISR_RON_INT_EN                        BIT(6)
+#define        HSISR_PDNINT                            BIT(7)
+#define        HSISR_GPIO9_INT                         BIT(25)
+
+#define        MSR_NOLINK                              0x00
+#define        MSR_ADHOC                               0x01
+#define        MSR_INFRA                               0x02
+#define        MSR_AP                                  0x03
+
+#define        RRSR_RSC_OFFSET                         21
+#define        RRSR_SHORT_OFFSET                       23
+#define        RRSR_RSC_BW_40M                         0x600000
+#define        RRSR_RSC_UPSUBCHNL                      0x400000
+#define        RRSR_RSC_LOWSUBCHNL                     0x200000
+#define        RRSR_SHORT                              0x800000
+#define        RRSR_1M                                 BIT(0)
+#define        RRSR_2M                                 BIT(1)
+#define        RRSR_5_5M                               BIT(2)
+#define        RRSR_11M                                BIT(3)
+#define        RRSR_6M                                 BIT(4)
+#define        RRSR_9M                                 BIT(5)
+#define        RRSR_12M                                BIT(6)
+#define        RRSR_18M                                BIT(7)
+#define        RRSR_24M                                BIT(8)
+#define        RRSR_36M                                BIT(9)
+#define        RRSR_48M                                BIT(10)
+#define        RRSR_54M                                BIT(11)
+#define        RRSR_MCS0                               BIT(12)
+#define        RRSR_MCS1                               BIT(13)
+#define        RRSR_MCS2                               BIT(14)
+#define        RRSR_MCS3                               BIT(15)
+#define        RRSR_MCS4                               BIT(16)
+#define        RRSR_MCS5                               BIT(17)
+#define        RRSR_MCS6                               BIT(18)
+#define        RRSR_MCS7                               BIT(19)
+#define        BRSR_ACKSHORTPMB                        BIT(23)
+
+#define        RATR_1M                                 0x00000001
+#define        RATR_2M                                 0x00000002
+#define        RATR_55M                                0x00000004
+#define        RATR_11M                                0x00000008
+#define        RATR_6M                                 0x00000010
+#define        RATR_9M                                 0x00000020
+#define        RATR_12M                                0x00000040
+#define        RATR_18M                                0x00000080
+#define        RATR_24M                                0x00000100
+#define        RATR_36M                                0x00000200
+#define        RATR_48M                                0x00000400
+#define        RATR_54M                                0x00000800
+#define        RATR_MCS0                               0x00001000
+#define        RATR_MCS1                               0x00002000
+#define        RATR_MCS2                               0x00004000
+#define        RATR_MCS3                               0x00008000
+#define        RATR_MCS4                               0x00010000
+#define        RATR_MCS5                               0x00020000
+#define        RATR_MCS6                               0x00040000
+#define        RATR_MCS7                               0x00080000
+#define        RATR_MCS8                               0x00100000
+#define        RATR_MCS9                               0x00200000
+#define        RATR_MCS10                              0x00400000
+#define        RATR_MCS11                              0x00800000
+#define        RATR_MCS12                              0x01000000
+#define        RATR_MCS13                              0x02000000
+#define        RATR_MCS14                              0x04000000
+#define        RATR_MCS15                              0x08000000
+
+#define RATE_1M                                        BIT(0)
+#define RATE_2M                                        BIT(1)
+#define RATE_5_5M                              BIT(2)
+#define RATE_11M                               BIT(3)
+#define RATE_6M                                        BIT(4)
+#define RATE_9M                                        BIT(5)
+#define RATE_12M                               BIT(6)
+#define RATE_18M                               BIT(7)
+#define RATE_24M                               BIT(8)
+#define RATE_36M                               BIT(9)
+#define RATE_48M                               BIT(10)
+#define RATE_54M                               BIT(11)
+#define RATE_MCS0                              BIT(12)
+#define RATE_MCS1                              BIT(13)
+#define RATE_MCS2                              BIT(14)
+#define RATE_MCS3                              BIT(15)
+#define RATE_MCS4                              BIT(16)
+#define RATE_MCS5                              BIT(17)
+#define RATE_MCS6                              BIT(18)
+#define RATE_MCS7                              BIT(19)
+#define RATE_MCS8                              BIT(20)
+#define RATE_MCS9                              BIT(21)
+#define RATE_MCS10                             BIT(22)
+#define RATE_MCS11                             BIT(23)
+#define RATE_MCS12                             BIT(24)
+#define RATE_MCS13                             BIT(25)
+#define RATE_MCS14                             BIT(26)
+#define RATE_MCS15                             BIT(27)
+
+#define        RATE_ALL_CCK            (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define        RATE_ALL_OFDM_AG        (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
+                                RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define        RATE_ALL_OFDM_1SS       (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
+                                RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
+                                RATR_MCS6 | RATR_MCS7)
+#define        RATE_ALL_OFDM_2SS       (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
+                                RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\
+                                RATR_MCS14 | RATR_MCS15)
+
+#define        BW_OPMODE_20MHZ                         BIT(2)
+#define        BW_OPMODE_5G                            BIT(1)
+#define        BW_OPMODE_11J                           BIT(0)
+
+#define        CAM_VALID                               BIT(15)
+#define        CAM_NOTVALID                            0x0000
+#define        CAM_USEDK                               BIT(5)
+
+#define        CAM_NONE                                0x0
+#define        CAM_WEP40                               0x01
+#define        CAM_TKIP                                0x02
+#define        CAM_AES                                 0x04
+#define        CAM_WEP104                              0x05
+
+#define        TOTAL_CAM_ENTRY                         32
+#define        HALF_CAM_ENTRY                          16
+
+#define        CAM_WRITE                               BIT(16)
+#define        CAM_READ                                0x00000000
+#define        CAM_POLLINIG                            BIT(31)
+
+#define        SCR_USEDK                               0x01
+#define        SCR_TXSEC_ENABLE                        0x02
+#define        SCR_RXSEC_ENABLE                        0x04
+
+#define        WOW_PMEN                                BIT(0)
+#define        WOW_WOMEN                               BIT(1)
+#define        WOW_MAGIC                               BIT(2)
+#define        WOW_UWF                                 BIT(3)
+
+/*********************************************
+*       8723BE IMR/ISR bits
+**********************************************/
+#define        IMR_DISABLED                            0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define        IMR_TXCCK               BIT(30) /* TXRPT interrupt when
+                                        * CCX bit of the packet is set
+                                        */
+#define        IMR_PSTIMEOUT           BIT(29) /* Power Save Time Out Interrupt */
+#define        IMR_GTINT4              BIT(28) /* When GTIMER4 expires,
+                                        * this bit is set to 1
+                                        */
+#define        IMR_GTINT3              BIT(27) /* When GTIMER3 expires,
+                                        * this bit is set to 1
+                                        */
+#define        IMR_TBDER               BIT(26) /* Transmit Beacon0 Error */
+#define        IMR_TBDOK               BIT(25) /* Transmit Beacon0 OK */
+#define        IMR_TSF_BIT32_TOGGLE    BIT(24) /* TSF Timer BIT32 toggle
+                                        * indication interrupt
+                                        */
+#define        IMR_BCNDMAINT0          BIT(20) /* Beacon DMA Interrupt 0 */
+#define        IMR_BCNDOK0             BIT(16) /* Beacon Queue DMA OK0 */
+#define        IMR_HSISR_IND_ON_INT    BIT(15) /* HSISR Indicator (HSIMR & HSISR is
+                                        * true, this bit is set to 1)
+                                        */
+#define        IMR_BCNDMAINT_E         BIT(14) /* Beacon DMA Interrupt
+                                        * Extension for Win7
+                                        */
+#define        IMR_ATIMEND             BIT(12) /* CTWindow End or ATIM Window End */
+#define        IMR_HISR1_IND_INT       BIT(11) /* HISR1 Indicator (HISR1 & HIMR1 is
+                                        * true, this bit is set to 1)
+                                        */
+#define        IMR_C2HCMD              BIT(10) /* CPU to Host Command INT Status,
+                                        * Write 1 clear
+                                        */
+#define        IMR_CPWM2               BIT(9)  /* CPU power Mode exchange INT Status,
+                                        * Write 1 clear
+                                        */
+#define        IMR_CPWM                BIT(8)  /* CPU power Mode exchange INT Status,
+                                        * Write 1 clear
+                                        */
+#define        IMR_HIGHDOK             BIT(7)  /* High Queue DMA OK */
+#define        IMR_MGNTDOK             BIT(6)  /* Management Queue DMA OK */
+#define        IMR_BKDOK               BIT(5)  /* AC_BK DMA OK */
+#define        IMR_BEDOK               BIT(4)  /* AC_BE DMA OK */
+#define        IMR_VIDOK               BIT(3)  /* AC_VI DMA OK */
+#define        IMR_VODOK               BIT(2)  /* AC_VO DMA OK */
+#define        IMR_RDU                 BIT(1)  /* Rx Descriptor Unavailable */
+#define        IMR_ROK                 BIT(0)  /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define        IMR_BCNDMAINT7          BIT(27) /* Beacon DMA Interrupt 7 */
+#define        IMR_BCNDMAINT6          BIT(26) /* Beacon DMA Interrupt 6 */
+#define        IMR_BCNDMAINT5          BIT(25) /* Beacon DMA Interrupt 5 */
+#define        IMR_BCNDMAINT4          BIT(24) /* Beacon DMA Interrupt 4 */
+#define        IMR_BCNDMAINT3          BIT(23) /* Beacon DMA Interrupt 3 */
+#define        IMR_BCNDMAINT2          BIT(22) /* Beacon DMA Interrupt 2 */
+#define        IMR_BCNDMAINT1          BIT(21) /* Beacon DMA Interrupt 1 */
+#define        IMR_BCNDOK7             BIT(20) /* Beacon Queue DMA OK Interrupt 7 */
+#define        IMR_BCNDOK6             BIT(19) /* Beacon Queue DMA OK Interrupt 6 */
+#define        IMR_BCNDOK5             BIT(18) /* Beacon Queue DMA OK Interrupt 5 */
+#define        IMR_BCNDOK4             BIT(17) /* Beacon Queue DMA OK Interrupt 4 */
+#define        IMR_BCNDOK3             BIT(16) /* Beacon Queue DMA OK Interrupt 3 */
+#define        IMR_BCNDOK2             BIT(15) /* Beacon Queue DMA OK Interrupt 2 */
+#define        IMR_BCNDOK1             BIT(14) /* Beacon Queue DMA OK Interrupt 1 */
+#define        IMR_ATIMEND_E           BIT(13) /* ATIM Window End Extension for Win7 */
+#define        IMR_TXERR               BIT(11) /* Tx Error Flag Interrupt Status,
+                                        * write 1 clear.
+                                        */
+#define        IMR_RXERR               BIT(10) /* Rx Error Flag INT Status,
+                                        * Write 1 clear
+                                        */
+#define        IMR_TXFOVW              BIT(9)  /* Transmit FIFO Overflow */
+#define        IMR_RXFOVW              BIT(8)  /* Receive FIFO Overflow */
+
+#define        HWSET_MAX_SIZE                  512
+#define EFUSE_MAX_SECTION              64
+#define EFUSE_REAL_CONTENT_LEN         256
+#define EFUSE_OOB_PROTECT_BYTES                18 /* PG data excludes the header,
+                                           * 7 dummy bytes from the CP test
+                                           * and 1 reserved byte.
+                                           */
+
+#define        EEPROM_DEFAULT_TSSI                     0x0
+#define EEPROM_DEFAULT_TXPOWERDIFF             0x0
+#define EEPROM_DEFAULT_CRYSTALCAP              0x5
+#define EEPROM_DEFAULT_BOARDTYPE               0x02
+#define EEPROM_DEFAULT_TXPOWER                 0x1010
+#define        EEPROM_DEFAULT_HT2T_TXPWR               0x10
+
+#define        EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF      0x3
+#define        EEPROM_DEFAULT_THERMALMETER             0x18
+#define        EEPROM_DEFAULT_ANTTXPOWERDIFF           0x0
+#define        EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP      0x5
+#define        EEPROM_DEFAULT_TXPOWERLEVEL             0x22
+#define        EEPROM_DEFAULT_HT40_2SDIFF              0x0
+#define EEPROM_DEFAULT_HT20_DIFF               2
+#define        EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF      0x3
+#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET       0
+#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET       0
+
+#define RF_OPTION1                             0x79
+#define RF_OPTION2                             0x7A
+#define RF_OPTION3                             0x7B
+#define RF_OPTION4                             0xC3
+
+#define EEPROM_DEFAULT_PID                     0x1234
+#define EEPROM_DEFAULT_VID                     0x5678
+#define EEPROM_DEFAULT_CUSTOMERID              0xAB
+#define EEPROM_DEFAULT_SUBCUSTOMERID           0xCD
+#define EEPROM_DEFAULT_VERSION                 0
+
+#define        EEPROM_CHANNEL_PLAN_FCC                 0x0
+#define        EEPROM_CHANNEL_PLAN_IC                  0x1
+#define        EEPROM_CHANNEL_PLAN_ETSI                0x2
+#define        EEPROM_CHANNEL_PLAN_SPAIN               0x3
+#define        EEPROM_CHANNEL_PLAN_FRANCE              0x4
+#define        EEPROM_CHANNEL_PLAN_MKK                 0x5
+#define        EEPROM_CHANNEL_PLAN_MKK1                0x6
+#define        EEPROM_CHANNEL_PLAN_ISRAEL              0x7
+#define        EEPROM_CHANNEL_PLAN_TELEC               0x8
+#define        EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN       0x9
+#define        EEPROM_CHANNEL_PLAN_WORLD_WIDE_13       0xA
+#define        EEPROM_CHANNEL_PLAN_NCC                 0xB
+#define        EEPROM_CHANNEL_PLAN_BY_HW_MASK          0x80
+
+#define EEPROM_CID_DEFAULT                     0x0
+#define EEPROM_CID_TOSHIBA                     0x4
+#define        EEPROM_CID_CCX                          0x10
+#define        EEPROM_CID_QMI                          0x0D
+#define EEPROM_CID_WHQL                                0xFE
+
+#define        RTL8723BE_EEPROM_ID                     0x8129
+
+#define EEPROM_HPON                            0x02
+#define EEPROM_CLK                             0x06
+#define EEPROM_TESTR                           0x08
+
+
+#define EEPROM_TXPOWERCCK                      0x10
+#define        EEPROM_TXPOWERHT40_1S                   0x16
+#define EEPROM_TXPOWERHT20DIFF                 0x1B
+#define EEPROM_TXPOWER_OFDMDIFF                        0x1B
+
+
+
+#define        EEPROM_TX_PWR_INX                       0x10
+
+#define        EEPROM_CHANNELPLAN                      0xB8
+#define        EEPROM_XTAL_8723BE                      0xB9
+#define        EEPROM_THERMAL_METER_88E                0xBA
+#define        EEPROM_IQK_LCK_88E                      0xBB
+
+#define        EEPROM_RF_BOARD_OPTION_88E              0xC1
+#define        EEPROM_RF_FEATURE_OPTION_88E            0xC2
+#define        EEPROM_RF_BT_SETTING_88E                0xC3
+#define        EEPROM_VERSION                          0xC4
+#define        EEPROM_CUSTOMER_ID                      0xC5
+#define        EEPROM_RF_ANTENNA_OPT_88E               0xC9
+
+#define        EEPROM_MAC_ADDR                         0xD0
+#define EEPROM_VID                             0xD6
+#define EEPROM_DID                             0xD8
+#define EEPROM_SVID                            0xDA
+#define EEPROM_SMID                            0xDC
+
+#define        STOPBECON                               BIT(6)
+#define        STOPHIGHT                               BIT(5)
+#define        STOPMGT                                 BIT(4)
+#define        STOPVO                                  BIT(3)
+#define        STOPVI                                  BIT(2)
+#define        STOPBE                                  BIT(1)
+#define        STOPBK                                  BIT(0)
+
+#define        RCR_APPFCS                              BIT(31)
+#define        RCR_APP_MIC                             BIT(30)
+#define        RCR_APP_ICV                             BIT(29)
+#define        RCR_APP_PHYST_RXFF                      BIT(28)
+#define        RCR_APP_BA_SSN                          BIT(27)
+#define        RCR_ENMBID                              BIT(24)
+#define        RCR_LSIGEN                              BIT(23)
+#define        RCR_MFBEN                               BIT(22)
+#define        RCR_HTC_LOC_CTRL                        BIT(14)
+#define        RCR_AMF                                 BIT(13)
+#define        RCR_ACF                                 BIT(12)
+#define        RCR_ADF                                 BIT(11)
+#define        RCR_AICV                                BIT(9)
+#define        RCR_ACRC32                              BIT(8)
+#define        RCR_CBSSID_BCN                          BIT(7)
+#define        RCR_CBSSID_DATA                         BIT(6)
+#define        RCR_CBSSID                              RCR_CBSSID_DATA
+#define        RCR_APWRMGT                             BIT(5)
+#define        RCR_ADD3                                BIT(4)
+#define        RCR_AB                                  BIT(3)
+#define        RCR_AM                                  BIT(2)
+#define        RCR_APM                                 BIT(1)
+#define        RCR_AAP                                 BIT(0)
+#define        RCR_MXDMA_OFFSET                        8
+#define        RCR_FIFO_OFFSET                         13
+
+#define RSV_CTRL                               0x001C
+#define RD_CTRL                                        0x0524
+
+#define REG_USB_INFO                           0xFE17
+#define REG_USB_SPECIAL_OPTION                 0xFE55
+#define REG_USB_DMA_AGG_TO                     0xFE5B
+#define REG_USB_AGG_TO                         0xFE5C
+#define REG_USB_AGG_TH                         0xFE5D
+
+#define REG_USB_VID                            0xFE60
+#define REG_USB_PID                            0xFE62
+#define REG_USB_OPTIONAL                       0xFE64
+#define REG_USB_CHIRP_K                                0xFE65
+#define REG_USB_PHY                            0xFE66
+#define REG_USB_MAC_ADDR                       0xFE70
+#define REG_USB_HRPWM                          0xFE58
+#define REG_USB_HCPWM                          0xFE57
+
+#define SW18_FPWM                              BIT(3)
+
+#define ISO_MD2PP                              BIT(0)
+#define ISO_UA2USB                             BIT(1)
+#define ISO_UD2CORE                            BIT(2)
+#define ISO_PA2PCIE                            BIT(3)
+#define ISO_PD2CORE                            BIT(4)
+#define ISO_IP2MAC                             BIT(5)
+#define ISO_DIOP                               BIT(6)
+#define ISO_DIOE                               BIT(7)
+#define ISO_EB2CORE                            BIT(8)
+#define ISO_DIOR                               BIT(9)
+
+#define PWC_EV25V                              BIT(14)
+#define PWC_EV12V                              BIT(15)
+
+#define FEN_BBRSTB                             BIT(0)
+#define FEN_BB_GLB_RSTN                                BIT(1)
+#define FEN_USBA                               BIT(2)
+#define FEN_UPLL                               BIT(3)
+#define FEN_USBD                               BIT(4)
+#define FEN_DIO_PCIE                           BIT(5)
+#define FEN_PCIEA                              BIT(6)
+#define FEN_PPLL                               BIT(7)
+#define FEN_PCIED                              BIT(8)
+#define FEN_DIOE                               BIT(9)
+#define FEN_CPUEN                              BIT(10)
+#define FEN_DCORE                              BIT(11)
+#define FEN_ELDR                               BIT(12)
+#define FEN_DIO_RF                             BIT(13)
+#define FEN_HWPDN                              BIT(14)
+#define FEN_MREGEN                             BIT(15)
+
+#define PFM_LDALL                              BIT(0)
+#define PFM_ALDN                               BIT(1)
+#define PFM_LDKP                               BIT(2)
+#define PFM_WOWL                               BIT(3)
+#define ENPDN                                  BIT(4)
+#define PDN_PL                                 BIT(5)
+#define APFM_ONMAC                             BIT(8)
+#define APFM_OFF                               BIT(9)
+#define APFM_RSM                               BIT(10)
+#define AFSM_HSUS                              BIT(11)
+#define AFSM_PCIE                              BIT(12)
+#define APDM_MAC                               BIT(13)
+#define APDM_HOST                              BIT(14)
+#define APDM_HPDN                              BIT(15)
+#define RDY_MACON                              BIT(16)
+#define SUS_HOST                               BIT(17)
+#define ROP_ALD                                        BIT(20)
+#define ROP_PWR                                        BIT(21)
+#define ROP_SPS                                        BIT(22)
+#define SOP_MRST                               BIT(25)
+#define SOP_FUSE                               BIT(26)
+#define SOP_ABG                                        BIT(27)
+#define SOP_AMB                                        BIT(28)
+#define SOP_RCK                                        BIT(29)
+#define SOP_A8M                                        BIT(30)
+#define XOP_BTCK                               BIT(31)
+
+#define ANAD16V_EN                             BIT(0)
+#define ANA8M                                  BIT(1)
+#define MACSLP                                 BIT(4)
+#define LOADER_CLK_EN                          BIT(5)
+#define _80M_SSC_DIS                           BIT(7)
+#define _80M_SSC_EN_HO                         BIT(8)
+#define PHY_SSC_RSTB                           BIT(9)
+#define SEC_CLK_EN                             BIT(10)
+#define MAC_CLK_EN                             BIT(11)
+#define SYS_CLK_EN                             BIT(12)
+#define RING_CLK_EN                            BIT(13)
+
+#define        BOOT_FROM_EEPROM                        BIT(4)
+#define        EEPROM_EN                               BIT(5)
+
+#define AFE_BGEN                               BIT(0)
+#define AFE_MBEN                               BIT(1)
+#define MAC_ID_EN                              BIT(7)
+
+#define WLOCK_ALL                              BIT(0)
+#define WLOCK_00                               BIT(1)
+#define WLOCK_04                               BIT(2)
+#define WLOCK_08                               BIT(3)
+#define WLOCK_40                               BIT(4)
+#define R_DIS_PRST_0                           BIT(5)
+#define R_DIS_PRST_1                           BIT(6)
+#define LOCK_ALL_EN                            BIT(7)
+
+#define RF_EN                                  BIT(0)
+#define RF_RSTB                                        BIT(1)
+#define RF_SDMRSTB                             BIT(2)
+
+#define LDA15_EN                               BIT(0)
+#define LDA15_STBY                             BIT(1)
+#define LDA15_OBUF                             BIT(2)
+#define LDA15_REG_VOS                          BIT(3)
+#define _LDA15_VOADJ(x)                                (((x) & 0x7) << 4)
+
+#define LDV12_EN                               BIT(0)
+#define LDV12_SDBY                             BIT(1)
+#define LPLDO_HSM                              BIT(2)
+#define LPLDO_LSM_DIS                          BIT(3)
+#define _LDV12_VADJ(x)                         (((x) & 0xF) << 4)
+
+#define XTAL_EN                                        BIT(0)
+#define XTAL_BSEL                              BIT(1)
+#define _XTAL_BOSC(x)                          (((x) & 0x3) << 2)
+#define _XTAL_CADJ(x)                          (((x) & 0xF) << 4)
+#define XTAL_GATE_USB                          BIT(8)
+#define _XTAL_USB_DRV(x)                       (((x) & 0x3) << 9)
+#define XTAL_GATE_AFE                          BIT(11)
+#define _XTAL_AFE_DRV(x)                       (((x) & 0x3) << 12)
+#define XTAL_RF_GATE                           BIT(14)
+#define _XTAL_RF_DRV(x)                                (((x) & 0x3) << 15)
+#define XTAL_GATE_DIG                          BIT(17)
+#define _XTAL_DIG_DRV(x)                       (((x) & 0x3) << 18)
+#define XTAL_BT_GATE                           BIT(20)
+#define _XTAL_BT_DRV(x)                                (((x) & 0x3) << 21)
+#define _XTAL_GPIO(x)                          (((x) & 0x7) << 23)
+
+#define CKDLY_AFE                              BIT(26)
+#define CKDLY_USB                              BIT(27)
+#define CKDLY_DIG                              BIT(28)
+#define CKDLY_BT                               BIT(29)
+
+#define APLL_EN                                        BIT(0)
+#define APLL_320_EN                            BIT(1)
+#define APLL_FREF_SEL                          BIT(2)
+#define APLL_EDGE_SEL                          BIT(3)
+#define APLL_WDOGB                             BIT(4)
+#define APLL_LPFEN                             BIT(5)
+
+#define APLL_REF_CLK_13MHZ                     0x1
+#define APLL_REF_CLK_19_2MHZ                   0x2
+#define APLL_REF_CLK_20MHZ                     0x3
+#define APLL_REF_CLK_25MHZ                     0x4
+#define APLL_REF_CLK_26MHZ                     0x5
+#define APLL_REF_CLK_38_4MHZ                   0x6
+#define APLL_REF_CLK_40MHZ                     0x7
+
+#define APLL_320EN                             BIT(14)
+#define APLL_80EN                              BIT(15)
+#define APLL_1MEN                              BIT(24)
+
+#define ALD_EN                                 BIT(18)
+#define EF_PD                                  BIT(19)
+#define EF_FLAG                                        BIT(31)
+
+#define EF_TRPT                                        BIT(7)
+#define LDOE25_EN                              BIT(31)
+
+#define RSM_EN                                 BIT(0)
+#define TIMER_EN                               BIT(4)
+
+#define TRSW0EN                                        BIT(2)
+#define TRSW1EN                                        BIT(3)
+#define EROM_EN                                        BIT(4)
+#define ENBT                                   BIT(5)
+#define ENUART                                 BIT(8)
+#define UART_910                               BIT(9)
+#define ENPMAC                                 BIT(10)
+#define SIC_SWRST                              BIT(11)
+#define ENSIC                                  BIT(12)
+#define SIC_23                                 BIT(13)
+#define ENHDP                                  BIT(14)
+#define SIC_LBK                                        BIT(15)
+
+#define LED0PL                                 BIT(4)
+#define LED1PL                                 BIT(12)
+#define LED0DIS                                        BIT(7)
+
+#define MCUFWDL_EN                             BIT(0)
+#define MCUFWDL_RDY                            BIT(1)
+#define FWDL_CHKSUM_RPT                                BIT(2)
+#define MACINI_RDY                             BIT(3)
+#define BBINI_RDY                              BIT(4)
+#define RFINI_RDY                              BIT(5)
+#define WINTINI_RDY                            BIT(6)
+#define CPRST                                  BIT(23)
+
+#define XCLK_VLD                               BIT(0)
+#define ACLK_VLD                               BIT(1)
+#define UCLK_VLD                               BIT(2)
+#define PCLK_VLD                               BIT(3)
+#define PCIRSTB                                        BIT(4)
+#define V15_VLD                                        BIT(5)
+#define TRP_B15V_EN                            BIT(7)
+#define SIC_IDLE                               BIT(8)
+#define BD_MAC2                                        BIT(9)
+#define BD_MAC1                                        BIT(10)
+#define IC_MACPHY_MODE                         BIT(11)
+#define VENDOR_ID                              BIT(19)
+#define PAD_HWPD_IDN                           BIT(22)
+#define TRP_VAUX_EN                            BIT(23)
+#define TRP_BT_EN                              BIT(24)
+#define BD_PKG_SEL                             BIT(25)
+#define BD_HCI_SEL                             BIT(26)
+#define TYPE_ID                                        BIT(27)
+
+#define CHIP_VER_RTL_MASK                      0xF000
+#define CHIP_VER_RTL_SHIFT                     12
+
+#define REG_LBMODE                             (REG_CR + 3)
+
+#define HCI_TXDMA_EN                           BIT(0)
+#define HCI_RXDMA_EN                           BIT(1)
+#define TXDMA_EN                               BIT(2)
+#define RXDMA_EN                               BIT(3)
+#define PROTOCOL_EN                            BIT(4)
+#define SCHEDULE_EN                            BIT(5)
+#define MACTXEN                                        BIT(6)
+#define MACRXEN                                        BIT(7)
+#define ENSWBCN                                        BIT(8)
+#define ENSEC                                  BIT(9)
+
+#define _NETTYPE(x)                            (((x) & 0x3) << 16)
+#define MASK_NETTYPE                           0x30000
+#define NT_NO_LINK                             0x0
+#define NT_LINK_AD_HOC                         0x1
+#define NT_LINK_AP                             0x2
+#define NT_AS_AP                               0x3
+
+#define _LBMODE(x)                             (((x) & 0xF) << 24)
+#define MASK_LBMODE                            0xF000000
+#define LOOPBACK_NORMAL                                0x0
+#define LOOPBACK_IMMEDIATELY                   0xB
+#define LOOPBACK_MAC_DELAY                     0x3
+#define LOOPBACK_PHY                           0x1
+#define LOOPBACK_DMA                           0x7
+
+#define GET_RX_PAGE_SIZE(value)                        ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value)                        (((value) & 0xF0) >> 4)
+#define _PSRX_MASK                             0xF
+#define _PSTX_MASK                             0xF0
+#define _PSRX(x)                               (x)
+#define _PSTX(x)                               ((x) << 4)
+
+#define PBP_64                                 0x0
+#define PBP_128                                        0x1
+#define PBP_256                                        0x2
+#define PBP_512                                        0x3
+#define PBP_1024                               0x4
+
+#define RXDMA_ARBBW_EN                         BIT(0)
+#define RXSHFT_EN                              BIT(1)
+#define RXDMA_AGG_EN                           BIT(2)
+#define QS_VO_QUEUE                            BIT(8)
+#define QS_VI_QUEUE                            BIT(9)
+#define QS_BE_QUEUE                            BIT(10)
+#define QS_BK_QUEUE                            BIT(11)
+#define QS_MANAGER_QUEUE                       BIT(12)
+#define QS_HIGH_QUEUE                          BIT(13)
+
+#define HQSEL_VOQ                              BIT(0)
+#define HQSEL_VIQ                              BIT(1)
+#define HQSEL_BEQ                              BIT(2)
+#define HQSEL_BKQ                              BIT(3)
+#define HQSEL_MGTQ                             BIT(4)
+#define HQSEL_HIQ                              BIT(5)
+
+#define _TXDMA_HIQ_MAP(x)                      (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x)                      (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x)                      (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x)                      (((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x)                      (((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x)                      (((x)&0x3) << 4)
+
+#define QUEUE_LOW                              1
+#define QUEUE_NORMAL                           2
+#define QUEUE_HIGH                             3
+
+#define _LLT_NO_ACTIVE                         0x0
+#define _LLT_WRITE_ACCESS                      0x1
+#define _LLT_READ_ACCESS                       0x2
+
+#define _LLT_INIT_DATA(x)                      ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x)                      (((x) & 0xFF) << 8)
+#define _LLT_OP(x)                             (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x)                       (((x) >> 30) & 0x3)
+
+#define BB_WRITE_READ_MASK                     (BIT(31) | BIT(30))
+#define BB_WRITE_EN                            BIT(30)
+#define BB_READ_EN                             BIT(31)
+
+#define _HPQ(x)                                        ((x) & 0xFF)
+#define _LPQ(x)                                        (((x) & 0xFF) << 8)
+#define _PUBQ(x)                               (((x) & 0xFF) << 16)
+#define _NPQ(x)                                        ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS                         BIT(24)
+#define LPQ_PUBLIC_DIS                         BIT(25)
+#define LD_RQPN                                        BIT(31)
+
+#define BCN_VALID                              BIT(16)
+#define BCN_HEAD(x)                            (((x) & 0xFF) << 8)
+#define        BCN_HEAD_MASK                           0xFF00
+
+#define BLK_DESC_NUM_SHIFT                     4
+#define BLK_DESC_NUM_MASK                      0xF
+
+#define DROP_DATA_EN                           BIT(9)
+
+#define EN_AMPDU_RTY_NEW                       BIT(7)
+
+#define _INIRTSMCS_SEL(x)                      ((x) & 0x3F)
+
+#define _SPEC_SIFS_CCK(x)                      ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x)                     (((x) & 0xFF) << 8)
+
+#define RATE_REG_BITMAP_ALL                    0xFFFFF
+
+#define _RRSC_BITMAP(x)                                ((x) & 0xFFFFF)
+
+#define _RRSR_RSC(x)                           (((x) & 0x3) << 21)
+#define RRSR_RSC_RESERVED                      0x0
+#define RRSR_RSC_UPPER_SUBCHANNEL              0x1
+#define RRSR_RSC_LOWER_SUBCHANNEL              0x2
+#define RRSR_RSC_DUPLICATE_MODE                        0x3
+
+#define USE_SHORT_G1                           BIT(20)
+
+#define _AGGLMT_MCS0(x)                                ((x) & 0xF)
+#define _AGGLMT_MCS1(x)                                (((x) & 0xF) << 4)
+#define _AGGLMT_MCS2(x)                                (((x) & 0xF) << 8)
+#define _AGGLMT_MCS3(x)                                (((x) & 0xF) << 12)
+#define _AGGLMT_MCS4(x)                                (((x) & 0xF) << 16)
+#define _AGGLMT_MCS5(x)                                (((x) & 0xF) << 20)
+#define _AGGLMT_MCS6(x)                                (((x) & 0xF) << 24)
+#define _AGGLMT_MCS7(x)                                (((x) & 0xF) << 28)
+
+#define        RETRY_LIMIT_SHORT_SHIFT                 8
+#define        RETRY_LIMIT_LONG_SHIFT                  0
+
+#define _DARF_RC1(x)                           ((x) & 0x1F)
+#define _DARF_RC2(x)                           (((x) & 0x1F) << 8)
+#define _DARF_RC3(x)                           (((x) & 0x1F) << 16)
+#define _DARF_RC4(x)                           (((x) & 0x1F) << 24)
+#define _DARF_RC5(x)                           ((x) & 0x1F)
+#define _DARF_RC6(x)                           (((x) & 0x1F) << 8)
+#define _DARF_RC7(x)                           (((x) & 0x1F) << 16)
+#define _DARF_RC8(x)                           (((x) & 0x1F) << 24)
+
+#define _RARF_RC1(x)                           ((x) & 0x1F)
+#define _RARF_RC2(x)                           (((x) & 0x1F) << 8)
+#define _RARF_RC3(x)                           (((x) & 0x1F) << 16)
+#define _RARF_RC4(x)                           (((x) & 0x1F) << 24)
+#define _RARF_RC5(x)                           ((x) & 0x1F)
+#define _RARF_RC6(x)                           (((x) & 0x1F) << 8)
+#define _RARF_RC7(x)                           (((x) & 0x1F) << 16)
+#define _RARF_RC8(x)                           (((x) & 0x1F) << 24)
+
+#define AC_PARAM_TXOP_LIMIT_OFFSET             16
+#define AC_PARAM_ECW_MAX_OFFSET                        12
+#define AC_PARAM_ECW_MIN_OFFSET                        8
+#define AC_PARAM_AIFS_OFFSET                   0
+
+#define _AIFS(x)                               (x)
+#define _ECW_MAX_MIN(x)                                ((x) << 8)
+#define _TXOP_LIMIT(x)                         ((x) << 16)
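+
+/*
+ * Sketch of intended usage (illustrative helper, not part of this register
+ * map): the three macros above pack the AIFS, ECWmin/ECWmax and TXOP-limit
+ * fields of one EDCA AC parameter register at the AC_PARAM_*_OFFSET
+ * positions.
+ */
+static inline u32 edca_ac_param(u32 aifs, u32 ecw_max_min, u32 txop_limit)
+{
+       return _AIFS(aifs) | _ECW_MAX_MIN(ecw_max_min) |
+              _TXOP_LIMIT(txop_limit);
+}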
+
+#define _BCNIFS(x)                             ((x) & 0xFF)
+#define _BCNECW(x)                             ((((x) & 0xF)) << 8)
+
+#define _LRL(x)                                        ((x) & 0x3F)
+#define _SRL(x)                                        (((x) & 0x3F) << 8)
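+
+/*
+ * Sketch of intended usage (illustrative helper, not part of this register
+ * map): pack the long and short retry limits into one register value; the
+ * field positions follow RETRY_LIMIT_LONG_SHIFT and RETRY_LIMIT_SHORT_SHIFT.
+ */
+static inline u32 retry_limit_val(u32 long_limit, u32 short_limit)
+{
+       return _LRL(long_limit) | _SRL(short_limit);
+}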
+
+#define _SIFS_CCK_CTX(x)                       ((x) & 0xFF)
+#define _SIFS_CCK_TRX(x)                       (((x) & 0xFF) << 8)
+
+#define _SIFS_OFDM_CTX(x)                      ((x) & 0xFF)
+#define _SIFS_OFDM_TRX(x)                      (((x) & 0xFF) << 8)
+
+#define _TBTT_PROHIBIT_HOLD(x)                 (((x) & 0xFF) << 8)
+
+#define DIS_EDCA_CNT_DWN                       BIT(11)
+
+#define EN_MBSSID                              BIT(1)
+#define EN_TXBCN_RPT                           BIT(2)
+#define        EN_BCN_FUNCTION                         BIT(3)
+
+#define TSFTR_RST                              BIT(0)
+#define TSFTR1_RST                             BIT(1)
+
+#define STOP_BCNQ                              BIT(6)
+
+#define        DIS_TSF_UDT0_NORMAL_CHIP                BIT(4)
+#define        DIS_TSF_UDT0_TEST_CHIP                  BIT(5)
+
+#define        ACMHW_HWEN                              BIT(0)
+#define        ACMHW_BEQEN                             BIT(1)
+#define        ACMHW_VIQEN                             BIT(2)
+#define        ACMHW_VOQEN                             BIT(3)
+#define        ACMHW_BEQSTATUS                         BIT(4)
+#define        ACMHW_VIQSTATUS                         BIT(5)
+#define        ACMHW_VOQSTATUS                         BIT(6)
+
+#define APSDOFF                                        BIT(6)
+#define APSDOFF_STATUS                         BIT(7)
+
+#define BW_20MHZ                               BIT(2)
+
+#define RATE_BITMAP_ALL                                0xFFFFF
+
+#define RATE_RRSR_CCK_ONLY_1M                  0xFFFF1
+
+#define TSFRST                                 BIT(0)
+#define DIS_GCLK                               BIT(1)
+#define PAD_SEL                                        BIT(2)
+#define PWR_ST                                 BIT(6)
+#define PWRBIT_OW_EN                           BIT(7)
+#define ACRC                                   BIT(8)
+#define CFENDFORM                              BIT(9)
+#define ICV                                    BIT(10)
+
+#define AAP                                    BIT(0)
+#define APM                                    BIT(1)
+#define AM                                     BIT(2)
+#define AB                                     BIT(3)
+#define ADD3                                   BIT(4)
+#define APWRMGT                                        BIT(5)
+#define CBSSID                                 BIT(6)
+#define CBSSID_DATA                            BIT(6)
+#define CBSSID_BCN                             BIT(7)
+#define ACRC32                                 BIT(8)
+#define AICV                                   BIT(9)
+#define ADF                                    BIT(11)
+#define ACF                                    BIT(12)
+#define AMF                                    BIT(13)
+#define HTC_LOC_CTRL                           BIT(14)
+#define UC_DATA_EN                             BIT(16)
+#define BM_DATA_EN                             BIT(17)
+#define MFBEN                                  BIT(22)
+#define LSIGEN                                 BIT(23)
+#define ENMBID                                 BIT(24)
+#define APP_BASSN                              BIT(27)
+#define APP_PHYSTS                             BIT(28)
+#define APP_ICV                                        BIT(29)
+#define APP_MIC                                        BIT(30)
+#define APP_FCS                                        BIT(31)
+
+#define _MIN_SPACE(x)                  ((x) & 0x7)
+#define _SHORT_GI_PADDING(x)           (((x) & 0x1F) << 3)
+
+#define RXERR_TYPE_OFDM_PPDU           0
+#define RXERR_TYPE_OFDM_FALSE_ALARM    1
+#define        RXERR_TYPE_OFDM_MPDU_OK         2
+#define RXERR_TYPE_OFDM_MPDU_FAIL      3
+#define RXERR_TYPE_CCK_PPDU            4
+#define RXERR_TYPE_CCK_FALSE_ALARM     5
+#define RXERR_TYPE_CCK_MPDU_OK         6
+#define RXERR_TYPE_CCK_MPDU_FAIL       7
+#define RXERR_TYPE_HT_PPDU             8
+#define RXERR_TYPE_HT_FALSE_ALARM      9
+#define RXERR_TYPE_HT_MPDU_TOTAL       10
+#define RXERR_TYPE_HT_MPDU_OK          11
+#define RXERR_TYPE_HT_MPDU_FAIL                12
+#define RXERR_TYPE_RX_FULL_DROP                15
+
+#define RXERR_COUNTER_MASK             0xFFFFF
+#define RXERR_RPT_RST                  BIT(27)
+#define _RXERR_RPT_SEL(type)           ((type) << 28)
+
+#define        SCR_TXUSEDK                     BIT(0)
+#define        SCR_RXUSEDK                     BIT(1)
+#define        SCR_TXENCENABLE                 BIT(2)
+#define        SCR_RXDECENABLE                 BIT(3)
+#define        SCR_SKBYA2                      BIT(4)
+#define        SCR_NOSKMC                      BIT(5)
+#define SCR_TXBCUSEDK                  BIT(6)
+#define SCR_RXBCUSEDK                  BIT(7)
+
+#define XCLK_VLD                       BIT(0)
+#define ACLK_VLD                       BIT(1)
+#define UCLK_VLD                       BIT(2)
+#define PCLK_VLD                       BIT(3)
+#define PCIRSTB                                BIT(4)
+#define V15_VLD                                BIT(5)
+#define TRP_B15V_EN                    BIT(7)
+#define SIC_IDLE                       BIT(8)
+#define BD_MAC2                                BIT(9)
+#define BD_MAC1                                BIT(10)
+#define IC_MACPHY_MODE                 BIT(11)
+#define BT_FUNC                                BIT(16)
+#define VENDOR_ID                      BIT(19)
+#define PAD_HWPD_IDN                   BIT(22)
+#define TRP_VAUX_EN                    BIT(23)
+#define TRP_BT_EN                      BIT(24)
+#define BD_PKG_SEL                     BIT(25)
+#define BD_HCI_SEL                     BIT(26)
+#define TYPE_ID                                BIT(27)
+
+#define USB_IS_HIGH_SPEED              0
+#define USB_IS_FULL_SPEED              1
+#define USB_SPEED_MASK                 BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK         0xF
+#define USB_NORMAL_SIE_EP_SHIFT                4
+
+#define USB_TEST_EP_MASK               0x30
+#define USB_TEST_EP_SHIFT              4
+
+#define USB_AGG_EN                     BIT(3)
+
+#define MAC_ADDR_LEN                   6
+#define LAST_ENTRY_OF_TX_PKT_BUFFER    175 /* 255 for 88e */
+
+#define POLLING_LLT_THRESHOLD          20
+#define POLLING_READY_TIMEOUT_COUNT    3000
+
+#define        MAX_MSS_DENSITY_2T              0x13
+#define        MAX_MSS_DENSITY_1T              0x0A
+
+#define EPROM_CMD_OPERATING_MODE_MASK  (BIT(7) | BIT(6))
+#define EPROM_CMD_CONFIG               0x3
+#define EPROM_CMD_LOAD                 1
+
+#define        HWSET_MAX_SIZE_92S              HWSET_MAX_SIZE
+
+#define        HAL_8192C_HW_GPIO_WPS_BIT       BIT(2)
+
+#define        RPMAC_RESET                     0x100
+#define        RPMAC_TXSTART                   0x104
+#define        RPMAC_TXLEGACYSIG               0x108
+#define        RPMAC_TXHTSIG1                  0x10c
+#define        RPMAC_TXHTSIG2                  0x110
+#define        RPMAC_PHYDEBUG                  0x114
+#define        RPMAC_TXPACKETNUM               0x118
+#define        RPMAC_TXIDLE                    0x11c
+#define        RPMAC_TXMACHEADER0              0x120
+#define        RPMAC_TXMACHEADER1              0x124
+#define        RPMAC_TXMACHEADER2              0x128
+#define        RPMAC_TXMACHEADER3              0x12c
+#define        RPMAC_TXMACHEADER4              0x130
+#define        RPMAC_TXMACHEADER5              0x134
+#define        RPMAC_TXDADATYPE                0x138
+#define        RPMAC_TXRANDOMSEED              0x13c
+#define        RPMAC_CCKPLCPPREAMBLE           0x140
+#define        RPMAC_CCKPLCPHEADER             0x144
+#define        RPMAC_CCKCRC16                  0x148
+#define        RPMAC_OFDMRXCRC32OK             0x170
+#define        RPMAC_OFDMRXCRC32ER             0x174
+#define        RPMAC_OFDMRXPARITYER            0x178
+#define        RPMAC_OFDMRXCRC8ER              0x17c
+#define        RPMAC_CCKCRXRC16ER              0x180
+#define        RPMAC_CCKCRXRC32ER              0x184
+#define        RPMAC_CCKCRXRC32OK              0x188
+#define        RPMAC_TXSTATUS                  0x18c
+
+#define        RFPGA0_RFMOD                    0x800
+
+#define        RFPGA0_TXINFO                   0x804
+#define        RFPGA0_PSDFUNCTION              0x808
+
+#define        RFPGA0_TXGAINSTAGE              0x80c
+
+#define        RFPGA0_RFTIMING1                0x810
+#define        RFPGA0_RFTIMING2                0x814
+
+#define        RFPGA0_XA_HSSIPARAMETER1        0x820
+#define        RFPGA0_XA_HSSIPARAMETER2        0x824
+#define        RFPGA0_XB_HSSIPARAMETER1        0x828
+#define        RFPGA0_XB_HSSIPARAMETER2        0x82c
+
+#define        RFPGA0_XA_LSSIPARAMETER         0x840
+#define        RFPGA0_XB_LSSIPARAMETER         0x844
+
+#define        RFPGA0_RFWAKEUPPARAMETER        0x850
+#define        RFPGA0_RFSLEEPUPPARAMETER       0x854
+
+#define        RFPGA0_XAB_SWITCHCONTROL        0x858
+#define        RFPGA0_XCD_SWITCHCONTROL        0x85c
+
+#define        RFPGA0_XA_RFINTERFACEOE         0x860
+#define        RFPGA0_XB_RFINTERFACEOE         0x864
+
+#define        RFPGA0_XAB_RFINTERFACESW        0x870
+#define        RFPGA0_XCD_RFINTERFACESW        0x874
+
+#define        RFPGA0_XAB_RFPARAMETER          0x878
+#define        RFPGA0_XCD_RFPARAMETER          0x87c
+
+#define        RFPGA0_ANALOGPARAMETER1         0x880
+#define        RFPGA0_ANALOGPARAMETER2         0x884
+#define        RFPGA0_ANALOGPARAMETER3         0x888
+#define        RFPGA0_ANALOGPARAMETER4         0x88c
+
+#define        RFPGA0_XA_LSSIREADBACK          0x8a0
+#define        RFPGA0_XB_LSSIREADBACK          0x8a4
+#define        RFPGA0_XC_LSSIREADBACK          0x8a8
+#define        RFPGA0_XD_LSSIREADBACK          0x8ac
+
+#define        RFPGA0_PSDREPORT                0x8b4
+#define        TRANSCEIVEA_HSPI_READBACK       0x8b8
+#define        TRANSCEIVEB_HSPI_READBACK       0x8bc
+#define        REG_SC_CNT                      0x8c4
+#define        RFPGA0_XAB_RFINTERFACERB        0x8e0
+#define        RFPGA0_XCD_RFINTERFACERB        0x8e4
+
+#define        RFPGA1_RFMOD                    0x900
+
+#define        RFPGA1_TXBLOCK                  0x904
+#define        RFPGA1_DEBUGSELECT              0x908
+#define        RFPGA1_TXINFO                   0x90c
+
+#define        RCCK0_SYSTEM                    0xa00
+
+#define        RCCK0_AFESETTING                0xa04
+#define        RCCK0_CCA                       0xa08
+
+#define        RCCK0_RXAGC1                    0xa0c
+#define        RCCK0_RXAGC2                    0xa10
+
+#define        RCCK0_RXHP                      0xa14
+
+#define        RCCK0_DSPPARAMETER1             0xa18
+#define        RCCK0_DSPPARAMETER2             0xa1c
+
+#define        RCCK0_TXFILTER1                 0xa20
+#define        RCCK0_TXFILTER2                 0xa24
+#define        RCCK0_DEBUGPORT                 0xa28
+#define        RCCK0_FALSEALARMREPORT          0xa2c
+#define        RCCK0_TRSSIREPORT               0xa50
+#define        RCCK0_RXREPORT                  0xa54
+#define        RCCK0_FACOUNTERLOWER            0xa5c
+#define        RCCK0_FACOUNTERUPPER            0xa58
+#define        RCCK0_CCA_CNT                   0xa60
+
+/* PageB(0xB00) */
+#define        RPDP_ANTA                       0xb00
+#define        RPDP_ANTA_4                     0xb04
+#define        RPDP_ANTA_8                     0xb08
+#define        RPDP_ANTA_C                     0xb0c
+#define        RPDP_ANTA_10                    0xb10
+#define        RPDP_ANTA_14                    0xb14
+#define        RPDP_ANTA_18                    0xb18
+#define        RPDP_ANTA_1C                    0xb1c
+#define        RPDP_ANTA_20                    0xb20
+#define        RPDP_ANTA_24                    0xb24
+
+#define        RCONFIG_PMPD_ANTA               0xb28
+#define        CONFIG_RAM64X16                 0xb2c
+
+#define        RBNDA                           0xb30
+#define        RHSSIPAR                        0xb34
+
+#define        RCONFIG_ANTA                    0xb68
+#define        RCONFIG_ANTB                    0xb6c
+
+#define        RPDP_ANTB                       0xb70
+#define        RPDP_ANTB_4                     0xb74
+#define        RPDP_ANTB_8                     0xb78
+#define        RPDP_ANTB_C                     0xb7c
+#define        RPDP_ANTB_10                    0xb80
+#define        RPDP_ANTB_14                    0xb84
+#define        RPDP_ANTB_18                    0xb88
+#define        RPDP_ANTB_1C                    0xb8c
+#define        RPDP_ANTB_20                    0xb90
+#define        RPDP_ANTB_24                    0xb94
+
+#define        RCONFIG_PMPD_ANTB               0xb98
+
+#define        RBNDB                           0xba0
+
+#define        RAPK                            0xbd8
+#define        RPM_RX0_ANTA                    0xbdc
+#define        RPM_RX1_ANTA                    0xbe0
+#define        RPM_RX2_ANTA                    0xbe4
+#define        RPM_RX3_ANTA                    0xbe8
+#define        RPM_RX0_ANTB                    0xbec
+#define        RPM_RX1_ANTB                    0xbf0
+#define        RPM_RX2_ANTB                    0xbf4
+#define        RPM_RX3_ANTB                    0xbf8
+
+/*Page C*/
+#define        ROFDM0_LSTF                     0xc00
+
+#define        ROFDM0_TRXPATHENABLE            0xc04
+#define        ROFDM0_TRMUXPAR                 0xc08
+#define        ROFDM0_TRSWISOLATION            0xc0c
+
+#define        ROFDM0_XARXAFE                  0xc10
+#define        ROFDM0_XARXIQIMBALANCE          0xc14
+#define        ROFDM0_XBRXAFE                  0xc18
+#define        ROFDM0_XBRXIQIMBALANCE          0xc1c
+#define        ROFDM0_XCRXAFE                  0xc20
+#define        ROFDM0_XCRXIQIMBANLANCE         0xc24
+#define        ROFDM0_XDRXAFE                  0xc28
+#define        ROFDM0_XDRXIQIMBALANCE          0xc2c
+
+#define        ROFDM0_RXDETECTOR1              0xc30
+#define        ROFDM0_RXDETECTOR2              0xc34
+#define        ROFDM0_RXDETECTOR3              0xc38
+#define        ROFDM0_RXDETECTOR4              0xc3c
+
+#define        ROFDM0_RXDSP                    0xc40
+#define        ROFDM0_CFOANDDAGC               0xc44
+#define        ROFDM0_CCADROPTHRESHOLD         0xc48
+#define        ROFDM0_ECCATHRESHOLD            0xc4c
+
+#define        ROFDM0_XAAGCCORE1               0xc50
+#define        ROFDM0_XAAGCCORE2               0xc54
+#define        ROFDM0_XBAGCCORE1               0xc58
+#define        ROFDM0_XBAGCCORE2               0xc5c
+#define        ROFDM0_XCAGCCORE1               0xc60
+#define        ROFDM0_XCAGCCORE2               0xc64
+#define        ROFDM0_XDAGCCORE1               0xc68
+#define        ROFDM0_XDAGCCORE2               0xc6c
+
+#define        ROFDM0_AGCPARAMETER1            0xc70
+#define        ROFDM0_AGCPARAMETER2            0xc74
+#define        ROFDM0_AGCRSSITABLE             0xc78
+#define        ROFDM0_HTSTFAGC                 0xc7c
+
+#define        ROFDM0_XATXIQIMBALANCE          0xc80
+#define        ROFDM0_XATXAFE                  0xc84
+#define        ROFDM0_XBTXIQIMBALANCE          0xc88
+#define        ROFDM0_XBTXAFE                  0xc8c
+#define        ROFDM0_XCTXIQIMBALANCE          0xc90
+#define        ROFDM0_XCTXAFE                  0xc94
+#define        ROFDM0_XDTXIQIMBALANCE          0xc98
+#define        ROFDM0_XDTXAFE                  0xc9c
+
+#define ROFDM0_RXIQEXTANTA             0xca0
+#define        ROFDM0_TXCOEFF1                 0xca4
+#define        ROFDM0_TXCOEFF2                 0xca8
+#define        ROFDM0_TXCOEFF3                 0xcac
+#define        ROFDM0_TXCOEFF4                 0xcb0
+#define        ROFDM0_TXCOEFF5                 0xcb4
+#define        ROFDM0_TXCOEFF6                 0xcb8
+
+#define        ROFDM0_RXHPPARAMETER            0xce0
+#define        ROFDM0_TXPSEUDONOISEWGT         0xce4
+#define        ROFDM0_FRAMESYNC                0xcf0
+#define        ROFDM0_DFSREPORT                0xcf4
+
+#define        ROFDM1_LSTF                     0xd00
+#define        ROFDM1_TRXPATHENABLE            0xd04
+
+#define        ROFDM1_CF0                      0xd08
+#define        ROFDM1_CSI1                     0xd10
+#define        ROFDM1_SBD                      0xd14
+#define        ROFDM1_CSI2                     0xd18
+#define        ROFDM1_CFOTRACKING              0xd2c
+#define        ROFDM1_TRXMESAURE1              0xd34
+#define        ROFDM1_INTFDET                  0xd3c
+#define        ROFDM1_PSEUDONOISESTATEAB       0xd50
+#define        ROFDM1_PSEUDONOISESTATECD       0xd54
+#define        ROFDM1_RXPSEUDONOISEWGT         0xd58
+
+#define        ROFDM_PHYCOUNTER1               0xda0
+#define        ROFDM_PHYCOUNTER2               0xda4
+#define        ROFDM_PHYCOUNTER3               0xda8
+
+#define        ROFDM_SHORTCFOAB                0xdac
+#define        ROFDM_SHORTCFOCD                0xdb0
+#define        ROFDM_LONGCFOAB                 0xdb4
+#define        ROFDM_LONGCFOCD                 0xdb8
+#define        ROFDM_TAILCF0AB                 0xdbc
+#define        ROFDM_TAILCF0CD                 0xdc0
+#define        ROFDM_PWMEASURE1                0xdc4
+#define        ROFDM_PWMEASURE2                0xdc8
+#define        ROFDM_BWREPORT                  0xdcc
+#define        ROFDM_AGCREPORT                 0xdd0
+#define        ROFDM_RXSNR                     0xdd4
+#define        ROFDM_RXEVMCSI                  0xdd8
+#define        ROFDM_SIGREPORT                 0xddc
+
+#define        RTXAGC_A_RATE18_06              0xe00
+#define        RTXAGC_A_RATE54_24              0xe04
+#define        RTXAGC_A_CCK1_MCS32             0xe08
+#define        RTXAGC_A_MCS03_MCS00            0xe10
+#define        RTXAGC_A_MCS07_MCS04            0xe14
+#define        RTXAGC_A_MCS11_MCS08            0xe18
+#define        RTXAGC_A_MCS15_MCS12            0xe1c
+
+#define        RTXAGC_B_RATE18_06              0x830
+#define        RTXAGC_B_RATE54_24              0x834
+#define        RTXAGC_B_CCK1_55_MCS32          0x838
+#define        RTXAGC_B_MCS03_MCS00            0x83c
+#define        RTXAGC_B_MCS07_MCS04            0x848
+#define        RTXAGC_B_MCS11_MCS08            0x84c
+#define        RTXAGC_B_MCS15_MCS12            0x868
+#define        RTXAGC_B_CCK11_A_CCK2_11        0x86c
+
+#define        RFPGA0_IQK                      0xe28
+#define        RTX_IQK_TONE_A                  0xe30
+#define        RRX_IQK_TONE_A                  0xe34
+#define        RTX_IQK_PI_A                    0xe38
+#define        RRX_IQK_PI_A                    0xe3c
+
+#define        RTX_IQK                         0xe40
+#define        RRX_IQK                         0xe44
+#define        RIQK_AGC_PTS                    0xe48
+#define        RIQK_AGC_RSP                    0xe4c
+#define        RTX_IQK_TONE_B                  0xe50
+#define        RRX_IQK_TONE_B                  0xe54
+#define        RTX_IQK_PI_B                    0xe58
+#define        RRX_IQK_PI_B                    0xe5c
+#define        RIQK_AGC_CONT                   0xe60
+
+#define        RBLUE_TOOTH                     0xe6c
+#define        RRX_WAIT_CCA                    0xe70
+#define        RTX_CCK_RFON                    0xe74
+#define        RTX_CCK_BBON                    0xe78
+#define        RTX_OFDM_RFON                   0xe7c
+#define        RTX_OFDM_BBON                   0xe80
+#define        RTX_TO_RX                       0xe84
+#define        RTX_TO_TX                       0xe88
+#define        RRX_CCK                         0xe8c
+
+#define        RTX_POWER_BEFORE_IQK_A          0xe94
+#define        RTX_POWER_AFTER_IQK_A           0xe9c
+
+#define        RRX_POWER_BEFORE_IQK_A          0xea0
+#define        RRX_POWER_BEFORE_IQK_A_2        0xea4
+#define        RRX_POWER_AFTER_IQK_A           0xea8
+#define        RRX_POWER_AFTER_IQK_A_2         0xeac
+
+#define        RTX_POWER_BEFORE_IQK_B          0xeb4
+#define        RTX_POWER_AFTER_IQK_B           0xebc
+
+#define        RRX_POWER_BEFORE_IQK_B          0xec0
+#define        RRX_POWER_BEFORE_IQK_B_2        0xec4
+#define        RRX_POWER_AFTER_IQK_B           0xec8
+#define        RRX_POWER_AFTER_IQK_B_2         0xecc
+
+#define        RRX_OFDM                        0xed0
+#define        RRX_WAIT_RIFS                   0xed4
+#define        RRX_TO_RX                       0xed8
+#define        RSTANDBY                        0xedc
+#define        RSLEEP                          0xee0
+#define        RPMPD_ANAEN                     0xeec
+
+#define        RZEBRA1_HSSIENABLE              0x0
+#define        RZEBRA1_TRXENABLE1              0x1
+#define        RZEBRA1_TRXENABLE2              0x2
+#define        RZEBRA1_AGC                     0x4
+#define        RZEBRA1_CHARGEPUMP              0x5
+#define        RZEBRA1_CHANNEL                 0x7
+
+#define        RZEBRA1_TXGAIN                  0x8
+#define        RZEBRA1_TXLPF                   0x9
+#define        RZEBRA1_RXLPF                   0xb
+#define        RZEBRA1_RXHPFCORNER             0xc
+
+#define        RGLOBALCTRL                     0
+#define        RRTL8256_TXLPF                  19
+#define        RRTL8256_RXLPF                  11
+#define        RRTL8258_TXLPF                  0x11
+#define        RRTL8258_RXLPF                  0x13
+#define        RRTL8258_RSSILPF                0xa
+
+#define        RF_AC                           0x00
+
+#define        RF_IQADJ_G1                     0x01
+#define        RF_IQADJ_G2                     0x02
+#define        RF_POW_TRSW                     0x05
+
+#define        RF_GAIN_RX                      0x06
+#define        RF_GAIN_TX                      0x07
+
+#define        RF_TXM_IDAC                     0x08
+#define        RF_BS_IQGEN                     0x0F
+
+#define        RF_MODE1                        0x10
+#define        RF_MODE2                        0x11
+
+#define        RF_RX_AGC_HP                    0x12
+#define        RF_TX_AGC                       0x13
+#define        RF_BIAS                         0x14
+#define        RF_IPA                          0x15
+#define        RF_POW_ABILITY                  0x17
+#define        RF_MODE_AG                      0x18
+#define        RRFCHANNEL                      0x18
+#define        RF_CHNLBW                       0x18
+#define        RF_TOP                          0x19
+
+#define        RF_RX_G1                        0x1A
+#define        RF_RX_G2                        0x1B
+
+#define        RF_RX_BB2                       0x1C
+#define        RF_RX_BB1                       0x1D
+
+#define        RF_RCK1                         0x1E
+#define        RF_RCK2                         0x1F
+
+#define        RF_TX_G1                        0x20
+#define        RF_TX_G2                        0x21
+#define        RF_TX_G3                        0x22
+
+#define        RF_TX_BB1                       0x23
+#define        RF_T_METER                      0x42
+
+#define        RF_SYN_G1                       0x25
+#define        RF_SYN_G2                       0x26
+#define        RF_SYN_G3                       0x27
+#define        RF_SYN_G4                       0x28
+#define        RF_SYN_G5                       0x29
+#define        RF_SYN_G6                       0x2A
+#define        RF_SYN_G7                       0x2B
+#define        RF_SYN_G8                       0x2C
+
+#define        RF_RCK_OS                       0x30
+#define        RF_TXPA_G1                      0x31
+#define        RF_TXPA_G2                      0x32
+#define        RF_TXPA_G3                      0x33
+
+#define        RF_TX_BIAS_A                    0x35
+#define        RF_TX_BIAS_D                    0x36
+#define        RF_LOBF_9                       0x38
+#define        RF_RXRF_A3                      0x3C
+#define        RF_TRSW                         0x3F
+
+#define        RF_TXRF_A2                      0x41
+#define        RF_TXPA_G4                      0x46
+#define        RF_TXPA_A4                      0x4B
+
+#define        RF_WE_LUT                       0xEF
+
+#define        BBBRESETB                       0x100
+#define        BGLOBALRESETB                   0x200
+#define        BOFDMTXSTART                    0x4
+#define        BCCKTXSTART                     0x8
+#define        BCRC32DEBUG                     0x100
+#define        BPMACLOOPBACK                   0x10
+#define        BTXLSIG                         0xffffff
+#define        BOFDMTXRATE                     0xf
+#define        BOFDMTXRESERVED                 0x10
+#define        BOFDMTXLENGTH                   0x1ffe0
+#define        BOFDMTXPARITY                   0x20000
+#define        BTXHTSIG1                       0xffffff
+#define        BTXHTMCSRATE                    0x7f
+#define        BTXHTBW                         0x80
+#define        BTXHTLENGTH                     0xffff00
+#define        BTXHTSIG2                       0xffffff
+#define        BTXHTSMOOTHING                  0x1
+#define        BTXHTSOUNDING                   0x2
+#define        BTXHTRESERVED                   0x4
+#define        BTXHTAGGREATION                 0x8
+#define        BTXHTSTBC                       0x30
+#define        BTXHTADVANCECODING              0x40
+#define        BTXHTSHORTGI                    0x80
+#define        BTXHTNUMBERHT_LTF               0x300
+#define        BTXHTCRC8                       0x3fc00
+#define        BCOUNTERRESET                   0x10000
+#define        BNUMOFOFDMTX                    0xffff
+#define        BNUMOFCCKTX                     0xffff0000
+#define        BTXIDLEINTERVAL                 0xffff
+#define        BOFDMSERVICE                    0xffff0000
+#define        BTXMACHEADER                    0xffffffff
+#define        BTXDATAINIT                     0xff
+#define        BTXHTMODE                       0x100
+#define        BTXDATATYPE                     0x30000
+#define        BTXRANDOMSEED                   0xffffffff
+#define        BCCKTXPREAMBLE                  0x1
+#define        BCCKTXSFD                       0xffff0000
+#define        BCCKTXSIG                       0xff
+#define        BCCKTXSERVICE                   0xff00
+#define        BCCKLENGTHEXT                   0x8000
+#define        BCCKTXLENGHT                    0xffff0000
+#define        BCCKTXCRC16                     0xffff
+#define        BCCKTXSTATUS                    0x1
+#define        BOFDMTXSTATUS                   0x2
+#define IS_BB_REG_OFFSET_92S(_offset)  \
+       ((_offset >= 0x800) && (_offset <= 0xfff))
+
+#define        BRFMOD                          0x1
+#define        BJAPANMODE                      0x2
+#define        BCCKTXSC                        0x30
+#define        BCCKEN                          0x1000000
+#define        BOFDMEN                         0x2000000
+
+#define        BOFDMRXADCPHASE                 0x10000
+#define        BOFDMTXDACPHASE                 0x40000
+#define        BXATXAGC                        0x3f
+
+#define        BXBTXAGC                        0xf00
+#define        BXCTXAGC                        0xf000
+#define        BXDTXAGC                        0xf0000
+
+#define        BPASTART                        0xf0000000
+#define        BTRSTART                        0x00f00000
+#define        BRFSTART                        0x0000f000
+#define        BBBSTART                        0x000000f0
+#define        BBBCCKSTART                     0x0000000f
+#define        BPAEND                          0xf
+#define        BTREND                          0x0f000000
+#define        BRFEND                          0x000f0000
+#define        BCCAMASK                        0x000000f0
+#define        BR2RCCAMASK                     0x00000f00
+#define        BHSSI_R2TDELAY                  0xf8000000
+#define        BHSSI_T2RDELAY                  0xf80000
+#define        BCONTXHSSI                      0x400
+#define        BIGFROMCCK                      0x200
+#define        BAGCADDRESS                     0x3f
+#define        BRXHPTX                         0x7000
+#define        BRXHP2RX                        0x38000
+#define        BRXHPCCKINI                     0xc0000
+#define        BAGCTXCODE                      0xc00000
+#define        BAGCRXCODE                      0x300000
+
+#define        B3WIREDATALENGTH                0x800
+#define        B3WIREADDREAALENGTH             0x400
+
+#define        B3WIRERFPOWERDOWN               0x1
+#define        B5GPAPEPOLARITY                 0x40000000
+#define        B2GPAPEPOLARITY                 0x80000000
+#define        BRFSW_TXDEFAULTANT              0x3
+#define        BRFSW_TXOPTIONANT               0x30
+#define        BRFSW_RXDEFAULTANT              0x300
+#define        BRFSW_RXOPTIONANT               0x3000
+#define        BRFSI_3WIREDATA                 0x1
+#define        BRFSI_3WIRECLOCK                0x2
+#define        BRFSI_3WIRELOAD                 0x4
+#define        BRFSI_3WIRERW                   0x8
+#define        BRFSI_3WIRE                     0xf
+
+#define        BRFSI_RFENV                     0x10
+
+#define        BRFSI_TRSW                      0x20
+#define        BRFSI_TRSWB                     0x40
+#define        BRFSI_ANTSW                     0x100
+#define        BRFSI_ANTSWB                    0x200
+#define        BRFSI_PAPE                      0x400
+#define        BRFSI_PAPE5G                    0x800
+#define        BBANDSELECT                     0x1
+#define        BHTSIG2_GI                      0x80
+#define        BHTSIG2_SMOOTHING               0x01
+#define        BHTSIG2_SOUNDING                0x02
+#define        BHTSIG2_AGGREATON               0x08
+#define        BHTSIG2_STBC                    0x30
+#define        BHTSIG2_ADVCODING               0x40
+#define        BHTSIG2_NUMOFHTLTF              0x300
+#define        BHTSIG2_CRC8                    0x3fc
+#define        BHTSIG1_MCS                     0x7f
+#define        BHTSIG1_BANDWIDTH               0x80
+#define        BHTSIG1_HTLENGTH                0xffff
+#define        BLSIG_RATE                      0xf
+#define        BLSIG_RESERVED                  0x10
+#define        BLSIG_LENGTH                    0x1fffe
+#define        BLSIG_PARITY                    0x20
+#define        BCCKRXPHASE                     0x4
+
+#define        BLSSIREADADDRESS                0x7f800000
+#define        BLSSIREADEDGE                   0x80000000
+
+#define        BLSSIREADBACKDATA               0xfffff
+
+#define        BLSSIREADOKFLAG                 0x1000
+#define        BCCKSAMPLERATE                  0x8
+#define        BREGULATOR0STANDBY              0x1
+#define        BREGULATORPLLSTANDBY            0x2
+#define        BREGULATOR1STANDBY              0x4
+#define        BPLLPOWERUP                     0x8
+#define        BDPLLPOWERUP                    0x10
+#define        BDA10POWERUP                    0x20
+#define        BAD7POWERUP                     0x200
+#define        BDA6POWERUP                     0x2000
+#define        BXTALPOWERUP                    0x4000
+#define        B40MDCLKPOWERUP                 0x8000
+#define        BDA6DEBUGMODE                   0x20000
+#define        BDA6SWING                       0x380000
+
+#define        BADCLKPHASE                     0x4000000
+#define        B80MCLKDELAY    0x18000000
+#define        BAFEWATCHDOGENABLE      0x20000000
+
+#define        BXTALCAP01      0xc0000000
+#define        BXTALCAP23      0x3
+#define        BXTALCAP92X                     0x0f000000
+#define BXTALCAP       0x0f000000
+
+#define        BINTDIFCLKENABLE                0x400
+#define        BEXTSIGCLKENABLE                0x800
+#define        BBANDGAP_MBIAS_POWERUP          0x10000
+#define        BAD11SH_GAIN                    0xc0000
+#define        BAD11NPUT_RANGE                 0x700000
+#define        BAD110P_CURRENT                 0x3800000
+#define        BLPATH_LOOPBACK                 0x4000000
+#define        BQPATH_LOOPBACK                 0x8000000
+#define        BAFE_LOOPBACK                   0x10000000
+#define        BDA10_SWING                     0x7e0
+#define        BDA10_REVERSE                   0x800
+#define        BDA_CLK_SOURCE                  0x1000
+#define        BDA7INPUT_RANGE                 0x6000
+#define        BDA7_GAIN                       0x38000
+#define        BDA7OUTPUT_CM_MODE              0x40000
+#define        BDA7INPUT_CM_MODE               0x380000
+#define        BDA7CURRENT                     0xc00000
+#define        BREGULATOR_ADJUST               0x7000000
+#define        BAD11POWERUP_ATTX               0x1
+#define        BDA10PS_ATTX                    0x10
+#define        BAD11POWERUP_ATRX               0x100
+#define        BDA10PS_ATRX                    0x1000
+#define        BCCKRX_AGC_FORMAT               0x200
+#define        BPSDFFT_SAMPLE_POINT            0xc000
+#define        BPSD_AVERAGE_NUM                0x3000
+#define        BIQPATH_CONTROL 0xc00
+#define        BPSD_FREQ                       0x3ff
+#define        BPSD_ANTENNA_PATH               0x30
+#define        BPSD_IQ_SWITCH                  0x40
+#define        BPSD_RX_TRIGGER                 0x400000
+#define        BPSD_TX_TRIGGER                 0x80000000
+#define        BPSD_SINE_TONE_SCALE            0x7f000000
+#define        BPSD_REPORT                     0xffff
+
+#define        BOFDM_TXSC                      0x30000000
+#define        BCCK_TXON                       0x1
+#define        BOFDM_TXON                      0x2
+#define        BDEBUG_PAGE                     0xfff
+#define        BDEBUG_ITEM                     0xff
+#define        BANTL                           0x10
+#define        BANT_NONHT                      0x100
+#define        BANT_HT1                        0x1000
+#define        BANT_HT2                        0x10000
+#define        BANT_HT1S1                      0x100000
+#define        BANT_NONHTS1                    0x1000000
+
+#define        BCCK_BBMODE                     0x3
+#define        BCCK_TXPOWERSAVING              0x80
+#define        BCCK_RXPOWERSAVING              0x40
+
+#define        BCCK_SIDEBAND                   0x10
+
+#define        BCCK_SCRAMBLE                   0x8
+#define        BCCK_ANTDIVERSITY               0x8000
+#define        BCCK_CARRIER_RECOVERY           0x4000
+#define        BCCK_TXRATE                     0x3000
+#define        BCCK_DCCANCEL                   0x0800
+#define        BCCK_ISICANCEL                  0x0400
+#define        BCCK_MATCH_FILTER               0x0200
+#define        BCCK_EQUALIZER                  0x0100
+#define        BCCK_PREAMBLE_DETECT            0x800000
+#define        BCCK_FAST_FALSECCA              0x400000
+#define        BCCK_CH_ESTSTART                0x300000
+#define        BCCK_CCA_COUNT                  0x080000
+#define        BCCK_CS_LIM                     0x070000
+#define        BCCK_BIST_MODE                  0x80000000
+#define        BCCK_CCAMASK                    0x40000000
+#define        BCCK_TX_DAC_PHASE               0x4
+#define        BCCK_RX_ADC_PHASE               0x20000000
+#define        BCCKR_CP_MODE                   0x0100
+#define        BCCK_TXDC_OFFSET                0xf0
+#define        BCCK_RXDC_OFFSET                0xf
+#define        BCCK_CCA_MODE                   0xc000
+#define        BCCK_FALSECS_LIM                0x3f00
+#define        BCCK_CS_RATIO                   0xc00000
+#define        BCCK_CORGBIT_SEL                0x300000
+#define        BCCK_PD_LIM                     0x0f0000
+#define        BCCK_NEWCCA                     0x80000000
+#define        BCCK_RXHP_OF_IG                 0x8000
+#define        BCCK_RXIG                       0x7f00
+#define        BCCK_LNA_POLARITY               0x800000
+#define        BCCK_RX1ST_BAIN                 0x7f0000
+#define        BCCK_RF_EXTEND                  0x20000000
+#define        BCCK_RXAGC_SATLEVEL             0x1f000000
+#define        BCCK_RXAGC_SATCOUNT             0xe0
+#define        BCCKRXRFSETTLE                  0x1f
+#define        BCCK_FIXED_RXAGC                0x8000
+#define        BCCK_ANTENNA_POLARITY           0x2000
+#define        BCCK_TXFILTER_TYPE              0x0c00
+#define        BCCK_RXAGC_REPORTTYPE           0x0300
+#define        BCCK_RXDAGC_EN                  0x80000000
+#define        BCCK_RXDAGC_PERIOD              0x20000000
+#define        BCCK_RXDAGC_SATLEVEL            0x1f000000
+#define        BCCK_TIMING_RECOVERY            0x800000
+#define        BCCK_TXC0                       0x3f0000
+#define        BCCK_TXC1                       0x3f000000
+#define        BCCK_TXC2                       0x3f
+#define        BCCK_TXC3                       0x3f00
+#define        BCCK_TXC4                       0x3f0000
+#define        BCCK_TXC5                       0x3f000000
+#define        BCCK_TXC6                       0x3f
+#define        BCCK_TXC7                       0x3f00
+#define        BCCK_DEBUGPORT                  0xff0000
+#define        BCCK_DAC_DEBUG                  0x0f000000
+#define        BCCK_FALSEALARM_ENABLE          0x8000
+#define        BCCK_FALSEALARM_READ            0x4000
+#define        BCCK_TRSSI                      0x7f
+#define        BCCK_RXAGC_REPORT               0xfe
+#define        BCCK_RXREPORT_ANTSEL            0x80000000
+#define        BCCK_RXREPORT_MFOFF             0x40000000
+#define        BCCK_RXREPORT_SQLOSS            0x20000000
+#define        BCCK_RXREPORT_PKTLOSS           0x10000000
+#define        BCCK_RXREPORT_LOCKEDBIT         0x08000000
+#define        BCCK_RXREPORT_RATEERROR         0x04000000
+#define        BCCK_RXREPORT_RXRATE            0x03000000
+#define        BCCK_RXFA_COUNTER_LOWER         0xff
+#define        BCCK_RXFA_COUNTER_UPPER         0xff000000
+#define        BCCK_RXHPAGC_START              0xe000
+#define        BCCK_RXHPAGC_FINAL              0x1c00
+#define        BCCK_RXFALSEALARM_ENABLE        0x8000
+#define        BCCK_FACOUNTER_FREEZE           0x4000
+#define        BCCK_TXPATH_SEL                 0x10000000
+#define        BCCK_DEFAULT_RXPATH             0xc000000
+#define        BCCK_OPTION_RXPATH              0x3000000
+
+#define        BNUM_OFSTF      0x3
+#define        BSHIFT_L        0xc0
+#define        BGI_TH  0xc
+#define        BRXPATH_A       0x1
+#define        BRXPATH_B       0x2
+#define        BRXPATH_C       0x4
+#define        BRXPATH_D       0x8
+#define        BTXPATH_A       0x1
+#define        BTXPATH_B       0x2
+#define        BTXPATH_C       0x4
+#define        BTXPATH_D       0x8
+#define        BTRSSI_FREQ     0x200
+#define        BADC_BACKOFF    0x3000
+#define        BDFIR_BACKOFF   0xc000
+#define        BTRSSI_LATCH_PHASE      0x10000
+#define        BRX_LDC_OFFSET  0xff
+#define        BRX_QDC_OFFSET  0xff00
+#define        BRX_DFIR_MODE   0x1800000
+#define        BRX_DCNF_TYPE   0xe000000
+#define        BRXIQIMB_A      0x3ff
+#define        BRXIQIMB_B      0xfc00
+#define        BRXIQIMB_C      0x3f0000
+#define        BRXIQIMB_D      0xffc00000
+#define        BDC_DC_NOTCH    0x60000
+#define        BRXNB_NOTCH     0x1f000000
+#define        BPD_TH  0xf
+#define        BPD_TH_OPT2     0xc000
+#define        BPWED_TH        0x700
+#define        BIFMF_WIN_L     0x800
+#define        BPD_OPTION      0x1000
+#define        BMF_WIN_L       0xe000
+#define        BBW_SEARCH_L    0x30000
+#define        BWIN_ENH_L      0xc0000
+#define        BBW_TH  0x700000
+#define        BED_TH2 0x3800000
+#define        BBW_OPTION      0x4000000
+#define        BRADIO_TH       0x18000000
+#define        BWINDOW_L       0xe0000000
+#define        BSBD_OPTION     0x1
+#define        BFRAME_TH       0x1c
+#define        BFS_OPTION      0x60
+#define        BDC_SLOPE_CHECK 0x80
+#define        BFGUARD_COUNTER_DC_L    0xe00
+#define        BFRAME_WEIGHT_SHORT     0x7000
+#define        BSUB_TUNE       0xe00000
+#define        BFRAME_DC_LENGTH        0xe000000
+#define        BSBD_START_OFFSET       0x30000000
+#define        BFRAME_TH_2     0x7
+#define        BFRAME_GI2_TH   0x38
+#define        BGI2_SYNC_EN    0x40
+#define        BSARCH_SHORT_EARLY      0x300
+#define        BSARCH_SHORT_LATE       0xc00
+#define        BSARCH_GI2_LATE 0x70000
+#define        BCFOANTSUM      0x1
+#define        BCFOACC 0x2
+#define        BCFOSTARTOFFSET 0xc
+#define        BCFOLOOPBACK    0x70
+#define        BCFOSUMWEIGHT   0x80
+#define        BDAGCENABLE     0x10000
+#define        BTXIQIMB_A      0x3ff
+#define        BTXIQIMB_b      0xfc00
+#define        BTXIQIMB_C      0x3f0000
+#define        BTXIQIMB_D      0xffc00000
+#define        BTXIDCOFFSET    0xff
+#define        BTXIQDCOFFSET   0xff00
+#define        BTXDFIRMODE     0x10000
+#define        BTXPESUDO_NOISEON       0x4000000
+#define        BTXPESUDO_NOISE_A       0xff
+#define        BTXPESUDO_NOISE_B       0xff00
+#define        BTXPESUDO_NOISE_C       0xff0000
+#define        BTXPESUDO_NOISE_D       0xff000000
+#define        BCCA_DROPOPTION 0x20000
+#define        BCCA_DROPTHRES  0xfff00000
+#define        BEDCCA_H        0xf
+#define        BEDCCA_L        0xf0
+#define        BLAMBDA_ED      0x300
+#define        BRX_INITIALGAIN 0x7f
+#define        BRX_ANTDIV_EN   0x80
+#define        BRX_AGC_ADDRESS_FOR_LNA         0x7f00
+#define        BRX_HIGHPOWER_FLOW      0x8000
+#define        BRX_AGC_FREEZE_THRES            0xc0000
+#define        BRX_FREEZESTEP_AGC1     0x300000
+#define        BRX_FREEZESTEP_AGC2     0xc00000
+#define        BRX_FREEZESTEP_AGC3     0x3000000
+#define        BRX_FREEZESTEP_AGC0     0xc000000
+#define        BRXRSSI_CMP_EN  0x10000000
+#define        BRXQUICK_AGCEN  0x20000000
+#define        BRXAGC_FREEZE_THRES_MODE        0x40000000
+#define        BRX_OVERFLOW_CHECKTYPE  0x80000000
+#define        BRX_AGCSHIFT    0x7f
+#define        BTRSW_TRI_ONLY  0x80
+#define        BPOWER_THRES    0x300
+#define        BRXAGC_EN       0x1
+#define        BRXAGC_TOGETHER_EN      0x2
+#define        BRXAGC_MIN      0x4
+#define        BRXHP_INI       0x7
+#define        BRXHP_TRLNA     0x70
+#define        BRXHP_RSSI      0x700
+#define        BRXHP_BBP1      0x7000
+#define        BRXHP_BBP2      0x70000
+#define        BRXHP_BBP3      0x700000
+#define        BRSSI_H 0x7f0000
+#define        BRSSI_GEN       0x7f000000
+#define        BRXSETTLE_TRSW  0x7
+#define        BRXSETTLE_LNA   0x38
+#define        BRXSETTLE_RSSI  0x1c0
+#define        BRXSETTLE_BBP   0xe00
+#define        BRXSETTLE_RXHP  0x7000
+#define        BRXSETTLE_ANTSW_RSSI    0x38000
+#define        BRXSETTLE_ANTSW 0xc0000
+#define        BRXPROCESS_TIME_DAGC    0x300000
+#define        BRXSETTLE_HSSI  0x400000
+#define        BRXPROCESS_TIME_BBPPW   0x800000
+#define        BRXANTENNA_POWER_SHIFT  0x3000000
+#define        BRSSI_TABLE_SELECT      0xc000000
+#define        BRXHP_FINAL     0x7000000
+#define        BRXHPSETTLE_BBP 0x7
+#define        BRXHTSETTLE_HSSI        0x8
+#define        BRXHTSETTLE_RXHP        0x70
+#define        BRXHTSETTLE_BBPPW       0x80
+#define        BRXHTSETTLE_IDLE        0x300
+#define        BRXHTSETTLE_RESERVED    0x1c00
+#define        BRXHT_RXHP_EN   0x8000
+#define        BRXAGC_FREEZE_THRES     0x30000
+#define        BRXAGC_TOGETHEREN       0x40000
+#define        BRXHTAGC_MIN    0x80000
+#define        BRXHTAGC_EN     0x100000
+#define        BRXHTDAGC_EN    0x200000
+#define        BRXHT_RXHP_BBP  0x1c00000
+#define        BRXHT_RXHP_FINAL        0xe0000000
+#define        BRXPW_RADIO_TH  0x3
+#define        BRXPW_RADIO_EN  0x4
+#define        BRXMF_HOLD      0x3800
+#define        BRXPD_DELAY_TH1 0x38
+#define        BRXPD_DELAY_TH2 0x1c0
+#define        BRXPD_DC_COUNT_MAX      0x600
+#define        BRXPD_DELAY_TH  0x8000
+#define        BRXPROCESS_DELAY        0xf0000
+#define        BRXSEARCHRANGE_GI2_EARLY        0x700000
+#define        BRXFRAME_FUARD_COUNTER_L        0x3800000
+#define        BRXSGI_GUARD_L  0xc000000
+#define        BRXSGI_SEARCH_L 0x30000000
+#define        BRXSGI_TH       0xc0000000
+#define        BDFSCNT0        0xff
+#define        BDFSCNT1        0xff00
+#define        BDFSFLAG        0xf0000
+#define        BMF_WEIGHT_SUM  0x300000
+#define        BMINIDX_TH      0x7f000000
+#define        BDAFORMAT       0x40000
+#define        BTXCH_EMU_ENABLE        0x01000000
+#define        BTRSW_ISOLATION_A       0x7f
+#define        BTRSW_ISOLATION_B       0x7f00
+#define        BTRSW_ISOLATION_C       0x7f0000
+#define        BTRSW_ISOLATION_D       0x7f000000
+#define        BEXT_LNA_GAIN   0x7c00
+
+#define        BSTBC_EN        0x4
+#define        BANTENNA_MAPPING        0x10
+#define        BNSS    0x20
+#define        BCFO_ANTSUM_ID                  0x200
+#define        BPHY_COUNTER_RESET      0x8000000
+#define        BCFO_REPORT_GET 0x4000000
+#define        BOFDM_CONTINUE_TX       0x10000000
+#define        BOFDM_SINGLE_CARRIER    0x20000000
+#define        BOFDM_SINGLE_TONE       0x40000000
+#define        BHT_DETECT      0x100
+#define        BCFOEN  0x10000
+#define        BCFOVALUE       0xfff00000
+#define        BSIGTONE_RE     0x3f
+#define        BSIGTONE_IM     0x7f00
+#define        BCOUNTER_CCA    0xffff
+#define        BCOUNTER_PARITYFAIL     0xffff0000
+#define        BCOUNTER_RATEILLEGAL    0xffff
+#define        BCOUNTER_CRC8FAIL       0xffff0000
+#define        BCOUNTER_MCSNOSUPPORT   0xffff
+#define        BCOUNTER_FASTSYNC       0xffff
+#define        BSHORTCFO       0xfff
+#define        BSHORTCFOT_LENGTH       12
+#define        BSHORTCFOF_LENGTH       11
+#define        BLONGCFO        0x7ff
+#define        BLONGCFOT_LENGTH        11
+#define        BLONGCFOF_LENGTH        11
+#define        BTAILCFO        0x1fff
+#define        BTAILCFOT_LENGTH        13
+#define        BTAILCFOF_LENGTH        12
+#define        BNOISE_EN_PWDB  0xffff
+#define        BCC_POWER_DB    0xffff0000
+#define        BMOISE_PWDB     0xffff
+#define        BPOWERMEAST_LENGTH      10
+#define        BPOWERMEASF_LENGTH      3
+#define        BRX_HT_BW       0x1
+#define        BRXSC   0x6
+#define        BRX_HT  0x8
+#define        BNB_INTF_DET_ON 0x1
+#define        BINTF_WIN_LEN_CFG       0x30
+#define        BNB_INTF_TH_CFG 0x1c0
+#define        BRFGAIN 0x3f
+#define        BTABLESEL       0x40
+#define        BTRSW   0x80
+#define        BRXSNR_A        0xff
+#define        BRXSNR_B        0xff00
+#define        BRXSNR_C        0xff0000
+#define        BRXSNR_D        0xff000000
+#define        BSNR_EVMT_LENGTH        8
+#define        BSNR_EVMF_LENGTH        1
+#define        BCSI1ST 0xff
+#define        BCSI2ND 0xff00
+#define        BRXEVM1ST       0xff0000
+#define        BRXEVM2ND       0xff000000
+#define        BSIGEVM 0xff
+#define        BPWDB   0xff00
+#define        BSGIEN  0x10000
+
+#define        BSFACTOR_QMA1   0xf
+#define        BSFACTOR_QMA2   0xf0
+#define        BSFACTOR_QMA3   0xf00
+#define        BSFACTOR_QMA4   0xf000
+#define        BSFACTOR_QMA5   0xf0000
+#define        BSFACTOR_QMA6   0xf0000
+#define        BSFACTOR_QMA7   0xf00000
+#define        BSFACTOR_QMA8   0xf000000
+#define        BSFACTOR_QMA9   0xf0000000
+#define        BCSI_SCHEME     0x100000
+
+#define        BNOISE_LVL_TOP_SET              0x3
+#define        BCHSMOOTH       0x4
+#define        BCHSMOOTH_CFG1  0x38
+#define        BCHSMOOTH_CFG2  0x1c0
+#define        BCHSMOOTH_CFG3  0xe00
+#define        BCHSMOOTH_CFG4  0x7000
+#define        BMRCMODE        0x800000
+#define        BTHEVMCFG       0x7000000
+
+#define        BLOOP_FIT_TYPE  0x1
+#define        BUPD_CFO        0x40
+#define        BUPD_CFO_OFFDATA        0x80
+#define        BADV_UPD_CFO    0x100
+#define        BADV_TIME_CTRL  0x800
+#define        BUPD_CLKO       0x1000
+#define        BFC     0x6000
+#define        BTRACKING_MODE  0x8000
+#define        BPHCMP_ENABLE   0x10000
+#define        BUPD_CLKO_LTF   0x20000
+#define        BCOM_CH_CFO     0x40000
+#define        BCSI_ESTI_MODE  0x80000
+#define        BADV_UPD_EQZ    0x100000
+#define        BUCHCFG 0x7000000
+#define        BUPDEQZ 0x8000000
+
+#define        BRX_PESUDO_NOISE_ON             0x20000000
+#define        BRX_PESUDO_NOISE_A      0xff
+#define        BRX_PESUDO_NOISE_B      0xff00
+#define        BRX_PESUDO_NOISE_C      0xff0000
+#define        BRX_PESUDO_NOISE_D      0xff000000
+#define        BRX_PESUDO_NOISESTATE_A         0xffff
+#define        BRX_PESUDO_NOISESTATE_B         0xffff0000
+#define        BRX_PESUDO_NOISESTATE_C         0xffff
+#define        BRX_PESUDO_NOISESTATE_D         0xffff0000
+
+#define        BZEBRA1_HSSIENABLE      0x8
+#define        BZEBRA1_TRXCONTROL      0xc00
+#define        BZEBRA1_TRXGAINSETTING  0x07f
+#define        BZEBRA1_RXCOUNTER       0xc00
+#define        BZEBRA1_TXCHANGEPUMP    0x38
+#define        BZEBRA1_RXCHANGEPUMP    0x7
+#define        BZEBRA1_CHANNEL_NUM     0xf80
+#define        BZEBRA1_TXLPFBW 0x400
+#define        BZEBRA1_RXLPFBW 0x600
+
+#define        BRTL8256REG_MODE_CTRL1          0x100
+#define        BRTL8256REG_MODE_CTRL0          0x40
+#define        BRTL8256REG_TXLPFBW             0x18
+#define        BRTL8256REG_RXLPFBW             0x600
+
+#define        BRTL8258_TXLPFBW        0xc
+#define        BRTL8258_RXLPFBW        0xc00
+#define        BRTL8258_RSSILPFBW      0xc0
+
+#define        BBYTE0  0x1
+#define        BBYTE1  0x2
+#define        BBYTE2  0x4
+#define        BBYTE3  0x8
+#define        BWORD0  0x3
+#define        BWORD1  0xc
+#define        BWORD   0xf
+
+#define        BENABLE 0x1
+#define        BDISABLE        0x0
+
+#define        LEFT_ANTENNA    0x0
+#define        RIGHT_ANTENNA   0x1
+
+#define        TCHECK_TXSTATUS 500
+#define        TUPDATE_RXCOUNTER       100
+
+#define        REG_UN_used_register            0x01bf
+
+/* WOL bit information */
+#define        HAL92C_WOL_PTK_UPDATE_EVENT     BIT(0)
+#define        HAL92C_WOL_GTK_UPDATE_EVENT     BIT(1)
+#define        HAL92C_WOL_DISASSOC_EVENT       BIT(2)
+#define        HAL92C_WOL_DEAUTH_EVENT         BIT(3)
+#define        HAL92C_WOL_FW_DISCONNECT_EVENT  BIT(4)
+
+#define        WOL_REASON_PTK_UPDATE           BIT(0)
+#define        WOL_REASON_GTK_UPDATE           BIT(1)
+#define        WOL_REASON_DISASSOC             BIT(2)
+#define        WOL_REASON_DEAUTH               BIT(3)
+#define        WOL_REASON_FW_DISCONNECT        BIT(4)
+
+/* EFUSE_TEST (for RTL8723, partial) */
+#define EFUSE_SEL(x)                           (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK                         0x300
+#define EFUSE_WIFI_SEL_0                       0x0
+
+#define        WL_HWPDN_EN     BIT(0)  /* Enable GPIO[9] as WiFi HW PDn source */
+#define        WL_HWPDN_SL     BIT(1)  /* WiFi HW PDn polarity control */
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/rtlwifi/rtl8723be/rf.c
new file mode 100644 (file)
index 0000000..4862949
--- /dev/null
@@ -0,0 +1,504 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "rf.h"
+#include "dm.h"
+
+static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
+
+void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       switch (bandwidth) {
+       case HT_CHANNEL_WIDTH_20:
+               rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+                                            0xfffff3ff) | BIT(10) | BIT(11));
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+                             rtlphy->rfreg_chnlval[0]);
+               break;
+       case HT_CHANNEL_WIDTH_20_40:
+               rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
+                                            0xfffff3ff) | BIT(10));
+               rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+                             rtlphy->rfreg_chnlval[0]);
+               break;
+       default:
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
+                        "unknown bandwidth: %#X\n", bandwidth);
+               break;
+       }
+}
+
+void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                         u8 *ppowerlevel)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u32 tx_agc[2] = {0, 0}, tmpval;
+       bool turbo_scanoff = false;
+       u8 idx1, idx2;
+       u8 *ptr;
+       u8 direction;
+       u32 pwrtrac_value;
+
+       if (rtlefuse->eeprom_regulatory != 0)
+               turbo_scanoff = true;
+
+       if (mac->act_scanning) {
+               tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+               tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+
+               if (turbo_scanoff) {
+                       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+                               tx_agc[idx1] = ppowerlevel[idx1] |
+                                              (ppowerlevel[idx1] << 8) |
+                                              (ppowerlevel[idx1] << 16) |
+                                              (ppowerlevel[idx1] << 24);
+                       }
+               }
+       } else {
+               for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+                       tx_agc[idx1] = ppowerlevel[idx1] |
+                                      (ppowerlevel[idx1] << 8) |
+                                      (ppowerlevel[idx1] << 16) |
+                                      (ppowerlevel[idx1] << 24);
+               }
+               if (rtlefuse->eeprom_regulatory == 0) {
+                       tmpval =
+                           (rtlphy->mcs_offset[0][6]) +
+                           (rtlphy->mcs_offset[0][7] << 8);
+                       tx_agc[RF90_PATH_A] += tmpval;
+
+                       tmpval = (rtlphy->mcs_offset[0][14]) +
+                                (rtlphy->mcs_offset[0][15] <<
+                                 24);
+                       tx_agc[RF90_PATH_B] += tmpval;
+               }
+       }
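+       /* Clamp each per-rate byte of the CCK TX AGC words to the RF6052 maximum */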
+       for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+               ptr = (u8 *)(&(tx_agc[idx1]));
+               for (idx2 = 0; idx2 < 4; idx2++) {
+                       if (*ptr > RF6052_MAX_TX_PWR)
+                               *ptr = RF6052_MAX_TX_PWR;
+                       ptr++;
+               }
+       }
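+       /* Apply thermal TX power tracking: direction 1 raises, 2 lowers the level */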
+       rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+       if (direction == 1) {
+               tx_agc[0] += pwrtrac_value;
+               tx_agc[1] += pwrtrac_value;
+       } else if (direction == 2) {
+               tx_agc[0] -= pwrtrac_value;
+               tx_agc[1] -= pwrtrac_value;
+       }
+       tmpval = tx_agc[RF90_PATH_A] & 0xff;
+       rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_A_CCK1_MCS32);
+
+       tmpval = tx_agc[RF90_PATH_A] >> 8;
+
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK11_A_CCK2_11);
+
+       tmpval = tx_agc[RF90_PATH_B] >> 24;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK11_A_CCK2_11);
+
+       tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+       rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
+                RTXAGC_B_CCK1_55_MCS32);
+}
+
+static void rtl8723be_phy_get_power_base(struct ieee80211_hw *hw,
+                                        u8 *ppowerlevel_ofdm,
+                                        u8 *ppowerlevel_bw20,
+                                        u8 *ppowerlevel_bw40,
+                                        u8 channel, u32 *ofdmbase,
+                                        u32 *mcsbase)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       u32 powerbase0, powerbase1;
+       u8 i, powerlevel[2];
+
+       for (i = 0; i < 2; i++) {
+               powerbase0 = ppowerlevel_ofdm[i];
+
+               powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+                   (powerbase0 << 8) | powerbase0;
+               *(ofdmbase + i) = powerbase0;
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        " [OFDM power base index rf(%c) = 0x%x]\n",
+                        ((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
+       }
+
+       for (i = 0; i < 2; i++) {
+               if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+                       powerlevel[i] = ppowerlevel_bw20[i];
+               else
+                       powerlevel[i] = ppowerlevel_bw40[i];
+               powerbase1 = powerlevel[i];
+               powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+                            (powerbase1 << 8) | powerbase1;
+
+               *(mcsbase + i) = powerbase1;
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                       " [MCS power base index rf(%c) = 0x%x]\n",
+                       ((i == 0) ? 'A' : 'B'), *(mcsbase + i));
+       }
+}
+
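+/* Compute the per-path TX AGC word for one OFDM/MCS rate group according to
+ * the EEPROM regulatory setting: 0/default - Realtek offsets, 1 - offsets by
+ * channel group, 2 - regulatory power base only, 3 - customer-defined limits.
+ */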
+static void txpwr_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index,
+                               u32 *powerbase0, u32 *powerbase1,
+                               u32 *p_outwriteval)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       u8 i, chnlgroup = 0, pwr_diff_limit[4];
+       u8 pwr_diff = 0, customer_pwr_diff;
+       u32 writeval, customer_limit, rf;
+
+       for (rf = 0; rf < 2; rf++) {
+               switch (rtlefuse->eeprom_regulatory) {
+               case 0:
+                       chnlgroup = 0;
+
+                       writeval =
+                           rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)]
+                           + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "RTK better performance, "
+                                "writeval(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeval);
+                       break;
+               case 1:
+                       if (rtlphy->pwrgroup_cnt == 1) {
+                               chnlgroup = 0;
+                       } else {
+                               if (channel < 3)
+                                       chnlgroup = 0;
+                               else if (channel < 6)
+                                       chnlgroup = 1;
+                               else if (channel < 9)
+                                       chnlgroup = 2;
+                               else if (channel < 12)
+                                       chnlgroup = 3;
+                               else if (channel < 14)
+                                       chnlgroup = 4;
+                               else if (channel == 14)
+                                       chnlgroup = 5;
+                       }
+                       writeval = rtlphy->mcs_offset[chnlgroup]
+                           [index + (rf ? 8 : 0)] + ((index < 2) ?
+                                                     powerbase0[rf] :
+                                                     powerbase1[rf]);
+
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "Realtek regulatory, 20MHz, "
+                                "writeval(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeval);
+
+                       break;
+               case 2:
+                       writeval =
+                           ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "Better regulatory, "
+                                "writeval(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeval);
+                       break;
+               case 3:
+                       chnlgroup = 0;
+
+                       if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                        "customer's limit, 40MHz "
+                                        "rf(%c) = 0x%x\n",
+                                        ((rf == 0) ? 'A' : 'B'),
+                                        rtlefuse->pwrgroup_ht40[rf]
+                                        [channel-1]);
+                       } else {
+                               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                        "customer's limit, 20MHz "
+                                        "rf(%c) = 0x%x\n",
+                                        ((rf == 0) ? 'A' : 'B'),
+                                        rtlefuse->pwrgroup_ht20[rf]
+                                        [channel-1]);
+                       }
+
+                       if (index < 2)
+                               pwr_diff =
+                                   rtlefuse->txpwr_legacyhtdiff[rf][channel-1];
+                       else if (rtlphy->current_chan_bw ==
+                                HT_CHANNEL_WIDTH_20)
+                               pwr_diff =
+                                   rtlefuse->txpwr_ht20diff[rf][channel-1];
+
+                       if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+                               customer_pwr_diff =
+                                       rtlefuse->pwrgroup_ht40[rf][channel-1];
+                       else
+                               customer_pwr_diff =
+                                       rtlefuse->pwrgroup_ht20[rf][channel-1];
+
+                       if (pwr_diff > customer_pwr_diff)
+                               pwr_diff = 0;
+                       else
+                               pwr_diff = customer_pwr_diff - pwr_diff;
+
+                       for (i = 0; i < 4; i++) {
+                               pwr_diff_limit[i] =
+                                   (u8)((rtlphy->mcs_offset
+                                         [chnlgroup][index + (rf ? 8 : 0)] &
+                                         (0x7f << (i * 8))) >> (i * 8));
+
+                               if (pwr_diff_limit[i] > pwr_diff)
+                                       pwr_diff_limit[i] = pwr_diff;
+                       }
+
+                       customer_limit = (pwr_diff_limit[3] << 24) |
+                                        (pwr_diff_limit[2] << 16) |
+                                        (pwr_diff_limit[1] << 8) |
+                                        (pwr_diff_limit[0]);
+
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                               "Customer's limit rf(%c) = 0x%x\n",
+                               ((rf == 0) ? 'A' : 'B'), customer_limit);
+
+                       writeval = customer_limit + ((index < 2) ?
+                                                     powerbase0[rf] :
+                                                     powerbase1[rf]);
+
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "Customer, writeval rf(%c)= 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeval);
+                       break;
+               default:
+                       chnlgroup = 0;
+                       writeval =
+                           rtlphy->mcs_offset[chnlgroup]
+                           [index + (rf ? 8 : 0)]
+                           + ((index < 2) ? powerbase0[rf] : powerbase1[rf]);
+
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                                "RTK better performance, writeval "
+                                "rf(%c) = 0x%x\n",
+                                ((rf == 0) ? 'A' : 'B'), writeval);
+                       break;
+               }
+
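+               /* Back off every per-rate byte when the dynamic TX power
+                * level is set to BT1 or BT2 by BT coexistence.
+                */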
+               if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
+                       writeval = writeval - 0x06060606;
+               else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
+                        TXHIGHPWRLEVEL_BT2)
+                       writeval = writeval - 0x0c0c0c0c;
+               *(p_outwriteval + rf) = writeval;
+       }
+}
+
+static void _rtl8723be_write_ofdm_power_reg(struct ieee80211_hw *hw,
+                                        u8 index, u32 *value)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u16 regoffset_a[6] = {
+               RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+               RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+               RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+       };
+       u16 regoffset_b[6] = {
+               RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+               RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+               RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+       };
+       u8 i, rf, pwr_val[4];
+       u32 writeval;
+       u16 regoffset;
+
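+       /* Clamp each per-rate byte to RF6052_MAX_TX_PWR and write the result
+        * to the path A or path B AGC register of this rate group.
+        */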
+       for (rf = 0; rf < 2; rf++) {
+               writeval = value[rf];
+               for (i = 0; i < 4; i++) {
+                       pwr_val[i] = (u8) ((writeval & (0x7f <<
+                                                       (i * 8))) >> (i * 8));
+
+                       if (pwr_val[i] > RF6052_MAX_TX_PWR)
+                               pwr_val[i] = RF6052_MAX_TX_PWR;
+               }
+               writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+                   (pwr_val[1] << 8) | pwr_val[0];
+
+               if (rf == 0)
+                       regoffset = regoffset_a[index];
+               else
+                       regoffset = regoffset_b[index];
+               rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
+
+               RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                        "Set 0x%x = %08x\n", regoffset, writeval);
+       }
+}
+
+void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                          u8 *ppowerlevel_ofdm,
+                                          u8 *ppowerlevel_bw20,
+                                          u8 *ppowerlevel_bw40, u8 channel)
+{
+       u32 writeval[2], powerbase0[2], powerbase1[2];
+       u8 index;
+       u8 direction;
+       u32 pwrtrac_value;
+
+       rtl8723be_phy_get_power_base(hw, ppowerlevel_ofdm, ppowerlevel_bw20,
+                                    ppowerlevel_bw40, channel,
+                                    &powerbase0[0], &powerbase1[0]);
+
+       rtl8723be_dm_txpower_track_adjust(hw, 1, &direction, &pwrtrac_value);
+
+       for (index = 0; index < 6; index++) {
+               txpwr_by_regulatory(hw, channel, index, &powerbase0[0],
+                                   &powerbase1[0], &writeval[0]);
+               if (direction == 1) {
+                       writeval[0] += pwrtrac_value;
+                       writeval[1] += pwrtrac_value;
+               } else if (direction == 2) {
+                       writeval[0] -= pwrtrac_value;
+                       writeval[1] -= pwrtrac_value;
+               }
+               _rtl8723be_write_ofdm_power_reg(hw, index, &writeval[0]);
+       }
+}
+
+bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       if (rtlphy->rf_type == RF_1T1R)
+               rtlphy->num_total_rfpath = 1;
+       else
+               rtlphy->num_total_rfpath = 2;
+
+       return _rtl8723be_phy_rf6052_config_parafile(hw);
+}
+
+static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg;
+       u32 u4_regvalue = 0;
+       u8 rfpath;
+       bool rtstatus = true;
+
+       for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+               pphyreg = &rtlphy->phyreg_def[rfpath];
+
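+               /* Save the RF_ENV control, switch the RF interface to
+                * software control while the RF parameters are loaded,
+                * then restore the saved value.
+                */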
+               switch (rfpath) {
+               case RF90_PATH_A:
+               case RF90_PATH_C:
+                       u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+                                                   BRFSI_RFENV);
+                       break;
+               case RF90_PATH_B:
+               case RF90_PATH_D:
+                       u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+                                                   BRFSI_RFENV << 16);
+                       break;
+               }
+
+               rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+               udelay(1);
+
+               rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+               udelay(1);
+
+               rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+                             B3WIREADDREAALENGTH, 0x0);
+               udelay(1);
+
+               rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+               udelay(1);
+
+               switch (rfpath) {
+               case RF90_PATH_A:
+                       rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
+                                                     (enum radio_path)rfpath);
+                       break;
+               case RF90_PATH_B:
+                       rtstatus = rtl8723be_phy_config_rf_with_headerfile(hw,
+                                                     (enum radio_path)rfpath);
+                       break;
+               case RF90_PATH_C:
+                       break;
+               case RF90_PATH_D:
+                       break;
+               }
+
+               switch (rfpath) {
+               case RF90_PATH_A:
+               case RF90_PATH_C:
+                       rtl_set_bbreg(hw, pphyreg->rfintfs,
+                                     BRFSI_RFENV, u4_regvalue);
+                       break;
+               case RF90_PATH_B:
+               case RF90_PATH_D:
+                       rtl_set_bbreg(hw, pphyreg->rfintfs,
+                                     BRFSI_RFENV << 16, u4_regvalue);
+                       break;
+               }
+
+               if (!rtstatus) {
+                       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+                                "Radio[%d] Fail!!", rfpath);
+                       return false;
+               }
+       }
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+       return rtstatus;
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/rtlwifi/rtl8723be/rf.h
new file mode 100644 (file)
index 0000000..a6fea10
--- /dev/null
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_RF_H__
+#define __RTL8723BE_RF_H__
+
+#define RF6052_MAX_TX_PWR              0x3F
+#define RF6052_MAX_REG                 0x3F
+
+void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
+                                       u8 bandwidth);
+void rtl8723be_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                         u8 *ppowerlevel);
+void rtl8723be_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                          u8 *ppowerlevel_ofdm,
+                                          u8 *ppowerlevel_bw20,
+                                          u8 *ppowerlevel_bw40,
+                                          u8 channel);
+bool rtl8723be_phy_rf6052_config(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
new file mode 100644 (file)
index 0000000..b4577eb
--- /dev/null
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../pci.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "../rtl8723com/phy_common.h"
+#include "dm.h"
+#include "hw.h"
+#include "fw.h"
+#include "../rtl8723com/fw_common.h"
+#include "sw.h"
+#include "trx.h"
+#include "led.h"
+#include "table.h"
+#include "../btcoexist/rtl_btc.h"
+
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+
+static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
+{
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+
+       /* ASPM is disabled for AMD chipsets by default */
+       rtlpci->const_amdpci_aspm = 0;
+
+       /* ASPM PS mode.
+        * 0 - Disable ASPM,
+        * 1 - Enable ASPM without Clock Req,
+        * 2 - Enable ASPM with Clock Req,
+        * 3 - Always Enable ASPM with Clock Req,
+        * 4 - Always Enable ASPM without Clock Req.
+        * set default to RTL8192CE:3 RTL8192E:2
+        */
+       rtlpci->const_pci_aspm = 3;
+
+       /*Setting for PCI-E device */
+       rtlpci->const_devicepci_aspm_setting = 0x03;
+
+       /*Setting for PCI-E bridge */
+       rtlpci->const_hostpci_aspm_setting = 0x02;
+
+       /* In Hw/Sw Radio Off situation.
+        * 0 - Default,
+        * 1 - From ASPM setting without low Mac Pwr,
+        * 2 - From ASPM setting with low Mac Pwr,
+        * 3 - Bus D3
+        * set default to RTL8192CE:0 RTL8192SE:2
+        */
+       rtlpci->const_hwsw_rfoff_d3 = 0;
+
+       /* This setting works for those device with
+        * backdoor ASPM setting such as EPHY setting.
+        * 0 - Not support ASPM,
+        * 1 - Support ASPM,
+        * 2 - According to chipset.
+        */
+       rtlpci->const_support_pciaspm = 1;
+}
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+{
+       int err = 0;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+       rtl8723be_bt_reg_init(hw);
+       rtlpci->msi_support = true;
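+       /* Beacon and management/control frames use dedicated firmware queues;
+        * data frames keep the priority assigned by mac80211.
+        */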
+       rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+
+       rtlpriv->dm.dm_initialgain_enable = 1;
+       rtlpriv->dm.dm_flag = 0;
+       rtlpriv->dm.disable_framebursting = 0;
+       rtlpriv->dm.thermalvalue = 0;
+       rtlpci->transmit_config = CFENDFORM | BIT(15) | BIT(24) | BIT(25);
+
+       mac->ht_enable = true;
+
+       /* 2.4G band only: single MAC, single PHY (smsp) */
+       rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
+       rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
+       rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
+
+       rtlpci->receive_config = (RCR_APPFCS            |
+                                 RCR_APP_MIC           |
+                                 RCR_APP_ICV           |
+                                 RCR_APP_PHYST_RXFF    |
+                                 RCR_HTC_LOC_CTRL      |
+                                 RCR_AMF               |
+                                 RCR_ACF               |
+                                 RCR_ADF               |
+                                 RCR_AICV              |
+                                 RCR_AB                |
+                                 RCR_AM                |
+                                 RCR_APM               |
+                                 0);
+
+       rtlpci->irq_mask[0] = (u32) (IMR_PSTIMEOUT      |
+                                    IMR_HSISR_IND_ON_INT       |
+                                    IMR_C2HCMD         |
+                                    IMR_HIGHDOK        |
+                                    IMR_MGNTDOK        |
+                                    IMR_BKDOK          |
+                                    IMR_BEDOK          |
+                                    IMR_VIDOK          |
+                                    IMR_VODOK          |
+                                    IMR_RDU            |
+                                    IMR_ROK            |
+                                    0);
+
+       rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
+
+       /* for debug level */
+       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
+       /* for LPS & IPS */
+       rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+       rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+       rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+       rtlpriv->psc.reg_fwctrl_lps = 3;
+       rtlpriv->psc.reg_max_lps_awakeintvl = 5;
+       /* ASPM can be disabled altogether by setting
+        * const_support_pciaspm = 0
+        */
+       rtl8723be_init_aspm_vars(hw);
+
+       if (rtlpriv->psc.reg_fwctrl_lps == 1)
+               rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
+       else if (rtlpriv->psc.reg_fwctrl_lps == 2)
+               rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
+       else if (rtlpriv->psc.reg_fwctrl_lps == 3)
+               rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
+
+       /* for firmware buf */
+       rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
+       if (!rtlpriv->rtlhal.pfirmware) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Can't alloc buffer for fw.\n");
+               return 1;
+       }
+
+       rtlpriv->max_fw_size = 0x8000;
+       pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
+       err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+                                     rtlpriv->io.dev, GFP_KERNEL, hw,
+                                     rtl_fw_cb);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Failed to request firmware!\n");
+               return 1;
+       }
+       return 0;
+}
+
+void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       if (rtlpriv->cfg->ops->get_btc_status())
+               rtlpriv->btcoexist.btc_ops->btc_halt_notify();
+       if (rtlpriv->rtlhal.pfirmware) {
+               vfree(rtlpriv->rtlhal.pfirmware);
+               rtlpriv->rtlhal.pfirmware = NULL;
+       }
+}
+
+/* get bt coexist status */
+bool rtl8723be_get_btc_status(void)
+{
+       return true;
+}
+
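+/* Firmware images for this chip carry a header whose signature is 0x530x */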
+static bool is_fw_header(struct rtl92c_firmware_header *hdr)
+{
+       return (hdr->signature & 0xfff0) == 0x5300;
+}
+
+static struct rtl_hal_ops rtl8723be_hal_ops = {
+       .init_sw_vars = rtl8723be_init_sw_vars,
+       .deinit_sw_vars = rtl8723be_deinit_sw_vars,
+       .read_eeprom_info = rtl8723be_read_eeprom_info,
+       .interrupt_recognized = rtl8723be_interrupt_recognized,
+       .hw_init = rtl8723be_hw_init,
+       .hw_disable = rtl8723be_card_disable,
+       .hw_suspend = rtl8723be_suspend,
+       .hw_resume = rtl8723be_resume,
+       .enable_interrupt = rtl8723be_enable_interrupt,
+       .disable_interrupt = rtl8723be_disable_interrupt,
+       .set_network_type = rtl8723be_set_network_type,
+       .set_chk_bssid = rtl8723be_set_check_bssid,
+       .set_qos = rtl8723be_set_qos,
+       .set_bcn_reg = rtl8723be_set_beacon_related_registers,
+       .set_bcn_intv = rtl8723be_set_beacon_interval,
+       .update_interrupt_mask = rtl8723be_update_interrupt_mask,
+       .get_hw_reg = rtl8723be_get_hw_reg,
+       .set_hw_reg = rtl8723be_set_hw_reg,
+       .update_rate_tbl = rtl8723be_update_hal_rate_tbl,
+       .fill_tx_desc = rtl8723be_tx_fill_desc,
+       .fill_tx_cmddesc = rtl8723be_tx_fill_cmddesc,
+       .query_rx_desc = rtl8723be_rx_query_desc,
+       .set_channel_access = rtl8723be_update_channel_access_setting,
+       .radio_onoff_checking = rtl8723be_gpio_radio_on_off_checking,
+       .set_bw_mode = rtl8723be_phy_set_bw_mode,
+       .switch_channel = rtl8723be_phy_sw_chnl,
+       .dm_watchdog = rtl8723be_dm_watchdog,
+       .scan_operation_backup = rtl8723be_phy_scan_operation_backup,
+       .set_rf_power_state = rtl8723be_phy_set_rf_power_state,
+       .led_control = rtl8723be_led_control,
+       .set_desc = rtl8723be_set_desc,
+       .get_desc = rtl8723be_get_desc,
+       .is_tx_desc_closed = rtl8723be_is_tx_desc_closed,
+       .tx_polling = rtl8723be_tx_polling,
+       .enable_hw_sec = rtl8723be_enable_hw_security_config,
+       .set_key = rtl8723be_set_key,
+       .init_sw_leds = rtl8723be_init_sw_leds,
+       .get_bbreg = rtl8723_phy_query_bb_reg,
+       .set_bbreg = rtl8723_phy_set_bb_reg,
+       .get_rfreg = rtl8723be_phy_query_rf_reg,
+       .set_rfreg = rtl8723be_phy_set_rf_reg,
+       .fill_h2c_cmd = rtl8723be_fill_h2c_cmd,
+       .get_btc_status = rtl8723be_get_btc_status,
+       .is_fw_header = is_fw_header,
+};
+
+static struct rtl_mod_params rtl8723be_mod_params = {
+       .sw_crypto = false,
+       .inactiveps = true,
+       .swctrl_lps = false,
+       .fwctrl_lps = true,
+       .debug = DBG_EMERG,
+};
+
+static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+       .bar_id = 2,
+       .write_readback = true,
+       .name = "rtl8723be_pci",
+       .fw_name = "rtlwifi/rtl8723befw.bin",
+       .ops = &rtl8723be_hal_ops,
+       .mod_params = &rtl8723be_mod_params,
+       .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+       .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+       .maps[SYS_CLK] = REG_SYS_CLKR,
+       .maps[MAC_RCR_AM] = AM,
+       .maps[MAC_RCR_AB] = AB,
+       .maps[MAC_RCR_ACRC32] = ACRC32,
+       .maps[MAC_RCR_ACF] = ACF,
+       .maps[MAC_RCR_AAP] = AAP,
+
+       .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+
+       .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+       .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+       .maps[EFUSE_CLK] = 0,
+       .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+       .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+       .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+       .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
+       .maps[EFUSE_ANA8M] = ANA8M,
+       .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+       .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+       .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+       .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
+
+       .maps[RWCAM] = REG_CAMCMD,
+       .maps[WCAMI] = REG_CAMWRITE,
+       .maps[RCAMO] = REG_CAMREAD,
+       .maps[CAMDBG] = REG_CAMDBG,
+       .maps[SECR] = REG_SECCFG,
+       .maps[SEC_CAM_NONE] = CAM_NONE,
+       .maps[SEC_CAM_WEP40] = CAM_WEP40,
+       .maps[SEC_CAM_TKIP] = CAM_TKIP,
+       .maps[SEC_CAM_AES] = CAM_AES,
+       .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+       .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+       .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+       .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+       .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+       .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+       .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+       .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+       .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+       .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+       .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+       .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+       .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+       .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+
+       .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+       .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+       .maps[RTL_IMR_BCNINT] = IMR_BCNDMAINT0,
+       .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+       .maps[RTL_IMR_RDU] = IMR_RDU,
+       .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+       .maps[RTL_IMR_BDOK] = IMR_BCNDOK0,
+       .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+       .maps[RTL_IMR_TBDER] = IMR_TBDER,
+       .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+       .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+       .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+       .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+       .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+       .maps[RTL_IMR_VODOK] = IMR_VODOK,
+       .maps[RTL_IMR_ROK] = IMR_ROK,
+       .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNDMAINT0 | IMR_TBDOK | IMR_TBDER),
+
+       .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
+       .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
+       .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
+       .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
+       .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
+       .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
+       .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
+       .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
+       .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
+       .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
+       .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
+       .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
+
+       .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
+       .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(rtl8723be_pci_id) = {
+       {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xb723, rtl8723be_hal_cfg)},
+       {},
+};
+
+MODULE_DEVICE_TABLE(pci, rtl8723be_pci_id);
+
+MODULE_AUTHOR("PageHe  <page_he@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
+
+module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
+module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0, hardware crypto)\n");
+MODULE_PARM_DESC(ips, "Set to 0 to disable inactive/no-link power save (default 1, enabled)\n");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW-controlled LPS (default 0, FW-controlled)\n");
+MODULE_PARM_DESC(fwlps, "Set to 0 to disable FW-controlled LPS (default 1, enabled)\n");
+MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+
+static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+
+static struct pci_driver rtl8723be_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = rtl8723be_pci_id,
+       .probe = rtl_pci_probe,
+       .remove = rtl_pci_disconnect,
+
+       .driver.pm = &rtlwifi_pm_ops,
+};
+
+module_pci_driver(rtl8723be_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/rtlwifi/rtl8723be/sw.h
new file mode 100644 (file)
index 0000000..a7b25e7
--- /dev/null
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_SW_H__
+#define __RTL8723BE_SW_H__
+
+int rtl8723be_init_sw_vars(struct ieee80211_hw *hw);
+void rtl8723be_deinit_sw_vars(struct ieee80211_hw *hw);
+void rtl8723be_init_var_map(struct ieee80211_hw *hw);
+bool rtl8723be_get_btc_status(void);
+
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/rtlwifi/rtl8723be/table.c
new file mode 100644 (file)
index 0000000..4b283cd
--- /dev/null
@@ -0,0 +1,572 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on  2010/ 5/18,  1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "table.h"
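+
+/* Baseband (PHY) init table: flat list of (register offset, value) pairs */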
+u32 RTL8723BEPHY_REG_1TARRAY[] = {
+       0x800, 0x80040000,
+       0x804, 0x00000003,
+       0x808, 0x0000FC00,
+       0x80C, 0x0000000A,
+       0x810, 0x10001331,
+       0x814, 0x020C3D10,
+       0x818, 0x02200385,
+       0x81C, 0x00000000,
+       0x820, 0x01000100,
+       0x824, 0x00390204,
+       0x828, 0x00000000,
+       0x82C, 0x00000000,
+       0x830, 0x00000000,
+       0x834, 0x00000000,
+       0x838, 0x00000000,
+       0x83C, 0x00000000,
+       0x840, 0x00010000,
+       0x844, 0x00000000,
+       0x848, 0x00000000,
+       0x84C, 0x00000000,
+       0x850, 0x00000000,
+       0x854, 0x00000000,
+       0x858, 0x569A11A9,
+       0x85C, 0x01000014,
+       0x860, 0x66F60110,
+       0x864, 0x061F0649,
+       0x868, 0x00000000,
+       0x86C, 0x27272700,
+       0x870, 0x07000760,
+       0x874, 0x25004000,
+       0x878, 0x00000808,
+       0x87C, 0x00000000,
+       0x880, 0xB0000C1C,
+       0x884, 0x00000001,
+       0x888, 0x00000000,
+       0x88C, 0xCCC000C0,
+       0x890, 0x00000800,
+       0x894, 0xFFFFFFFE,
+       0x898, 0x40302010,
+       0x89C, 0x00706050,
+       0x900, 0x00000000,
+       0x904, 0x00000023,
+       0x908, 0x00000000,
+       0x90C, 0x81121111,
+       0x910, 0x00000002,
+       0x914, 0x00000201,
+       0x948, 0x00000000,
+       0xA00, 0x00D047C8,
+       0xA04, 0x80FF000C,
+       0xA08, 0x8C838300,
+       0xA0C, 0x2E7F120F,
+       0xA10, 0x9500BB78,
+       0xA14, 0x1114D028,
+       0xA18, 0x00881117,
+       0xA1C, 0x89140F00,
+       0xA20, 0x1A1B0000,
+       0xA24, 0x090E1317,
+       0xA28, 0x00000204,
+       0xA2C, 0x00D30000,
+       0xA70, 0x101FBF00,
+       0xA74, 0x00000007,
+       0xA78, 0x00000900,
+       0xA7C, 0x225B0606,
+       0xA80, 0x21806490,
+       0xB2C, 0x00000000,
+       0xC00, 0x48071D40,
+       0xC04, 0x03A05611,
+       0xC08, 0x000000E4,
+       0xC0C, 0x6C6C6C6C,
+       0xC10, 0x08800000,
+       0xC14, 0x40000100,
+       0xC18, 0x08800000,
+       0xC1C, 0x40000100,
+       0xC20, 0x00000000,
+       0xC24, 0x00000000,
+       0xC28, 0x00000000,
+       0xC2C, 0x00000000,
+       0xC30, 0x69E9AC44,
+       0xC34, 0x469652AF,
+       0xC38, 0x49795994,
+       0xC3C, 0x0A97971C,
+       0xC40, 0x1F7C403F,
+       0xC44, 0x000100B7,
+       0xC48, 0xEC020107,
+       0xC4C, 0x007F037F,
+       0xC50, 0x69553420,
+       0xC54, 0x43BC0094,
+       0xC58, 0x00023169,
+       0xC5C, 0x00250492,
+       0xC60, 0x00000000,
+       0xC64, 0x7112848B,
+       0xC68, 0x47C00BFF,
+       0xC6C, 0x00000036,
+       0xC70, 0x2C7F000D,
+       0xC74, 0x020610DB,
+       0xC78, 0x0000001F,
+       0xC7C, 0x00B91612,
+       0xC80, 0x390000E4,
+       0xC84, 0x20F60000,
+       0xC88, 0x40000100,
+       0xC8C, 0x20200000,
+       0xC90, 0x00020E1A,
+       0xC94, 0x00000000,
+       0xC98, 0x00020E1A,
+       0xC9C, 0x00007F7F,
+       0xCA0, 0x00000000,
+       0xCA4, 0x000300A0,
+       0xCA8, 0x00000000,
+       0xCAC, 0x00000000,
+       0xCB0, 0x00000000,
+       0xCB4, 0x00000000,
+       0xCB8, 0x00000000,
+       0xCBC, 0x28000000,
+       0xCC0, 0x00000000,
+       0xCC4, 0x00000000,
+       0xCC8, 0x00000000,
+       0xCCC, 0x00000000,
+       0xCD0, 0x00000000,
+       0xCD4, 0x00000000,
+       0xCD8, 0x64B22427,
+       0xCDC, 0x00766932,
+       0xCE0, 0x00222222,
+       0xCE4, 0x00000000,
+       0xCE8, 0x37644302,
+       0xCEC, 0x2F97D40C,
+       0xD00, 0x00000740,
+       0xD04, 0x40020401,
+       0xD08, 0x0000907F,
+       0xD0C, 0x20010201,
+       0xD10, 0xA0633333,
+       0xD14, 0x3333BC53,
+       0xD18, 0x7A8F5B6F,
+       0xD2C, 0xCC979975,
+       0xD30, 0x00000000,
+       0xD34, 0x80608000,
+       0xD38, 0x00000000,
+       0xD3C, 0x00127353,
+       0xD40, 0x00000000,
+       0xD44, 0x00000000,
+       0xD48, 0x00000000,
+       0xD4C, 0x00000000,
+       0xD50, 0x6437140A,
+       0xD54, 0x00000000,
+       0xD58, 0x00000282,
+       0xD5C, 0x30032064,
+       0xD60, 0x4653DE68,
+       0xD64, 0x04518A3C,
+       0xD68, 0x00002101,
+       0xD6C, 0x2A201C16,
+       0xD70, 0x1812362E,
+       0xD74, 0x322C2220,
+       0xD78, 0x000E3C24,
+       0xE00, 0x2D2D2D2D,
+       0xE04, 0x2D2D2D2D,
+       0xE08, 0x0390272D,
+       0xE10, 0x2D2D2D2D,
+       0xE14, 0x2D2D2D2D,
+       0xE18, 0x2D2D2D2D,
+       0xE1C, 0x2D2D2D2D,
+       0xE28, 0x00000000,
+       0xE30, 0x1000DC1F,
+       0xE34, 0x10008C1F,
+       0xE38, 0x02140102,
+       0xE3C, 0x681604C2,
+       0xE40, 0x01007C00,
+       0xE44, 0x01004800,
+       0xE48, 0xFB000000,
+       0xE4C, 0x000028D1,
+       0xE50, 0x1000DC1F,
+       0xE54, 0x10008C1F,
+       0xE58, 0x02140102,
+       0xE5C, 0x28160D05,
+       0xE60, 0x00000008,
+       0xE68, 0x001B2556,
+       0xE6C, 0x00C00096,
+       0xE70, 0x00C00096,
+       0xE74, 0x01000056,
+       0xE78, 0x01000014,
+       0xE7C, 0x01000056,
+       0xE80, 0x01000014,
+       0xE84, 0x00C00096,
+       0xE88, 0x01000056,
+       0xE8C, 0x00C00096,
+       0xED0, 0x00C00096,
+       0xED4, 0x00C00096,
+       0xED8, 0x00C00096,
+       0xEDC, 0x000000D6,
+       0xEE0, 0x000000D6,
+       0xEEC, 0x01C00016,
+       0xF14, 0x00000003,
+       0xF4C, 0x00000000,
+       0xF00, 0x00000300,
+       0x820, 0x01000100,
+       0x800, 0x83040000,
+};
+
+u32 RTL8723BEPHY_REG_ARRAY_PG[] = {
+       0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000,
+       0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800,
+       0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646,
+       0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840,
+       0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244,
+       0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436
+};
+
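+/* RF path A init table: (RF register, value) pairs */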
+u32 RTL8723BE_RADIOA_1TARRAY[] = {
+       0x000, 0x00010000,
+       0x0B0, 0x000DFFE0,
+       0x0FE, 0x00000000,
+       0x0FE, 0x00000000,
+       0x0FE, 0x00000000,
+       0x0B1, 0x00000018,
+       0x0FE, 0x00000000,
+       0x0FE, 0x00000000,
+       0x0FE, 0x00000000,
+       0x0B2, 0x00084C00,
+       0x0B5, 0x0000D2CC,
+       0x0B6, 0x000925AA,
+       0x0B7, 0x00000010,
+       0x0B8, 0x0000907F,
+       0x05C, 0x00000002,
+       0x07C, 0x00000002,
+       0x07E, 0x00000005,
+       0x08B, 0x0006FC00,
+       0x0B0, 0x000FF9F0,
+       0x01C, 0x000739D2,
+       0x01E, 0x00000000,
+       0x0DF, 0x00000780,
+       0x050, 0x00067435,
+       0x051, 0x0006B04E,
+       0x052, 0x000007D2,
+       0x053, 0x00000000,
+       0x054, 0x00050400,
+       0x055, 0x0004026E,
+       0x0DD, 0x0000004C,
+       0x070, 0x00067435,
+       0x071, 0x0006B04E,
+       0x072, 0x000007D2,
+       0x073, 0x00000000,
+       0x074, 0x00050400,
+       0x075, 0x0004026E,
+       0x0EF, 0x00000100,
+       0x034, 0x0000ADD7,
+       0x035, 0x00005C00,
+       0x034, 0x00009DD4,
+       0x035, 0x00005000,
+       0x034, 0x00008DD1,
+       0x035, 0x00004400,
+       0x034, 0x00007DCE,
+       0x035, 0x00003800,
+       0x034, 0x00006CD1,
+       0x035, 0x00004400,
+       0x034, 0x00005CCE,
+       0x035, 0x00003800,
+       0x034, 0x000048CE,
+       0x035, 0x00004400,
+       0x034, 0x000034CE,
+       0x035, 0x00003800,
+       0x034, 0x00002451,
+       0x035, 0x00004400,
+       0x034, 0x0000144E,
+       0x035, 0x00003800,
+       0x034, 0x00000051,
+       0x035, 0x00004400,
+       0x0EF, 0x00000000,
+       0x0EF, 0x00000100,
+       0x0ED, 0x00000010,
+       0x044, 0x0000ADD7,
+       0x044, 0x00009DD4,
+       0x044, 0x00008DD1,
+       0x044, 0x00007DCE,
+       0x044, 0x00006CC1,
+       0x044, 0x00005CCE,
+       0x044, 0x000044D1,
+       0x044, 0x000034CE,
+       0x044, 0x00002451,
+       0x044, 0x0000144E,
+       0x044, 0x00000051,
+       0x0EF, 0x00000000,
+       0x0ED, 0x00000000,
+       0x0EF, 0x00002000,
+       0x03B, 0x000380EF,
+       0x03B, 0x000302FE,
+       0x03B, 0x00028CE6,
+       0x03B, 0x000200BC,
+       0x03B, 0x000188A5,
+       0x03B, 0x00010FBC,
+       0x03B, 0x00008F71,
+       0x03B, 0x00000900,
+       0x0EF, 0x00000000,
+       0x0ED, 0x00000001,
+       0x040, 0x000380EF,
+       0x040, 0x000302FE,
+       0x040, 0x00028CE6,
+       0x040, 0x000200BC,
+       0x040, 0x000188A5,
+       0x040, 0x00010FBC,
+       0x040, 0x00008F71,
+       0x040, 0x00000900,
+       0x0ED, 0x00000000,
+       0x082, 0x00080000,
+       0x083, 0x00008000,
+       0x084, 0x00048D80,
+       0x085, 0x00068000,
+       0x0A2, 0x00080000,
+       0x0A3, 0x00008000,
+       0x0A4, 0x00048D80,
+       0x0A5, 0x00068000,
+       0x000, 0x00033D80,
+};
+
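+/* MAC register init table: (register offset, byte value) pairs */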
+u32 RTL8723BEMAC_1T_ARRAY[] = {
+       0x02F, 0x00000030,
+       0x035, 0x00000000,
+       0x428, 0x0000000A,
+       0x429, 0x00000010,
+       0x430, 0x00000000,
+       0x431, 0x00000000,
+       0x432, 0x00000000,
+       0x433, 0x00000001,
+       0x434, 0x00000004,
+       0x435, 0x00000005,
+       0x436, 0x00000007,
+       0x437, 0x00000008,
+       0x43C, 0x00000004,
+       0x43D, 0x00000005,
+       0x43E, 0x00000007,
+       0x43F, 0x00000008,
+       0x440, 0x0000005D,
+       0x441, 0x00000001,
+       0x442, 0x00000000,
+       0x444, 0x00000010,
+       0x445, 0x00000000,
+       0x446, 0x00000000,
+       0x447, 0x00000000,
+       0x448, 0x00000000,
+       0x449, 0x000000F0,
+       0x44A, 0x0000000F,
+       0x44B, 0x0000003E,
+       0x44C, 0x00000010,
+       0x44D, 0x00000000,
+       0x44E, 0x00000000,
+       0x44F, 0x00000000,
+       0x450, 0x00000000,
+       0x451, 0x000000F0,
+       0x452, 0x0000000F,
+       0x453, 0x00000000,
+       0x456, 0x0000005E,
+       0x460, 0x00000066,
+       0x461, 0x00000066,
+       0x4C8, 0x000000FF,
+       0x4C9, 0x00000008,
+       0x4CC, 0x000000FF,
+       0x4CD, 0x000000FF,
+       0x4CE, 0x00000001,
+       0x500, 0x00000026,
+       0x501, 0x000000A2,
+       0x502, 0x0000002F,
+       0x503, 0x00000000,
+       0x504, 0x00000028,
+       0x505, 0x000000A3,
+       0x506, 0x0000005E,
+       0x507, 0x00000000,
+       0x508, 0x0000002B,
+       0x509, 0x000000A4,
+       0x50A, 0x0000005E,
+       0x50B, 0x00000000,
+       0x50C, 0x0000004F,
+       0x50D, 0x000000A4,
+       0x50E, 0x00000000,
+       0x50F, 0x00000000,
+       0x512, 0x0000001C,
+       0x514, 0x0000000A,
+       0x516, 0x0000000A,
+       0x525, 0x0000004F,
+       0x550, 0x00000010,
+       0x551, 0x00000010,
+       0x559, 0x00000002,
+       0x55C, 0x00000050,
+       0x55D, 0x000000FF,
+       0x605, 0x00000030,
+       0x608, 0x0000000E,
+       0x609, 0x0000002A,
+       0x620, 0x000000FF,
+       0x621, 0x000000FF,
+       0x622, 0x000000FF,
+       0x623, 0x000000FF,
+       0x624, 0x000000FF,
+       0x625, 0x000000FF,
+       0x626, 0x000000FF,
+       0x627, 0x000000FF,
+       0x638, 0x00000050,
+       0x63C, 0x0000000A,
+       0x63D, 0x0000000A,
+       0x63E, 0x0000000E,
+       0x63F, 0x0000000E,
+       0x640, 0x00000040,
+       0x642, 0x00000040,
+       0x643, 0x00000000,
+       0x652, 0x000000C8,
+       0x66E, 0x00000005,
+       0x700, 0x00000021,
+       0x701, 0x00000043,
+       0x702, 0x00000065,
+       0x703, 0x00000087,
+       0x708, 0x00000021,
+       0x709, 0x00000043,
+       0x70A, 0x00000065,
+       0x70B, 0x00000087,
+};
+
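+/* AGC gain table: sequential (register, value) writes, mostly to 0xC78 */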
+u32 RTL8723BEAGCTAB_1TARRAY[] = {
+       0xC78, 0xFD000001,
+       0xC78, 0xFC010001,
+       0xC78, 0xFB020001,
+       0xC78, 0xFA030001,
+       0xC78, 0xF9040001,
+       0xC78, 0xF8050001,
+       0xC78, 0xF7060001,
+       0xC78, 0xF6070001,
+       0xC78, 0xF5080001,
+       0xC78, 0xF4090001,
+       0xC78, 0xF30A0001,
+       0xC78, 0xF20B0001,
+       0xC78, 0xF10C0001,
+       0xC78, 0xF00D0001,
+       0xC78, 0xEF0E0001,
+       0xC78, 0xEE0F0001,
+       0xC78, 0xED100001,
+       0xC78, 0xEC110001,
+       0xC78, 0xEB120001,
+       0xC78, 0xEA130001,
+       0xC78, 0xE9140001,
+       0xC78, 0xE8150001,
+       0xC78, 0xE7160001,
+       0xC78, 0xAA170001,
+       0xC78, 0xA9180001,
+       0xC78, 0xA8190001,
+       0xC78, 0xA71A0001,
+       0xC78, 0xA61B0001,
+       0xC78, 0xA51C0001,
+       0xC78, 0xA41D0001,
+       0xC78, 0xA31E0001,
+       0xC78, 0x671F0001,
+       0xC78, 0x66200001,
+       0xC78, 0x65210001,
+       0xC78, 0x64220001,
+       0xC78, 0x63230001,
+       0xC78, 0x62240001,
+       0xC78, 0x61250001,
+       0xC78, 0x47260001,
+       0xC78, 0x46270001,
+       0xC78, 0x45280001,
+       0xC78, 0x44290001,
+       0xC78, 0x432A0001,
+       0xC78, 0x422B0001,
+       0xC78, 0x292C0001,
+       0xC78, 0x282D0001,
+       0xC78, 0x272E0001,
+       0xC78, 0x262F0001,
+       0xC78, 0x25300001,
+       0xC78, 0x24310001,
+       0xC78, 0x09320001,
+       0xC78, 0x08330001,
+       0xC78, 0x07340001,
+       0xC78, 0x06350001,
+       0xC78, 0x05360001,
+       0xC78, 0x04370001,
+       0xC78, 0x03380001,
+       0xC78, 0x02390001,
+       0xC78, 0x013A0001,
+       0xC78, 0x003B0001,
+       0xC78, 0x003C0001,
+       0xC78, 0x003D0001,
+       0xC78, 0x003E0001,
+       0xC78, 0x003F0001,
+       0xC78, 0xFC400001,
+       0xC78, 0xFB410001,
+       0xC78, 0xFA420001,
+       0xC78, 0xF9430001,
+       0xC78, 0xF8440001,
+       0xC78, 0xF7450001,
+       0xC78, 0xF6460001,
+       0xC78, 0xF5470001,
+       0xC78, 0xF4480001,
+       0xC78, 0xF3490001,
+       0xC78, 0xF24A0001,
+       0xC78, 0xF14B0001,
+       0xC78, 0xF04C0001,
+       0xC78, 0xEF4D0001,
+       0xC78, 0xEE4E0001,
+       0xC78, 0xED4F0001,
+       0xC78, 0xEC500001,
+       0xC78, 0xEB510001,
+       0xC78, 0xEA520001,
+       0xC78, 0xE9530001,
+       0xC78, 0xE8540001,
+       0xC78, 0xE7550001,
+       0xC78, 0xE6560001,
+       0xC78, 0xE5570001,
+       0xC78, 0xAA580001,
+       0xC78, 0xA9590001,
+       0xC78, 0xA85A0001,
+       0xC78, 0xA75B0001,
+       0xC78, 0xA65C0001,
+       0xC78, 0xA55D0001,
+       0xC78, 0xA45E0001,
+       0xC78, 0x675F0001,
+       0xC78, 0x66600001,
+       0xC78, 0x65610001,
+       0xC78, 0x64620001,
+       0xC78, 0x63630001,
+       0xC78, 0x62640001,
+       0xC78, 0x61650001,
+       0xC78, 0x47660001,
+       0xC78, 0x46670001,
+       0xC78, 0x45680001,
+       0xC78, 0x44690001,
+       0xC78, 0x436A0001,
+       0xC78, 0x426B0001,
+       0xC78, 0x296C0001,
+       0xC78, 0x286D0001,
+       0xC78, 0x276E0001,
+       0xC78, 0x266F0001,
+       0xC78, 0x25700001,
+       0xC78, 0x24710001,
+       0xC78, 0x09720001,
+       0xC78, 0x08730001,
+       0xC78, 0x07740001,
+       0xC78, 0x06750001,
+       0xC78, 0x05760001,
+       0xC78, 0x04770001,
+       0xC78, 0x03780001,
+       0xC78, 0x02790001,
+       0xC78, 0x017A0001,
+       0xC78, 0x007B0001,
+       0xC78, 0x007C0001,
+       0xC78, 0x007D0001,
+       0xC78, 0x007E0001,
+       0xC78, 0x007F0001,
+       0xC50, 0x69553422,
+       0xC50, 0x69553420,
+};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/rtlwifi/rtl8723be/table.h
new file mode 100644 (file)
index 0000000..932760a
--- /dev/null
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Created on  2010/ 5/18,  1:41
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_TABLE__H_
+#define __RTL8723BE_TABLE__H_
+
+#include <linux/types.h>
+#define  RTL8723BEPHY_REG_1TARRAYLEN   388
+extern u32 RTL8723BEPHY_REG_1TARRAY[];
+#define RTL8723BEPHY_REG_ARRAY_PGLEN   36
+extern u32 RTL8723BEPHY_REG_ARRAY_PG[];
+#define        RTL8723BE_RADIOA_1TARRAYLEN     206
+extern u32 RTL8723BE_RADIOA_1TARRAY[];
+#define RTL8723BEMAC_1T_ARRAYLEN       194
+extern u32 RTL8723BEMAC_1T_ARRAY[];
+#define RTL8723BEAGCTAB_1TARRAYLEN     260
+extern u32 RTL8723BEAGCTAB_1TARRAY[];
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/rtlwifi/rtl8723be/trx.c
new file mode 100644 (file)
index 0000000..e0a0d8c
--- /dev/null
@@ -0,0 +1,960 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../stats.h"
+#include "reg.h"
+#include "def.h"
+#include "phy.h"
+#include "trx.h"
+#include "led.h"
+#include "dm.h"
+#include "phy.h"
+
+static u8 _rtl8723be_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
+{
+       __le16 fc = rtl_get_fc(skb);
+
+       if (unlikely(ieee80211_is_beacon(fc)))
+               return QSLT_BEACON;
+       if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
+               return QSLT_MGNT;
+
+       return skb->priority;
+}
+
+/* mac80211's rate_idx is like this:
+ *
+ * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ *
+ * B/G rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92C_RATE1M-->DESC92C_RATE54M ==> idx is 0-->11,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
+ *
+ * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * A rate:
+ * (rx_status->flag & RX_FLAG_HT) = 0,
+ * DESC92C_RATE6M-->DESC92C_RATE54M ==> idx is 0-->7,
+ *
+ * N rate:
+ * (rx_status->flag & RX_FLAG_HT) = 1,
+ * DESC92C_RATEMCS0-->DESC92C_RATEMCS15 ==> idx is 0-->15
+ */
+static int _rtl8723be_rate_mapping(struct ieee80211_hw *hw,
+                                  bool isht, u8 desc_rate)
+{
+       int rate_idx;
+
+       if (!isht) {
+               if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+                       switch (desc_rate) {
+                       case DESC92C_RATE1M:
+                               rate_idx = 0;
+                               break;
+                       case DESC92C_RATE2M:
+                               rate_idx = 1;
+                               break;
+                       case DESC92C_RATE5_5M:
+                               rate_idx = 2;
+                               break;
+                       case DESC92C_RATE11M:
+                               rate_idx = 3;
+                               break;
+                       case DESC92C_RATE6M:
+                               rate_idx = 4;
+                               break;
+                       case DESC92C_RATE9M:
+                               rate_idx = 5;
+                               break;
+                       case DESC92C_RATE12M:
+                               rate_idx = 6;
+                               break;
+                       case DESC92C_RATE18M:
+                               rate_idx = 7;
+                               break;
+                       case DESC92C_RATE24M:
+                               rate_idx = 8;
+                               break;
+                       case DESC92C_RATE36M:
+                               rate_idx = 9;
+                               break;
+                       case DESC92C_RATE48M:
+                               rate_idx = 10;
+                               break;
+                       case DESC92C_RATE54M:
+                               rate_idx = 11;
+                               break;
+                       default:
+                               rate_idx = 0;
+                               break;
+                       }
+               } else {
+                       switch (desc_rate) {
+                       case DESC92C_RATE6M:
+                               rate_idx = 0;
+                               break;
+                       case DESC92C_RATE9M:
+                               rate_idx = 1;
+                               break;
+                       case DESC92C_RATE12M:
+                               rate_idx = 2;
+                               break;
+                       case DESC92C_RATE18M:
+                               rate_idx = 3;
+                               break;
+                       case DESC92C_RATE24M:
+                               rate_idx = 4;
+                               break;
+                       case DESC92C_RATE36M:
+                               rate_idx = 5;
+                               break;
+                       case DESC92C_RATE48M:
+                               rate_idx = 6;
+                               break;
+                       case DESC92C_RATE54M:
+                               rate_idx = 7;
+                               break;
+                       default:
+                               rate_idx = 0;
+                               break;
+                       }
+               }
+       } else {
+               switch (desc_rate) {
+               case DESC92C_RATEMCS0:
+                       rate_idx = 0;
+                       break;
+               case DESC92C_RATEMCS1:
+                       rate_idx = 1;
+                       break;
+               case DESC92C_RATEMCS2:
+                       rate_idx = 2;
+                       break;
+               case DESC92C_RATEMCS3:
+                       rate_idx = 3;
+                       break;
+               case DESC92C_RATEMCS4:
+                       rate_idx = 4;
+                       break;
+               case DESC92C_RATEMCS5:
+                       rate_idx = 5;
+                       break;
+               case DESC92C_RATEMCS6:
+                       rate_idx = 6;
+                       break;
+               case DESC92C_RATEMCS7:
+                       rate_idx = 7;
+                       break;
+               case DESC92C_RATEMCS8:
+                       rate_idx = 8;
+                       break;
+               case DESC92C_RATEMCS9:
+                       rate_idx = 9;
+                       break;
+               case DESC92C_RATEMCS10:
+                       rate_idx = 10;
+                       break;
+               case DESC92C_RATEMCS11:
+                       rate_idx = 11;
+                       break;
+               case DESC92C_RATEMCS12:
+                       rate_idx = 12;
+                       break;
+               case DESC92C_RATEMCS13:
+                       rate_idx = 13;
+                       break;
+               case DESC92C_RATEMCS14:
+                       rate_idx = 14;
+                       break;
+               case DESC92C_RATEMCS15:
+                       rate_idx = 15;
+                       break;
+               default:
+                       rate_idx = 0;
+                       break;
+               }
+       }
+       return rate_idx;
+}
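/* Editor's sketch (illustration only, not part of this patch): because the
 * DESC92C_* legacy and MCS rate codes are contiguous, the switch above is
 * equivalent to a small offset-based lookup.  The enum values below are
 * hypothetical stand-ins for the driver's constants.
 */
#include <stdbool.h>

enum {
	RATE1M = 0, RATE54M = 11,           /* CCK/OFDM legacy codes, 1M..54M */
	RATE6M = 4,                         /* first OFDM-only code           */
	RATEMCS0 = 12, RATEMCS15 = 27       /* HT MCS codes                   */
};

static int rate_to_idx(bool isht, bool band_2ghz, int desc_rate)
{
	if (isht)                           /* MCS0..MCS15 -> 0..15 */
		return (desc_rate >= RATEMCS0 && desc_rate <= RATEMCS15) ?
		       desc_rate - RATEMCS0 : 0;
	if (band_2ghz)                      /* 1M..54M -> 0..11 */
		return (desc_rate >= RATE1M && desc_rate <= RATE54M) ?
		       desc_rate - RATE1M : 0;
	return (desc_rate >= RATE6M && desc_rate <= RATE54M) ?
	       desc_rate - RATE6M : 0;      /* 5 GHz: 6M..54M -> 0..7 */
}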
+
+static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw,
+                                        struct rtl_stats *pstatus, u8 *pdesc,
+                                        struct rx_fwinfo_8723be *p_drvinfo,
+                                        bool packet_match_bssid,
+                                        bool packet_toself,
+                                        bool packet_beacon)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+       struct phy_sts_cck_8723e_t *cck_buf;
+       struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo;
+       struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
+       char rx_pwr_all = 0, rx_pwr[4];
+       u8 rf_rx_num = 0, evm, pwdb_all;
+       u8 i, max_spatial_stream;
+       u32 rssi, total_rssi = 0;
+       bool is_cck = pstatus->is_cck;
+       u8 lan_idx, vga_idx;
+
+       /* Record it for next packet processing */
+       pstatus->packet_matchbssid = packet_match_bssid;
+       pstatus->packet_toself = packet_toself;
+       pstatus->packet_beacon = packet_beacon;
+       pstatus->rx_mimo_sig_qual[0] = -1;
+       pstatus->rx_mimo_sig_qual[1] = -1;
+
+       if (is_cck) {
+               u8 cck_highpwr;
+               u8 cck_agc_rpt;
+               /* The CCK driver-info structure differs from the OFDM one. */
+               cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
+               cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+               /* (1) Hardware does not provide RSSI for CCK.
+                * (2) PWDB: average PWDB calculated by hardware
+                * (for rate adaptation).
+                */
+               if (ppsc->rfpwr_state == ERFON)
+                       cck_highpwr = (u8) rtl_get_bbreg(hw,
+                                                      RFPGA0_XA_HSSIPARAMETER2,
+                                                      BIT(9));
+               else
+                       cck_highpwr = false;
+
+               lan_idx = ((cck_agc_rpt & 0xE0) >> 5);
+               vga_idx = (cck_agc_rpt & 0x1f);
+               switch (lan_idx) {
+               case 7:
+                       if (vga_idx <= 27)/*VGA_idx = 27~2*/
+                               rx_pwr_all = -100 + 2 * (27 - vga_idx);
+                       else
+                               rx_pwr_all = -100;
+                       break;
+               case 6:/*VGA_idx = 2~0*/
+                       rx_pwr_all = -48 + 2 * (2 - vga_idx);
+                       break;
+               case 5:/*VGA_idx = 7~5*/
+                       rx_pwr_all = -42 + 2 * (7 - vga_idx);
+                       break;
+               case 4:/*VGA_idx = 7~4*/
+                       rx_pwr_all = -36 + 2 * (7 - vga_idx);
+                       break;
+               case 3:/*VGA_idx = 7~0*/
+                       rx_pwr_all = -24 + 2 * (7 - vga_idx);
+                       break;
+               case 2:
+                       if (cck_highpwr)/*VGA_idx = 5~0*/
+                               rx_pwr_all = -12 + 2 * (5 - vga_idx);
+                       else
+                               rx_pwr_all = -6 + 2 * (5 - vga_idx);
+                       break;
+               case 1:
+                       rx_pwr_all = 8 - 2 * vga_idx;
+                       break;
+               case 0:
+                       rx_pwr_all = 14 - 2 * vga_idx;
+                       break;
+               default:
+                       break;
+               }
+               rx_pwr_all += 6;
+               pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+               /* CCK gain is smaller than OFDM/MCS gain, so add an
+                * empirically determined offset of 6.
+                */
+               pwdb_all += 6;
+               if (pwdb_all > 100)
+                       pwdb_all = 100;
+               /* Adjust the offset so the gain index matches OFDM. */
+               if (pwdb_all > 34 && pwdb_all <= 42)
+                       pwdb_all -= 2;
+               else if (pwdb_all > 26 && pwdb_all <= 34)
+                       pwdb_all -= 6;
+               else if (pwdb_all > 14 && pwdb_all <= 26)
+                       pwdb_all -= 8;
+               else if (pwdb_all > 4 && pwdb_all <= 14)
+                       pwdb_all -= 4;
+               if (!cck_highpwr) {
+                       if (pwdb_all >= 80)
+                               pwdb_all = ((pwdb_all - 80) << 1) +
+                                          ((pwdb_all - 80) >> 1) + 80;
+                       else if ((pwdb_all <= 78) && (pwdb_all >= 20))
+                               pwdb_all += 3;
+                       if (pwdb_all > 100)
+                               pwdb_all = 100;
+               }
+
+               pstatus->rx_pwdb_all = pwdb_all;
+               pstatus->recvsignalpower = rx_pwr_all;
+
+               /* (3) Get Signal Quality (EVM) */
+               if (packet_match_bssid) {
+                       u8 sq;
+
+                       if (pstatus->rx_pwdb_all > 40) {
+                               sq = 100;
+                       } else {
+                               sq = cck_buf->sq_rpt;
+                               if (sq > 64)
+                                       sq = 0;
+                               else if (sq < 20)
+                                       sq = 100;
+                               else
+                                       sq = ((64 - sq) * 100) / 44;
+                       }
+
+                       pstatus->signalquality = sq;
+                       pstatus->rx_mimo_sig_qual[0] = sq;
+                       pstatus->rx_mimo_sig_qual[1] = -1;
+               }
+       } else {
+               rtlpriv->dm.rfpath_rxenable[0] = true;
+               rtlpriv->dm.rfpath_rxenable[1] = true;
+
+               /* (1)Get RSSI for HT rate */
+               for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+                       /* Count the RF RX paths that are enabled. */
+                       if (rtlpriv->dm.rfpath_rxenable[i])
+                               rf_rx_num++;
+
+                       rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110;
+
+                       /* Translate DBM to percentage. */
+                       rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+                       total_rssi += rssi;
+
+                       /* Get Rx snr value in DB */
+                       rtlpriv->stats.rx_snr_db[i] =
+                                       (long)(p_drvinfo->rxsnr[i] / 2);
+
+                       /* Record Signal Strength for next packet */
+                       if (packet_match_bssid)
+                               pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
+               }
+
+               /* (2) PWDB: average calculated by hardware (for rate adaptation) */
+               rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
+
+               pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+               pstatus->rx_pwdb_all = pwdb_all;
+               pstatus->rxpower = rx_pwr_all;
+               pstatus->recvsignalpower = rx_pwr_all;
+
+               /* (3)EVM of HT rate */
+               if (pstatus->is_ht && pstatus->rate >= DESC92C_RATEMCS8 &&
+                   pstatus->rate <= DESC92C_RATEMCS15)
+                       max_spatial_stream = 2;
+               else
+                       max_spatial_stream = 1;
+
+               for (i = 0; i < max_spatial_stream; i++) {
+                       evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+
+                       if (packet_match_bssid) {
+                               /* Fill the value into the RFD; only the
+                                * first spatial stream is used.
+                                */
+                               if (i == 0)
+                                       pstatus->signalquality =
+                                                       (u8) (evm & 0xff);
+                               pstatus->rx_mimo_sig_qual[i] =
+                                                       (u8) (evm & 0xff);
+                       }
+               }
+               if (packet_match_bssid) {
+                       for (i = RF90_PATH_A; i <= RF90_PATH_B; i++)
+                               rtl_priv(hw)->dm.cfo_tail[i] =
+                                       (char)p_phystrpt->path_cfotail[i];
+
+                       rtl_priv(hw)->dm.packet_count++;
+                       if (rtl_priv(hw)->dm.packet_count == 0xffffffff)
+                               rtl_priv(hw)->dm.packet_count = 0;
+               }
+       }
+
+       /* Signal strength for the UI BSS list (in percent),
+        * scaled to the 0~100 range.
+        */
+       if (is_cck)
+               pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+                                                               pwdb_all));
+       else if (rf_rx_num != 0)
+               pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+                                               total_rssi /= rf_rx_num));
+       /*HW antenna diversity*/
+       rtldm->fat_table.antsel_rx_keep_0 = p_phystrpt->ant_sel;
+       rtldm->fat_table.antsel_rx_keep_1 = p_phystrpt->ant_sel_b;
+       rtldm->fat_table.antsel_rx_keep_2 = p_phystrpt->antsel_rx_keep_2;
+}
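/* Editor's sketch (illustration only, not part of this patch): how the CCK
 * AGC report byte is split into LNA and VGA indices in the switch above.
 * Only the LNA-index-7 branch is shown; the per-LNA base offsets are the
 * hardware calibration values taken from the code above.
 */
#include <stdint.h>

static int cck_rx_power_lna7(uint8_t cck_agc_rpt)
{
	uint8_t lna_idx = (cck_agc_rpt & 0xE0) >> 5;   /* top 3 bits */
	uint8_t vga_idx = cck_agc_rpt & 0x1F;          /* low 5 bits */

	if (lna_idx != 7)
		return -100;            /* other LNA indices use other bases */

	/* VGA index 27..2 maps linearly, 2 dB per step, starting at -100 dBm */
	return (vga_idx <= 27) ? -100 + 2 * (27 - vga_idx) : -100;
}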
+
+static void _rtl8723be_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+                                       struct sk_buff *skb,
+                                       struct rtl_stats *pstatus,
+                                       u8 *pdesc,
+                                       struct rx_fwinfo_8723be *p_drvinfo)
+{
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+       struct ieee80211_hdr *hdr;
+       u8 *tmp_buf;
+       u8 *praddr;
+       u8 *psaddr;
+       u16 fc, type;
+       bool packet_matchbssid, packet_toself, packet_beacon;
+
+       tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
+
+       hdr = (struct ieee80211_hdr *)tmp_buf;
+       fc = le16_to_cpu(hdr->frame_control);
+       type = WLAN_FC_GET_TYPE(hdr->frame_control);
+       praddr = hdr->addr1;
+       psaddr = ieee80211_get_SA(hdr);
+       memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
+
+       packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
+            (!ether_addr_equal(mac->bssid, (fc & IEEE80211_FCTL_TODS) ?
+                               hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
+                               hdr->addr2 : hdr->addr3)) &&
+                               (!pstatus->hwerror) &&
+                               (!pstatus->crc) && (!pstatus->icv));
+
+       packet_toself = packet_matchbssid &&
+           (!ether_addr_equal(praddr, rtlefuse->dev_addr));
+
+       /* YP: packet_beacon is not initialized; this assignment is
+        * necessary, otherwise it could be spuriously true here.
+        * The situation is much worse on kernel 3.10.
+        */
+       if (ieee80211_is_beacon(hdr->frame_control))
+               packet_beacon = true;
+       else
+               packet_beacon = false;
+
+       if (packet_beacon && packet_matchbssid)
+               rtl_priv(hw)->dm.dbginfo.num_qry_beacon_pkt++;
+
+       _rtl8723be_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
+                                    packet_matchbssid,
+                                    packet_toself,
+                                    packet_beacon);
+
+       rtl_process_phyinfo(hw, tmp_buf, pstatus);
+}
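/* Editor's sketch (illustration only, not part of this patch): the ternary
 * chain above selects which 802.11 address field carries the BSSID, based on
 * the ToDS/FromDS bits of the frame-control word.  The FCTL_* values mirror
 * the usual IEEE80211_FCTL_TODS/FROMDS definitions.
 */
#include <stdint.h>

#define FCTL_TODS   0x0100
#define FCTL_FROMDS 0x0200

struct addr3_hdr { uint8_t addr1[6], addr2[6], addr3[6]; };

static const uint8_t *bssid_of(const struct addr3_hdr *hdr, uint16_t fc)
{
	if (fc & FCTL_TODS)
		return hdr->addr1;   /* to the AP: BSSID is the receiver      */
	if (fc & FCTL_FROMDS)
		return hdr->addr2;   /* from the AP: BSSID is the transmitter */
	return hdr->addr3;           /* IBSS/management: BSSID is addr3       */
}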
+
+static void _rtl8723be_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+                                       u8 *virtualaddress)
+{
+       u32 dwtmp = 0;
+       memset(virtualaddress, 0, 8);
+
+       SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+       if (ptcb_desc->empkt_num == 1) {
+               dwtmp = ptcb_desc->empkt_len[0];
+       } else {
+               dwtmp = ptcb_desc->empkt_len[0];
+               dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+               dwtmp += ptcb_desc->empkt_len[1];
+       }
+       SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+
+       if (ptcb_desc->empkt_num <= 3) {
+               dwtmp = ptcb_desc->empkt_len[2];
+       } else {
+               dwtmp = ptcb_desc->empkt_len[2];
+               dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+               dwtmp += ptcb_desc->empkt_len[3];
+       }
+       SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+       if (ptcb_desc->empkt_num <= 5) {
+               dwtmp = ptcb_desc->empkt_len[4];
+       } else {
+               dwtmp = ptcb_desc->empkt_len[4];
+               dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+               dwtmp += ptcb_desc->empkt_len[5];
+       }
+       SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
+       SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+       if (ptcb_desc->empkt_num <= 7) {
+               dwtmp = ptcb_desc->empkt_len[6];
+       } else {
+               dwtmp = ptcb_desc->empkt_len[6];
+               dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+               dwtmp += ptcb_desc->empkt_len[7];
+       }
+       SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+       if (ptcb_desc->empkt_num <= 9) {
+               dwtmp = ptcb_desc->empkt_len[8];
+       } else {
+               dwtmp = ptcb_desc->empkt_len[8];
+               dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;
+               dwtmp += ptcb_desc->empkt_len[9];
+       }
+       SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+}
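/* Editor's sketch (illustration only, not part of this patch): the repeated
 * "dwtmp += ((dwtmp % 4) ? (4 - dwtmp % 4) : 0) + 4;" above rounds an
 * early-mode sub-frame length up to a 4-byte boundary and accounts for a
 * 4-byte gap before adding the next length.  Stand-alone form:
 */
#include <stdint.h>

static uint32_t em_pair_len(uint32_t len0, uint32_t len1)
{
	uint32_t padded = (len0 + 3u) & ~3u;   /* round up to a multiple of 4 */

	return padded + 4u + len1;             /* 4-byte gap, then the next length */
}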
+
+bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
+                            struct rtl_stats *status,
+                            struct ieee80211_rx_status *rx_status,
+                            u8 *pdesc, struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rx_fwinfo_8723be *p_drvinfo;
+       struct ieee80211_hdr *hdr;
+
+       u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+       status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
+       if (status->packet_report_type == TX_REPORT2)
+               status->length = (u16) GET_RX_RPT2_DESC_PKT_LEN(pdesc);
+       else
+               status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
+       status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+                                 RX_DRV_INFO_SIZE_UNIT;
+       status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
+       status->icv = (u16) GET_RX_DESC_ICV(pdesc);
+       status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+       status->hwerror = (status->crc | status->icv);
+       status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
+       status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
+       status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
+       status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+       status->isfirst_ampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
+       if (status->packet_report_type == NORMAL_RX)
+               status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+       status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
+       status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+
+       status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
+
+       status->macid = GET_RX_DESC_MACID(pdesc);
+       if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+               status->wake_match = BIT(2);
+       else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+               status->wake_match = BIT(1);
+       else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+               status->wake_match = BIT(0);
+       else
+               status->wake_match = 0;
+       if (status->wake_match)
+               RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
+                        "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+                        status->wake_match);
+       rx_status->freq = hw->conf.chandef.chan->center_freq;
+       rx_status->band = hw->conf.chandef.chan->band;
+
+       hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size +
+                                      status->rx_bufshift);
+
+       if (status->crc)
+               rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+       if (status->rx_is40Mhzpacket)
+               rx_status->flag |= RX_FLAG_40MHZ;
+
+       if (status->is_ht)
+               rx_status->flag |= RX_FLAG_HT;
+
+       rx_status->flag |= RX_FLAG_MACTIME_START;
+
+       /* The hardware sets status->decrypted true if it finds an open
+        * data frame or a management frame.  It does not decrypt robust
+        * management frames (IEEE 802.11w) but still marks them as
+        * decrypted, so clear the flag for those frames and let mac80211
+        * decrypt them in software.
+        */
+       if (status->decrypted) {
+               if (!hdr) {
+                       WARN_ON_ONCE(true);
+                       pr_err("decrypted is true but hdr NULL in skb %p\n",
+                              rtl_get_hdr(skb));
+                       return false;
+               }
+
+               if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
+                   (ieee80211_has_protected(hdr->frame_control)))
+                       rx_status->flag &= ~RX_FLAG_DECRYPTED;
+               else
+                       rx_status->flag |= RX_FLAG_DECRYPTED;
+       }
+
+       /* rate_idx: index of the data rate in the band's supported
+        * rates, or the MCS index if HT rates are used (RX_FLAG_HT).
+        * Note: this differs from the Windows definition.
+        */
+       rx_status->rate_idx = _rtl8723be_rate_mapping(hw, status->is_ht,
+                                                     status->rate);
+
+       rx_status->mactime = status->timestamp_low;
+       if (phystatus) {
+               p_drvinfo = (struct rx_fwinfo_8723be *)(skb->data +
+                                                       status->rx_bufshift);
+
+               _rtl8723be_translate_rx_signal_stuff(hw, skb, status,
+                                                    pdesc, p_drvinfo);
+       }
+
+       /*rx_status->qual = status->signal; */
+       rx_status->signal = status->recvsignalpower + 10;
+       if (status->packet_report_type == TX_REPORT2) {
+               status->macid_valid_entry[0] =
+                        GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+               status->macid_valid_entry[1] =
+                        GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+       }
+       return true;
+}
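/* Editor's sketch (illustration only, not part of this patch): the
 * wake_match encoding above written as a stand-alone helper, following the
 * pattern/magic/unicast match bits provided by the RX descriptor macros in
 * trx.h.  WAKE_BIT() stands in for the kernel's BIT() here.
 */
#include <stdint.h>

#define WAKE_BIT(n) (1u << (n))

static uint8_t wake_match_bits(int pattern_match, int magic_match, int ucast_match)
{
	if (pattern_match)
		return WAKE_BIT(2);   /* wake-on-pattern      */
	if (magic_match)
		return WAKE_BIT(1);   /* wake-on-magic-packet */
	if (ucast_match)
		return WAKE_BIT(0);   /* wake-on-unicast      */
	return 0;
}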
+
+void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
+                           struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+                           u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+                           struct ieee80211_sta *sta, struct sk_buff *skb,
+                           u8 hw_queue, struct rtl_tcb_desc *ptcb_desc)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+       u8 *pdesc = pdesc_tx;
+       u16 seq_number;
+       __le16 fc = hdr->frame_control;
+       unsigned int buf_len = 0;
+       unsigned int skb_len = skb->len;
+       u8 fw_qsel = _rtl8723be_map_hwqueue_to_fwqueue(skb, hw_queue);
+       bool firstseg = ((hdr->seq_ctrl &
+                         cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
+       bool lastseg = ((hdr->frame_control &
+                        cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
+       dma_addr_t mapping;
+       u8 bw_40 = 0;
+       u8 short_gi = 0;
+
+       if (mac->opmode == NL80211_IFTYPE_STATION) {
+               bw_40 = mac->bw_40;
+       } else if (mac->opmode == NL80211_IFTYPE_AP ||
+               mac->opmode == NL80211_IFTYPE_ADHOC) {
+               if (sta)
+                       bw_40 = sta->ht_cap.cap &
+                               IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+       }
+       seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+       rtl_get_tcb_desc(hw, info, sta, skb, ptcb_desc);
+       /* reserve 8 bytes for the AMPDU early-mode header */
+       if (rtlhal->earlymode_enable) {
+               skb_push(skb, EM_HDR_LEN);
+               memset(skb->data, 0, EM_HDR_LEN);
+       }
+       buf_len = skb->len;
+       mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
+                                PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error");
+               return;
+       }
+       CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be));
+       if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
+               firstseg = true;
+               lastseg = true;
+       }
+       if (firstseg) {
+               if (rtlhal->earlymode_enable) {
+                       SET_TX_DESC_PKT_OFFSET(pdesc, 1);
+                       SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+                                          EM_HDR_LEN);
+                       if (ptcb_desc->empkt_num) {
+                               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                                        "Insert 8 byte.pTcb->EMPktNum:%d\n",
+                                         ptcb_desc->empkt_num);
+                               _rtl8723be_insert_emcontent(ptcb_desc,
+                                                           (u8 *)(skb->data));
+                       }
+               } else {
+                       SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+               }
+
+               /* ptcb_desc->use_driver_rate = true; */
+               SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+               if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
+                       short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
+               else
+                       short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
+
+               SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+
+               if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+                       SET_TX_DESC_AGG_ENABLE(pdesc, 1);
+                       SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+               }
+               SET_TX_DESC_SEQ(pdesc, seq_number);
+               SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+                                               !ptcb_desc->cts_enable) ?
+                                               1 : 0));
+               SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
+               SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ?
+                                             1 : 0));
+
+               SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
+
+               SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
+               SET_TX_DESC_RTS_SHORT(pdesc,
+                       ((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
+                        (ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
+                        (ptcb_desc->rts_use_shortgi ? 1 : 0)));
+
+               if (ptcb_desc->btx_enable_sw_calc_duration)
+                       SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+
+               if (bw_40) {
+                       if (ptcb_desc->packet_bw) {
+                               SET_TX_DESC_DATA_BW(pdesc, 1);
+                               SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+                       } else {
+                               SET_TX_DESC_DATA_BW(pdesc, 0);
+                               SET_TX_DESC_TX_SUB_CARRIER(pdesc, mac->cur_40_prime_sc);
+                       }
+               } else {
+                       SET_TX_DESC_DATA_BW(pdesc, 0);
+                       SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+               }
+
+               SET_TX_DESC_LINIP(pdesc, 0);
+               SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb_len);
+               if (sta) {
+                       u8 ampdu_density = sta->ht_cap.ampdu_density;
+                       SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+               }
+               if (info->control.hw_key) {
+                       struct ieee80211_key_conf *keyconf =
+                                               info->control.hw_key;
+                       switch (keyconf->cipher) {
+                       case WLAN_CIPHER_SUITE_WEP40:
+                       case WLAN_CIPHER_SUITE_WEP104:
+                       case WLAN_CIPHER_SUITE_TKIP:
+                               SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+                               break;
+                       case WLAN_CIPHER_SUITE_CCMP:
+                               SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+                               break;
+                       default:
+                               SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+                               break;
+                       }
+               }
+
+               SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+               SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
+               SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
+               SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+                                      1 : 0);
+               SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+
+               if (ieee80211_is_data_qos(fc)) {
+                       if (mac->rdg_en) {
+                               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                                        "Enable RDG function.\n");
+                               SET_TX_DESC_RDG_ENABLE(pdesc, 1);
+                               SET_TX_DESC_HTC(pdesc, 1);
+                       }
+               }
+       }
+
+       SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
+       SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+       SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) buf_len);
+       SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+       SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
+       SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+
+       if (!ieee80211_is_data_qos(fc))  {
+               SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+               SET_TX_DESC_HWSEQ_SEL(pdesc, 0);
+       }
+       SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+       if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+           is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
+               SET_TX_DESC_BMC(pdesc, 1);
+       }
+       RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+}
+
+void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+                              bool b_firstseg, bool b_lastseg,
+                              struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       u8 fw_queue = QSLT_BEACON;
+
+       dma_addr_t mapping = pci_map_single(rtlpci->pdev,
+                                           skb->data, skb->len,
+                                           PCI_DMA_TODEVICE);
+
+       if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
+               RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+                        "DMA mapping error");
+               return;
+       }
+       CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+
+       SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+
+       SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+
+       SET_TX_DESC_SEQ(pdesc, 0);
+
+       SET_TX_DESC_LINIP(pdesc, 0);
+
+       SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+
+       SET_TX_DESC_FIRST_SEG(pdesc, 1);
+       SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+       SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+
+       SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+
+       SET_TX_DESC_RATE_ID(pdesc, 0);
+       SET_TX_DESC_MACID(pdesc, 0);
+
+       SET_TX_DESC_OWN(pdesc, 1);
+
+       SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+
+       SET_TX_DESC_FIRST_SEG(pdesc, 1);
+       SET_TX_DESC_LAST_SEG(pdesc, 1);
+
+       SET_TX_DESC_USE_RATE(pdesc, 1);
+}
+
+void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                       u8 desc_name, u8 *val)
+{
+       if (istx) {
+               switch (desc_name) {
+               case HW_DESC_OWN:
+                       SET_TX_DESC_OWN(pdesc, 1);
+                       break;
+               case HW_DESC_TX_NEXTDESC_ADDR:
+                       SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+                       break;
+               default:
+                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                                 desc_name);
+                       break;
+               }
+       } else {
+               switch (desc_name) {
+               case HW_DESC_RXOWN:
+                       SET_RX_DESC_OWN(pdesc, 1);
+                       break;
+               case HW_DESC_RXBUFF_ADDR:
+                       SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
+                       break;
+               case HW_DESC_RXPKT_LEN:
+                       SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
+                       break;
+               case HW_DESC_RXERO:
+                       SET_RX_DESC_EOR(pdesc, 1);
+                       break;
+               default:
+                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                                 desc_name);
+                       break;
+               }
+       }
+}
+
+u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
+{
+       u32 ret = 0;
+
+       if (istx) {
+               switch (desc_name) {
+               case HW_DESC_OWN:
+                       ret = GET_TX_DESC_OWN(pdesc);
+                       break;
+               case HW_DESC_TXBUFF_ADDR:
+                       ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+                       break;
+               default:
+                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                                 desc_name);
+                       break;
+               }
+       } else {
+               switch (desc_name) {
+               case HW_DESC_OWN:
+                       ret = GET_RX_DESC_OWN(pdesc);
+                       break;
+               case HW_DESC_RXPKT_LEN:
+                       ret = GET_RX_DESC_PKT_LEN(pdesc);
+                       break;
+               default:
+                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                                 desc_name);
+                       break;
+               }
+       }
+       return ret;
+}
+
+bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
+                                u8 hw_queue, u16 index)
+{
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
+       u8 *entry = (u8 *)(&ring->desc[ring->idx]);
+       u8 own = (u8) rtl8723be_get_desc(entry, true, HW_DESC_OWN);
+
+       /* A beacon packet uses only the first descriptor by default,
+        * and the hardware may not clear its OWN bit.
+        */
+       if (own)
+               return false;
+       else
+               return true;
+}
+
+void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       if (hw_queue == BEACON_QUEUE) {
+               rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
+       } else {
+               rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
+                              BIT(0) << (hw_queue));
+       }
+}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/rtlwifi/rtl8723be/trx.h
new file mode 100644 (file)
index 0000000..102f33d
--- /dev/null
@@ -0,0 +1,617 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __RTL8723BE_TRX_H__
+#define __RTL8723BE_TRX_H__
+
+#define TX_DESC_SIZE                           40
+#define TX_DESC_AGGR_SUBFRAME_SIZE             32
+
+#define RX_DESC_SIZE                           32
+#define RX_DRV_INFO_SIZE_UNIT                  8
+
+#define        TX_DESC_NEXT_DESC_OFFSET                40
+#define USB_HWDESC_HEADER_LEN                  40
+#define CRCLENGTH                              4
+
+#define SET_TX_DESC_PKT_SIZE(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
+#define SET_TX_DESC_OFFSET(__pdesc, __val)             \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
+#define SET_TX_DESC_BMC(__pdesc, __val)                        \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
+#define SET_TX_DESC_HTC(__pdesc, __val)                        \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
+#define SET_TX_DESC_LAST_SEG(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
+#define SET_TX_DESC_FIRST_SEG(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
+#define SET_TX_DESC_LINIP(__pdesc, __val)              \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
+#define SET_TX_DESC_NO_ACM(__pdesc, __val)             \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
+#define SET_TX_DESC_GF(__pdesc, __val)                 \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_TX_DESC_OWN(__pdesc, __val)                        \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_TX_DESC_PKT_SIZE(__pdesc)                  \
+       LE_BITS_TO_4BYTE(__pdesc, 0, 16)
+#define GET_TX_DESC_OFFSET(__pdesc)                    \
+       LE_BITS_TO_4BYTE(__pdesc, 16, 8)
+#define GET_TX_DESC_BMC(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 24, 1)
+#define GET_TX_DESC_HTC(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 25, 1)
+#define GET_TX_DESC_LAST_SEG(__pdesc)                  \
+       LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_TX_DESC_FIRST_SEG(__pdesc)                 \
+       LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_TX_DESC_LINIP(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_TX_DESC_NO_ACM(__pdesc)                    \
+       LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_TX_DESC_GF(__pdesc)                                \
+       LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_TX_DESC_OWN(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_TX_DESC_MACID(__pdesc, __val)              \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 7, __val)
+#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
+#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
+#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val)       \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
+#define SET_TX_DESC_PIFS(__pdesc, __val)               \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
+#define SET_TX_DESC_RATE_ID(__pdesc, __val)            \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 5, __val)
+#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
+#define SET_TX_DESC_SEC_TYPE(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
+#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 5, __val)
+
+
+#define SET_TX_DESC_PAID(__pdesc, __val)               \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 9, __val)
+#define SET_TX_DESC_CCA_RTS(__pdesc, __val)            \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 10, 2, __val)
+#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
+#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
+#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
+#define SET_TX_DESC_AGG_BREAK(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
+#define SET_TX_DESC_MORE_FRAG(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
+#define SET_TX_DESC_RAW(__pdesc, __val)                        \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val)            \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
+#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val)      \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
+#define SET_TX_DESC_BT_INT(__pdesc, __val)             \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
+#define SET_TX_DESC_GID(__pdesc, __val)                        \
+       SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 6, __val)
+
+
+#define SET_TX_DESC_WHEADER_LEN(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 4, __val)
+#define SET_TX_DESC_CHK_EN(__pdesc, __val)             \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 4, 1, __val)
+#define SET_TX_DESC_EARLY_MODE(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 5, 1, __val)
+#define SET_TX_DESC_HWSEQ_SEL(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 6, 2, __val)
+#define SET_TX_DESC_USE_RATE(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 1, __val)
+#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val)     \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 9, 1, __val)
+#define SET_TX_DESC_DISABLE_FB(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 10, 1, __val)
+#define SET_TX_DESC_CTS2SELF(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 11, 1, __val)
+#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 12, 1, __val)
+#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val)      \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 13, 1, __val)
+#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 15, 1, __val)
+#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 1, __val)
+#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 17, 5, __val)
+#define SET_TX_DESC_NDPA(__pdesc, __val)               \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 22, 2, __val)
+#define SET_TX_DESC_AMPDU_MAX_TIME(__pdesc, __val)     \
+       SET_BITS_TO_LE_4BYTE(__pdesc+12, 24, 8, __val)
+
+
+#define SET_TX_DESC_TX_RATE(__pdesc, __val)            \
+       SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 7, __val)
+#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
+       SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 5, __val)
+#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val)  \
+       SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 4, __val)
+#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
+       SET_BITS_TO_LE_4BYTE(__pdesc+16, 17, 1, __val)
+#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val)   \
+       SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 6, __val)
+#define SET_TX_DESC_RTS_RATE(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 5, __val)
+
+
+#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val)     \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 4, __val)
+#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)       \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 4, 1, __val)
+#define SET_TX_DESC_DATA_BW(__pdesc, __val)            \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 5, 2, __val)
+#define SET_TX_DESC_DATA_LDPC(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
+#define SET_TX_DESC_DATA_STBC(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 2, __val)
+#define SET_TX_DESC_CTROL_STBC(__pdesc, __val)         \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 10, 2, __val)
+#define SET_TX_DESC_RTS_SHORT(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 12, 1, __val)
+#define SET_TX_DESC_RTS_SC(__pdesc, __val)             \
+       SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
+
+
+#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val)     \
+       SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
+
+#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc)            \
+       LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
+
+#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val)           \
+       SET_BITS_TO_LE_4BYTE(__pdesc+32, 15, 1, __val)
+
+#define SET_TX_DESC_SEQ(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc+36, 12, 12, __val)
+
+#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val)  \
+       SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
+
+#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc)         \
+       LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
+
+
+#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val)  \
+       SET_BITS_TO_LE_4BYTE(__pdesc+48, 0, 32, __val)
+
+#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc)         \
+       LE_BITS_TO_4BYTE(__pdesc+48, 0, 32)
+
+#define GET_RX_DESC_PKT_LEN(__pdesc)                   \
+       LE_BITS_TO_4BYTE(__pdesc, 0, 14)
+#define GET_RX_DESC_CRC32(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc, 14, 1)
+#define GET_RX_DESC_ICV(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 15, 1)
+#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc)             \
+       LE_BITS_TO_4BYTE(__pdesc, 16, 4)
+#define GET_RX_DESC_SECURITY(__pdesc)                  \
+       LE_BITS_TO_4BYTE(__pdesc, 20, 3)
+#define GET_RX_DESC_QOS(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 23, 1)
+#define GET_RX_DESC_SHIFT(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc, 24, 2)
+#define GET_RX_DESC_PHYST(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc, 26, 1)
+#define GET_RX_DESC_SWDEC(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc, 27, 1)
+#define GET_RX_DESC_LS(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc, 28, 1)
+#define GET_RX_DESC_FS(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc, 29, 1)
+#define GET_RX_DESC_EOR(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 30, 1)
+#define GET_RX_DESC_OWN(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc, 31, 1)
+
+#define SET_RX_DESC_PKT_LEN(__pdesc, __val)            \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
+#define SET_RX_DESC_EOR(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
+#define SET_RX_DESC_OWN(__pdesc, __val)                \
+       SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
+
+#define GET_RX_DESC_MACID(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc+4, 0, 7)
+#define GET_RX_DESC_TID(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc+4, 8, 4)
+#define GET_RX_DESC_AMSDU(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
+#define GET_RX_STATUS_DESC_RXID_MATCH(__pdesc)         \
+       LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
+#define GET_RX_DESC_PAGGR(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
+#define GET_RX_DESC_A1_FIT(__pdesc)                    \
+       LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
+#define GET_RX_DESC_CHKERR(__pdesc)                    \
+       LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
+#define GET_RX_DESC_IPVER(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
+#define GET_RX_STATUS_DESC_IS_TCPUDP(__pdesc)          \
+       LE_BITS_TO_4BYTE(__pdesc+4, 22, 1)
+#define GET_RX_STATUS_DESC_CHK_VLD(__pdesc)            \
+       LE_BITS_TO_4BYTE(__pdesc+4, 23, 1)
+#define GET_RX_DESC_PAM(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
+#define GET_RX_DESC_PWR(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
+#define GET_RX_DESC_MD(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
+#define GET_RX_DESC_MF(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
+#define GET_RX_DESC_TYPE(__pdesc)                      \
+       LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
+#define GET_RX_DESC_MC(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
+#define GET_RX_DESC_BC(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
+
+
+#define GET_RX_DESC_SEQ(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
+#define GET_RX_DESC_FRAG(__pdesc)                      \
+       LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
+#define GET_RX_STATUS_DESC_RX_IS_QOS(__pdesc)          \
+       LE_BITS_TO_4BYTE(__pdesc+8, 16, 1)
+#define GET_RX_STATUS_DESC_WLANHD_IV_LEN(__pdesc)      \
+       LE_BITS_TO_4BYTE(__pdesc+8, 18, 6)
+#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc)            \
+       LE_BITS_TO_4BYTE(__pdesc+8, 28, 1)
+
+
+#define GET_RX_DESC_RXMCS(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc+12, 0, 7)
+#define GET_RX_DESC_RXHT(__pdesc)                      \
+       LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
+#define GET_RX_STATUS_DESC_RX_GF(__pdesc)              \
+       LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
+#define GET_RX_DESC_HTC(__pdesc)                       \
+       LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
+#define GET_RX_STATUS_DESC_EOSP(__pdesc)               \
+       LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
+#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc)          \
+       LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
+
+#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc)      \
+       LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
+#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc)      \
+       LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
+#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc)                \
+       LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
+
+#define GET_RX_DESC_SPLCP(__pdesc)                     \
+       LE_BITS_TO_4BYTE(__pdesc+16, 0, 1)
+#define GET_RX_STATUS_DESC_LDPC(__pdesc)               \
+       LE_BITS_TO_4BYTE(__pdesc+16, 1, 1)
+#define GET_RX_STATUS_DESC_STBC(__pdesc)               \
+       LE_BITS_TO_4BYTE(__pdesc+16, 2, 1)
+#define GET_RX_DESC_BW(__pdesc)                        \
+       LE_BITS_TO_4BYTE(__pdesc+16, 4, 2)
+
+#define GET_RX_DESC_TSFL(__pdesc)                      \
+       LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
+
+#define GET_RX_DESC_BUFF_ADDR(__pdesc)                 \
+       LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
+#define GET_RX_DESC_BUFF_ADDR64(__pdesc)               \
+       LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
+
+#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val)          \
+       SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
+#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val)        \
+       SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+
+
+/* TX report 2 format in Rx desc*/
+
+#define GET_RX_RPT2_DESC_PKT_LEN(__rxstatusdesc)       \
+       LE_BITS_TO_4BYTE(__rxstatusdesc, 0, 9)
+#define GET_RX_RPT2_DESC_MACID_VALID_1(__rxstatusdesc) \
+       LE_BITS_TO_4BYTE(__rxstatusdesc+16, 0, 32)
+#define GET_RX_RPT2_DESC_MACID_VALID_2(__rxstatusdesc) \
+       LE_BITS_TO_4BYTE(__rxstatusdesc+20, 0, 32)
+
+#define SET_EARLYMODE_PKTNUM(__paddr, __value)         \
+       SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
+#define SET_EARLYMODE_LEN0(__paddr, __value)           \
+       SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
+#define SET_EARLYMODE_LEN1(__paddr, __value)           \
+       SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
+#define SET_EARLYMODE_LEN2_1(__paddr, __value)         \
+       SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
+#define SET_EARLYMODE_LEN2_2(__paddr, __value)         \
+       SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
+#define SET_EARLYMODE_LEN3(__paddr, __value)           \
+       SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
+#define SET_EARLYMODE_LEN4(__paddr, __value)           \
+       SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
+
+#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)              \
+do {                                                           \
+       if (_size > TX_DESC_NEXT_DESC_OFFSET)                   \
+               memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);   \
+       else                                                    \
+               memset(__pdesc, 0, _size);                      \
+} while (0)
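/* Editor's sketch (illustration only, not part of this patch): the
 * SET_TX_DESC_* / GET_RX_DESC_* macros above all expand to the rtlwifi
 * SET_BITS_TO_LE_4BYTE / LE_BITS_TO_4BYTE helpers, which read-modify-write a
 * bit field inside a little-endian 32-bit descriptor word.  A minimal
 * stand-alone equivalent (a little-endian host is assumed for brevity):
 */
#include <stdint.h>
#include <string.h>

static void set_le32_bits(uint8_t *desc, unsigned int shift,
			  unsigned int len, uint32_t val)
{
	uint32_t word;
	uint32_t field = (len >= 32) ? 0xffffffffu : ((1u << len) - 1u);

	memcpy(&word, desc, 4);                  /* load the descriptor dword  */
	word &= ~(field << shift);               /* clear the target bit field */
	word |= (val & field) << shift;          /* insert the new value       */
	memcpy(desc, &word, 4);                  /* store it back              */
}

static uint32_t get_le32_bits(const uint8_t *desc, unsigned int shift,
			      unsigned int len)
{
	uint32_t word;
	uint32_t field = (len >= 32) ? 0xffffffffu : ((1u << len) - 1u);

	memcpy(&word, desc, 4);
	return (word >> shift) & field;
}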
+
+struct phy_rx_agc_info_t {
+       #ifdef __LITTLE_ENDIAN
+               u8 gain:7, trsw:1;
+       #else
+               u8 trsw:1, gain:7;
+       #endif
+};
+struct phy_status_rpt {
+       struct phy_rx_agc_info_t path_agc[2];
+       u8 ch_corr[2];
+       u8 cck_sig_qual_ofdm_pwdb_all;
+       u8 cck_agc_rpt_ofdm_cfosho_a;
+       u8 cck_rpt_b_ofdm_cfosho_b;
+       u8 rsvd_1;/* ch_corr_msb; */
+       u8 noise_power_db_msb;
+       char path_cfotail[2];
+       u8 pcts_mask[2];
+       char stream_rxevm[2];
+       u8 path_rxsnr[2];
+       u8 noise_power_db_lsb;
+       u8 rsvd_2[3];
+       u8 stream_csi[2];
+       u8 stream_target_csi[2];
+       u8 sig_evm;
+       u8 rsvd_3;
+#ifdef __LITTLE_ENDIAN
+       u8 antsel_rx_keep_2:1;  /*ex_intf_flg:1;*/
+       u8 sgi_en:1;
+       u8 rxsc:2;
+       u8 idle_long:1;
+       u8 r_ant_train_en:1;
+       u8 ant_sel_b:1;
+       u8 ant_sel:1;
+#else  /* _BIG_ENDIAN_ */
+       u8 ant_sel:1;
+       u8 ant_sel_b:1;
+       u8 r_ant_train_en:1;
+       u8 idle_long:1;
+       u8 rxsc:2;
+       u8 sgi_en:1;
+       u8 antsel_rx_keep_2:1;  /*ex_intf_flg:1;*/
+#endif
+} __packed;
+
+struct rx_fwinfo_8723be {
+       u8 gain_trsw[4];
+       u8 pwdb_all;
+       u8 cfosho[4];
+       u8 cfotail[4];
+       char rxevm[2];
+       char rxsnr[4];
+       u8 pdsnr[2];
+       u8 csi_current[2];
+       u8 csi_target[2];
+       u8 sigevm;
+       u8 max_ex_pwr;
+       u8 ex_intf_flag:1;
+       u8 sgi_en:1;
+       u8 rxsc:2;
+       u8 reserve:4;
+} __packed;
+
+struct tx_desc_8723be {
+       u32 pktsize:16;
+       u32 offset:8;
+       u32 bmc:1;
+       u32 htc:1;
+       u32 lastseg:1;
+       u32 firstseg:1;
+       u32 linip:1;
+       u32 noacm:1;
+       u32 gf:1;
+       u32 own:1;
+
+       u32 macid:6;
+       u32 rsvd0:2;
+       u32 queuesel:5;
+       u32 rd_nav_ext:1;
+       u32 lsig_txop_en:1;
+       u32 pifs:1;
+       u32 rateid:4;
+       u32 nav_usehdr:1;
+       u32 en_descid:1;
+       u32 sectype:2;
+       u32 pktoffset:8;
+
+       u32 rts_rc:6;
+       u32 data_rc:6;
+       u32 agg_en:1;
+       u32 rdg_en:1;
+       u32 bar_retryht:2;
+       u32 agg_break:1;
+       u32 morefrag:1;
+       u32 raw:1;
+       u32 ccx:1;
+       u32 ampdudensity:3;
+       u32 bt_int:1;
+       u32 ant_sela:1;
+       u32 ant_selb:1;
+       u32 txant_cck:2;
+       u32 txant_l:2;
+       u32 txant_ht:2;
+
+       u32 nextheadpage:8;
+       u32 tailpage:8;
+       u32 seq:12;
+       u32 cpu_handle:1;
+       u32 tag1:1;
+       u32 trigger_int:1;
+       u32 hwseq_en:1;
+
+       u32 rtsrate:5;
+       u32 apdcfe:1;
+       u32 qos:1;
+       u32 hwseq_ssn:1;
+       u32 userrate:1;
+       u32 dis_rtsfb:1;
+       u32 dis_datafb:1;
+       u32 cts2self:1;
+       u32 rts_en:1;
+       u32 hwrts_en:1;
+       u32 portid:1;
+       u32 pwr_status:3;
+       u32 waitdcts:1;
+       u32 cts2ap_en:1;
+       u32 txsc:2;
+       u32 stbc:2;
+       u32 txshort:1;
+       u32 txbw:1;
+       u32 rtsshort:1;
+       u32 rtsbw:1;
+       u32 rtssc:2;
+       u32 rtsstbc:2;
+
+       u32 txrate:6;
+       u32 shortgi:1;
+       u32 ccxt:1;
+       u32 txrate_fb_lmt:5;
+       u32 rtsrate_fb_lmt:4;
+       u32 retrylmt_en:1;
+       u32 txretrylmt:6;
+       u32 usb_txaggnum:8;
+
+       u32 txagca:5;
+       u32 txagcb:5;
+       u32 usemaxlen:1;
+       u32 maxaggnum:5;
+       u32 mcsg1maxlen:4;
+       u32 mcsg2maxlen:4;
+       u32 mcsg3maxlen:4;
+       u32 mcs7sgimaxlen:4;
+
+       u32 txbuffersize:16;
+       u32 sw_offset30:8;
+       u32 sw_offset31:4;
+       u32 rsvd1:1;
+       u32 antsel_c:1;
+       u32 null_0:1;
+       u32 null_1:1;
+
+       u32 txbuffaddr;
+       u32 txbufferaddr64;
+       u32 nextdescaddress;
+       u32 nextdescaddress64;
+
+       u32 reserve_pass_pcie_mm_limit[4];
+} __packed;
+
+struct rx_desc_8723be {
+       u32 length:14;
+       u32 crc32:1;
+       u32 icverror:1;
+       u32 drv_infosize:4;
+       u32 security:3;
+       u32 qos:1;
+       u32 shift:2;
+       u32 phystatus:1;
+       u32 swdec:1;
+       u32 lastseg:1;
+       u32 firstseg:1;
+       u32 eor:1;
+       u32 own:1;
+
+       u32 macid:6;
+       u32 tid:4;
+       u32 hwrsvd:5;
+       u32 paggr:1;
+       u32 faggr:1;
+       u32 a1_fit:4;
+       u32 a2_fit:4;
+       u32 pam:1;
+       u32 pwr:1;
+       u32 moredata:1;
+       u32 morefrag:1;
+       u32 type:2;
+       u32 mc:1;
+       u32 bc:1;
+
+       u32 seq:12;
+       u32 frag:4;
+       u32 nextpktlen:14;
+       u32 nextind:1;
+       u32 rsvd:1;
+
+       u32 rxmcs:6;
+       u32 rxht:1;
+       u32 amsdu:1;
+       u32 splcp:1;
+       u32 bandwidth:1;
+       u32 htc:1;
+       u32 tcpchk_rpt:1;
+       u32 ipcchk_rpt:1;
+       u32 tcpchk_valid:1;
+       u32 hwpcerr:1;
+       u32 hwpcind:1;
+       u32 iv0:16;
+
+       u32 iv1;
+
+       u32 tsfl;
+
+       u32 bufferaddress;
+       u32 bufferaddress64;
+
+} __packed;
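+
+/*
+ * Both descriptor layouts above are shared with the device over DMA.  The
+ * "own" bit is the usual hand-off flag: while it is set the descriptor
+ * belongs to the hardware, and the host may only recycle it once the bit
+ * is cleared.  In practice the TX/RX paths manipulate descriptors through
+ * the byte-offset SET_/GET_ macros rather than these bit-field structs,
+ * which keeps the layout independent of compiler bit-field ordering.
+ */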
+
+void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
+                           struct ieee80211_hdr *hdr, u8 *pdesc,
+                           u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+                           struct ieee80211_sta *sta, struct sk_buff *skb,
+                           u8 hw_queue, struct rtl_tcb_desc *ptcb_desc);
+bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
+                            struct rtl_stats *status,
+                            struct ieee80211_rx_status *rx_status,
+                            u8 *pdesc, struct sk_buff *skb);
+void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                       u8 desc_name, u8 *val);
+u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name);
+bool rtl8723be_is_tx_desc_closed(struct ieee80211_hw *hw,
+                                u8 hw_queue, u16 index);
+void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
+void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
+                              bool b_firstseg, bool b_lastseg,
+                              struct sk_buff *skb);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/Makefile b/drivers/net/wireless/rtlwifi/rtl8723com/Makefile
new file mode 100644 (file)
index 0000000..345a68a
--- /dev/null
@@ -0,0 +1,9 @@
+rtl8723-common-objs :=         \
+               main.o          \
+               dm_common.o     \
+               fw_common.o     \
+               phy_common.o
+
+obj-$(CONFIG_RTL8723_COMMON) += rtl8723-common.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.c
new file mode 100644 (file)
index 0000000..4e254b7
--- /dev/null
@@ -0,0 +1,65 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "dm_common.h"
+#include "../rtl8723ae/dm.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723BE */
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.dynamic_txpower_enable = false;
+
+       rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
+       rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_txpower);
+
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm.current_turbo_edca = false;
+       rtlpriv->dm.is_any_nonbepkts = false;
+       rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_edca_turbo);
+
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
+       rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
+       rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
+       rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
+       rtlpriv->dm_pstable.rssi_val_min = 0;
+       rtlpriv->dm_pstable.initialize = 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_dm_init_dynamic_bb_powersaving);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/dm_common.h
new file mode 100644 (file)
index 0000000..5c1b94c
--- /dev/null
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __DM_COMMON_H__
+#define __DM_COMMON_H__
+
+void rtl8723_dm_init_dynamic_txpower(struct ieee80211_hw *hw);
+void rtl8723_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl8723_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
new file mode 100644 (file)
index 0000000..540278f
--- /dev/null
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "fw_common.h"
+#include <linux/module.h>
+
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 tmp;
+
+       if (enable) {
+               tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+
+               tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+
+               tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+               rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+       } else {
+               tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+
+               rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
+
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+                           const u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 blocksize = sizeof(u32);
+       const u8 *bufferptr = buffer;
+       const u32 *pu4byteptr = (const u32 *)buffer;
+       u32 i, offset, blockcount, remainsize;
+
+       blockcount = size / blocksize;
+       remainsize = size % blocksize;
+
+       for (i = 0; i < blockcount; i++) {
+               offset = i * blocksize;
+               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
+                               *(pu4byteptr + i));
+       }
+       if (remainsize) {
+               offset = blockcount * blocksize;
+               bufferptr += offset;
+               for (i = 0; i < remainsize; i++) {
+                       rtl_write_byte(rtlpriv,
+                                      (FW_8192C_START_ADDRESS + offset + i),
+                                      *(bufferptr + i));
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
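+
+/*
+ * Worked example (illustrative numbers only): for a 103-byte image,
+ * blockcount is 103 / 4 = 25 dwords written with rtl_write_dword(), and
+ * the remaining 103 % 4 = 3 bytes are written one at a time with
+ * rtl_write_byte() starting at offset 100.
+ */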
+
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+                          u32 page, const u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value8;
+       u8 u8page = (u8) (page & 0x07);
+
+       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+       rtl8723_fw_block_write(hw, buffer, size);
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
+
+static void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+       u32 fwlen = *pfwlen;
+       u8 remain = (u8) (fwlen % 4);
+
+       remain = (remain == 0) ? 0 : (4 - remain);
+
+       while (remain > 0) {
+               pfwbuf[fwlen] = 0;
+               fwlen++;
+               remain--;
+       }
+       *pfwlen = fwlen;
+}
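+
+/*
+ * rtl8723_fill_dummy() pads the image to a whole number of dwords, e.g. a
+ * 1021-byte image (1021 % 4 == 1) gets 3 zero bytes appended and *pfwlen
+ * becomes 1024, so rtl8723_write_fw() below always hands dword-aligned
+ * sizes to the block writer.
+ */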
+
+void rtl8723_write_fw(struct ieee80211_hw *hw,
+                     enum version_8723e version,
+                     u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *bufferptr = buffer;
+       u32 pagenums, remainsize;
+       u32 page, offset;
+
+       RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes\n", size);
+
+       rtl8723_fill_dummy(bufferptr, &size);
+
+       pagenums = size / FW_8192C_PAGE_SIZE;
+       remainsize = size % FW_8192C_PAGE_SIZE;
+
+       if (pagenums > 8) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Page numbers should not be greater than 8\n");
+       }
+       for (page = 0; page < pagenums; page++) {
+               offset = page * FW_8192C_PAGE_SIZE;
+               rtl8723_fw_page_write(hw, page, (bufferptr + offset),
+                                     FW_8192C_PAGE_SIZE);
+       }
+       if (remainsize) {
+               offset = pagenums * FW_8192C_PAGE_SIZE;
+               page = pagenums;
+               rtl8723_fw_page_write(hw, page, (bufferptr + offset),
+                                     remainsize);
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8723_write_fw);
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
+{
+       u8 u1tmp;
+       u8 delay = 100;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+       u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+       while (u1tmp & BIT(2)) {
+               delay--;
+               if (delay == 0)
+                       break;
+               udelay(50);
+               u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+       }
+       if (delay == 0) {
+               u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+               rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8723ae_firmware_selfreset);
+
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw)
+{
+       u8 u1b_tmp;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp & (~BIT(0))));
+
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp & (~BIT(2))));
+       udelay(50);
+
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
+       rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1b_tmp | BIT(0)));
+
+       u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+       rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
+
+       RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+                "_8051Reset8723be(): 8051 reset success.\n");
+}
+EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset);
+
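+/*
+ * Firmware "free to go" handshake: poll REG_MCUFWDL until the download
+ * checksum report (FWDL_CHKSUM_RPT) is set, acknowledge it by setting
+ * MCUFWDL_RDY and clearing WINTINI_RDY, optionally reset the 8051, then
+ * poll again until the firmware raises WINTINI_RDY to signal that it is
+ * running.  Returns 0 on success and -EIO if either poll times out.
+ */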
+int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       int err = -EIO;
+       u32 counter = 0;
+       u32 value32;
+
+       do {
+               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+       } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
+                (!(value32 & FWDL_CHKSUM_RPT)));
+
+       if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "chksum report fail! REG_MCUFWDL:0x%08x\n",
+                        value32);
+               goto exit;
+       }
+       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+                "Checksum report OK! REG_MCUFWDL:0x%08x\n", value32);
+
+       value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY;
+       value32 &= ~WINTINI_RDY;
+       rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+       if (is_8723be)
+               rtl8723be_firmware_selfreset(hw);
+       counter = 0;
+
+       do {
+               value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+               if (value32 & WINTINI_RDY) {
+                       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+                                "Polling FW ready success!! "
+                                "REG_MCUFWDL:0x%08x\n",
+                                value32);
+                       err = 0;
+                       goto exit;
+               }
+               udelay(FW_8192C_POLLING_DELAY);
+
+       } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
+
+       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
+                value32);
+
+exit:
+       return err;
+}
+EXPORT_SYMBOL_GPL(rtl8723_fw_free_to_go);
+
+int rtl8723_download_fw(struct ieee80211_hw *hw,
+                       bool is_8723be)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl92c_firmware_header *pfwheader;
+       u8 *pfwdata;
+       u32 fwsize;
+       int err;
+       enum version_8723e version = rtlhal->version;
+
+       if (!rtlhal->pfirmware)
+               return 1;
+
+       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+       pfwdata = rtlhal->pfirmware;
+       fwsize = rtlhal->fwsize;
+       RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+                "Normal firmware size: %d bytes\n", fwsize);
+
+       if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) {
+               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
+                        "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+                        pfwheader->version, pfwheader->signature,
+                        (int)sizeof(struct rtl92c_firmware_header));
+
+               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
+               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+       }
+       if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
+               rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+               if (is_8723be)
+                       rtl8723be_firmware_selfreset(hw);
+               else
+                       rtl8723ae_firmware_selfreset(hw);
+       }
+       rtl8723_enable_fw_download(hw, true);
+       rtl8723_write_fw(hw, version, pfwdata, fwsize);
+       rtl8723_enable_fw_download(hw, false);
+
+       err = rtl8723_fw_free_to_go(hw, is_8723be);
+       if (err) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                        "Firmware is not ready to run!\n");
+       } else {
+               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
+                        "Firmware is ready to run!\n");
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtl8723_download_fw);
+
+bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
+                            struct sk_buff *skb)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+       struct rtl8192_tx_ring *ring;
+       struct rtl_tx_desc *pdesc;
+       struct sk_buff *pskb = NULL;
+       u8 own;
+       unsigned long flags;
+
+       ring = &rtlpci->tx_ring[BEACON_QUEUE];
+
+       pskb = __skb_dequeue(&ring->queue);
+       if (pskb)
+               kfree_skb(pskb);
+
+       spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
+
+       pdesc = &ring->desc[0];
+       own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc, true, HW_DESC_OWN);
+
+       rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *)pdesc, 1, 1, skb);
+
+       __skb_queue_tail(&ring->queue, skb);
+
+       spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+       rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(rtl8723_cmd_send_packet);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
new file mode 100644 (file)
index 0000000..cf1cc58
--- /dev/null
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __FW_COMMON_H__
+#define __FW_COMMON_H__
+
+#define REG_SYS_FUNC_EN                                0x0002
+#define REG_MCUFWDL                            0x0080
+#define FW_8192C_START_ADDRESS                 0x1000
+#define FW_8192C_PAGE_SIZE                     4096
+#define FW_8192C_POLLING_TIMEOUT_COUNT         6000
+#define FW_8192C_POLLING_DELAY                 5
+
+#define MCUFWDL_RDY                            BIT(1)
+#define FWDL_CHKSUM_RPT                                BIT(2)
+#define WINTINI_RDY                            BIT(6)
+
+#define REG_RSV_CTRL                           0x001C
+#define REG_HMETFR                             0x01CC
+
+enum version_8723e {
+       VERSION_TEST_UMC_CHIP_8723 = 0x0081,
+       VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
+       VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
+       VERSION_TEST_CHIP_1T1R_8723B = 0x0106,
+       VERSION_NORMAL_SMIC_CHIP_1T1R_8723B = 0x010E,
+       VERSION_UNKNOWN = 0xFF,
+};
+
+enum rtl8723ae_h2c_cmd {
+       H2C_AP_OFFLOAD = 0,
+       H2C_SETPWRMODE = 1,
+       H2C_JOINBSSRPT = 2,
+       H2C_RSVDPAGE = 3,
+       H2C_RSSI_REPORT = 4,
+       H2C_P2P_PS_CTW_CMD = 5,
+       H2C_P2P_PS_OFFLOAD = 6,
+       H2C_RA_MASK = 7,
+       MAX_H2CCMD
+};
+
+enum rtl8723be_cmd {
+       H2C_8723BE_RSVDPAGE = 0,
+       H2C_8723BE_JOINBSSRPT = 1,
+       H2C_8723BE_SCAN = 2,
+       H2C_8723BE_KEEP_ALIVE_CTRL = 3,
+       H2C_8723BE_DISCONNECT_DECISION = 4,
+       H2C_8723BE_INIT_OFFLOAD = 6,
+       H2C_8723BE_AP_OFFLOAD = 8,
+       H2C_8723BE_BCN_RSVDPAGE = 9,
+       H2C_8723BE_PROBERSP_RSVDPAGE = 10,
+
+       H2C_8723BE_SETPWRMODE = 0x20,
+       H2C_8723BE_PS_TUNING_PARA = 0x21,
+       H2C_8723BE_PS_TUNING_PARA2 = 0x22,
+       H2C_8723BE_PS_LPS_PARA = 0x23,
+       H2C_8723BE_P2P_PS_OFFLOAD = 0x24,
+
+       H2C_8723BE_WO_WLAN = 0x80,
+       H2C_8723BE_REMOTE_WAKE_CTRL = 0x81,
+       H2C_8723BE_AOAC_GLOBAL_INFO = 0x82,
+       H2C_8723BE_AOAC_RSVDPAGE = 0x83,
+       H2C_8723BE_RSSI_REPORT = 0x42,
+       H2C_8723BE_RA_MASK = 0x40,
+       H2C_8723BE_SELECTIVE_SUSPEND_ROF_CMD,
+       H2C_8723BE_P2P_PS_MODE,
+       H2C_8723BE_PSD_RESULT,
+       /* CTW command for P2P is not defined yet */
+       H2C_8723BE_P2P_PS_CTW_CMD,
+       MAX_8723BE_H2CCMD
+};
+
+struct rtl92c_firmware_header {
+       u16 signature;
+       u8 category;
+       u8 function;
+       u16 version;
+       u8 subversion;
+       u8 rsvd1;
+       u8 month;
+       u8 date;
+       u8 hour;
+       u8 minute;
+       u16 ramcodesize;
+       u16 rsvd2;
+       u32 svnindex;
+       u32 rsvd3;
+       u32 rsvd4;
+       u32 rsvd5;
+};
+
+void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
+void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable);
+void rtl8723_fw_block_write(struct ieee80211_hw *hw,
+                           const u8 *buffer, u32 size);
+void rtl8723_fw_page_write(struct ieee80211_hw *hw,
+                          u32 page, const u8 *buffer, u32 size);
+void rtl8723_write_fw(struct ieee80211_hw *hw,
+                     enum version_8723e version,
+                     u8 *buffer, u32 size);
+int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be);
+int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be);
+#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/rtlwifi/rtl8723com/main.c
new file mode 100644 (file)
index 0000000..9014a94
--- /dev/null
@@ -0,0 +1,33 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include <linux/module.h>
+
+
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger    <Larry.Finger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek RTL8723AE/RTL8723BE 802.11n PCI wireless common routines");
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.c
new file mode 100644 (file)
index 0000000..d73b659
--- /dev/null
@@ -0,0 +1,434 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#include "../wifi.h"
+#include "phy_common.h"
+#include "../rtl8723ae/reg.h"
+#include <linux/module.h>
+
+/* These routines are common to RTL8723AE and RTL8723BE */
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+                            u32 regaddr, u32 bitmask)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 returnvalue, originalvalue, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+       originalvalue = rtl_read_dword(rtlpriv, regaddr);
+       bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+       returnvalue = (originalvalue & bitmask) >> bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "BBR MASK = 0x%x Addr[0x%x]= 0x%x\n",
+                 bitmask, regaddr, originalvalue);
+
+       return returnvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
+
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+                             u32 bitmask, u32 data)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 originalvalue, bitshift;
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+                 regaddr, bitmask, data);
+
+       if (bitmask != MASKDWORD) {
+               originalvalue = rtl_read_dword(rtlpriv, regaddr);
+               bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
+               data = ((originalvalue & (~bitmask)) | (data << bitshift));
+       }
+
+       rtl_write_dword(rtlpriv, regaddr, data);
+
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+                 regaddr, bitmask, data);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
+
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
+{
+       u32 i;
+
+       for (i = 0; i <= 31; i++) {
+               if (((bitmask >> i) & 0x1) == 1)
+                       break;
+       }
+       return i;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
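+
+/*
+ * Example: a bitmask of 0x003F0000 has its lowest set bit at position 16,
+ * so rtl8723_phy_calculate_bit_shift() returns 16 and writing 0x2a via
+ * rtl8723_phy_set_bb_reg(hw, reg, 0x003F0000, 0x2a) lands in bits 16..21
+ * of the register.
+ */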
+
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+                              enum radio_path rfpath, u32 offset)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+       u32 newoffset;
+       u32 tmplong, tmplong2;
+       u8 rfpi_enable = 0;
+       u32 retvalue;
+
+       offset &= 0xff;
+       newoffset = offset;
+       if (RT_CANNOT_IO(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "returning all ones\n");
+               return 0xFFFFFFFF;
+       }
+       tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+       if (rfpath == RF90_PATH_A)
+               tmplong2 = tmplong;
+       else
+               tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+       tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+                  (newoffset << 23) | BLSSIREADEDGE;
+       rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+                     tmplong & (~BLSSIREADEDGE));
+       mdelay(1);
+       rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+       mdelay(2);
+       if (rfpath == RF90_PATH_A)
+               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+                                                BIT(8));
+       else if (rfpath == RF90_PATH_B)
+               rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+                                                BIT(8));
+       if (rfpi_enable)
+               retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+                                        BLSSIREADBACKDATA);
+       else
+               retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+                                        BLSSIREADBACKDATA);
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "RFR-%d Addr[0x%x]= 0x%x\n",
+                 rfpath, pphyreg->rf_rb, retvalue);
+       return retvalue;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
+
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+                                enum radio_path rfpath,
+                                u32 offset, u32 data)
+{
+       u32 data_and_addr;
+       u32 newoffset;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+       struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+       if (RT_CANNOT_IO(hw)) {
+               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+               return;
+       }
+       offset &= 0xff;
+       newoffset = offset;
+       data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+       rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+       RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
+                "RFW-%d Addr[0x%x]= 0x%x\n", rfpath,
+                  pphyreg->rf3wire_offset, data_and_addr);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);
+
+long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+                                 enum wireless_mode wirelessmode,
+                                 u8 txpwridx)
+{
+       long offset;
+       long pwrout_dbm;
+
+       switch (wirelessmode) {
+       case WIRELESS_MODE_B:
+               offset = -7;
+               break;
+       case WIRELESS_MODE_G:
+       case WIRELESS_MODE_N_24G:
+       default:
+               offset = -8;
+               break;
+       }
+       pwrout_dbm = txpwridx / 2 + offset;
+       return pwrout_dbm;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_txpwr_idx_to_dbm);
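+
+/*
+ * Example: a power index of 0x22 (34) maps to 34 / 2 - 8 = 9 dBm in
+ * 802.11g/n mode and to 34 / 2 - 7 = 10 dBm in 802.11b mode.
+ */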
+
+void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_phy *rtlphy = &(rtlpriv->phy);
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+       rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+       rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+                           RFPGA0_XA_LSSIPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+                           RFPGA0_XB_LSSIPARAMETER;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+       rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+       rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+       rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+       rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
+       rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+       rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+       rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+       rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+       rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+       rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+       rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+       rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
+
+       rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
+       rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_init_bb_rf_reg_def);
+
+bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+                                     u32 cmdtableidx,
+                                     u32 cmdtablesz,
+                                     enum swchnlcmd_id cmdid,
+                                     u32 para1, u32 para2,
+                                     u32 msdelay)
+{
+       struct swchnlcmd *pcmd;
+
+       if (cmdtable == NULL) {
+               RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+               return false;
+       }
+
+       if (cmdtableidx >= cmdtablesz)
+               return false;
+
+       pcmd = cmdtable + cmdtableidx;
+       pcmd->cmdid = cmdid;
+       pcmd->para1 = para1;
+       pcmd->para2 = para2;
+       pcmd->msdelay = msdelay;
+       return true;
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_set_sw_chnl_cmdarray);
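+
+/*
+ * Typical use (illustrative sketch only; RF_CHNLBW and "channel" come from
+ * the chip-specific PHY code): the software channel-switch state machine
+ * fills a small table and terminates it with CMDID_END, e.g.
+ *
+ *     struct swchnlcmd cmd[4];
+ *     u32 i = 0;
+ *
+ *     rtl8723_phy_set_sw_chnl_cmdarray(cmd, i++, 4, CMDID_RF_WRITEREG,
+ *                                      RF_CHNLBW, channel, 10);
+ *     rtl8723_phy_set_sw_chnl_cmdarray(cmd, i++, 4, CMDID_END, 0, 0, 0);
+ */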
+
+void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+                                       bool iqk_ok,
+                                       long result[][8],
+                                       u8 final_candidate,
+                                       bool btxonly)
+{
+       u32 oldval_0, x, tx0_a, reg;
+       long y, tx0_c;
+
+       if (final_candidate == 0xFF) {
+               return;
+       } else if (iqk_ok) {
+               oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+                                         MASKDWORD) >> 22) & 0x3FF;
+               x = result[final_candidate][0];
+               if ((x & 0x00000200) != 0)
+                       x = x | 0xFFFFFC00;
+               tx0_a = (x * oldval_0) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
+                             ((x * oldval_0 >> 7) & 0x1));
+               y = result[final_candidate][1];
+               if ((y & 0x00000200) != 0)
+                       y = y | 0xFFFFFC00;
+               tx0_c = (y * oldval_0) >> 8;
+               rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
+                             ((tx0_c & 0x3C0) >> 6));
+               rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
+                             (tx0_c & 0x3F));
+               rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
+                             ((y * oldval_0 >> 7) & 0x1));
+               if (btxonly)
+                       return;
+               reg = result[final_candidate][2];
+               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+               reg = result[final_candidate][3] & 0x3F;
+               rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+               reg = (result[final_candidate][3] >> 6) & 0xF;
+               rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
+       }
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_fill_iqk_matrix);
+
+void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
+                                u32 *addabackup, u32 registernum)
+{
+       u32 i;
+
+       for (i = 0; i < registernum; i++)
+               addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
+}
+EXPORT_SYMBOL_GPL(rtl8723_save_adda_registers);
+
+void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
+                                   u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+               macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+       macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_save_mac_registers);
+
+void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
+                                      u32 *addareg, u32 *addabackup,
+                                      u32 registernum)
+{
+       u32 i;
+
+       for (i = 0; i < registernum; i++)
+               rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_reload_adda_registers);
+
+void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
+                                     u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i;
+
+       for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+               rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
+       rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_reload_mac_registers);
+
+void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
+                             bool is_patha_on, bool is2t)
+{
+       u32 pathon;
+       u32 i;
+
+       pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
+       if (!is2t) {
+               pathon = 0x0bdb25a0;
+               rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
+       } else {
+               rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon);
+       }
+
+       for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+               rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_adda_on);
+
+void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+                                        u32 *macreg, u32 *macbackup)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u32 i = 0;
+
+       rtl_write_byte(rtlpriv, macreg[i], 0x3F);
+
+       for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+               rtl_write_byte(rtlpriv, macreg[i],
+                              (u8) (macbackup[i] & (~BIT(3))));
+       rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_mac_setting_calibration);
+
+void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw)
+{
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
+       rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
+       rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_path_a_standby);
+
+void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+       u32 mode;
+
+       mode = pi_mode ? 0x01000100 : 0x01000000;
+       rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
+       rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
+}
+EXPORT_SYMBOL_GPL(rtl8723_phy_pi_mode_switch);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/phy_common.h
new file mode 100644 (file)
index 0000000..83b891a
--- /dev/null
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009-2014  Realtek Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+ * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
+ * Hsinchu 300, Taiwan.
+ *
+ * Larry Finger <Larry.Finger@lwfinger.net>
+ *
+ *****************************************************************************/
+
+#ifndef __PHY_COMMON__
+#define __PHY_COMMON__
+
+#define RT_CANNOT_IO(hw)                       false
+
+enum swchnlcmd_id {
+       CMDID_END,
+       CMDID_SET_TXPOWEROWER_LEVEL,
+       CMDID_BBREGWRITE10,
+       CMDID_WRITEPORT_ULONG,
+       CMDID_WRITEPORT_USHORT,
+       CMDID_WRITEPORT_UCHAR,
+       CMDID_RF_WRITEREG,
+};
+
+struct swchnlcmd {
+       enum swchnlcmd_id cmdid;
+       u32 para1;
+       u32 para2;
+       u32 msdelay;
+};
+
+u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
+                            u32 regaddr, u32 bitmask);
+void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
+                             u32 bitmask, u32 data);
+u32 rtl8723_phy_calculate_bit_shift(u32 bitmask);
+u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
+                              enum radio_path rfpath, u32 offset);
+void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
+                                enum radio_path rfpath,
+                                u32 offset, u32 data);
+long rtl8723_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
+                                 enum wireless_mode wirelessmode,
+                                 u8 txpwridx);
+void rtl8723_phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
+bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
+                                     u32 cmdtableidx,
+                                     u32 cmdtablesz,
+                                     enum swchnlcmd_id cmdid,
+                                     u32 para1, u32 para2,
+                                     u32 msdelay);
+void rtl8723_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
+                                       bool iqk_ok,
+                                       long result[][8],
+                                       u8 final_candidate,
+                                       bool btxonly);
+void rtl8723_save_adda_registers(struct ieee80211_hw *hw, u32 *addareg,
+                                u32 *addabackup, u32 registernum);
+void rtl8723_phy_save_mac_registers(struct ieee80211_hw *hw,
+                                   u32 *macreg, u32 *macbackup);
+void rtl8723_phy_reload_adda_registers(struct ieee80211_hw *hw,
+                                      u32 *addareg, u32 *addabackup,
+                                      u32 registernum);
+void rtl8723_phy_reload_mac_registers(struct ieee80211_hw *hw,
+                                     u32 *macreg, u32 *macbackup);
+void rtl8723_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg,
+                             bool is_patha_on, bool is2t);
+void rtl8723_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+                                        u32 *macreg, u32 *macbackup);
+void rtl8723_phy_path_a_standby(struct ieee80211_hw *hw);
+void rtl8723_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode);
+
+#endif
index 4933f02ce1d510cf94ef127b59ea025238ad9476..0398d3ea15b00b6c68b4b1e160700df276f90254 100644 (file)
@@ -410,7 +410,7 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw)
        mac->current_ampdu_factor = 3;
 
        /* QOS */
-       rtlusb->acm_method = eAcmWay2_SW;
+       rtlusb->acm_method = EACMWAY2_SW;
 
        /* IRQ */
        /* HIMR - turn all on */
@@ -994,7 +994,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
                seq_number += 1;
                seq_number <<= 4;
        }
-       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, sta, skb,
+       rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, NULL, info, sta, skb,
                                        hw_queue, &tcb_desc);
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                if (qc)
index 8c647391bedf0f4bc5ef4fca39b7ca39a05e1483..6965afdf572a9d57c06fabd8b070fbc8cc58b9ce 100644 (file)
 #include <linux/completion.h>
 #include "debug.h"
 
+#define        MASKBYTE0                               0xff
+#define        MASKBYTE1                               0xff00
+#define        MASKBYTE2                               0xff0000
+#define        MASKBYTE3                               0xff000000
+#define        MASKHWORD                               0xffff0000
+#define        MASKLWORD                               0x0000ffff
+#define        MASKDWORD                               0xffffffff
+#define        MASK12BITS                              0xfff
+#define        MASKH4BITS                              0xf0000000
+#define MASKOFDM_D                             0xffc00000
+#define        MASKCCK                                 0x3f3f3f3f
+
+#define        MASK4BITS                               0x0f
+#define        MASK20BITS                              0xfffff
+#define RFREG_OFFSET_MASK                      0xfffff
+
 #define RF_CHANGE_BY_INIT                      0
 #define RF_CHANGE_BY_IPS                       BIT(28)
 #define RF_CHANGE_BY_PS                                BIT(29)
@@ -49,6 +81,7 @@
 
 #define IQK_ADDA_REG_NUM                       16
 #define IQK_MAC_REG_NUM                                4
+#define IQK_THRESHOLD                          8
 
 #define MAX_KEY_LEN                            61
 #define KEY_BUF_SIZE                           5
 #define MAC80211_4ADDR_LEN                     30
 
 #define CHANNEL_MAX_NUMBER     (14 + 24 + 21)  /* 14 is the max channel no */
+#define CHANNEL_MAX_NUMBER_2G          14
+#define CHANNEL_MAX_NUMBER_5G          54 /* Please refer to
+                                           * "phy_GetChnlGroup8812A" and
+                                           * "Hal_ReadTxPowerInfo8812A"
+                                           */
+#define CHANNEL_MAX_NUMBER_5G_80M      7
 #define CHANNEL_GROUP_MAX      (3 + 9) /*  ch1~3, 4~9, 10~14 = three groups */
 #define MAX_PG_GROUP                   13
 #define        CHANNEL_GROUP_MAX_2G            3
 #define        CHANNEL_GROUP_IDX_5GL           3
 #define CHANNEL_MAX_NUMBER_2G          14
 #define AVG_THERMAL_NUM                        8
 #define AVG_THERMAL_NUM_88E            4
+#define AVG_THERMAL_NUM_8723BE         4
 #define MAX_TID_COUNT                  9
 
 /* for early mode */
 #define        MAX_CHNL_GROUP_24G              6
 #define        MAX_CHNL_GROUP_5G               14
 
+#define TX_PWR_BY_RATE_NUM_BAND                2
+#define TX_PWR_BY_RATE_NUM_RF          4
+#define TX_PWR_BY_RATE_NUM_SECTION     12
+#define MAX_BASE_NUM_IN_PHY_REG_PG_24G  6
+#define MAX_BASE_NUM_IN_PHY_REG_PG_5G  5
+
+#define RTL8192EE_SEG_NUM              1 /* 0:2 seg, 1: 4 seg, 2: 8 seg */
+
+#define DEL_SW_IDX_SZ          30
+#define BAND_NUM                       3
+
+enum rf_tx_num {
+       RF_1TX = 0,
+       RF_2TX,
+       RF_MAX_TX_NUM,
+       RF_TX_NUM_NONIMPLEMENT,
+};
+
 struct txpower_info_2g {
        u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
        u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -115,6 +178,8 @@ struct txpower_info_2g {
        u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
        u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
        u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+       u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+       u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
 };
 
 struct txpower_info_5g {
@@ -123,6 +188,17 @@ struct txpower_info_5g {
        u8 ofdm_diff[MAX_RF_PATH][MAX_TX_COUNT];
        u8 bw20_diff[MAX_RF_PATH][MAX_TX_COUNT];
        u8 bw40_diff[MAX_RF_PATH][MAX_TX_COUNT];
+       u8 bw80_diff[MAX_RF_PATH][MAX_TX_COUNT];
+       u8 bw160_diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+enum rate_section {
+       CCK = 0,
+       OFDM,
+       HT_MCS0_MCS7,
+       HT_MCS8_MCS15,
+       VHT_1SSMCS0_1SSMCS9,
+       VHT_2SSMCS0_2SSMCS9,
 };
 
 enum intf_type {
@@ -158,7 +234,10 @@ enum hardware_type {
        HARDWARE_TYPE_RTL8192DU,
        HARDWARE_TYPE_RTL8723AE,
        HARDWARE_TYPE_RTL8723U,
+       HARDWARE_TYPE_RTL8723BE,
        HARDWARE_TYPE_RTL8188EE,
+       HARDWARE_TYPE_RTL8821AE,
+       HARDWARE_TYPE_RTL8812AE,
 
        /* keep it last */
        HARDWARE_TYPE_NUM
@@ -195,8 +274,16 @@ enum hardware_type {
         _pdesc->rxmcs == DESC92_RATE5_5M ||            \
         _pdesc->rxmcs == DESC92_RATE11M)
 
+#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs)             \
+       ((rxmcs) == DESC92_RATE1M ||                    \
+        (rxmcs) == DESC92_RATE2M ||                    \
+        (rxmcs) == DESC92_RATE5_5M ||                  \
+        (rxmcs) == DESC92_RATE11M)
+
 enum scan_operation_backup_opt {
        SCAN_OPT_BACKUP = 0,
+       SCAN_OPT_BACKUP_BAND0 = 0,
+       SCAN_OPT_BACKUP_BAND1,
        SCAN_OPT_RESTORE,
        SCAN_OPT_MAX
 };
@@ -231,7 +318,9 @@ struct bb_reg_def {
 
 enum io_type {
        IO_CMD_PAUSE_DM_BY_SCAN = 0,
-       IO_CMD_RESUME_DM_BY_SCAN = 1,
+       IO_CMD_PAUSE_BAND0_DM_BY_SCAN = 0,
+       IO_CMD_PAUSE_BAND1_DM_BY_SCAN = 1,
+       IO_CMD_RESUME_DM_BY_SCAN = 2,
 };
 
 enum hw_variables {
@@ -298,6 +387,7 @@ enum hw_variables {
        HW_VAR_SET_RPWM,
        HW_VAR_H2C_FW_PWRMODE,
        HW_VAR_H2C_FW_JOINBSSRPT,
+       HW_VAR_H2C_FW_MEDIASTATUSRPT,
        HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
        HW_VAR_FW_PSMODE_STATUS,
        HW_VAR_RESUME_CLK_ON,
@@ -330,6 +420,8 @@ enum hw_variables {
 
        HAL_DEF_WOWLAN,
        HW_VAR_MRC,
+       HW_VAR_KEEP_ALIVE,
+       HW_VAR_NAV_UPPER,
 
        HW_VAR_MGT_FILTER,
        HW_VAR_CTRL_FILTER,
@@ -348,34 +440,34 @@ enum rt_oem_id {
        RT_CID_8187_HW_LED = 3,
        RT_CID_8187_NETGEAR = 4,
        RT_CID_WHQL = 5,
-       RT_CID_819x_CAMEO = 6,
-       RT_CID_819x_RUNTOP = 7,
-       RT_CID_819x_Senao = 8,
+       RT_CID_819X_CAMEO = 6,
+       RT_CID_819X_RUNTOP = 7,
+       RT_CID_819X_SENAO = 8,
        RT_CID_TOSHIBA = 9,
-       RT_CID_819x_Netcore = 10,
-       RT_CID_Nettronix = 11,
+       RT_CID_819X_NETCORE = 10,
+       RT_CID_NETTRONIX = 11,
        RT_CID_DLINK = 12,
        RT_CID_PRONET = 13,
        RT_CID_COREGA = 14,
-       RT_CID_819x_ALPHA = 15,
-       RT_CID_819x_Sitecom = 16,
+       RT_CID_819X_ALPHA = 15,
+       RT_CID_819X_SITECOM = 16,
        RT_CID_CCX = 17,
-       RT_CID_819x_Lenovo = 18,
-       RT_CID_819x_QMI = 19,
-       RT_CID_819x_Edimax_Belkin = 20,
-       RT_CID_819x_Sercomm_Belkin = 21,
-       RT_CID_819x_CAMEO1 = 22,
-       RT_CID_819x_MSI = 23,
-       RT_CID_819x_Acer = 24,
-       RT_CID_819x_HP = 27,
-       RT_CID_819x_CLEVO = 28,
-       RT_CID_819x_Arcadyan_Belkin = 29,
-       RT_CID_819x_SAMSUNG = 30,
-       RT_CID_819x_WNC_COREGA = 31,
-       RT_CID_819x_Foxcoon = 32,
-       RT_CID_819x_DELL = 33,
-       RT_CID_819x_PRONETS = 34,
-       RT_CID_819x_Edimax_ASUS = 35,
+       RT_CID_819X_LENOVO = 18,
+       RT_CID_819X_QMI = 19,
+       RT_CID_819X_EDIMAX_BELKIN = 20,
+       RT_CID_819X_SERCOMM_BELKIN = 21,
+       RT_CID_819X_CAMEO1 = 22,
+       RT_CID_819X_MSI = 23,
+       RT_CID_819X_ACER = 24,
+       RT_CID_819X_HP = 27,
+       RT_CID_819X_CLEVO = 28,
+       RT_CID_819X_ARCADYAN_BELKIN = 29,
+       RT_CID_819X_SAMSUNG = 30,
+       RT_CID_819X_WNC_COREGA = 31,
+       RT_CID_819X_FOXCOON = 32,
+       RT_CID_819X_DELL = 33,
+       RT_CID_819X_PRONETS = 34,
+       RT_CID_819X_EDIMAX_ASUS = 35,
        RT_CID_NETGEAR = 36,
        RT_CID_PLANEX = 37,
        RT_CID_CC_C = 38,
@@ -389,6 +481,7 @@ enum hw_descs {
        HW_DESC_RXBUFF_ADDR,
        HW_DESC_RXPKT_LEN,
        HW_DESC_RXERO,
+       HW_DESC_RX_PREPARE,
 };
 
 enum prime_sc {
@@ -407,6 +500,7 @@ enum rf_type {
 enum ht_channel_width {
        HT_CHANNEL_WIDTH_20 = 0,
        HT_CHANNEL_WIDTH_20_40 = 1,
+       HT_CHANNEL_WIDTH_80 = 2,
 };
 
 /* Ref: 802.11i spec D10.0 7.3.2.25.1
@@ -471,6 +565,9 @@ enum rtl_var_map {
        MAC_RCR_ACRC32,
        MAC_RCR_ACF,
        MAC_RCR_AAP,
+       MAC_HIMR,
+       MAC_HIMRE,
+       MAC_HSISR,
 
        /*efuse map */
        EFUSE_TEST,
@@ -608,7 +705,7 @@ enum rtl_led_pin {
 enum acm_method {
        eAcmWay0_SwAndHw = 0,
        eAcmWay1_HW = 1,
-       eAcmWay2_SW = 2,
+       EACMWAY2_SW = 2,
 };
 
 enum macphy_mode {
@@ -645,7 +742,9 @@ enum wireless_mode {
        WIRELESS_MODE_G = 0x04,
        WIRELESS_MODE_AUTO = 0x08,
        WIRELESS_MODE_N_24G = 0x10,
-       WIRELESS_MODE_N_5G = 0x20
+       WIRELESS_MODE_N_5G = 0x20,
+       WIRELESS_MODE_AC_5G = 0x40,
+       WIRELESS_MODE_AC_24G  = 0x80
 };
 
 #define IS_WIRELESS_MODE_A(wirelessmode)       \
@@ -669,6 +768,8 @@ enum ratr_table_mode {
        RATR_INX_WIRELESS_B = 6,
        RATR_INX_WIRELESS_MC = 7,
        RATR_INX_WIRELESS_A = 8,
+       RATR_INX_WIRELESS_AC_5N = 8,
+       RATR_INX_WIRELESS_AC_24N = 9,
 };
 
 enum rtl_link_state {
@@ -803,8 +904,12 @@ struct wireless_stats {
        long signal_strength;
 
        u8 rx_rssi_percentage[4];
+       u8 rx_evm_dbm[4];
        u8 rx_evm_percentage[2];
 
+       u16 rx_cfo_short[4];
+       u16 rx_cfo_tail[4];
+
        struct rt_smooth_data ui_rssi;
        struct rt_smooth_data ui_link_quality;
 };
@@ -817,9 +922,9 @@ struct rate_adaptive {
        u32 high_rssi_thresh_for_ra;
        u32 high2low_rssi_thresh_for_ra;
        u8 low2high_rssi_thresh_for_ra40m;
-       u32 low_rssi_thresh_for_ra40M;
+       u32 low_rssi_thresh_for_ra40m;
        u8 low2high_rssi_thresh_for_ra20m;
-       u32 low_rssi_thresh_for_ra20M;
+       u32 low_rssi_thresh_for_ra20m;
        u32 upper_rssi_threshold_ratr;
        u32 middleupper_rssi_threshold_ratr;
        u32 middle_rssi_threshold_ratr;
@@ -833,6 +938,10 @@ struct rate_adaptive {
        u32 ping_rssi_thresh_for_ra;
        u32 last_ratr;
        u8 pre_ratr_state;
+       u8 ldpc_thres;
+       bool use_ldpc;
+       bool lower_rts_rate;
+       bool is_special_data;
 };
 
 struct regd_pair_mapping {
@@ -841,6 +950,16 @@ struct regd_pair_mapping {
        u16 reg_2ghz_ctl;
 };
 
+struct dynamic_primary_cca {
+       u8 pricca_flag;
+       u8 intf_flag;
+       u8 intf_type;
+       u8 dup_rts_flag;
+       u8 monitor_flag;
+       u8 ch_offset;
+       u8 mf_state;
+};
+
 struct rtl_regulatory {
        char alpha2[2];
        u16 country_code;
@@ -976,16 +1095,29 @@ struct rtl_phy {
        u32 iqk_bb_backup[10];
        bool iqk_initialized;
 
+       bool rfpath_rx_enable[MAX_RF_PATH];
+       u8 reg_837;
        /* Dual mac */
        bool need_iqk;
        struct iqk_matrix_regs iqk_matrix[IQK_MATRIX_SETTINGS_NUM];
 
        bool rfpi_enable;
+       bool iqk_in_progress;
 
        u8 pwrgroup_cnt;
        u8 cck_high_power;
        /* MAX_PG_GROUP groups of pwr diff by rates */
        u32 mcs_offset[MAX_PG_GROUP][16];
+       u32 tx_power_by_rate_offset[TX_PWR_BY_RATE_NUM_BAND]
+                                  [TX_PWR_BY_RATE_NUM_RF]
+                                  [TX_PWR_BY_RATE_NUM_RF]
+                                  [TX_PWR_BY_RATE_NUM_SECTION];
+       u8 txpwr_by_rate_base_24g[TX_PWR_BY_RATE_NUM_RF]
+                                [TX_PWR_BY_RATE_NUM_RF]
+                                [MAX_BASE_NUM_IN_PHY_REG_PG_24G];
+       u8 txpwr_by_rate_base_5g[TX_PWR_BY_RATE_NUM_RF]
+                               [TX_PWR_BY_RATE_NUM_RF]
+                               [MAX_BASE_NUM_IN_PHY_REG_PG_5G];
        u8 default_initialgain[4];
 
        /* the current Tx power level */
@@ -998,6 +1130,7 @@ struct rtl_phy {
        bool apk_done;
        u32 reg_rf3c[2];        /* pathA / pathB  */
 
+       u32 backup_rf_0x1a; /* 92ee */
        /* bfsync */
        u8 framesync;
        u32 framesync_c34;
@@ -1006,6 +1139,7 @@ struct rtl_phy {
        struct phy_parameters hwparam_tables[MAX_TAB];
        u16 rf_pathmap;
 
+       u8 hw_rof_enable; /* Enable GPIO[9] as WL RF HW PDn source */
        enum rt_polarity_ctl polarity_ctl;
 };
 
@@ -1133,6 +1267,7 @@ struct rtl_mac {
        u8 use_cts_protect;
        u8 cur_40_prime_sc;
        u8 cur_40_prime_sc_bk;
+       u8 cur_80_prime_sc;
        u64 tsf;
        u8 retry_short;
        u8 retry_long;
@@ -1213,6 +1348,7 @@ struct rtl_hal {
        bool being_init_adapter;
        bool bbrf_ready;
        bool mac_func_enable;
+       bool pre_edcca_enable;
        struct bt_coexist_8723 hal_coex_8723;
 
        enum intf_type interface;
@@ -1234,6 +1370,7 @@ struct rtl_hal {
        /*Reserve page start offset except beacon in TxQ. */
        u8 fw_rsvdpage_startoffset;
        u8 h2c_txcmd_seq;
+       u8 current_ra_rate;
 
        /* FW Cmd IO related */
        u16 fwcmd_iomap;
@@ -1273,6 +1410,9 @@ struct rtl_hal {
        bool disable_amsdu_8k;
        bool master_of_dmsp;
        bool slave_of_dmsp;
+
+       u16 rx_tag; /* for 92ee */
+       u8 rts_en;
 };
 
 struct rtl_security {
@@ -1321,6 +1461,16 @@ struct fast_ant_training {
        bool    becomelinked;
 };
 
+struct dm_phy_dbg_info {
+       char rx_snrdb[4];
+       u64 num_qry_phy_status;
+       u64 num_qry_phy_status_cck;
+       u64 num_qry_phy_status_ofdm;
+       u16 num_qry_beacon_pkt;
+       u16 num_non_be_pkt;
+       s32 rx_evm[4];
+};
+
 struct rtl_dm {
        /*PHY status for Dynamic Management */
        long entry_min_undec_sm_pwdb;
@@ -1360,29 +1510,84 @@ struct rtl_dm {
        u8 txpower_track_control;
        bool interrupt_migration;
        bool disable_tx_int;
-       char ofdm_index[2];
+       char ofdm_index[MAX_RF_PATH];
+       u8 default_ofdm_index;
+       u8 default_cck_index;
        char cck_index;
-       char delta_power_index;
-       char delta_power_index_last;
-       char power_index_offset;
+       char delta_power_index[MAX_RF_PATH];
+       char delta_power_index_last[MAX_RF_PATH];
+       char power_index_offset[MAX_RF_PATH];
+       char absolute_ofdm_swing_idx[MAX_RF_PATH];
+       char remnant_ofdm_swing_idx[MAX_RF_PATH];
+       char remnant_cck_idx;
+       bool modify_txagc_flag_path_a;
+       bool modify_txagc_flag_path_b;
+
+       bool one_entry_only;
+       struct dm_phy_dbg_info dbginfo;
+
+       /* Dynamic ATC switch */
+       bool atc_status;
+       bool large_cfo_hit;
+       bool is_freeze;
+       int cfo_tail[2];
+       int cfo_ave_pre;
+       int crystal_cap;
+       u8 cfo_threshold;
+       u32 packet_count;
+       u32 packet_count_pre;
+       u8 tx_rate;
 
        /*88e tx power tracking*/
-       u8      swing_idx_ofdm[2];
+       u8      swing_idx_ofdm[MAX_RF_PATH];
        u8      swing_idx_ofdm_cur;
-       u8      swing_idx_ofdm_base;
+       u8      swing_idx_ofdm_base[MAX_RF_PATH];
        bool    swing_flag_ofdm;
        u8      swing_idx_cck;
        u8      swing_idx_cck_cur;
        u8      swing_idx_cck_base;
        bool    swing_flag_cck;
 
+       char    swing_diff_2g;
+       char    swing_diff_5g;
+
+       u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24gcckb_p[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24gcckb_n[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24ga_p[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24ga_n[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24gb_p[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24gb_n[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_5ga_p[BAND_NUM][DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_5ga_n[BAND_NUM][DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_5gb_p[BAND_NUM][DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_5gb_n[BAND_NUM][DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24ga_p_8188e[DEL_SW_IDX_SZ];
+       u8 delta_swing_table_idx_24ga_n_8188e[DEL_SW_IDX_SZ];
+
        /* DMSP */
        bool supp_phymode_switch;
 
+       /* DualMac */
        struct fast_ant_training fat_table;
+
+       u8      resp_tx_path;
+       u8      path_sel;
+       u32     patha_sum;
+       u32     pathb_sum;
+       u32     patha_cnt;
+       u32     pathb_cnt;
+
+       u8 pre_channel;
+       u8 *p_channel;
+       u8 linked_interval;
+
+       u64 last_tx_ok_cnt;
+       u64 last_rx_ok_cnt;
 };
 
-#define        EFUSE_MAX_LOGICAL_SIZE                  256
+#define        EFUSE_MAX_LOGICAL_SIZE                  512
 
 struct rtl_efuse {
        bool autoLoad_ok;
@@ -1422,12 +1627,9 @@ struct rtl_efuse {
        u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. */
        u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX];
        u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
-       u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
-       u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
-       u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX];
-       u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
-       u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER];   /*For HT 40MHZ pwr */
-       u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER];   /*For HT 40MHZ pwr */
+       u8 eeprom_chnlarea_txpwr_cck[MAX_RF_PATH][CHANNEL_GROUP_MAX_2G];
+       u8 eeprom_chnlarea_txpwr_ht40_1s[MAX_RF_PATH][CHANNEL_GROUP_MAX];
+       u8 eprom_chnl_txpwr_ht40_2sdf[MAX_RF_PATH][CHANNEL_GROUP_MAX];
 
        u8 internal_pa_5g[2];   /* pathA / pathB */
        u8 eeprom_c9;
@@ -1438,9 +1640,38 @@ struct rtl_efuse {
        u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER];
        u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER];
 
-       char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */
-       /*For HT<->legacy pwr diff*/
-       u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER];
+       u8 txpwrlevel_cck[MAX_RF_PATH][CHANNEL_MAX_NUMBER_2G];
+       /*For HT 40MHZ pwr */
+       u8 txpwrlevel_ht40_1s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+       /*For HT 40MHZ pwr */
+       u8 txpwrlevel_ht40_2s[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+
+       /*--------------------------------------------------------*
+        * 8192CE\8192SE\8192DE\8723AE use the following 4 arrays,
+        * while other ICs (8188EE\8723BE\8192EE\8812AE...) define
+        * new arrays in the Windows code.
+        * In the Linux code we use the same arrays for all ICs.
+        *
+        * The correspondence between the two sets of arrays is:
+        * txpwr_cckdiff[][] == CCK_24G_Diff[][]
+        * txpwr_ht20diff[][] == BW20_24G_Diff[][]
+        * txpwr_ht40diff[][] == BW40_24G_Diff[][]
+        * txpwr_legacyhtdiff[][] == OFDM_24G_Diff[][]
+        *
+        * Each array is sized to the larger of the two variants.
+        */
+       char txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+       char txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+       char txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+       char txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+
+       u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+       u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M];
+       char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT];
+       char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT];
+       char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT];
+       char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT];
+
        u8 txpwr_safetyflag;                    /* Band edge enable flag */
        u16 eeprom_txpowerdiff;
        u8 legacy_httxpowerdiff;        /* Legacy to HT rate power diff */
@@ -1571,7 +1802,9 @@ struct rtl_stats {
        bool rx_is40Mhzpacket;
        u32 rx_pwdb_all;
        u8 rx_mimo_signalstrength[4];   /*in 0~100 index */
-       s8 rx_mimo_sig_qual[2];
+       s8 rx_mimo_sig_qual[4];
+       u8 rx_pwr[4]; /* per-path's pwdb */
+       u8 rx_snr[4]; /* per-path's SNR */
        bool packet_matchbssid;
        bool is_cck;
        bool is_ht;
@@ -1644,6 +1877,8 @@ struct rtl_tcb_desc {
        bool btx_enable_sw_calc_duration;
 };
 
+struct rtl92c_firmware_header;
+
 struct rtl_hal_ops {
        int (*init_sw_vars) (struct ieee80211_hw *hw);
        void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -1673,9 +1908,17 @@ struct rtl_hal_ops {
        void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
        void (*update_rate_tbl) (struct ieee80211_hw *hw,
                              struct ieee80211_sta *sta, u8 rssi_level);
+       void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc,
+                                   u8 *desc, u8 queue_index,
+                                   struct sk_buff *skb, dma_addr_t addr);
        void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
+       u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw,
+                                        u8 queue_index);
+       void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc,
+                               u8 queue_index);
        void (*fill_tx_desc) (struct ieee80211_hw *hw,
                              struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+                             u8 *pbd_desc_tx,
                              struct ieee80211_tx_info *info,
                              struct ieee80211_sta *sta,
                              struct sk_buff *skb, u8 hw_queue,
@@ -1698,8 +1941,11 @@ struct rtl_hal_ops {
                                    enum rf_pwrstate rfpwr_state);
        void (*led_control) (struct ieee80211_hw *hw,
                             enum led_ctl_mode ledaction);
-       void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
+       void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+                        u8 desc_name, u8 *val);
        u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
+       bool (*is_tx_desc_closed) (struct ieee80211_hw *hw,
+                                  u8 hw_queue, u16 index);
        void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue);
        void (*enable_hw_sec) (struct ieee80211_hw *hw);
        void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
@@ -1738,6 +1984,10 @@ struct rtl_hal_ops {
        void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
        void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
                              u32 cmd_len, u8 *p_cmdbuffer);
+       bool (*get_btc_status) (void);
+       bool (*is_fw_header) (struct rtl92c_firmware_header *hdr);
+       u32 (*rx_command_packet)(struct ieee80211_hw *hw,
+                                struct rtl_stats status, struct sk_buff *skb);
 };
 
 struct rtl_intf_ops {
@@ -1847,6 +2097,8 @@ struct rtl_locks {
 
        /*Easy concurrent*/
        spinlock_t check_sendpkt_lock;
+
+       spinlock_t iqk_lock;
 };
 
 struct rtl_works {
@@ -1915,6 +2167,7 @@ struct ps_t {
        u8 cur_ccasate;
        u8 pre_rfstate;
        u8 cur_rfstate;
+       u8 initialize;
        long rssi_val_min;
 };
 
@@ -1939,6 +2192,7 @@ struct dig_t {
        u8 cursta_cstate;
        u8 presta_cstate;
        u8 curmultista_cstate;
+       u8 stop_dig;
        char back_val;
        char back_range_max;
        char back_range_min;
@@ -1956,6 +2210,7 @@ struct dig_t {
        u8 cur_ccasate;
        u8 large_fa_hit;
        u8 dig_dynamic_min;
+       u8 dig_dynamic_min_1;
        u8 forbidden_igi;
        u8 dig_state;
        u8 dig_highpwrstate;
@@ -1972,6 +2227,7 @@ struct dig_t {
        char backoffval_range_min;
        u8 dig_min_0;
        u8 dig_min_1;
+       u8 bt30_cur_igi;
        bool media_connect_0;
        bool media_connect_1;
 
@@ -1986,6 +2242,96 @@ struct rtl_global_var {
        spinlock_t glb_list_lock;
 };
 
+struct rtl_btc_info {
+       u8 bt_type;
+       u8 btcoexist;
+       u8 ant_num;
+};
+
+struct bt_coexist_info {
+       struct rtl_btc_ops *btc_ops;
+       struct rtl_btc_info btc_info;
+       /* EEPROM BT info. */
+       u8 eeprom_bt_coexist;
+       u8 eeprom_bt_type;
+       u8 eeprom_bt_ant_num;
+       u8 eeprom_bt_ant_isol;
+       u8 eeprom_bt_radio_shared;
+
+       u8 bt_coexistence;
+       u8 bt_ant_num;
+       u8 bt_coexist_type;
+       u8 bt_state;
+       u8 bt_cur_state;        /* 0:on, 1:off */
+       u8 bt_ant_isolation;    /* 0:good, 1:bad */
+       u8 bt_pape_ctrl;        /* 0:SW, 1:SW/HW dynamic */
+       u8 bt_service;
+       u8 bt_radio_shared_type;
+       u8 bt_rfreg_origin_1e;
+       u8 bt_rfreg_origin_1f;
+       u8 bt_rssi_state;
+       u32 ratio_tx;
+       u32 ratio_pri;
+       u32 bt_edca_ul;
+       u32 bt_edca_dl;
+
+       bool init_set;
+       bool bt_busy_traffic;
+       bool bt_traffic_mode_set;
+       bool bt_non_traffic_mode_set;
+
+       bool fw_coexist_all_off;
+       bool sw_coexist_all_off;
+       bool hw_coexist_all_off;
+       u32 cstate;
+       u32 previous_state;
+       u32 cstate_h;
+       u32 previous_state_h;
+
+       u8 bt_pre_rssi_state;
+       u8 bt_pre_rssi_state1;
+
+       u8 reg_bt_iso;
+       u8 reg_bt_sco;
+       bool balance_on;
+       u8 bt_active_zero_cnt;
+       bool cur_bt_disabled;
+       bool pre_bt_disabled;
+
+       u8 bt_profile_case;
+       u8 bt_profile_action;
+       bool bt_busy;
+       bool hold_for_bt_operation;
+       u8 lps_counter;
+};
+
+struct rtl_btc_ops {
+       void (*btc_init_variables) (struct rtl_priv *rtlpriv);
+       void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
+       void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
+       void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
+       void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype);
+       void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action);
+       void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv,
+                                       enum _RT_MEDIA_STATUS mstatus);
+       void (*btc_periodical) (struct rtl_priv *rtlpriv);
+       void (*btc_halt_notify) (void);
+       void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv,
+                                  u8 *tmp_buf, u8 length);
+       bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv);
+       bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv);
+       bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv);
+};
+
+struct proxim {
+       bool proxim_on;
+
+       void *proximity_priv;
+       int (*proxim_rx)(struct ieee80211_hw *hw, struct rtl_stats *status,
+                        struct sk_buff *skb);
+       u8  (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
+};
+
 struct rtl_priv {
        struct ieee80211_hw *hw;
        struct completion firmware_loading_complete;
@@ -2008,6 +2354,7 @@ struct rtl_priv {
 
        struct rtl_ps_ctl psc;
        struct rate_adaptive ra;
+       struct dynamic_primary_cca primarycca;
        struct wireless_stats stats;
        struct rt_link_detect link_info;
        struct false_alarm_statistics falsealm_cnt;
@@ -2048,6 +2395,20 @@ struct rtl_priv {
        bool enter_ps;  /* true when entering PS */
        u8 rate_mask[5];
 
+       /* Intel Proximity: memory must be allocated by the Intel
+        * Proximity module and can only be used while Intel
+        * Proximity mode is active.
+        */
+       struct proxim proximity;
+
+       /*for bt coexist use*/
+       struct bt_coexist_info btcoexist;
+
+       /* Separates the 92ee from the other ICs:
+        * the 92ee uses the new TRX flow.
+        */
+       bool use_new_trx_flow;
+
        /*This must be the last item so
           that it points to the data allocated
           beyond  this structure like:
@@ -2079,6 +2440,15 @@ enum bt_co_type {
        BT_CSR_BC8 = 4,
        BT_RTL8756 = 5,
        BT_RTL8723A = 6,
+       BT_RTL8821A = 7,
+       BT_RTL8723B = 8,
+       BT_RTL8192E = 9,
+       BT_RTL8812A = 11,
+};
+
+enum bt_total_ant_num {
+       ANT_TOTAL_X2 = 0,
+       ANT_TOTAL_X1 = 1
 };
 
 enum bt_cur_state {
@@ -2104,62 +2474,6 @@ enum bt_radio_shared {
        BT_RADIO_INDIVIDUAL = 1,
 };
 
-struct bt_coexist_info {
-
-       /* EEPROM BT info. */
-       u8 eeprom_bt_coexist;
-       u8 eeprom_bt_type;
-       u8 eeprom_bt_ant_num;
-       u8 eeprom_bt_ant_isol;
-       u8 eeprom_bt_radio_shared;
-
-       u8 bt_coexistence;
-       u8 bt_ant_num;
-       u8 bt_coexist_type;
-       u8 bt_state;
-       u8 bt_cur_state;        /* 0:on, 1:off */
-       u8 bt_ant_isolation;    /* 0:good, 1:bad */
-       u8 bt_pape_ctrl;        /* 0:SW, 1:SW/HW dynamic */
-       u8 bt_service;
-       u8 bt_radio_shared_type;
-       u8 bt_rfreg_origin_1e;
-       u8 bt_rfreg_origin_1f;
-       u8 bt_rssi_state;
-       u32 ratio_tx;
-       u32 ratio_pri;
-       u32 bt_edca_ul;
-       u32 bt_edca_dl;
-
-       bool init_set;
-       bool bt_busy_traffic;
-       bool bt_traffic_mode_set;
-       bool bt_non_traffic_mode_set;
-
-       bool fw_coexist_all_off;
-       bool sw_coexist_all_off;
-       bool hw_coexist_all_off;
-       u32 cstate;
-       u32 previous_state;
-       u32 cstate_h;
-       u32 previous_state_h;
-
-       u8 bt_pre_rssi_state;
-       u8 bt_pre_rssi_state1;
-
-       u8 reg_bt_iso;
-       u8 reg_bt_sco;
-       bool balance_on;
-       u8 bt_active_zero_cnt;
-       bool cur_bt_disabled;
-       bool pre_bt_disabled;
-
-       u8 bt_profile_case;
-       u8 bt_profile_action;
-       bool bt_busy;
-       bool hold_for_bt_operation;
-       u8 lps_counter;
-};
-
 
 /****************************************
        mem access macro define start
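The rtl_btc_ops table added above moves BT-coexistence handling behind function
pointers: rtl_priv carries the table inside its bt_coexist_info, so callers are
expected to test for it rather than call chip-specific code directly. A minimal
calling sketch, assuming only the wireless.h definitions in the hunks above (the
scan-type value passed to btc_scan_notify is a placeholder):

static void example_notify_scan_start(struct rtl_priv *rtlpriv)
{
	struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;

	/* btc_ops is only populated when a coexistence module is present,
	 * so every call site has to tolerate a NULL table. */
	if (btc_ops && btc_ops->btc_scan_notify)
		btc_ops->btc_scan_notify(rtlpriv, 1 /* placeholder scan type */);
}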
index 998e95895f9dc8beed8cd3d7c3b06f1e72259b90..a92bd3e89796b181902bc387f49ff8eafaf31d04 100644 (file)
 #include <linux/err.h>
 #include <linux/wl12xx.h>
 
-static struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *wl12xx_platform_data;
 
 int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 {
-       if (platform_data)
+       if (wl12xx_platform_data)
                return -EBUSY;
        if (!data)
                return -EINVAL;
 
-       platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
-       if (!platform_data)
+       wl12xx_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+       if (!wl12xx_platform_data)
                return -ENOMEM;
 
        return 0;
@@ -41,9 +41,34 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
 
 struct wl12xx_platform_data *wl12xx_get_platform_data(void)
 {
-       if (!platform_data)
+       if (!wl12xx_platform_data)
                return ERR_PTR(-ENODEV);
 
-       return platform_data;
+       return wl12xx_platform_data;
 }
 EXPORT_SYMBOL(wl12xx_get_platform_data);
+
+static struct wl1251_platform_data *wl1251_platform_data;
+
+int __init wl1251_set_platform_data(const struct wl1251_platform_data *data)
+{
+       if (wl1251_platform_data)
+               return -EBUSY;
+       if (!data)
+               return -EINVAL;
+
+       wl1251_platform_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
+       if (!wl1251_platform_data)
+               return -ENOMEM;
+
+       return 0;
+}
+
+struct wl1251_platform_data *wl1251_get_platform_data(void)
+{
+       if (!wl1251_platform_data)
+               return ERR_PTR(-ENODEV);
+
+       return wl1251_platform_data;
+}
+EXPORT_SYMBOL(wl1251_get_platform_data);
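The wl1251_set_platform_data()/wl1251_get_platform_data() pair added above
mirrors the existing wl12xx accessors, so board code can hand the driver a power
GPIO, IRQ and EEPROM flag before the SDIO/SPI probe runs. A hypothetical
board-file registration; the GPIO and IRQ numbers are placeholders, while the
field names follow the probe code later in this diff:

static struct wl1251_platform_data board_wl1251_pdata = {
	.power_gpio = 87,	/* placeholder */
	.irq        = 42,	/* placeholder */
	.use_eeprom = false,
};

static int __init board_wl1251_init(void)
{
	/* Returns -EBUSY if data was already registered, -EINVAL on a
	 * NULL argument, -ENOMEM if the kmemdup() copy fails. */
	return wl1251_set_platform_data(&board_wl1251_pdata);
}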
index 223649bcaa5a6764cff615bd9f1716b16810cc00..bf1fa18b9786253159d634ec1f846f70d8551bc8 100644 (file)
@@ -448,7 +448,7 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
         * Note: This bug may be caused by the fw's DTIM handling.
         */
        if (is_zero_ether_addr(wl->bssid))
-               cmd->params.scan_options |= WL1251_SCAN_OPT_PRIORITY_HIGH;
+               cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
        cmd->params.num_channels = n_channels;
        cmd->params.num_probe_requests = n_probes;
        cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
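The one-line fix above matters because scan_options is a little-endian wire
field, so OR-ing in a host-order constant only happened to work on little-endian
CPUs. A sketch of the same rule applied in both directions; the command
structure name is assumed, the field and flag names are taken from the hunk:

static void example_set_scan_priority(struct wl1251_cmd_scan *cmd)
{
	u16 opts = le16_to_cpu(cmd->params.scan_options);	/* work in CPU order */

	opts |= WL1251_SCAN_OPT_PRIORITY_HIGH;
	cmd->params.scan_options = cpu_to_le16(opts);		/* store in LE order */
}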
index e2b3d9c541e830f23fdc3703a6ddc5d48c276de0..b661f896e9fe148ff322879c541c67c755a9a1a4 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/wl12xx.h>
 #include <linux/irq.h>
 #include <linux/pm_runtime.h>
+#include <linux/gpio.h>
 
 #include "wl1251.h"
 
@@ -182,8 +183,9 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
                 * callback in case it wants to do any additional setup,
                 * for example enabling clock buffer for the module.
                 */
-               if (wl->set_power)
-                       wl->set_power(true);
+               if (gpio_is_valid(wl->power_gpio))
+                       gpio_set_value(wl->power_gpio, true);
+
 
                ret = pm_runtime_get_sync(&func->dev);
                if (ret < 0) {
@@ -203,8 +205,8 @@ static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
                if (ret < 0)
                        goto out;
 
-               if (wl->set_power)
-                       wl->set_power(false);
+               if (gpio_is_valid(wl->power_gpio))
+                       gpio_set_value(wl->power_gpio, false);
        }
 
 out:
@@ -227,7 +229,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
        struct wl1251 *wl;
        struct ieee80211_hw *hw;
        struct wl1251_sdio *wl_sdio;
-       const struct wl12xx_platform_data *wl12xx_board_data;
+       const struct wl1251_platform_data *wl1251_board_data;
 
        hw = wl1251_alloc_hw();
        if (IS_ERR(hw))
@@ -254,11 +256,20 @@ static int wl1251_sdio_probe(struct sdio_func *func,
        wl->if_priv = wl_sdio;
        wl->if_ops = &wl1251_sdio_ops;
 
-       wl12xx_board_data = wl12xx_get_platform_data();
-       if (!IS_ERR(wl12xx_board_data)) {
-               wl->set_power = wl12xx_board_data->set_power;
-               wl->irq = wl12xx_board_data->irq;
-               wl->use_eeprom = wl12xx_board_data->use_eeprom;
+       wl1251_board_data = wl1251_get_platform_data();
+       if (!IS_ERR(wl1251_board_data)) {
+               wl->power_gpio = wl1251_board_data->power_gpio;
+               wl->irq = wl1251_board_data->irq;
+               wl->use_eeprom = wl1251_board_data->use_eeprom;
+       }
+
+       if (gpio_is_valid(wl->power_gpio)) {
+               ret = devm_gpio_request(&func->dev, wl->power_gpio,
+                                                               "wl1251 power");
+               if (ret) {
+                       wl1251_error("Failed to request gpio: %d\n", ret);
+                       goto disable;
+               }
        }
 
        if (wl->irq) {
index 1342f81e683d1498bb16d5904d032009244bed58..b06d36d99362703c1b7aa35e6aadd5cab29e0d8f 100644 (file)
 #include <linux/crc7.h>
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
 
 #include "wl1251.h"
 #include "reg.h"
@@ -221,8 +225,8 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
 
 static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
 {
-       if (wl->set_power)
-               wl->set_power(enable);
+       if (gpio_is_valid(wl->power_gpio))
+               gpio_set_value(wl->power_gpio, enable);
 
        return 0;
 }
@@ -238,13 +242,13 @@ static const struct wl1251_if_operations wl1251_spi_ops = {
 
 static int wl1251_spi_probe(struct spi_device *spi)
 {
-       struct wl12xx_platform_data *pdata;
+       struct wl1251_platform_data *pdata = dev_get_platdata(&spi->dev);
+       struct device_node *np = spi->dev.of_node;
        struct ieee80211_hw *hw;
        struct wl1251 *wl;
        int ret;
 
-       pdata = dev_get_platdata(&spi->dev);
-       if (!pdata) {
+       if (!np && !pdata) {
                wl1251_error("no platform data");
                return -ENODEV;
        }
@@ -271,22 +275,42 @@ static int wl1251_spi_probe(struct spi_device *spi)
                goto out_free;
        }
 
-       wl->set_power = pdata->set_power;
-       if (!wl->set_power) {
-               wl1251_error("set power function missing in platform data");
-               return -ENODEV;
+       if (np) {
+               wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
+               wl->power_gpio = of_get_named_gpio(np, "ti,power-gpio", 0);
+       } else if (pdata) {
+               wl->power_gpio = pdata->power_gpio;
+               wl->use_eeprom = pdata->use_eeprom;
+       }
+
+       if (wl->power_gpio == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto out_free;
+       }
+
+       if (gpio_is_valid(wl->power_gpio)) {
+               ret = devm_gpio_request_one(&spi->dev, wl->power_gpio,
+                                       GPIOF_OUT_INIT_LOW, "wl1251 power");
+               if (ret) {
+                       wl1251_error("Failed to request gpio: %d\n", ret);
+                       goto out_free;
+               }
+       } else {
+               wl1251_error("set power gpio missing in platform data");
+               ret = -ENODEV;
+               goto out_free;
        }
 
        wl->irq = spi->irq;
        if (wl->irq < 0) {
                wl1251_error("irq missing in platform data");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_free;
        }
 
-       wl->use_eeprom = pdata->use_eeprom;
-
        irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
-       ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
+       ret = devm_request_irq(&spi->dev, wl->irq, wl1251_irq, 0,
+                                                       DRIVER_NAME, wl);
        if (ret < 0) {
                wl1251_error("request_irq() failed: %d", ret);
                goto out_free;
@@ -294,16 +318,26 @@ static int wl1251_spi_probe(struct spi_device *spi)
 
        irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
 
+       wl->vio = devm_regulator_get(&spi->dev, "vio");
+       if (IS_ERR(wl->vio)) {
+               ret = PTR_ERR(wl->vio);
+               wl1251_error("vio regulator missing: %d", ret);
+               goto out_free;
+       }
+
+       ret = regulator_enable(wl->vio);
+       if (ret)
+               goto out_free;
+
        ret = wl1251_init_ieee80211(wl);
        if (ret)
-               goto out_irq;
+               goto disable_regulator;
 
        return 0;
 
- out_irq:
-       free_irq(wl->irq, wl);
-
- out_free:
+disable_regulator:
+       regulator_disable(wl->vio);
+out_free:
        ieee80211_free_hw(hw);
 
        return ret;
@@ -315,6 +349,7 @@ static int wl1251_spi_remove(struct spi_device *spi)
 
        free_irq(wl->irq, wl);
        wl1251_free_hw(wl);
+       regulator_disable(wl->vio);
 
        return 0;
 }
index 235617a7716d59ff4a699427540c1ecb4a612178..16dae5269175ec7d531e516a9077f1e112a95d57 100644 (file)
@@ -276,10 +276,12 @@ struct wl1251 {
        void *if_priv;
        const struct wl1251_if_operations *if_ops;
 
-       void (*set_power)(bool enable);
+       int power_gpio;
        int irq;
        bool use_eeprom;
 
+       struct regulator *vio;
+
        spinlock_t wl_lock;
 
        enum wl1251_state state;
index be7129ba16ad651524910c1897a33b7d48570007..d50dfac91631ebcbd4f685497ca237d3eb22893f 100644 (file)
@@ -1378,7 +1378,7 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
 
 static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
 {
-       if (wl->fw_status_1->tx_results_counter ==
+       if (wl->fw_status->tx_results_counter ==
            (wl->tx_results_count & 0xff))
                return 0;
 
@@ -1438,6 +1438,37 @@ out:
        return ret;
 }
 
+static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+                                    struct wl_fw_status *fw_status)
+{
+       struct wl12xx_fw_status *int_fw_status = raw_fw_status;
+
+       fw_status->intr = le32_to_cpu(int_fw_status->intr);
+       fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+       fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+       fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+       fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+       fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+       fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+       fw_status->link_fast_bitmap =
+                       le32_to_cpu(int_fw_status->link_fast_bitmap);
+       fw_status->total_released_blks =
+                       le32_to_cpu(int_fw_status->total_released_blks);
+       fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+       fw_status->counters.tx_released_pkts =
+                       int_fw_status->counters.tx_released_pkts;
+       fw_status->counters.tx_lnk_free_pkts =
+                       int_fw_status->counters.tx_lnk_free_pkts;
+       fw_status->counters.tx_voice_released_blks =
+                       int_fw_status->counters.tx_voice_released_blks;
+       fw_status->counters.tx_last_rate =
+                       int_fw_status->counters.tx_last_rate;
+
+       fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+}
+
 static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
                                       struct wl12xx_vif *wlvif)
 {
@@ -1677,6 +1708,7 @@ static struct wlcore_ops wl12xx_ops = {
        .tx_delayed_compl       = wl12xx_tx_delayed_compl,
        .hw_init                = wl12xx_hw_init,
        .init_vif               = NULL,
+       .convert_fw_status      = wl12xx_convert_fw_status,
        .sta_get_ap_rate_mask   = wl12xx_sta_get_ap_rate_mask,
        .get_pg_ver             = wl12xx_get_pg_ver,
        .get_mac                = wl12xx_get_mac,
@@ -1711,22 +1743,53 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
                },
 };
 
+static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
+       {
+               .max = 3,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP) |
+                        BIT(NL80211_IFTYPE_P2P_GO) |
+                        BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+};
+
+static const struct ieee80211_iface_combination
+wl12xx_iface_combinations[] = {
+       {
+               .max_interfaces = 3,
+               .limits = wl12xx_iface_limits,
+               .n_limits = ARRAY_SIZE(wl12xx_iface_limits),
+               .num_different_channels = 1,
+       },
+};
+
 static int wl12xx_setup(struct wl1271 *wl)
 {
        struct wl12xx_priv *priv = wl->priv;
        struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
        struct wl12xx_platform_data *pdata = pdev_data->pdata;
 
+       BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
+       BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
+
        wl->rtable = wl12xx_rtable;
        wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
        wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
-       wl->num_channels = 1;
+       wl->num_links = WL12XX_MAX_LINKS;
+       wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
+       wl->iface_combinations = wl12xx_iface_combinations;
+       wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
        wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
        wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
+       wl->fw_status_len = sizeof(struct wl12xx_fw_status);
        wl->fw_status_priv_len = 0;
        wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+       wl->ofdm_only_ap = true;
        wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
        wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
        wl12xx_conf_init(wl);
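Instead of a bare num_channels, each chip family now publishes a full
ieee80211_iface_combination table plus num_links/max_ap_stations, which wlcore
can forward to cfg80211 unchanged. Roughly as follows; the helper name is made
up, while wiphy->iface_combinations and wiphy->n_iface_combinations are the
standard cfg80211 fields:

static void example_advertise_combinations(struct wl1271 *wl,
					   struct wiphy *wiphy)
{
	/* Hand the chip-specific table straight to cfg80211 so that
	 * mac80211 enforces the STA/AP/P2P limits declared above. */
	wiphy->iface_combinations = wl->iface_combinations;
	wiphy->n_iface_combinations = wl->n_iface_combinations;
}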
index 9e5484a7366704b55ff360ae2c0b86ac12eb2d6d..75c92658bfeaecb3ccd356303fde6dc17efc92c4 100644 (file)
@@ -65,6 +65,9 @@
 
 #define WL12XX_RX_BA_MAX_SESSIONS 3
 
+#define WL12XX_MAX_AP_STATIONS 8
+#define WL12XX_MAX_LINKS 12
+
 struct wl127x_rx_mem_pool_addr {
        u32 addr;
        u32 addr_extra;
@@ -79,4 +82,54 @@ struct wl12xx_priv {
        struct wl127x_rx_mem_pool_addr *rx_mem_addr;
 };
 
+struct wl12xx_fw_packet_counters {
+       /* Cumulative counter of released packets per AC */
+       u8 tx_released_pkts[NUM_TX_QUEUES];
+
+       /* Cumulative counter of freed packets per HLID */
+       u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+       /* Cumulative counter of released Voice memory blocks */
+       u8 tx_voice_released_blks;
+
+       /* Tx rate of the last transmitted packet */
+       u8 tx_last_rate;
+
+       u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl12xx_fw_status {
+       __le32 intr;
+       u8  fw_rx_counter;
+       u8  drv_rx_counter;
+       u8  reserved;
+       u8  tx_results_counter;
+       __le32 rx_pkt_descs[WL12XX_NUM_RX_DESCRIPTORS];
+
+       __le32 fw_localtime;
+
+       /*
+        * A bitmap (where each bit represents a single HLID)
+        * to indicate if the station is in PS mode.
+        */
+       __le32 link_ps_bitmap;
+
+       /*
+        * A bitmap (where each bit represents a single HLID) to indicate
+        * if the station is in Fast mode
+        */
+       __le32 link_fast_bitmap;
+
+       /* Cumulative counter of total released mem blocks since FW-reset */
+       __le32 total_released_blks;
+
+       /* Size (in Memory Blocks) of TX pool */
+       __le32 tx_total;
+
+       struct wl12xx_fw_packet_counters counters;
+
+       __le32 log_start_addr;
+} __packed;
+
 #endif /* __WL12XX_PRIV_H__ */
index ec37b16585df939938fb1a75ef060ef0ea3d73fd..de5b4fa5d1666b9a5af57b8312da487a037ebdf4 100644 (file)
@@ -648,7 +648,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
 };
 
 /* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
 
 static int wl18xx_identify_chip(struct wl1271 *wl)
 {
@@ -1133,6 +1133,39 @@ static int wl18xx_hw_init(struct wl1271 *wl)
        return ret;
 }
 
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+                                    struct wl_fw_status *fw_status)
+{
+       struct wl18xx_fw_status *int_fw_status = raw_fw_status;
+
+       fw_status->intr = le32_to_cpu(int_fw_status->intr);
+       fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+       fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+       fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+       fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+       fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+       fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+       fw_status->link_fast_bitmap =
+                       le32_to_cpu(int_fw_status->link_fast_bitmap);
+       fw_status->total_released_blks =
+                       le32_to_cpu(int_fw_status->total_released_blks);
+       fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+       fw_status->counters.tx_released_pkts =
+                       int_fw_status->counters.tx_released_pkts;
+       fw_status->counters.tx_lnk_free_pkts =
+                       int_fw_status->counters.tx_lnk_free_pkts;
+       fw_status->counters.tx_voice_released_blks =
+                       int_fw_status->counters.tx_voice_released_blks;
+       fw_status->counters.tx_last_rate =
+                       int_fw_status->counters.tx_last_rate;
+
+       fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+       fw_status->priv = &int_fw_status->priv;
+}
+
 static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
                                    struct wl1271_tx_hw_descr *desc,
                                    struct sk_buff *skb)
@@ -1572,7 +1605,7 @@ static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
 {
        u8 thold;
        struct wl18xx_fw_status_priv *status_priv =
-               (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+               (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
        u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
 
        /* suspended links are never high priority */
@@ -1594,7 +1627,7 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
 {
        u8 thold;
        struct wl18xx_fw_status_priv *status_priv =
-               (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+               (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
        u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
 
        if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
@@ -1632,6 +1665,7 @@ static struct wlcore_ops wl18xx_ops = {
        .tx_immediate_compl = wl18xx_tx_immediate_completion,
        .tx_delayed_compl = NULL,
        .hw_init        = wl18xx_hw_init,
+       .convert_fw_status = wl18xx_convert_fw_status,
        .set_tx_desc_csum = wl18xx_set_tx_desc_csum,
        .get_pg_ver     = wl18xx_get_pg_ver,
        .set_rx_csum = wl18xx_set_rx_csum,
@@ -1713,19 +1747,62 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
                },
 };
 
+static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
+       {
+               .max = 3,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP) |
+                        BIT(NL80211_IFTYPE_P2P_GO) |
+                        BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_AP),
+       },
+};
+
+static const struct ieee80211_iface_combination
+wl18xx_iface_combinations[] = {
+       {
+               .max_interfaces = 3,
+               .limits = wl18xx_iface_limits,
+               .n_limits = ARRAY_SIZE(wl18xx_iface_limits),
+               .num_different_channels = 2,
+       },
+       {
+               .max_interfaces = 2,
+               .limits = wl18xx_iface_ap_limits,
+               .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
+               .num_different_channels = 1,
+       }
+};
+
 static int wl18xx_setup(struct wl1271 *wl)
 {
        struct wl18xx_priv *priv = wl->priv;
        int ret;
 
+       BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
+       BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
+
        wl->rtable = wl18xx_rtable;
        wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
        wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
-       wl->num_channels = 2;
+       wl->num_links = WL18XX_MAX_LINKS;
+       wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
+       wl->iface_combinations = wl18xx_iface_combinations;
+       wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
        wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
        wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
        wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
        wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+       wl->fw_status_len = sizeof(struct wl18xx_fw_status);
        wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
        wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
        wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
index 57c694396647f71adaea386cd4465df00a5611a8..be1ebd55ac88e8f7f04be16e6e7bb02436468cf6 100644 (file)
@@ -32,7 +32,7 @@ static
 void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
                             struct ieee80211_tx_rate *rate)
 {
-       u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+       u8 fw_rate = wl->fw_status->counters.tx_last_rate;
 
        if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
                wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -139,7 +139,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
 void wl18xx_tx_immediate_complete(struct wl1271 *wl)
 {
        struct wl18xx_fw_status_priv *status_priv =
-               (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+               (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
        struct wl18xx_priv *priv = wl->priv;
        u8 i;
 
index 9204e07ee432fe483a343f008ee1ce91f2752149..eb7cfe8170104ab5eb273c84f2c49209a0d9e1a3 100644 (file)
 
 /* minimum FW required for driver */
 #define WL18XX_CHIP_VER                8
-#define WL18XX_IFTYPE_VER      5
+#define WL18XX_IFTYPE_VER      8
 #define WL18XX_MAJOR_VER       WLCORE_FW_VER_IGNORE
 #define WL18XX_SUBTYPE_VER     WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER       39
+#define WL18XX_MINOR_VER       13
 
 #define WL18XX_CMD_MAX_SIZE          740
 
 
 #define WL18XX_NUM_MAC_ADDRESSES 3
 
-#define WL18XX_RX_BA_MAX_SESSIONS 5
+#define WL18XX_RX_BA_MAX_SESSIONS 13
+
+#define WL18XX_MAX_AP_STATIONS 10
+#define WL18XX_MAX_LINKS 16
 
 struct wl18xx_priv {
        /* buffer for sending commands to FW */
@@ -109,6 +112,59 @@ struct wl18xx_fw_status_priv {
        u8 padding[3];
 };
 
+struct wl18xx_fw_packet_counters {
+       /* Cumulative counter of released packets per AC */
+       u8 tx_released_pkts[NUM_TX_QUEUES];
+
+       /* Cumulative counter of freed packets per HLID */
+       u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+       /* Cumulative counter of released Voice memory blocks */
+       u8 tx_voice_released_blks;
+
+       /* Tx rate of the last transmitted packet */
+       u8 tx_last_rate;
+
+       u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status {
+       __le32 intr;
+       u8  fw_rx_counter;
+       u8  drv_rx_counter;
+       u8  reserved;
+       u8  tx_results_counter;
+       __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+       __le32 fw_localtime;
+
+       /*
+        * A bitmap (where each bit represents a single HLID)
+        * to indicate if the station is in PS mode.
+        */
+       __le32 link_ps_bitmap;
+
+       /*
+        * A bitmap (where each bit represents a single HLID) to indicate
+        * if the station is in Fast mode
+        */
+       __le32 link_fast_bitmap;
+
+       /* Cumulative counter of total released mem blocks since FW-reset */
+       __le32 total_released_blks;
+
+       /* Size (in Memory Blocks) of TX pool */
+       __le32 tx_total;
+
+       struct wl18xx_fw_packet_counters counters;
+
+       __le32 log_start_addr;
+
+       /* Private status to be used by the lower drivers */
+       struct wl18xx_fw_status_priv priv;
+} __packed;
+
 #define WL18XX_PHY_VERSION_MAX_LEN 20
 
 struct wl18xx_static_data_priv {
index ec83675a244697537afd791613cfec9e87d13caa..b924ceadc02c4a5fcfd96530c986e65d9fc89f2b 100644 (file)
@@ -358,7 +358,8 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        struct acx_beacon_filter_option *beacon_filter = NULL;
        int ret = 0;
 
-       wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
+       wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
+                    enable_filter);
 
        if (enable_filter &&
            wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
@@ -1591,7 +1592,8 @@ out:
        return ret;
 }
 
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif, u8 *addr)
 {
        struct wl1271_acx_inconnection_sta *acx = NULL;
        int ret;
@@ -1603,6 +1605,7 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
                return -ENOMEM;
 
        memcpy(acx->addr, addr, ETH_ALEN);
+       acx->role_id = wlvif->role_id;
 
        ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
                                   acx, sizeof(*acx));
index 6dcfad9b04729a44d60044d41bb1ac0a9ac042cb..954d57ec98f45cc358c0753dab590205322a8178 100644 (file)
@@ -824,7 +824,8 @@ struct wl1271_acx_inconnection_sta {
        struct acx_header header;
 
        u8 addr[ETH_ALEN];
-       u8 padding1[2];
+       u8 role_id;
+       u8 padding;
 } __packed;
 
 /*
@@ -1118,7 +1119,8 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                               bool enable);
 int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+                                   struct wl12xx_vif *wlvif, u8 *addr);
 int wl1271_acx_fm_coex(struct wl1271 *wl);
 int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
 int wl12xx_acx_config_hangover(struct wl1271 *wl);
index 9b2ecf52449faa911f25bea176ed12683fe1836e..40dc30f4faaab2b2be20c724abe54c7807f0871a 100644 (file)
@@ -60,8 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
        u16 status;
        u16 poll_count = 0;
 
-       if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
-                   id != CMD_STOP_FWLOGGER))
+       if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
+                    id != CMD_STOP_FWLOGGER))
                return -EIO;
 
        cmd = buf;
@@ -312,8 +312,8 @@ static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
 int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
 {
        unsigned long flags;
-       u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
-       if (link >= WL12XX_MAX_LINKS)
+       u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
+       if (link >= wl->num_links)
                return -EBUSY;
 
        wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
@@ -324,9 +324,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
        __set_bit(link, wlvif->links_map);
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
-       /* take the last "freed packets" value from the current FW status */
-       wl->links[link].prev_freed_pkts =
-                       wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+       /*
+        * Take the last "freed packets" value from the current FW status.
+        * On recovery we might not have an fw_status yet, in which case
+        * tx_lnk_free_pkts is NULL, so check for it.
+        */
+       if (wl->fw_status->counters.tx_lnk_free_pkts)
+               wl->links[link].prev_freed_pkts =
+                       wl->fw_status->counters.tx_lnk_free_pkts[link];
        wl->links[link].wlvif = wlvif;
 
        /*
@@ -1527,6 +1532,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        cmd->sp_len = sta->max_sp;
        cmd->wmm = sta->wme ? 1 : 0;
        cmd->session_id = wl->session_ids[hlid];
+       cmd->role_id = wlvif->role_id;
 
        for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
                if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1563,7 +1569,8 @@ out:
        return ret;
 }
 
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u8 hlid)
 {
        struct wl12xx_cmd_remove_peer *cmd;
        int ret;
@@ -1581,6 +1588,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
        /* We never send a deauth, mac80211 is in charge of this */
        cmd->reason_opcode = 0;
        cmd->send_deauth_flag = 0;
+       cmd->role_id = wlvif->role_id;
 
        ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
        if (ret < 0) {
index 323d4a856e4ba80d37f52fadf0832ad09066996e..b084830a61cf51adbe40b8461fc28510dd13bd5a 100644 (file)
@@ -88,7 +88,8 @@ int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
 int wl12xx_croc(struct wl1271 *wl, u8 role_id);
 int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                        struct ieee80211_sta *sta, u8 hlid);
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u8 hlid);
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
                                     enum ieee80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
@@ -206,7 +207,7 @@ enum cmd_templ {
 #define WL1271_COMMAND_TIMEOUT     2000
 #define WL1271_CMD_TEMPL_DFLT_SIZE 252
 #define WL1271_CMD_TEMPL_MAX_SIZE  512
-#define WL1271_EVENT_TIMEOUT       1500
+#define WL1271_EVENT_TIMEOUT       5000
 
 struct wl1271_cmd_header {
        __le16 id;
@@ -594,6 +595,8 @@ struct wl12xx_cmd_add_peer {
        u8 sp_len;
        u8 wmm;
        u8 session_id;
+       u8 role_id;
+       u8 padding[3];
 } __packed;
 
 struct wl12xx_cmd_remove_peer {
@@ -602,7 +605,7 @@ struct wl12xx_cmd_remove_peer {
        u8 hlid;
        u8 reason_opcode;
        u8 send_deauth_flag;
-       u8 padding1;
+       u8 role_id;
 } __packed;
 
 /*
index 8d3b34965db3475f64178eeb6463ec2589434c3f..1f9a36031b06d42785352fa3e7666bda3c9f17ce 100644 (file)
@@ -67,7 +67,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                u8 hlid;
                struct wl1271_link *lnk;
                for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
-                                WL12XX_MAX_LINKS) {
+                                wl->num_links) {
                        lnk = &wl->links[hlid];
                        if (!lnk->ba_bitmap)
                                continue;
@@ -172,7 +172,7 @@ static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
        const u8 *addr;
        int h;
 
-       for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+       for_each_set_bit(h, &sta_bitmap, wl->num_links) {
                bool found = false;
                /* find the ap vif connected to this sta */
                wl12xx_for_each_wlvif_ap(wl, wlvif) {
index 51f8d634d32f43274d2eaccb679a6dff732468f3..1555ff9700509186e43996ffb4a7c5bbe106522f 100644 (file)
@@ -106,6 +106,15 @@ wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        return 0;
 }
 
+static inline void
+wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+                           struct wl_fw_status *fw_status)
+{
+       BUG_ON(!wl->ops->convert_fw_status);
+
+       wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
+}
+
 static inline u32
 wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
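The new convert_fw_status op (implemented for wl12xx and wl18xx earlier in this
diff, wrapped by wlcore_hw_convert_fw_status() just above) lets the core keep a
single generic struct wl_fw_status while each chip reads its own wire layout, as
the wlcore_fw_status() rework in the next file shows. A sketch of the intended
flow, assuming some bus read fills a raw buffer of wl->fw_status_len bytes;
read_raw_fw_status() is a stand-in, not a real wlcore function:

static int example_fw_status_update(struct wl1271 *wl, void *raw_buf)
{
	int ret;

	/* raw_buf holds the chip-specific layout, e.g. struct
	 * wl12xx_fw_status or struct wl18xx_fw_status. */
	ret = read_raw_fw_status(wl, raw_buf, wl->fw_status_len);
	if (ret < 0)
		return ret;

	/* Normalize into the chip-independent struct wl_fw_status
	 * that the rest of wlcore consumes. */
	wlcore_hw_convert_fw_status(wl, raw_buf, wl->fw_status);
	return 0;
}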
index 7699f9d07e2636e3528fa7e3c28f260bfaa63e3f..199e941208644864e16479c4eb586afe8df69e86 100644 (file)
@@ -287,8 +287,8 @@ static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
        if (ret < 0)
                return ret;
 
-       /* enable beacon filtering */
-       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+       /* disable beacon filtering until we get the first beacon */
+       ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
        if (ret < 0)
                return ret;
 
@@ -462,7 +462,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
         * If the basic rates contain OFDM rates, use OFDM only
         * rates for unicast TX as well. Else use all supported rates.
         */
-       if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
+       if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
                supported_rates = CONF_TX_OFDM_RATES;
        else
                supported_rates = CONF_TX_ENABLED_RATES;
index 07e3d6a049adf33d40dc586c27ef66c4428b83d1..0305729d09868230b2d75a5d5ce3b546b858ced6 100644 (file)
@@ -60,7 +60,9 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
 {
        int ret;
 
-       if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+       if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+           WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+                    addr != HW_ACCESS_ELP_CTRL_REG)))
                return -EIO;
 
        ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
@@ -76,7 +78,9 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
 {
        int ret;
 
-       if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+       if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+           WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+                    addr != HW_ACCESS_ELP_CTRL_REG)))
                return -EIO;
 
        ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
index b46b3116cc55c1cf129534af6ecf788996ae0cc0..ed88d39134839e34510d83252949dbbb2a964e83 100644 (file)
@@ -345,24 +345,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
         * Start high-level PS if the STA is asleep with enough blocks in FW.
         * Make an exception if this is the only connected link. In this
         * case FW-memory congestion is less of a problem.
-        * Note that a single connected STA means 3 active links, since we must
-        * account for the global and broadcast AP links. The "fw_ps" check
-        * assures us the third link is a STA connected to the AP. Otherwise
-        * the FW would not set the PSM bit.
+        * Note that a single connected STA means 2*ap_count + 1 active links,
+        * since we must account for the global and broadcast AP links
+        * for each AP. The "fw_ps" check assures us the other link is a STA
+        * connected to the AP. Otherwise the FW would not set the PSM bit.
         */
-       else if (wl->active_link_count > 3 && fw_ps &&
+       else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
                 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
 
 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                                           struct wl12xx_vif *wlvif,
-                                          struct wl_fw_status_2 *status)
+                                          struct wl_fw_status *status)
 {
        u32 cur_fw_ps_map;
        u8 hlid;
 
-       cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+       cur_fw_ps_map = status->link_ps_bitmap;
        if (wl->ap_fw_ps_map != cur_fw_ps_map) {
                wl1271_debug(DEBUG_PSM,
                             "link ps prev 0x%x cur 0x%x changed 0x%x",
@@ -372,77 +372,73 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
                wl->ap_fw_ps_map = cur_fw_ps_map;
        }
 
-       for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
+       for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
                wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
                                            wl->links[hlid].allocated_pkts);
 }
 
-static int wlcore_fw_status(struct wl1271 *wl,
-                           struct wl_fw_status_1 *status_1,
-                           struct wl_fw_status_2 *status_2)
+static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 {
        struct wl12xx_vif *wlvif;
        struct timespec ts;
        u32 old_tx_blk_count = wl->tx_blocks_available;
        int avail, freed_blocks;
        int i;
-       size_t status_len;
        int ret;
        struct wl1271_link *lnk;
 
-       status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
-               sizeof(*status_2) + wl->fw_status_priv_len;
-
-       ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
-                                  status_len, false);
+       ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
+                                  wl->raw_fw_status,
+                                  wl->fw_status_len, false);
        if (ret < 0)
                return ret;
 
+       wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+
        wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
                     "drv_rx_counter = %d, tx_results_counter = %d)",
-                    status_1->intr,
-                    status_1->fw_rx_counter,
-                    status_1->drv_rx_counter,
-                    status_1->tx_results_counter);
+                    status->intr,
+                    status->fw_rx_counter,
+                    status->drv_rx_counter,
+                    status->tx_results_counter);
 
        for (i = 0; i < NUM_TX_QUEUES; i++) {
                /* prevent wrap-around in freed-packets counter */
                wl->tx_allocated_pkts[i] -=
-                               (status_2->counters.tx_released_pkts[i] -
+                               (status->counters.tx_released_pkts[i] -
                                wl->tx_pkts_freed[i]) & 0xff;
 
-               wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
+               wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
        }
 
 
-       for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+       for_each_set_bit(i, wl->links_map, wl->num_links) {
                u8 diff;
                lnk = &wl->links[i];
 
                /* prevent wrap-around in freed-packets counter */
-               diff = (status_2->counters.tx_lnk_free_pkts[i] -
+               diff = (status->counters.tx_lnk_free_pkts[i] -
                       lnk->prev_freed_pkts) & 0xff;
 
                if (diff == 0)
                        continue;
 
                lnk->allocated_pkts -= diff;
-               lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+               lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
 
                /* accumulate the prev_freed_pkts counter */
                lnk->total_freed_pkts += diff;
        }
 
        /* prevent wrap-around in total blocks counter */
-       if (likely(wl->tx_blocks_freed <=
-                  le32_to_cpu(status_2->total_released_blks)))
-               freed_blocks = le32_to_cpu(status_2->total_released_blks) -
+       if (likely(wl->tx_blocks_freed <= status->total_released_blks))
+               freed_blocks = status->total_released_blks -
                               wl->tx_blocks_freed;
        else
                freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
-                              le32_to_cpu(status_2->total_released_blks);
+                              status->total_released_blks;
 
-       wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
+       wl->tx_blocks_freed = status->total_released_blks;
 
        wl->tx_allocated_blocks -= freed_blocks;
 
@@ -458,7 +454,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
                        cancel_delayed_work(&wl->tx_watchdog_work);
        }
 
-       avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
+       avail = status->tx_total - wl->tx_allocated_blocks;
 
        /*
         * The FW might change the total number of TX memblocks before
@@ -477,15 +473,15 @@ static int wlcore_fw_status(struct wl1271 *wl,
 
        /* for AP update num of allocated TX blocks per link and ps status */
        wl12xx_for_each_wlvif_ap(wl, wlvif) {
-               wl12xx_irq_update_links_status(wl, wlvif, status_2);
+               wl12xx_irq_update_links_status(wl, wlvif, status);
        }
 
        /* update the host-chipset time offset */
        getnstimeofday(&ts);
        wl->time_offset = (timespec_to_ns(&ts) >> 10) -
-               (s64)le32_to_cpu(status_2->fw_localtime);
+               (s64)(status->fw_localtime);
 
-       wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+       wl->fw_fast_lnk_map = status->link_fast_bitmap;
 
        return 0;
 }
@@ -549,13 +545,13 @@ static int wlcore_irq_locked(struct wl1271 *wl)
                clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
                smp_mb__after_clear_bit();
 
-               ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+               ret = wlcore_fw_status(wl, wl->fw_status);
                if (ret < 0)
                        goto out;
 
                wlcore_hw_tx_immediate_compl(wl);
 
-               intr = le32_to_cpu(wl->fw_status_1->intr);
+               intr = wl->fw_status->intr;
                intr &= WLCORE_ALL_INTR_MASK;
                if (!intr) {
                        done = true;
@@ -584,7 +580,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
                if (likely(intr & WL1271_ACX_INTR_DATA)) {
                        wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 
-                       ret = wlcore_rx(wl, wl->fw_status_1);
+                       ret = wlcore_rx(wl, wl->fw_status);
                        if (ret < 0)
                                goto out;
 
@@ -786,10 +782,11 @@ out:
 
 void wl12xx_queue_recovery_work(struct wl1271 *wl)
 {
-       WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
-
        /* Avoid a recursive recovery */
        if (wl->state == WLCORE_STATE_ON) {
+               WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
+                                 &wl->flags));
+
                wl->state = WLCORE_STATE_RESTARTING;
                set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
                wl1271_ps_elp_wakeup(wl);
@@ -803,7 +800,7 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
        size_t len;
 
        /* Make sure we have enough room */
-       len = min(maxlen, (size_t)(PAGE_SIZE - wl->fwlog_size));
+       len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
 
        /* Fill the FW log file, consumed by the sysfs fwlog entry */
        memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
@@ -843,11 +840,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
                wl12xx_cmd_stop_fwlog(wl);
 
        /* Read the first memory block address */
-       ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+       ret = wlcore_fw_status(wl, wl->fw_status);
        if (ret < 0)
                goto out;
 
-       addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+       addr = wl->fw_status->log_start_addr;
        if (!addr)
                goto out;
 
@@ -990,23 +987,23 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
 
 static int wl1271_setup(struct wl1271 *wl)
 {
-       wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
-                                 sizeof(*wl->fw_status_2) +
-                                 wl->fw_status_priv_len, GFP_KERNEL);
-       if (!wl->fw_status_1)
-               return -ENOMEM;
+       wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
+       if (!wl->raw_fw_status)
+               goto err;
 
-       wl->fw_status_2 = (struct wl_fw_status_2 *)
-                               (((u8 *) wl->fw_status_1) +
-                               WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+       wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
+       if (!wl->fw_status)
+               goto err;
 
        wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
-       if (!wl->tx_res_if) {
-               kfree(wl->fw_status_1);
-               return -ENOMEM;
-       }
+       if (!wl->tx_res_if)
+               goto err;
 
        return 0;
+err:
+       kfree(wl->fw_status);
+       kfree(wl->raw_fw_status);
+       return -ENOMEM;
 }
 
 static int wl12xx_set_power_on(struct wl1271 *wl)
@@ -1767,6 +1764,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
        flush_work(&wl->tx_work);
        flush_delayed_work(&wl->elp_work);
 
+       /*
+        * Cancel the watchdog even if the tx_flush above failed. We will
+        * detect it on resume anyway.
+        */
+       cancel_delayed_work(&wl->tx_watchdog_work);
+
        return 0;
 }
 
@@ -1824,6 +1827,13 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
 
 out:
        wl->wow_enabled = false;
+
+       /*
+        * Set a flag to re-init the watchdog on the first Tx after resume.
+        * That way we avoid possible conditions where Tx-complete interrupts
+        * fail to arrive and we perform a spurious recovery.
+        */
+       set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
        mutex_unlock(&wl->mutex);
 
        return 0;
@@ -1914,6 +1924,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
        memset(wl->links_map, 0, sizeof(wl->links_map));
        memset(wl->roc_map, 0, sizeof(wl->roc_map));
        memset(wl->session_ids, 0, sizeof(wl->session_ids));
+       memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
        wl->active_sta_count = 0;
        wl->active_link_count = 0;
 
@@ -1938,9 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
 
        wl1271_debugfs_reset(wl);
 
-       kfree(wl->fw_status_1);
-       wl->fw_status_1 = NULL;
-       wl->fw_status_2 = NULL;
+       kfree(wl->raw_fw_status);
+       wl->raw_fw_status = NULL;
+       kfree(wl->fw_status);
+       wl->fw_status = NULL;
        kfree(wl->tx_res_if);
        wl->tx_res_if = NULL;
        kfree(wl->target_mem_map);
@@ -2571,10 +2583,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                ieee80211_scan_completed(wl->hw, true);
        }
 
-       if (wl->sched_vif == wlvif) {
-               ieee80211_sched_scan_stopped(wl->hw);
+       if (wl->sched_vif == wlvif)
                wl->sched_vif = NULL;
-       }
 
        if (wl->roc_vif == vif) {
                wl->roc_vif = NULL;
@@ -2931,6 +2941,11 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
                if (ret < 0)
                        return ret;
+
+               /* disable beacon filtering */
+               ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
+               if (ret < 0)
+                       return ret;
        }
 
        if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
@@ -3463,6 +3478,10 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
        wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
                     key_idx);
 
+       /* we don't handle unsetting of the default key */
+       if (key_idx == -1)
+               return;
+
        mutex_lock(&wl->mutex);
 
        if (unlikely(wl->state != WLCORE_STATE_ON)) {
@@ -3649,8 +3668,8 @@ out:
        return ret;
 }
 
-static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
-                                     struct ieee80211_vif *vif)
+static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
@@ -3672,6 +3691,8 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
        wl1271_ps_elp_sleep(wl);
 out:
        mutex_unlock(&wl->mutex);
+
+       return 0;
 }
 
 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
@@ -4298,6 +4319,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                }
        }
 
+       if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
+               /* enable beacon filtering */
+               ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+               if (ret < 0)
+                       goto out;
+       }
+
        ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
        if (ret < 0)
                goto out;
@@ -4651,7 +4679,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
        int ret;
 
 
-       if (wl->active_sta_count >= AP_MAX_STATIONS) {
+       if (wl->active_sta_count >= wl->max_ap_stations) {
                wl1271_warning("could not allocate HLID - too much stations");
                return -EBUSY;
        }
@@ -4754,7 +4782,7 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
        if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
                return -EINVAL;
 
-       ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
+       ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
        if (ret < 0)
                return ret;
 
@@ -5679,28 +5707,6 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
 
 }
 
-static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
-       {
-               .max = 3,
-               .types = BIT(NL80211_IFTYPE_STATION),
-       },
-       {
-               .max = 1,
-               .types = BIT(NL80211_IFTYPE_AP) |
-                        BIT(NL80211_IFTYPE_P2P_GO) |
-                        BIT(NL80211_IFTYPE_P2P_CLIENT),
-       },
-};
-
-static struct ieee80211_iface_combination
-wlcore_iface_combinations[] = {
-       {
-         .max_interfaces = 3,
-         .limits = wlcore_iface_limits,
-         .n_limits = ARRAY_SIZE(wlcore_iface_limits),
-       },
-};
-
 static int wl1271_init_ieee80211(struct wl1271 *wl)
 {
        int i;
@@ -5733,7 +5739,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
                IEEE80211_HW_AP_LINK_PS |
                IEEE80211_HW_AMPDU_AGGREGATION |
                IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
-               IEEE80211_HW_QUEUE_CONTROL;
+               IEEE80211_HW_QUEUE_CONTROL |
+               IEEE80211_HW_CHANCTX_STA_CSA;
 
        wl->hw->wiphy->cipher_suites = cipher_suites;
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5821,10 +5828,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
        /* allowed interface combinations */
-       wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
-       wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
-       wl->hw->wiphy->n_iface_combinations =
-               ARRAY_SIZE(wlcore_iface_combinations);
+       wl->hw->wiphy->iface_combinations = wl->iface_combinations;
+       wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
 
        SET_IEEE80211_DEV(wl->hw, wl->dev);
 
@@ -5844,8 +5849,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
        int i, j, ret;
        unsigned int order;
 
-       BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
-
        hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
        if (!hw) {
                wl1271_error("could not alloc ieee80211_hw");
@@ -5867,8 +5870,12 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
 
        wl->hw = hw;
 
+       /*
+        * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
+        * We don't allocate any additional resources here, so that's fine.
+        */
        for (i = 0; i < NUM_TX_QUEUES; i++)
-               for (j = 0; j < WL12XX_MAX_LINKS; j++)
+               for (j = 0; j < WLCORE_MAX_LINKS; j++)
                        skb_queue_head_init(&wl->links[j].tx_queue[i]);
 
        skb_queue_head_init(&wl->deferred_rx_queue);
@@ -6011,7 +6018,8 @@ int wlcore_free_hw(struct wl1271 *wl)
        kfree(wl->nvs);
        wl->nvs = NULL;
 
-       kfree(wl->fw_status_1);
+       kfree(wl->raw_fw_status);
+       kfree(wl->fw_status);
        kfree(wl->tx_res_if);
        destroy_workqueue(wl->freezable_wq);
 
index 26bfc365ba70bf03e215ddc0be7df0d01c13512b..b52516eed7b20302b618fcbe4e7934651e12646e 100644 (file)
@@ -280,7 +280,11 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 
-       if (test_bit(hlid, &wl->ap_ps_map))
+       if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
+               return;
+
+       if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
+           test_bit(hlid, &wl->ap_ps_map))
                return;
 
        wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
index 6791a1a6afba06702b434b1ae11a196377a0d229..e125974285cc890e0b40671886815c996ef37a11 100644 (file)
@@ -203,9 +203,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
        return is_data;
 }
 
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
 {
-       unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+       unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
        u32 buf_size;
        u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
        u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
@@ -263,12 +263,12 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
                                                  wl->aggr_buf + pkt_offset,
                                                  pkt_len, rx_align,
                                                  &hlid) == 1) {
-                               if (hlid < WL12XX_MAX_LINKS)
+                               if (hlid < wl->num_links)
                                        __set_bit(hlid, active_hlids);
                                else
                                        WARN(1,
-                                            "hlid exceeded WL12XX_MAX_LINKS "
-                                            "(%d)\n", hlid);
+                                            "hlid (%d) exceeded MAX_LINKS\n",
+                                            hlid);
                        }
 
                        wl->rx_counter++;
@@ -302,7 +302,7 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
 {
        int ret;
 
-       if (wl->rx_filter_enabled[index] == enable) {
+       if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
                wl1271_warning("Request to enable an already "
                             "enabled rx filter %d", index);
                return 0;
@@ -316,7 +316,10 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
                return ret;
        }
 
-       wl->rx_filter_enabled[index] = enable;
+       if (enable)
+               __set_bit(index, wl->rx_filter_enabled);
+       else
+               __clear_bit(index, wl->rx_filter_enabled);
 
        return 0;
 }
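
Note: the rx filter state above moves from a bool array to a bitmap, and the early-return test compares !!test_bit(index, wl->rx_filter_enabled) against the requested enable flag; the double negation normalizes test_bit's non-zero result to 0/1 so it can safely be compared with a bool. A small standalone illustration of the idiom, using simplified stand-ins for the kernel bitmap helpers (not the real kernel implementations):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* simplified stand-in: like the kernel helper, this may return any
 * non-zero value when the bit is set, not necessarily 1
 */
static unsigned long test_bit(int nr, const unsigned long *addr)
{
	return addr[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG));
}

static void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long filters[1] = { 0 };
	int enable = 1;

	set_bit(3, filters);
	/* !! collapses the non-zero mask (here 8) to 1 before comparing */
	if (!!test_bit(3, filters) == enable)
		printf("filter 3 already in requested state\n");
	return 0;
}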
@@ -326,7 +329,7 @@ int wl1271_rx_filter_clear_all(struct wl1271 *wl)
        int i, ret = 0;
 
        for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
-               if (!wl->rx_filter_enabled[i])
+               if (!test_bit(i, wl->rx_filter_enabled))
                        continue;
                ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
                if (ret)
index 3363f60fb7da6dfb05ad672b359ac5d037fbb401..a3b1618db27c202db4377c8e529aee4751c8c32a 100644 (file)
@@ -142,7 +142,7 @@ struct wl1271_rx_descriptor {
        u8  reserved;
 } __packed;
 
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
 u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
 int wl1271_rx_filter_enable(struct wl1271 *wl,
                            int index, bool enable,
index b2c018dccf1887bb8dbdd5b85d9a88e0d30ff46e..dbe826dd7c23c49a38a08988cb24c50764d3efaa 100644 (file)
@@ -211,7 +211,7 @@ static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
        u32 chunk_len;
 
        while (len > 0) {
-               chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
+               chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
 
                cmd = &wl->buffer_cmd;
                busy_buf = wl->buffer_busyword;
@@ -285,7 +285,7 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
        cmd = &commands[0];
        i = 0;
        while (len > 0) {
-               chunk_len = min((size_t)WSPI_MAX_CHUNK_SIZE, len);
+               chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
 
                *cmd = 0;
                *cmd |= WSPI_CMD_WRITE;
index 8e583497940d0b0d728ee2151d1cfa0f04def249..24dd288d68098f2e87b12b546c6fe38460501edc 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
        }
 
        /* Seeking is not supported - old logs are not kept. Disregard pos. */
-       len = min(count, (size_t)wl->fwlog_size);
+       len = min_t(size_t, count, wl->fwlog_size);
        wl->fwlog_size -= len;
        memcpy(buffer, wl->fwlog, len);
 
index 87cd707affa240390f6b34ea0d1b28caf23ffb83..40b43115f83590b6a6cb4a38d8283e7b78460187 100644 (file)
@@ -101,7 +101,7 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
         * authentication response. this way it won't get de-authed by FW
         * when transmitting too soon.
         */
-       wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+       wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
 
        /*
         * ROC for 1 second on the AP channel for completing the connection.
@@ -134,12 +134,12 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
         * into high-level PS and clean out its TX queues.
         * Make an exception if this is the only connected link. In this
         * case FW-memory congestion is less of a problem.
-        * Note that a single connected STA means 3 active links, since we must
-        * account for the global and broadcast AP links. The "fw_ps" check
-        * assures us the third link is a STA connected to the AP. Otherwise
-        * the FW would not set the PSM bit.
+        * Note that a single connected STA means 2*ap_count + 1 active links,
+        * since we must account for the global and broadcast AP links
+        * for each AP. The "fw_ps" check assures us the other link is a STA
+        * connected to the AP. Otherwise the FW would not set the PSM bit.
         */
-       if (wl->active_link_count > 3 && fw_ps &&
+       if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
            tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
                wl12xx_ps_link_start(wl, wlvif, hlid, true);
 }
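
Note: the reworked comment above generalizes the old "3 active links" rule: every AP interface always owns a global and a broadcast link, so with ap_count AP interfaces and a single associated station the driver sees 2*ap_count + 1 active links, and high-level PS only kicks in once the count exceeds that. A quick worked check of the threshold (the numbers are illustrative, not taken from the driver):

#include <stdio.h>

int main(void)
{
	int ap_count = 1;		/* one AP interface        */
	int sta_links = 1;		/* one associated station  */
	/* global + broadcast link per AP, plus the station links */
	int active_link_count = 2 * ap_count + sta_links;

	/* high-level PS is only considered once there is *more* than one
	 * station, i.e. active_link_count > 2*ap_count + 1
	 */
	printf("threshold exceeded: %s\n",
	       active_link_count > (2 * ap_count + 1) ? "yes" : "no");
	return 0;
}

For a single station this prints "no", matching the "make an exception if this is the only connected link" rule.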
@@ -234,8 +234,13 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                wl->tx_blocks_available -= total_blocks;
                wl->tx_allocated_blocks += total_blocks;
 
-               /* If the FW was empty before, arm the Tx watchdog */
-               if (wl->tx_allocated_blocks == total_blocks)
+               /*
+                * If the FW was empty before, arm the Tx watchdog. Also do
+                * this on the first Tx after resume, as we always cancel the
+                * watchdog on suspend.
+                */
+               if (wl->tx_allocated_blocks == total_blocks ||
+                   test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
                        wl12xx_rearm_tx_watchdog_locked(wl);
 
                ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -357,6 +362,10 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
            ieee80211_has_protected(frame_control))
                tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
 
+       /* send EAPOL frames as voice */
+       if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
+               tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
+
        desc->tx_attr = cpu_to_le16(tx_attr);
 
        wlcore_hw_set_tx_desc_csum(wl, desc, skb);
@@ -560,11 +569,11 @@ static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
        int i, h, start_hlid;
 
        /* start from the link after the last one */
-       start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
+       start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
 
        /* dequeue according to AC, round robin on each link */
-       for (i = 0; i < WL12XX_MAX_LINKS; i++) {
-               h = (start_hlid + i) % WL12XX_MAX_LINKS;
+       for (i = 0; i < wl->num_links; i++) {
+               h = (start_hlid + i) % wl->num_links;
 
                /* only consider connected stations */
                if (!test_bit(h, wlvif->links_map))
@@ -688,8 +697,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
 
                /* make sure we dequeue the same packet next time */
-               wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
-                                     WL12XX_MAX_LINKS;
+               wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
+                                     wl->num_links;
        }
 
        spin_lock_irqsave(&wl->wl_lock, flags);
@@ -722,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
        timeout = wl->conf.rx_streaming.duration;
        wl12xx_for_each_wlvif_sta(wl, wlvif) {
                bool found = false;
-               for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+               for_each_set_bit(hlid, active_hlids, wl->num_links) {
                        if (test_bit(hlid, wlvif->links_map)) {
                                found  = true;
                                break;
@@ -759,7 +768,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
        struct wl1271_tx_hw_descr *desc;
        u32 buf_offset = 0, last_len = 0;
        bool sent_packets = false;
-       unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+       unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
        int ret = 0;
        int bus_ret = 0;
        u8 hlid;
@@ -1061,7 +1070,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        int i;
 
        /* TX failure */
-       for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+       for_each_set_bit(i, wlvif->links_map, wl->num_links) {
                if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
                    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
                        /* this calls wl12xx_free_link */
@@ -1085,7 +1094,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
 
        /* only reset the queues if something bad happened */
        if (wl1271_tx_total_queue_count(wl) != 0) {
-               for (i = 0; i < WL12XX_MAX_LINKS; i++)
+               for (i = 0; i < wl->num_links; i++)
                        wl1271_tx_reset_link_queues(wl, i);
 
                for (i = 0; i < NUM_TX_QUEUES; i++)
@@ -1178,7 +1187,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
                       WL1271_TX_FLUSH_TIMEOUT / 1000);
 
        /* forcibly flush all Tx buffers on our queues */
-       for (i = 0; i < WL12XX_MAX_LINKS; i++)
+       for (i = 0; i < wl->num_links; i++)
                wl1271_tx_reset_link_queues(wl, i);
 
 out_wake:
index 35489c300da17bfefe3b35fc7515dc58aeeaa196..79cb3ff8b71f576aef0913f0a8828a95e3ede130 100644 (file)
@@ -37,6 +37,7 @@
 #define TX_HW_ATTR_TX_CMPLT_REQ          BIT(12)
 #define TX_HW_ATTR_TX_DUMMY_REQ          BIT(13)
 #define TX_HW_ATTR_HOST_ENCRYPT          BIT(14)
+#define TX_HW_ATTR_EAPOL_FRAME           BIT(15)
 
 #define TX_HW_ATTR_OFST_SAVE_RETRIES     0
 #define TX_HW_ATTR_OFST_HEADER_PAD       1
index 06efc12a39e5175dfde449843ffbb5a7c50b157d..95a54504f0cc3815831d212906002faf6e0e8904 100644 (file)
@@ -73,6 +73,8 @@ struct wlcore_ops {
        void (*tx_immediate_compl)(struct wl1271 *wl);
        int (*hw_init)(struct wl1271 *wl);
        int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+       void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status,
+                                 struct wl_fw_status *fw_status);
        u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
                                    struct wl12xx_vif *wlvif);
        int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
@@ -220,7 +222,7 @@ struct wl1271 {
        int channel;
        u8 system_hlid;
 
-       unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+       unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
        unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
        unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
        unsigned long rate_policies_map[
@@ -228,7 +230,7 @@ struct wl1271 {
        unsigned long klv_templates_map[
                        BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
 
-       u8 session_ids[WL12XX_MAX_LINKS];
+       u8 session_ids[WLCORE_MAX_LINKS];
 
        struct list_head wlvif_list;
 
@@ -346,8 +348,8 @@ struct wl1271 {
        u32 buffer_cmd;
        u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
 
-       struct wl_fw_status_1 *fw_status_1;
-       struct wl_fw_status_2 *fw_status_2;
+       void *raw_fw_status;
+       struct wl_fw_status *fw_status;
        struct wl1271_tx_hw_res_if *tx_res_if;
 
        /* Current chipset configuration */
@@ -376,7 +378,7 @@ struct wl1271 {
         * AP-mode - links indexed by HLID. The global and broadcast links
         * are always active.
         */
-       struct wl1271_link links[WL12XX_MAX_LINKS];
+       struct wl1271_link links[WLCORE_MAX_LINKS];
 
        /* number of currently active links */
        int active_link_count;
@@ -405,6 +407,9 @@ struct wl1271 {
        /* AP-mode - number of currently connected stations */
        int active_sta_count;
 
+       /* Flag determining whether AP should broadcast OFDM-only rates */
+       bool ofdm_only_ap;
+
        /* last wlvif we transmitted from */
        struct wl12xx_vif *last_wlvif;
 
@@ -434,6 +439,10 @@ struct wl1271 {
        u32 num_tx_desc;
        /* number of RX descriptors the HW supports. */
        u32 num_rx_desc;
+       /* number of links the HW supports */
+       u8 num_links;
+       /* max stations a single AP can support */
+       u8 max_ap_stations;
 
        /* translate HW Tx rates to standard rate-indices */
        const u8 **band_rate_to_idx;
@@ -448,10 +457,11 @@ struct wl1271 {
        struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
 
        /* size of the private FW status data */
+       size_t fw_status_len;
        size_t fw_status_priv_len;
 
        /* RX Data filter rule state - enabled/disabled */
-       bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+       unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)];
 
        /* size of the private static data */
        size_t static_data_priv_len;
@@ -476,8 +486,9 @@ struct wl1271 {
 
        struct completion nvs_loading_complete;
 
-       /* number of concurrent channels the HW supports */
-       u32 num_channels;
+       /* interface combinations supported by the hw */
+       const struct ieee80211_iface_combination *iface_combinations;
+       u8 n_iface_combinations;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
index ce7261ce8b59a244837618a07ecf174762f04946..756e890bc5ee2be0a5f68ef3e37de397ccc8dd52 100644 (file)
 #define WL1271_DEFAULT_DTIM_PERIOD 1
 
 #define WL12XX_MAX_ROLES           4
-#define WL12XX_MAX_LINKS           12
 #define WL12XX_INVALID_ROLE_ID     0xff
 #define WL12XX_INVALID_LINK_ID     0xff
 
+/*
+ * Maximum number of links allowed across all supported HW variants.
+ * This is NOT the actual number of links supported by the current HW.
+ */
+#define WLCORE_MAX_LINKS 16
+
 /* the driver supports the 2.4Ghz and 5Ghz bands */
 #define WLCORE_NUM_BANDS           2
 
@@ -118,72 +123,58 @@ struct wl1271_chip {
 
 #define NUM_TX_QUEUES              4
 
-#define AP_MAX_STATIONS            8
-
-struct wl_fw_packet_counters {
-       /* Cumulative counter of released packets per AC */
-       u8 tx_released_pkts[NUM_TX_QUEUES];
-
-       /* Cumulative counter of freed packets per HLID */
-       u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
-
-       /* Cumulative counter of released Voice memory blocks */
-       u8 tx_voice_released_blks;
-
-       /* Tx rate of the last transmitted packet */
-       u8 tx_last_rate;
-
-       u8 padding[2];
-} __packed;
-
-/* FW status registers */
-struct wl_fw_status_1 {
-       __le32 intr;
+struct wl_fw_status {
+       u32 intr;
        u8  fw_rx_counter;
        u8  drv_rx_counter;
-       u8  reserved;
        u8  tx_results_counter;
-       __le32 rx_pkt_descs[0];
-} __packed;
-
-/*
- * Each HW arch has a different number of Rx descriptors.
- * The length of the status depends on it, since it holds an array
- * of descriptors.
- */
-#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
-               (sizeof(struct wl_fw_status_1) + \
-               (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
-               num_rx_desc)
+       __le32 *rx_pkt_descs;
 
-struct wl_fw_status_2 {
-       __le32 fw_localtime;
+       u32 fw_localtime;
 
        /*
         * A bitmap (where each bit represents a single HLID)
         * to indicate if the station is in PS mode.
         */
-       __le32 link_ps_bitmap;
+       u32 link_ps_bitmap;
 
        /*
         * A bitmap (where each bit represents a single HLID) to indicate
         * if the station is in Fast mode
         */
-       __le32 link_fast_bitmap;
+       u32 link_fast_bitmap;
 
        /* Cumulative counter of total released mem blocks since FW-reset */
-       __le32 total_released_blks;
+       u32 total_released_blks;
 
        /* Size (in Memory Blocks) of TX pool */
-       __le32 tx_total;
+       u32 tx_total;
+
+       struct {
+               /*
+                * Cumulative counter of released packets per AC
+                * (length of the array is NUM_TX_QUEUES)
+                */
+               u8 *tx_released_pkts;
 
-       struct wl_fw_packet_counters counters;
+               /*
+                * Cumulative counter of freed packets per HLID
+                * (length of the array is wl->num_links)
+                */
+               u8 *tx_lnk_free_pkts;
+
+               /* Cumulative counter of released Voice memory blocks */
+               u8 tx_voice_released_blks;
 
-       __le32 log_start_addr;
+               /* Tx rate of the last transmitted packet */
+               u8 tx_last_rate;
+       } counters;
+
+       u32 log_start_addr;
 
        /* Private status to be used by the lower drivers */
-       u8 priv[0];
-} __packed;
+       void *priv;
+};
 
 #define WL1271_MAX_CHANNELS 64
 struct wl1271_scan {
@@ -240,6 +231,7 @@ enum wl12xx_flags {
        WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
        WL1271_FLAG_INTENDED_FW_RECOVERY,
        WL1271_FLAG_IO_FAILED,
+       WL1271_FLAG_REINIT_TX_WDOG,
 };
 
 enum wl12xx_vif_flags {
@@ -368,7 +360,7 @@ struct wl12xx_vif {
 
                        /* HLIDs bitmap of associated stations */
                        unsigned long sta_hlid_map[BITS_TO_LONGS(
-                                                       WL12XX_MAX_LINKS)];
+                                                       WLCORE_MAX_LINKS)];
 
                        /* recorded keys - set here before AP startup */
                        struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
@@ -385,7 +377,7 @@ struct wl12xx_vif {
        /* counters of packets per AC, across all links in the vif */
        int tx_queue_count[NUM_TX_QUEUES];
 
-       unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+       unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
 
        u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
        u8 ssid_len;
index d24d4a958c6731a44fe10d9c83ee32567c3fe7ed..d5c371d77ddf238d00e6ce7c3e46e2c6a4a6136b 100644 (file)
@@ -42,8 +42,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
+#include <net/cfg80211.h>
 
 #include <net/iw_handler.h>
 
@@ -1454,7 +1453,8 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
 {
        struct wl3501_card *this = netdev_priv(dev);
 
-       wrqu->freq.m = ieee80211_dsss_chan_to_freq(this->chan) * 100000;
+       wrqu->freq.m = 100000 *
+               ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
        wrqu->freq.e = 1;
        return 0;
 }
index d39c4178c33a61b3291958904c74f03a754f47a3..6f5c793a7855e90f7255633e15baa0608676c957 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/wireless.h>
-#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
 #include <net/iw_handler.h>
 #include <linux/string.h>
 #include <linux/if_arp.h>
@@ -914,11 +914,8 @@ static int zd1201_set_freq(struct net_device *dev,
 
        if (freq->e == 0)
                channel = freq->m;
-       else {
-               channel = ieee80211_freq_to_dsss_chan(freq->m);
-               if (channel < 0)
-                       channel = 0;
-       }
+       else
+               channel = ieee80211_frequency_to_channel(freq->m);
 
        err = zd1201_setconfig16(zd, ZD1201_RID_CNFOWNCHANNEL, channel);
        if (err)
index ae413a2cbee71402b5cc3732940d262237d6c792..89b2d429c4405ff6c54c8f6fca3bd7e8825bc623 100644 (file)
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
 
-/* For the head field in pending_tx_info: it is used to indicate
- * whether this tx info is the head of one or more coalesced requests.
- *
- * When head != INVALID_PENDING_RING_IDX, it means the start of a new
- * tx requests queue and the end of previous queue.
- *
- * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
- *
- * ...|0 I I I|5 I|9 I I I|...
- * -->|<-INUSE----------------
- *
- * After consuming the first slot(s) we have:
- *
- * ...|V V V V|5 I|9 I I I|...
- * -----FREE->|<-INUSE--------
- *
- * where V stands for "valid pending ring index". Any number other
- * than INVALID_PENDING_RING_IDX is OK. These entries are considered
- * free and can contain any number other than
- * INVALID_PENDING_RING_IDX. In practice we use 0.
- *
- * The in use non-INVALID_PENDING_RING_IDX (say 0, 5 and 9 in the
- * above example) number is the index into pending_tx_info and
- * mmap_pages arrays.
- */
 struct pending_tx_info {
-       struct xen_netif_tx_request req; /* coalesced tx request */
-       pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
-                                 * if it is head of one or more tx
-                                 * reqs
-                                 */
+       struct xen_netif_tx_request req; /* tx request */
+       /* Callback data for released SKBs. The callback is always
+        * xenvif_zerocopy_callback, desc contains the pending_idx, which is
+        * also an index in pending_tx_info array. It is initialized in
+        * xenvif_alloc and it never changes.
+        * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
+        * callback_struct in this array of struct pending_tx_info's, then ctx
+        * to the next, or NULL if there is no more slot for this skb.
+        * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
+        * to this field.
+        */
+       struct ubuf_info callback_struct;
 };
 
 #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
@@ -99,7 +81,7 @@ struct xenvif_rx_meta {
 
 #define MAX_BUFFER_OFFSET PAGE_SIZE
 
-#define MAX_PENDING_REQS 256
+#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 
 /* It's possible for an skb to have a maximal number of frags
  * but still be less than MAX_BUFFER_OFFSET in size. Thus the
@@ -108,6 +90,15 @@ struct xenvif_rx_meta {
  */
 #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
 
+#define NETBACK_INVALID_HANDLE -1
+
+/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX to indicate
+ * the maximum number of slots a valid packet can use. Currently this value
+ * is defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be
+ * supported by all backends.
+ */
+#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
+
 struct xenvif {
        /* Unique identifier for this interface. */
        domid_t          domid;
@@ -126,13 +117,26 @@ struct xenvif {
        pending_ring_idx_t pending_cons;
        u16 pending_ring[MAX_PENDING_REQS];
        struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
-
-       /* Coalescing tx requests before copying makes number of grant
-        * copy ops greater or equal to number of slots required. In
-        * worst case a tx request consumes 2 gnttab_copy.
+       grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
+
+       struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
+       struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
+       /* passed to gnttab_[un]map_refs with pages under (un)mapping */
+       struct page *pages_to_map[MAX_PENDING_REQS];
+       struct page *pages_to_unmap[MAX_PENDING_REQS];
+
+       /* This prevents zerocopy callbacks from racing over dealloc_ring */
+       spinlock_t callback_lock;
+       /* This prevents the dealloc thread and the NAPI instance from racing
+        * over response creation and pending_ring in xenvif_idx_release. In
+        * xenvif_tx_err it only protects response creation.
+        */
-       struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
+       spinlock_t response_lock;
+       pending_ring_idx_t dealloc_prod;
+       pending_ring_idx_t dealloc_cons;
+       u16 dealloc_ring[MAX_PENDING_REQS];
+       struct task_struct *dealloc_task;
+       wait_queue_head_t dealloc_wq;
 
        /* Use kthread for guest RX */
        struct task_struct *task;
@@ -144,6 +148,9 @@ struct xenvif {
        struct xen_netif_rx_back_ring rx;
        struct sk_buff_head rx_queue;
        RING_IDX rx_last_skb_slots;
+       bool rx_queue_purge;
+
+       struct timer_list wake_queue;
 
        /* This array is allocated separately as it is large */
        struct gnttab_copy *grant_copy_op;
@@ -175,6 +182,10 @@ struct xenvif {
 
        /* Statistics */
        unsigned long rx_gso_checksum_fixup;
+       unsigned long tx_zerocopy_sent;
+       unsigned long tx_zerocopy_success;
+       unsigned long tx_zerocopy_fail;
+       unsigned long tx_frag_overflow;
 
        /* Miscellaneous private stuff. */
        struct net_device *dev;
@@ -216,9 +227,11 @@ void xenvif_carrier_off(struct xenvif *vif);
 
 int xenvif_tx_action(struct xenvif *vif, int budget);
 
-int xenvif_kthread(void *data);
+int xenvif_kthread_guest_rx(void *data);
 void xenvif_kick_thread(struct xenvif *vif);
 
+int xenvif_dealloc_kthread(void *data);
+
 /* Determine whether the needed number of slots (req) are available,
  * and set req_event if not.
  */
@@ -226,6 +239,24 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
 
 void xenvif_stop_queue(struct xenvif *vif);
 
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
+/* Unmap a pending page and release it back to the guest */
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx);
+
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
+{
+       return MAX_PENDING_REQS -
+               vif->pending_prod + vif->pending_cons;
+}
+
+/* Callback from stack when TX packet can be released */
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
+
 extern bool separate_tx_rx_irq;
 
+extern unsigned int rx_drain_timeout_msecs;
+extern unsigned int rx_drain_timeout_jiffies;
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
index 301cc037fda886f2bc2de48a347a8693eadee178..cdc298e3b747b71a58c625f5e1aefe7dc9d8cb7c 100644 (file)
@@ -38,6 +38,7 @@
 
 #include <xen/events.h>
 #include <asm/xen/hypercall.h>
+#include <xen/balloon.h>
 
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
@@ -113,6 +114,18 @@ static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static void xenvif_wake_queue(unsigned long data)
+{
+       struct xenvif *vif = (struct xenvif *)data;
+
+       if (netif_queue_stopped(vif->dev)) {
+               netdev_err(vif->dev, "draining TX queue\n");
+               vif->rx_queue_purge = true;
+               xenvif_kick_thread(vif);
+               netif_wake_queue(vif->dev);
+       }
+}
+
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
@@ -121,7 +134,9 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
        BUG_ON(skb->dev != dev);
 
        /* Drop the packet if vif is not ready */
-       if (vif->task == NULL || !xenvif_schedulable(vif))
+       if (vif->task == NULL ||
+           vif->dealloc_task == NULL ||
+           !xenvif_schedulable(vif))
                goto drop;
 
        /* At best we'll need one slot for the header and one for each
@@ -139,8 +154,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * then turn off the queue to give the ring a chance to
         * drain.
         */
-       if (!xenvif_rx_ring_slots_available(vif, min_slots_needed))
+       if (!xenvif_rx_ring_slots_available(vif, min_slots_needed)) {
+               vif->wake_queue.function = xenvif_wake_queue;
+               vif->wake_queue.data = (unsigned long)vif;
                xenvif_stop_queue(vif);
+               mod_timer(&vif->wake_queue,
+                       jiffies + rx_drain_timeout_jiffies);
+       }
 
        skb_queue_tail(&vif->rx_queue, skb);
        xenvif_kick_thread(vif);
@@ -233,6 +253,28 @@ static const struct xenvif_stat {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif, rx_gso_checksum_fixup)
        },
+       /* If (sent != success + fail), there are probably packets that were
+        * never freed up properly!
+        */
+       {
+               "tx_zerocopy_sent",
+               offsetof(struct xenvif, tx_zerocopy_sent),
+       },
+       {
+               "tx_zerocopy_success",
+               offsetof(struct xenvif, tx_zerocopy_success),
+       },
+       {
+               "tx_zerocopy_fail",
+               offsetof(struct xenvif, tx_zerocopy_fail)
+       },
+       /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
+        * a guest with the same MAX_SKB_FRAGS.
+        */
+       {
+               "tx_frag_overflow",
+               offsetof(struct xenvif, tx_frag_overflow)
+       },
 };
 
 static int xenvif_get_sset_count(struct net_device *dev, int string_set)
@@ -326,6 +368,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        init_timer(&vif->credit_timeout);
        vif->credit_window_start = get_jiffies_64();
 
+       init_timer(&vif->wake_queue);
+
        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -342,8 +386,26 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; i++)
                vif->pending_ring[i] = i;
-       for (i = 0; i < MAX_PENDING_REQS; i++)
-               vif->mmap_pages[i] = NULL;
+       spin_lock_init(&vif->callback_lock);
+       spin_lock_init(&vif->response_lock);
+       /* If ballooning is disabled, this will consume real memory, so it is
+        * better to enable it. The long-term solution would be to use just a
+        * bunch of valid page descriptors, without depending on ballooning.
+        */
+       err = alloc_xenballooned_pages(MAX_PENDING_REQS,
+                                      vif->mmap_pages,
+                                      false);
+       if (err) {
+               netdev_err(dev, "Could not reserve mmap_pages\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       for (i = 0; i < MAX_PENDING_REQS; i++) {
+               vif->pending_tx_info[i].callback_struct = (struct ubuf_info)
+                       { .callback = xenvif_zerocopy_callback,
+                         .ctx = NULL,
+                         .desc = i };
+               vif->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
+       }
 
        /*
         * Initialise a dummy MAC address. We choose the numerically
@@ -381,12 +443,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
        BUG_ON(vif->tx_irq);
        BUG_ON(vif->task);
+       BUG_ON(vif->dealloc_task);
 
        err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;
 
        init_waitqueue_head(&vif->wq);
+       init_waitqueue_head(&vif->dealloc_wq);
 
        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
@@ -420,8 +484,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
                disable_irq(vif->rx_irq);
        }
 
-       task = kthread_create(xenvif_kthread,
-                             (void *)vif, "%s", vif->dev->name);
+       task = kthread_create(xenvif_kthread_guest_rx,
+                             (void *)vif, "%s-guest-rx", vif->dev->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
                err = PTR_ERR(task);
@@ -430,6 +494,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
 
        vif->task = task;
 
+       task = kthread_create(xenvif_dealloc_kthread,
+                             (void *)vif, "%s-dealloc", vif->dev->name);
+       if (IS_ERR(task)) {
+               pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+               err = PTR_ERR(task);
+               goto err_rx_unbind;
+       }
+
+       vif->dealloc_task = task;
+
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -440,6 +514,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
        rtnl_unlock();
 
        wake_up_process(vif->task);
+       wake_up_process(vif->dealloc_task);
 
        return 0;
 
@@ -473,10 +548,16 @@ void xenvif_disconnect(struct xenvif *vif)
                xenvif_carrier_off(vif);
 
        if (vif->task) {
+               del_timer_sync(&vif->wake_queue);
                kthread_stop(vif->task);
                vif->task = NULL;
        }
 
+       if (vif->dealloc_task) {
+               kthread_stop(vif->dealloc_task);
+               vif->dealloc_task = NULL;
+       }
+
        if (vif->tx_irq) {
                if (vif->tx_irq == vif->rx_irq)
                        unbind_from_irqhandler(vif->tx_irq, vif);
@@ -492,6 +573,43 @@ void xenvif_disconnect(struct xenvif *vif)
 
 void xenvif_free(struct xenvif *vif)
 {
+       int i, unmap_timeout = 0;
+       /* Here we want to avoid timeout messages if an skb can be legitimately
+        * stuck somewhere else. Realistically this could be an another vif's
+        * internal or QDisc queue. That another vif also has this
+        * rx_drain_timeout_msecs timeout, but the timer only ditches the
+        * internal queue. After that, the QDisc queue can put in worst case
+        * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
+        * internal queue, so we need several rounds of such timeouts until we
+        * can be sure that no another vif should have skb's from us. We are
+        * not sending more skb's, so newly stuck packets are not interesting
+        * for us here.
+        */
+       unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
+               DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+
+       for (i = 0; i < MAX_PENDING_REQS; ++i) {
+               if (vif->grant_tx_handle[i] != NETBACK_INVALID_HANDLE) {
+                       unmap_timeout++;
+                       schedule_timeout(msecs_to_jiffies(1000));
+                       if (unmap_timeout > worst_case_skb_lifetime &&
+                           net_ratelimit())
+                               netdev_err(vif->dev,
+                                          "Page still granted! Index: %x\n",
+                                          i);
+                       /* If there are pages that have not been unmapped yet,
+                        * reset the loop to start checking again. We shouldn't
+                        * exit here until the dealloc thread and the NAPI
+                        * instance have released all the pages. If a kernel
+                        * bug causes the skbs to stall somewhere, the
+                        * interface cannot be brought down properly.
+                        */
+                       i = -1;
+               }
+       }
+
+       free_xenballooned_pages(MAX_PENDING_REQS, vif->mmap_pages);
+
        netif_napi_del(&vif->napi);
 
        unregister_netdev(vif->dev);
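
Note: the worst_case_skb_lifetime bound in xenvif_free() above multiplies the per-round drain timeout (rx_drain_timeout_msecs, in seconds) by the number of rounds it can take another vif's QDisc backlog to drain through its internal queue. A rough numeric sketch follows: rx_drain_timeout_msecs = 10000 and XENVIF_QUEUE_LENGTH = 32 come from this patch, while XEN_NETIF_RX_RING_SIZE = 256 and MAX_SKB_FRAGS = 17 are assumed values for a 4 KiB-page build, not stated here.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* values taken from this patch */
	unsigned int rx_drain_timeout_msecs = 10000;
	unsigned int xenvif_queue_length = 32;
	/* assumed for a 4 KiB-page build; not stated in the patch */
	unsigned int rx_ring_size = 256;
	unsigned int max_skb_frags = 17;

	unsigned int worst_case_skb_lifetime =
		(rx_drain_timeout_msecs / 1000) *
		DIV_ROUND_UP(xenvif_queue_length,
			     rx_ring_size / max_skb_frags);

	/* 256/17 = 15 skbs' worth per round, ceil(32/15) = 3 rounds,
	 * 10 s per round -> about 30 s before the "Page still granted"
	 * warning would start under these assumptions
	 */
	printf("worst case skb lifetime: %u s\n", worst_case_skb_lifetime);
	return 0;
}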
index cd0bd95ccc14b5ac4cac7e13aed5c58ba5280e23..ae34f5fc7fbc503f0feda999a0f729e5ab08cdc7 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kthread.h>
 #include <linux/if_vlan.h>
 #include <linux/udp.h>
+#include <linux/highmem.h>
 
 #include <net/tcp.h>
 
 bool separate_tx_rx_irq = 1;
 module_param(separate_tx_rx_irq, bool, 0644);
 
+/* When the guest ring is filled up, the qdisc queues the packets for us, but
+ * we have to time them out, otherwise other guests' packets can get stuck
+ * there.
+ */
+unsigned int rx_drain_timeout_msecs = 10000;
+module_param(rx_drain_timeout_msecs, uint, 0444);
+unsigned int rx_drain_timeout_jiffies;
+
 /*
  * This is the maximum slots a skb can have. If a guest sends a skb
  * which exceeds this limit it is considered malicious.
@@ -62,24 +70,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
 module_param(fatal_skb_slots, uint, 0444);
 
-/*
- * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
- * the maximum slots a valid packet can use. Now this value is defined
- * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
- * all backend.
- */
-#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-
-/*
- * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
- * one or more merged tx requests, otherwise it is the continuation of
- * previous tx request.
- */
-static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
-{
-       return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                               u8 status);
 
@@ -109,6 +99,21 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
+#define callback_param(vif, pending_idx) \
+       (vif->pending_tx_info[pending_idx].callback_struct)
+
+/* Find the containing VIF's structure from a pointer in the pending_tx_info
+ * array.
+ */
+static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+{
+       u16 pending_idx = ubuf->desc;
+       struct pending_tx_info *temp =
+               container_of(ubuf, struct pending_tx_info, callback_struct);
+       return container_of(temp - pending_idx,
+                           struct xenvif,
+                           pending_tx_info[0]);
+}
+
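ubuf_to_vif() above recovers the xenvif from a ubuf_info that lives inside one element of the pending_tx_info array. A minimal userspace sketch of the same two-step container_of arithmetic follows; the struct names and fields (fake_vif, trimmed pending_tx_info/callback_struct) are simplified stand-ins, not the kernel definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct callback_struct {
	unsigned short desc;		/* pending_idx of this slot */
};

struct pending_tx_info {
	int req;			/* placeholder for the tx request */
	struct callback_struct callback_struct;
};

struct fake_vif {
	int domid;
	struct pending_tx_info pending_tx_info[4];
};

static struct fake_vif *ubuf_to_vif(struct callback_struct *ubuf)
{
	unsigned short pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);

	/* Step back pending_idx array elements, then out to the vif. */
	return container_of(temp - pending_idx, struct fake_vif,
			    pending_tx_info[0]);
}

int main(void)
{
	struct fake_vif vif = { .domid = 7 };

	vif.pending_tx_info[2].callback_struct.desc = 2;
	printf("domid via ubuf_to_vif: %d\n",
	       ubuf_to_vif(&vif.pending_tx_info[2].callback_struct)->domid);
	return 0;
}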
 /* This is a minimum size for the linear area to avoid lots of
  * calls to __pskb_pull_tail() as we set up checksum offsets. The
  * value 128 was chosen as it covers all IPv4 and most likely
@@ -131,12 +136,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
        return i & (MAX_PENDING_REQS-1);
 }
 
-static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
-{
-       return MAX_PENDING_REQS -
-               vif->pending_prod + vif->pending_cons;
-}
-
 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
 {
        RING_IDX prod, cons;
@@ -235,7 +234,9 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
-                                unsigned long offset, int *head)
+                                unsigned long offset, int *head,
+                                struct xenvif *foreign_vif,
+                                grant_ref_t foreign_gref)
 {
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
@@ -277,8 +278,15 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                copy_gop->flags = GNTCOPY_dest_gref;
                copy_gop->len = bytes;
 
-               copy_gop->source.domid = DOMID_SELF;
-               copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
+               if (foreign_vif) {
+                       copy_gop->source.domid = foreign_vif->domid;
+                       copy_gop->source.u.ref = foreign_gref;
+                       copy_gop->flags |= GNTCOPY_source_gref;
+               } else {
+                       copy_gop->source.domid = DOMID_SELF;
+                       copy_gop->source.u.gmfn =
+                               virt_to_mfn(page_address(page));
+               }
                copy_gop->source.offset = offset;
 
                copy_gop->dest.domid = vif->domid;
@@ -338,6 +346,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        int head = 1;
        int old_meta_prod;
        int gso_type;
+       struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+       grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
+       struct xenvif *foreign_vif = NULL;
 
        old_meta_prod = npo->meta_prod;
 
@@ -375,6 +386,19 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        npo->copy_off = 0;
        npo->copy_gref = req->gref;
 
+       if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+                (ubuf->callback == &xenvif_zerocopy_callback)) {
+               int i = 0;
+               foreign_vif = ubuf_to_vif(ubuf);
+
+               do {
+                       u16 pending_idx = ubuf->desc;
+                       foreign_grefs[i++] =
+                               foreign_vif->pending_tx_info[pending_idx].req.gref;
+                       ubuf = (struct ubuf_info *) ubuf->ctx;
+               } while (ubuf);
+       }
+
        data = skb->data;
        while (data < skb_tail_pointer(skb)) {
                unsigned int offset = offset_in_page(data);
@@ -384,7 +408,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                        len = skb_tail_pointer(skb) - data;
 
                xenvif_gop_frag_copy(vif, skb, npo,
-                                    virt_to_page(data), len, offset, &head);
+                                    virt_to_page(data), len, offset, &head,
+                                    NULL,
+                                    0);
                data += len;
        }
 
@@ -393,7 +419,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
                                     skb_frag_page(&skb_shinfo(skb)->frags[i]),
                                     skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                     skb_shinfo(skb)->frags[i].page_offset,
-                                    &head);
+                                    &head,
+                                    foreign_vif,
+                                    foreign_grefs[i]);
        }
 
        return npo->meta_prod - old_meta_prod;
@@ -451,10 +479,12 @@ static void xenvif_add_frag_responses(struct xenvif *vif, int status,
        }
 }
 
-struct skb_cb_overlay {
+struct xenvif_rx_cb {
        int meta_slots_used;
 };
 
+#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
+
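XENVIF_RX_CB() above is the usual skb->cb overlay trick: a small private struct is laid over the skb's fixed-size control buffer. A minimal sketch of the pattern, assuming a 48-byte buffer and a fake_skb stand-in for struct sk_buff:

#include <assert.h>
#include <stdalign.h>
#include <stdio.h>

/* Stand-in for struct sk_buff; the 48-byte cb size is an assumption. */
struct fake_skb {
	alignas(8) char cb[48];
};

struct xenvif_rx_cb {
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

int main(void)
{
	struct fake_skb skb;

	/* The overlay must fit inside the control buffer. */
	assert(sizeof(struct xenvif_rx_cb) <= sizeof(skb.cb));

	XENVIF_RX_CB(&skb)->meta_slots_used = 3;
	printf("meta_slots_used = %d\n", XENVIF_RX_CB(&skb)->meta_slots_used);
	return 0;
}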
 void xenvif_kick_thread(struct xenvif *vif)
 {
        wake_up(&vif->wq);
@@ -470,7 +500,6 @@ static void xenvif_rx_action(struct xenvif *vif)
        LIST_HEAD(notify);
        int ret;
        unsigned long offset;
-       struct skb_cb_overlay *sco;
        bool need_to_notify = false;
 
        struct netrx_pending_operations npo = {
@@ -531,10 +560,8 @@ static void xenvif_rx_action(struct xenvif *vif)
                } else
                        vif->rx_last_skb_slots = 0;
 
-               sco = (struct skb_cb_overlay *)skb->cb;
-
                old_req_cons = vif->rx.req_cons;
-               sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
+               XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
                ring_slots_used = vif->rx.req_cons - old_req_cons;
 
                BUG_ON(ring_slots_used > max_slots_needed);
@@ -551,7 +578,6 @@ static void xenvif_rx_action(struct xenvif *vif)
        gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
 
        while ((skb = __skb_dequeue(&rxq)) != NULL) {
-               sco = (struct skb_cb_overlay *)skb->cb;
 
                if ((1 << vif->meta[npo.meta_cons].gso_type) &
                    vif->gso_prefix_mask) {
@@ -562,19 +588,21 @@ static void xenvif_rx_action(struct xenvif *vif)
 
                        resp->offset = vif->meta[npo.meta_cons].gso_size;
                        resp->id = vif->meta[npo.meta_cons].id;
-                       resp->status = sco->meta_slots_used;
+                       resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
 
                        npo.meta_cons++;
-                       sco->meta_slots_used--;
+                       XENVIF_RX_CB(skb)->meta_slots_used--;
                }
 
 
                vif->dev->stats.tx_bytes += skb->len;
                vif->dev->stats.tx_packets++;
 
-               status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
+               status = xenvif_check_gop(vif,
+                                         XENVIF_RX_CB(skb)->meta_slots_used,
+                                         &npo);
 
-               if (sco->meta_slots_used == 1)
+               if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
                        flags = 0;
                else
                        flags = XEN_NETRXF_more_data;
@@ -611,13 +639,13 @@ static void xenvif_rx_action(struct xenvif *vif)
 
                xenvif_add_frag_responses(vif, status,
                                          vif->meta + npo.meta_cons + 1,
-                                         sco->meta_slots_used);
+                                         XENVIF_RX_CB(skb)->meta_slots_used);
 
                RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
 
                need_to_notify |= !!ret;
 
-               npo.meta_cons += sco->meta_slots_used;
+               npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
                dev_kfree_skb(skb);
        }
 
@@ -667,9 +695,12 @@ static void xenvif_tx_err(struct xenvif *vif,
                          struct xen_netif_tx_request *txp, RING_IDX end)
 {
        RING_IDX cons = vif->tx.req_cons;
+       unsigned long flags;
 
        do {
+               spin_lock_irqsave(&vif->response_lock, flags);
                make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
+               spin_unlock_irqrestore(&vif->response_lock, flags);
                if (cons == end)
                        break;
                txp = RING_GET_REQUEST(&vif->tx, cons++);
@@ -781,180 +812,168 @@ static int xenvif_count_requests(struct xenvif *vif,
        return slots;
 }
 
-static struct page *xenvif_alloc_page(struct xenvif *vif,
-                                     u16 pending_idx)
+
+struct xenvif_tx_cb {
+       u16 pending_idx;
+};
+
+#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+
+static inline void xenvif_tx_create_gop(struct xenvif *vif,
+                                       u16 pending_idx,
+                                       struct xen_netif_tx_request *txp,
+                                       struct gnttab_map_grant_ref *gop)
 {
-       struct page *page;
+       vif->pages_to_map[gop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
+       gnttab_set_map_op(gop, idx_to_kaddr(vif, pending_idx),
+                         GNTMAP_host_map | GNTMAP_readonly,
+                         txp->gref, vif->domid);
 
-       page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-       if (!page)
+       memcpy(&vif->pending_tx_info[pending_idx].req, txp,
+              sizeof(*txp));
+}
+
+static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
+{
+       struct sk_buff *skb =
+               alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
+                         GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(skb == NULL))
                return NULL;
-       vif->mmap_pages[pending_idx] = page;
 
-       return page;
+       /* Packets passed to netif_rx() must have some headroom. */
+       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+
+       /* Initialize it here to avoid later surprises */
+       skb_shinfo(skb)->destructor_arg = NULL;
+
+       return skb;
 }
 
-static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
-                                              struct sk_buff *skb,
-                                              struct xen_netif_tx_request *txp,
-                                              struct gnttab_copy *gop)
+static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
+                                                       struct sk_buff *skb,
+                                                       struct xen_netif_tx_request *txp,
+                                                       struct gnttab_map_grant_ref *gop)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
-       u16 pending_idx = *((u16 *)skb->data);
-       u16 head_idx = 0;
-       int slot, start;
-       struct page *page;
-       pending_ring_idx_t index, start_idx = 0;
-       uint16_t dst_offset;
-       unsigned int nr_slots;
-       struct pending_tx_info *first = NULL;
+       u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       int start;
+       pending_ring_idx_t index;
+       unsigned int nr_slots, frag_overflow = 0;
 
        /* At this point shinfo->nr_frags is in fact the number of
         * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
         */
+       if (shinfo->nr_frags > MAX_SKB_FRAGS) {
+               frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
+               BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+               shinfo->nr_frags = MAX_SKB_FRAGS;
+       }
        nr_slots = shinfo->nr_frags;
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
-       /* Coalesce tx requests, at this point the packet passed in
-        * should be <= 64K. Any packets larger than 64K have been
-        * handled in xenvif_count_requests().
-        */
-       for (shinfo->nr_frags = slot = start; slot < nr_slots;
-            shinfo->nr_frags++) {
-               struct pending_tx_info *pending_tx_info =
-                       vif->pending_tx_info;
+       for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
+            shinfo->nr_frags++, txp++, gop++) {
+               index = pending_index(vif->pending_cons++);
+               pending_idx = vif->pending_ring[index];
+               xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+               frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+       }
 
-               page = alloc_page(GFP_ATOMIC|__GFP_COLD);
-               if (!page)
-                       goto err;
-
-               dst_offset = 0;
-               first = NULL;
-               while (dst_offset < PAGE_SIZE && slot < nr_slots) {
-                       gop->flags = GNTCOPY_source_gref;
-
-                       gop->source.u.ref = txp->gref;
-                       gop->source.domid = vif->domid;
-                       gop->source.offset = txp->offset;
-
-                       gop->dest.domid = DOMID_SELF;
-
-                       gop->dest.offset = dst_offset;
-                       gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-
-                       if (dst_offset + txp->size > PAGE_SIZE) {
-                               /* This page can only merge a portion
-                                * of tx request. Do not increment any
-                                * pointer / counter here. The txp
-                                * will be dealt with in future
-                                * rounds, eventually hitting the
-                                * `else` branch.
-                                */
-                               gop->len = PAGE_SIZE - dst_offset;
-                               txp->offset += gop->len;
-                               txp->size -= gop->len;
-                               dst_offset += gop->len; /* quit loop */
-                       } else {
-                               /* This tx request can be merged in the page */
-                               gop->len = txp->size;
-                               dst_offset += gop->len;
-
-                               index = pending_index(vif->pending_cons++);
-
-                               pending_idx = vif->pending_ring[index];
-
-                               memcpy(&pending_tx_info[pending_idx].req, txp,
-                                      sizeof(*txp));
-
-                               /* Poison these fields, corresponding
-                                * fields for head tx req will be set
-                                * to correct values after the loop.
-                                */
-                               vif->mmap_pages[pending_idx] = (void *)(~0UL);
-                               pending_tx_info[pending_idx].head =
-                                       INVALID_PENDING_RING_IDX;
-
-                               if (!first) {
-                                       first = &pending_tx_info[pending_idx];
-                                       start_idx = index;
-                                       head_idx = pending_idx;
-                               }
-
-                               txp++;
-                               slot++;
-                       }
+       if (frag_overflow) {
+               struct sk_buff *nskb = xenvif_alloc_skb(0);
+               if (unlikely(nskb == NULL)) {
+                       if (net_ratelimit())
+                               netdev_err(vif->dev,
+                                          "Can't allocate the frag_list skb.\n");
+                       return NULL;
+               }
 
-                       gop++;
+               shinfo = skb_shinfo(nskb);
+               frags = shinfo->frags;
+
+               for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+                    shinfo->nr_frags++, txp++, gop++) {
+                       index = pending_index(vif->pending_cons++);
+                       pending_idx = vif->pending_ring[index];
+                       xenvif_tx_create_gop(vif, pending_idx, txp, gop);
+                       frag_set_pending_idx(&frags[shinfo->nr_frags],
+                                            pending_idx);
                }
 
-               first->req.offset = 0;
-               first->req.size = dst_offset;
-               first->head = start_idx;
-               vif->mmap_pages[head_idx] = page;
-               frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
+               skb_shinfo(skb)->frag_list = nskb;
        }
 
-       BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
-
        return gop;
-err:
-       /* Unwind, freeing all pages and sending error responses. */
-       while (shinfo->nr_frags-- > start) {
-               xenvif_idx_release(vif,
-                               frag_get_pending_idx(&frags[shinfo->nr_frags]),
-                               XEN_NETIF_RSP_ERROR);
+}
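The frag_overflow handling at the top of xenvif_get_requests() splits a packet that needs more than MAX_SKB_FRAGS slots between the main skb and a second frag_list skb. A small sketch of just that split, with assumed values:

#include <stdio.h>

#define MAX_SKB_FRAGS	17	/* assumed */

int main(void)
{
	unsigned int nr_slots = 20;	/* slots counted from the tx ring */
	unsigned int nr_frags = nr_slots, frag_overflow = 0;

	if (nr_frags > MAX_SKB_FRAGS) {
		frag_overflow = nr_frags - MAX_SKB_FRAGS;
		nr_frags = MAX_SKB_FRAGS;
	}

	printf("main skb frags: %u, frag_list skb frags: %u\n",
	       nr_frags, frag_overflow);
	return 0;
}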
+
+static inline void xenvif_grant_handle_set(struct xenvif *vif,
+                                          u16 pending_idx,
+                                          grant_handle_t handle)
+{
+       if (unlikely(vif->grant_tx_handle[pending_idx] !=
+                    NETBACK_INVALID_HANDLE)) {
+               netdev_err(vif->dev,
+                          "Trying to overwrite active handle! pending_idx: %x\n",
+                          pending_idx);
+               BUG();
        }
-       /* The head too, if necessary. */
-       if (start)
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+       vif->grant_tx_handle[pending_idx] = handle;
+}
 
-       return NULL;
+static inline void xenvif_grant_handle_reset(struct xenvif *vif,
+                                            u16 pending_idx)
+{
+       if (unlikely(vif->grant_tx_handle[pending_idx] ==
+                    NETBACK_INVALID_HANDLE)) {
+               netdev_err(vif->dev,
+                          "Trying to unmap invalid handle! pending_idx: %x\n",
+                          pending_idx);
+               BUG();
+       }
+       vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
 }
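xenvif_grant_handle_set() and xenvif_grant_handle_reset() above guard against double map and double unmap of a pending slot. A userspace sketch of the same invariant, using assert() where the kernel uses BUG(); the ring size and sentinel value are assumptions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS	8		/* assumed */
#define INVALID_HANDLE		((uint32_t)~0U)	/* assumed sentinel */

static uint32_t grant_tx_handle[MAX_PENDING_REQS];

static void grant_handle_set(uint16_t idx, uint32_t handle)
{
	/* A slot must be free before a new handle is stored (no double map). */
	assert(grant_tx_handle[idx] == INVALID_HANDLE);
	grant_tx_handle[idx] = handle;
}

static void grant_handle_reset(uint16_t idx)
{
	/* A slot must hold a handle before it is cleared (no double unmap). */
	assert(grant_tx_handle[idx] != INVALID_HANDLE);
	grant_tx_handle[idx] = INVALID_HANDLE;
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_PENDING_REQS; i++)
		grant_tx_handle[i] = INVALID_HANDLE;

	grant_handle_set(3, 0x42);	/* map */
	grant_handle_reset(3);		/* unmap */
	printf("slot 3 back to invalid: %d\n",
	       grant_tx_handle[3] == INVALID_HANDLE);
	return 0;
}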
 
 static int xenvif_tx_check_gop(struct xenvif *vif,
                               struct sk_buff *skb,
-                              struct gnttab_copy **gopp)
+                              struct gnttab_map_grant_ref **gopp)
 {
-       struct gnttab_copy *gop = *gopp;
-       u16 pending_idx = *((u16 *)skb->data);
+       struct gnttab_map_grant_ref *gop = *gopp;
+       u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        struct pending_tx_info *tx_info;
        int nr_frags = shinfo->nr_frags;
        int i, err, start;
-       u16 peek; /* peek into next tx request */
+       struct sk_buff *first_skb = NULL;
 
        /* Check status of header. */
        err = gop->status;
        if (unlikely(err))
                xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
+       else
+               xenvif_grant_handle_set(vif, pending_idx, gop->handle);
 
        /* Skip first skb fragment if it is on same page as header fragment. */
        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
 
+check_frags:
        for (i = start; i < nr_frags; i++) {
                int j, newerr;
-               pending_ring_idx_t head;
 
                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
                tx_info = &vif->pending_tx_info[pending_idx];
-               head = tx_info->head;
 
                /* Check error status: if okay then remember grant handle. */
-               do {
-                       newerr = (++gop)->status;
-                       if (newerr)
-                               break;
-                       peek = vif->pending_ring[pending_index(++head)];
-               } while (!pending_tx_is_head(vif, peek));
+               newerr = (++gop)->status;
 
                if (likely(!newerr)) {
+                       xenvif_grant_handle_set(vif, pending_idx, gop->handle);
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err))
-                               xenvif_idx_release(vif, pending_idx,
-                                                  XEN_NETIF_RSP_OKAY);
+                               xenvif_idx_unmap(vif, pending_idx);
                        continue;
                }
 
@@ -964,20 +983,45 @@ static int xenvif_tx_check_gop(struct xenvif *vif,
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;
-
                /* First error: invalidate header and preceding fragments. */
-               pending_idx = *((u16 *)skb->data);
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+               if (!first_skb)
+                       pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+               else
+                       pending_idx = XENVIF_TX_CB(first_skb)->pending_idx;
+               xenvif_idx_unmap(vif, pending_idx);
                for (j = start; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_release(vif, pending_idx,
-                                          XEN_NETIF_RSP_OKAY);
+                       xenvif_idx_unmap(vif, pending_idx);
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }
 
+       if (skb_has_frag_list(skb)) {
+               first_skb = skb;
+               skb = shinfo->frag_list;
+               shinfo = skb_shinfo(skb);
+               nr_frags = shinfo->nr_frags;
+               start = 0;
+
+               goto check_frags;
+       }
+
+       /* If there was a mapping error in the frag_list skb, we have to unmap
+        * the first skb's frags.
+        */
+       if (first_skb && err) {
+               int j;
+               shinfo = skb_shinfo(first_skb);
+               pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+               start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+               for (j = start; j < shinfo->nr_frags; j++) {
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
+                       xenvif_idx_unmap(vif, pending_idx);
+               }
+       }
+
        *gopp = gop + 1;
        return err;
 }
@@ -987,6 +1031,10 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i;
+       u16 prev_pending_idx = INVALID_PENDING_IDX;
+
+       if (skb_shinfo(skb)->destructor_arg)
+               prev_pending_idx = XENVIF_TX_CB(skb)->pending_idx;
 
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = shinfo->frags + i;
@@ -996,6 +1044,17 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
 
                pending_idx = frag_get_pending_idx(frag);
 
+               /* If this is not the first frag, chain it to the previous one */
+               if (unlikely(prev_pending_idx == INVALID_PENDING_IDX))
+                       skb_shinfo(skb)->destructor_arg =
+                               &callback_param(vif, pending_idx);
+               else if (likely(pending_idx != prev_pending_idx))
+                       callback_param(vif, prev_pending_idx).ctx =
+                               &callback_param(vif, pending_idx);
+
+               callback_param(vif, pending_idx).ctx = NULL;
+               prev_pending_idx = pending_idx;
+
                txp = &vif->pending_tx_info[pending_idx].req;
                page = virt_to_page(idx_to_kaddr(vif, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
@@ -1003,10 +1062,15 @@ static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
                skb->data_len += txp->size;
                skb->truesize += txp->size;
 
-               /* Take an extra reference to offset xenvif_idx_release */
+               /* Take an extra reference to offset network stack's put_page */
                get_page(vif->mmap_pages[pending_idx]);
-               xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
        }
+       /* FIXME: __skb_fill_page_desc sets this to true because page->pfmemalloc
+        * overlaps with "index", and "mapping" is not set. I think mapping
+        * should be set. If delivered to the local stack, it would drop this
+        * skb in sk_filter unless the socket has the right to use it.
+        */
+       skb->pfmemalloc = false;
 }
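xenvif_fill_frags() above threads every frag's callback_struct into a chain through the ctx pointers, starting from the skb's destructor_arg, so the zerocopy callback can later walk all pending indices belonging to the skb. A minimal sketch of that chaining with simplified stand-in types:

#include <stdio.h>

/* Trimmed stand-in for struct ubuf_info. */
struct ubuf_info {
	unsigned short desc;	/* pending_idx */
	void *ctx;		/* next ubuf in the chain, or NULL */
};

int main(void)
{
	struct ubuf_info cb[4] = {
		{ .desc = 0 }, { .desc = 1 }, { .desc = 2 }, { .desc = 3 },
	};
	unsigned short frag_idx[3] = { 2, 0, 3 };	/* frags of one skb */
	struct ubuf_info *head = NULL, *prev = NULL, *u;
	int i;

	for (i = 0; i < 3; i++) {
		struct ubuf_info *cur = &cb[frag_idx[i]];

		if (!prev)
			head = cur;		/* becomes destructor_arg */
		else
			prev->ctx = cur;	/* chain to the previous one */
		cur->ctx = NULL;
		prev = cur;
	}

	/* The zerocopy callback walks the same chain. */
	for (u = head; u; u = u->ctx)
		printf("pending_idx %u\n", (unsigned int)u->desc);
	return 0;
}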
 
 static int xenvif_get_extras(struct xenvif *vif,
@@ -1126,16 +1190,13 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 
 static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 {
-       struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
+       struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
        struct sk_buff *skb;
        int ret;
 
-       while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-               < MAX_PENDING_REQS) &&
-              (skb_queue_len(&vif->tx_queue) < budget)) {
+       while (skb_queue_len(&vif->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
                struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
-               struct page *page;
                struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
                u16 pending_idx;
                RING_IDX idx;
@@ -1211,8 +1272,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
                        PKT_PROT_LEN : txreq.size;
 
-               skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
-                               GFP_ATOMIC | __GFP_NOWARN);
+               skb = xenvif_alloc_skb(data_len);
                if (unlikely(skb == NULL)) {
                        netdev_dbg(vif->dev,
                                   "Can't allocate a skb in start_xmit.\n");
@@ -1220,9 +1280,6 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                        break;
                }
 
-               /* Packets passed to netif_rx() must have some headroom. */
-               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1234,31 +1291,11 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
                        }
                }
 
-               /* XXX could copy straight to head */
-               page = xenvif_alloc_page(vif, pending_idx);
-               if (!page) {
-                       kfree_skb(skb);
-                       xenvif_tx_err(vif, &txreq, idx);
-                       break;
-               }
-
-               gop->source.u.ref = txreq.gref;
-               gop->source.domid = vif->domid;
-               gop->source.offset = txreq.offset;
-
-               gop->dest.u.gmfn = virt_to_mfn(page_address(page));
-               gop->dest.domid = DOMID_SELF;
-               gop->dest.offset = txreq.offset;
-
-               gop->len = txreq.size;
-               gop->flags = GNTCOPY_source_gref;
+               xenvif_tx_create_gop(vif, pending_idx, &txreq, gop);
 
                gop++;
 
-               memcpy(&vif->pending_tx_info[pending_idx].req,
-                      &txreq, sizeof(txreq));
-               vif->pending_tx_info[pending_idx].head = index;
-               *((u16 *)skb->data) = pending_idx;
+               XENVIF_TX_CB(skb)->pending_idx = pending_idx;
 
                __skb_put(skb, data_len);
 
@@ -1286,17 +1323,82 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 
                vif->tx.req_cons = idx;
 
-               if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
+               if ((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops))
                        break;
        }
 
-       return gop - vif->tx_copy_ops;
+       return gop - vif->tx_map_ops;
 }
 
+/* Consolidate an skb with a frag_list into a brand new one with local pages
+ * on the frags. Returns 0, or -ENOMEM if it can't allocate new pages.
+ */
+static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
+{
+       unsigned int offset = skb_headlen(skb);
+       skb_frag_t frags[MAX_SKB_FRAGS];
+       int i;
+       struct ubuf_info *uarg;
+       struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+
+       vif->tx_zerocopy_sent += 2;
+       vif->tx_frag_overflow++;
+
+       xenvif_fill_frags(vif, nskb);
+       /* Subtract the frags' size; we will correct it later */
+       skb->truesize -= skb->data_len;
+       skb->len += nskb->len;
+       skb->data_len += nskb->len;
+
+       /* create a brand new frags array and coalesce there */
+       for (i = 0; offset < skb->len; i++) {
+               struct page *page;
+               unsigned int len;
+
+               BUG_ON(i >= MAX_SKB_FRAGS);
+               page = alloc_page(GFP_ATOMIC|__GFP_COLD);
+               if (!page) {
+                       int j;
+                       skb->truesize += skb->data_len;
+                       for (j = 0; j < i; j++)
+                               put_page(frags[j].page.p);
+                       return -ENOMEM;
+               }
+
+               if (offset + PAGE_SIZE < skb->len)
+                       len = PAGE_SIZE;
+               else
+                       len = skb->len - offset;
+               if (skb_copy_bits(skb, offset, page_address(page), len))
+                       BUG();
+
+               offset += len;
+               frags[i].page.p = page;
+               frags[i].page_offset = 0;
+               skb_frag_size_set(&frags[i], len);
+       }
+       /* swap out with old one */
+       memcpy(skb_shinfo(skb)->frags,
+              frags,
+              i * sizeof(skb_frag_t));
+       skb_shinfo(skb)->nr_frags = i;
+       skb->truesize += i * PAGE_SIZE;
+
+       /* remove traces of mapped pages and frag_list */
+       skb_frag_list_init(skb);
+       uarg = skb_shinfo(skb)->destructor_arg;
+       uarg->callback(uarg, true);
+       skb_shinfo(skb)->destructor_arg = NULL;
+
+       skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+       kfree_skb(nskb);
+
+       return 0;
+}
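The copy loop in xenvif_handle_frag_list() above carves the skb payload past the linear head into at most PAGE_SIZE pieces, one freshly allocated page per new frag. A sketch of just the offset/length arithmetic; PAGE_SIZE and the example lengths are assumptions.

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed */
#define MAX_SKB_FRAGS	17	/* assumed */

int main(void)
{
	unsigned int skb_len = 10000;	/* head + all frag data, example */
	unsigned int offset = 256;	/* skb_headlen(): stays linear */
	int i;

	for (i = 0; offset < skb_len; i++) {
		unsigned int len;

		if (i >= MAX_SKB_FRAGS) {
			printf("would overflow the frags array\n");
			return 1;
		}
		if (offset + PAGE_SIZE < skb_len)
			len = PAGE_SIZE;
		else
			len = skb_len - offset;

		printf("frag %d: offset %u len %u\n", i, offset, len);
		offset += len;
	}
	printf("coalesced into %d frags\n", i);
	return 0;
}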
 
 static int xenvif_tx_submit(struct xenvif *vif)
 {
-       struct gnttab_copy *gop = vif->tx_copy_ops;
+       struct gnttab_map_grant_ref *gop = vif->tx_map_ops;
        struct sk_buff *skb;
        int work_done = 0;
 
@@ -1305,7 +1407,7 @@ static int xenvif_tx_submit(struct xenvif *vif)
                u16 pending_idx;
                unsigned data_len;
 
-               pending_idx = *((u16 *)skb->data);
+               pending_idx = XENVIF_TX_CB(skb)->pending_idx;
                txp = &vif->pending_tx_info[pending_idx].req;
 
                /* Check the remap error code. */
@@ -1320,14 +1422,16 @@ static int xenvif_tx_submit(struct xenvif *vif)
                memcpy(skb->data,
                       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
                       data_len);
+               callback_param(vif, pending_idx).ctx = NULL;
                if (data_len < txp->size) {
                        /* Append the packet payload as a fragment. */
                        txp->offset += data_len;
                        txp->size -= data_len;
+                       skb_shinfo(skb)->destructor_arg =
+                               &callback_param(vif, pending_idx);
                } else {
                        /* Schedule a response immediately. */
-                       xenvif_idx_release(vif, pending_idx,
-                                          XEN_NETIF_RSP_OKAY);
+                       xenvif_idx_unmap(vif, pending_idx);
                }
 
                if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1337,6 +1441,17 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
                xenvif_fill_frags(vif, skb);
 
+               if (unlikely(skb_has_frag_list(skb))) {
+                       if (xenvif_handle_frag_list(vif, skb)) {
+                               if (net_ratelimit())
+                                       netdev_err(vif->dev,
+                                                  "Not enough memory to consolidate frag_list!\n");
+                               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+                               kfree_skb(skb);
+                               continue;
+                       }
+               }
+
                if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
@@ -1349,6 +1464,9 @@ static int xenvif_tx_submit(struct xenvif *vif)
                if (checksum_setup(vif, skb)) {
                        netdev_dbg(vif->dev,
                                   "Can't setup checksum in net_tx_action\n");
+                       /* We have to set this flag to trigger the callback */
+                       if (skb_shinfo(skb)->destructor_arg)
+                               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                        kfree_skb(skb);
                        continue;
                }
@@ -1374,17 +1492,126 @@ static int xenvif_tx_submit(struct xenvif *vif)
 
                work_done++;
 
+               /* Set this flag right before netif_receive_skb, otherwise
+                * someone might think this packet already left netback, and
+                * do a skb_copy_ubufs while we are still in control of the
+                * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
+                */
+               if (skb_shinfo(skb)->destructor_arg) {
+                       skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+                       vif->tx_zerocopy_sent++;
+               }
+
                netif_receive_skb(skb);
        }
 
        return work_done;
 }
 
+void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
+{
+       unsigned long flags;
+       pending_ring_idx_t index;
+       struct xenvif *vif = ubuf_to_vif(ubuf);
+
+       /* This is the only place where we grab this lock, to protect callbacks
+        * from each other.
+        */
+       spin_lock_irqsave(&vif->callback_lock, flags);
+       do {
+               u16 pending_idx = ubuf->desc;
+               ubuf = (struct ubuf_info *) ubuf->ctx;
+               BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
+                       MAX_PENDING_REQS);
+               index = pending_index(vif->dealloc_prod);
+               vif->dealloc_ring[index] = pending_idx;
+               /* Sync with xenvif_tx_dealloc_action:
+                * insert idx then incr producer.
+                */
+               smp_wmb();
+               vif->dealloc_prod++;
+       } while (ubuf);
+       wake_up(&vif->dealloc_wq);
+       spin_unlock_irqrestore(&vif->callback_lock, flags);
+
+       if (likely(zerocopy_success))
+               vif->tx_zerocopy_success++;
+       else
+               vif->tx_zerocopy_fail++;
+}
+
+static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
+{
+       struct gnttab_unmap_grant_ref *gop;
+       pending_ring_idx_t dc, dp;
+       u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
+       unsigned int i = 0;
+
+       dc = vif->dealloc_cons;
+       gop = vif->tx_unmap_ops;
+
+       /* Free up any grants we have finished using */
+       do {
+               dp = vif->dealloc_prod;
+
+               /* Ensure we see all indices enqueued by all
+                * xenvif_zerocopy_callback().
+                */
+               smp_rmb();
+
+               while (dc != dp) {
+                       BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
+                       pending_idx =
+                               vif->dealloc_ring[pending_index(dc++)];
+
+                       pending_idx_release[gop-vif->tx_unmap_ops] =
+                               pending_idx;
+                       vif->pages_to_unmap[gop-vif->tx_unmap_ops] =
+                               vif->mmap_pages[pending_idx];
+                       gnttab_set_unmap_op(gop,
+                                           idx_to_kaddr(vif, pending_idx),
+                                           GNTMAP_host_map,
+                                           vif->grant_tx_handle[pending_idx]);
+                       xenvif_grant_handle_reset(vif, pending_idx);
+                       ++gop;
+               }
+
+       } while (dp != vif->dealloc_prod);
+
+       vif->dealloc_cons = dc;
+
+       if (gop - vif->tx_unmap_ops > 0) {
+               int ret;
+               ret = gnttab_unmap_refs(vif->tx_unmap_ops,
+                                       NULL,
+                                       vif->pages_to_unmap,
+                                       gop - vif->tx_unmap_ops);
+               if (ret) {
+                       netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
+                                  gop - vif->tx_unmap_ops, ret);
+                       for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
+                               if (gop[i].status != GNTST_okay)
+                                       netdev_err(vif->dev,
+                                                  " host_addr: %llx handle: %x status: %d\n",
+                                                  gop[i].host_addr,
+                                                  gop[i].handle,
+                                                  gop[i].status);
+                       }
+                       BUG();
+               }
+       }
+
+       for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
+               xenvif_idx_release(vif, pending_idx_release[i],
+                                  XEN_NETIF_RSP_OKAY);
+}
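xenvif_zerocopy_callback() and xenvif_tx_dealloc_action() above communicate through a ring of pending indices with free-running producer/consumer counters masked by a power-of-two size. The sketch below is a single-threaded userspace model of that protocol; the kernel additionally pairs the counter updates with smp_wmb()/smp_rmb() and the callback lock, which the sketch omits, and the ring size here is assumed.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS 8	/* assumed; must be a power of two */

static uint16_t dealloc_ring[MAX_PENDING_REQS];
static unsigned int dealloc_prod, dealloc_cons;

static unsigned int pending_index(unsigned int i)
{
	return i & (MAX_PENDING_REQS - 1);
}

/* Producer side: what xenvif_zerocopy_callback() does per pending_idx. */
static void zerocopy_callback(uint16_t pending_idx)
{
	assert(dealloc_prod - dealloc_cons < MAX_PENDING_REQS);
	dealloc_ring[pending_index(dealloc_prod)] = pending_idx;
	dealloc_prod++;		/* kernel: smp_wmb() before this publish */
}

/* Consumer side: what the dealloc kthread drains. */
static void dealloc_action(void)
{
	while (dealloc_cons != dealloc_prod) {
		uint16_t idx = dealloc_ring[pending_index(dealloc_cons++)];

		printf("unmap pending_idx %u\n", idx);
	}
}

int main(void)
{
	zerocopy_callback(5);
	zerocopy_callback(6);
	dealloc_action();
	return 0;
}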
+
+
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif *vif, int budget)
 {
        unsigned nr_gops;
-       int work_done;
+       int work_done, ret;
 
        if (unlikely(!tx_work_todo(vif)))
                return 0;
@@ -1394,7 +1621,11 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
        if (nr_gops == 0)
                return 0;
 
-       gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
+       ret = gnttab_map_refs(vif->tx_map_ops,
+                             NULL,
+                             vif->pages_to_map,
+                             nr_gops);
+       BUG_ON(ret);
 
        work_done = xenvif_tx_submit(vif);
 
@@ -1405,45 +1636,18 @@ static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                               u8 status)
 {
        struct pending_tx_info *pending_tx_info;
-       pending_ring_idx_t head;
-       u16 peek; /* peek into next tx request */
-
-       BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
-
-       /* Already complete? */
-       if (vif->mmap_pages[pending_idx] == NULL)
-               return;
+       pending_ring_idx_t index;
+       unsigned long flags;
 
        pending_tx_info = &vif->pending_tx_info[pending_idx];
-
-       head = pending_tx_info->head;
-
-       BUG_ON(!pending_tx_is_head(vif, head));
-       BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
-
-       do {
-               pending_ring_idx_t index;
-               pending_ring_idx_t idx = pending_index(head);
-               u16 info_idx = vif->pending_ring[idx];
-
-               pending_tx_info = &vif->pending_tx_info[info_idx];
-               make_tx_response(vif, &pending_tx_info->req, status);
-
-               /* Setting any number other than
-                * INVALID_PENDING_RING_IDX indicates this slot is
-                * starting a new packet / ending a previous packet.
-                */
-               pending_tx_info->head = 0;
-
-               index = pending_index(vif->pending_prod++);
-               vif->pending_ring[index] = vif->pending_ring[info_idx];
-
-               peek = vif->pending_ring[pending_index(++head)];
-
-       } while (!pending_tx_is_head(vif, peek));
-
-       put_page(vif->mmap_pages[pending_idx]);
-       vif->mmap_pages[pending_idx] = NULL;
+       spin_lock_irqsave(&vif->response_lock, flags);
+       make_tx_response(vif, &pending_tx_info->req, status);
+       index = pending_index(vif->pending_prod);
+       vif->pending_ring[index] = pending_idx;
+       /* TX shouldn't use the index before we give it back here */
+       mb();
+       vif->pending_prod++;
+       spin_unlock_irqrestore(&vif->response_lock, flags);
 }
 
 
@@ -1491,23 +1695,54 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
        return resp;
 }
 
+void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
+{
+       int ret;
+       struct gnttab_unmap_grant_ref tx_unmap_op;
+
+       gnttab_set_unmap_op(&tx_unmap_op,
+                           idx_to_kaddr(vif, pending_idx),
+                           GNTMAP_host_map,
+                           vif->grant_tx_handle[pending_idx]);
+       xenvif_grant_handle_reset(vif, pending_idx);
+
+       ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
+                               &vif->mmap_pages[pending_idx], 1);
+       if (ret) {
+               netdev_err(vif->dev,
+                          "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
+                          ret,
+                          pending_idx,
+                          tx_unmap_op.host_addr,
+                          tx_unmap_op.handle,
+                          tx_unmap_op.status);
+               BUG();
+       }
+
+       xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
+}
+
 static inline int rx_work_todo(struct xenvif *vif)
 {
-       return !skb_queue_empty(&vif->rx_queue) &&
-              xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
+       return (!skb_queue_empty(&vif->rx_queue) &&
+              xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
+              vif->rx_queue_purge;
 }
 
 static inline int tx_work_todo(struct xenvif *vif)
 {
 
-       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-           (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-            < MAX_PENDING_REQS))
+       if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
                return 1;
 
        return 0;
 }
 
+static inline bool tx_dealloc_work_todo(struct xenvif *vif)
+{
+       return vif->dealloc_cons != vif->dealloc_prod;
+}
+
 void xenvif_unmap_frontend_rings(struct xenvif *vif)
 {
        if (vif->tx.sring)
@@ -1565,7 +1800,7 @@ static void xenvif_start_queue(struct xenvif *vif)
                netif_wake_queue(vif->dev);
 }
 
-int xenvif_kthread(void *data)
+int xenvif_kthread_guest_rx(void *data)
 {
        struct xenvif *vif = data;
        struct sk_buff *skb;
@@ -1577,12 +1812,19 @@ int xenvif_kthread(void *data)
                if (kthread_should_stop())
                        break;
 
+               if (vif->rx_queue_purge) {
+                       skb_queue_purge(&vif->rx_queue);
+                       vif->rx_queue_purge = false;
+               }
+
                if (!skb_queue_empty(&vif->rx_queue))
                        xenvif_rx_action(vif);
 
                if (skb_queue_empty(&vif->rx_queue) &&
-                   netif_queue_stopped(vif->dev))
+                   netif_queue_stopped(vif->dev)) {
+                       del_timer_sync(&vif->wake_queue);
                        xenvif_start_queue(vif);
+               }
 
                cond_resched();
        }
@@ -1594,6 +1836,28 @@ int xenvif_kthread(void *data)
        return 0;
 }
 
+int xenvif_dealloc_kthread(void *data)
+{
+       struct xenvif *vif = data;
+
+       while (!kthread_should_stop()) {
+               wait_event_interruptible(vif->dealloc_wq,
+                                        tx_dealloc_work_todo(vif) ||
+                                        kthread_should_stop());
+               if (kthread_should_stop())
+                       break;
+
+               xenvif_tx_dealloc_action(vif);
+               cond_resched();
+       }
+
+       /* Unmap anything remaining */
+       if (tx_dealloc_work_todo(vif))
+               xenvif_tx_dealloc_action(vif);
+
+       return 0;
+}
+
 static int __init netback_init(void)
 {
        int rc = 0;
@@ -1611,6 +1875,8 @@ static int __init netback_init(void)
        if (rc)
                goto failed_init;
 
+       rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
+
        return 0;
 
 failed_init:
index e30d80033cbc4bb4f93d4e84897ff6fb29417a91..057b05700f8baa081014d80b1142e7b2a7808489 100644 (file)
@@ -658,7 +658,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
  drop:
        dev->stats.tx_dropped++;
-       dev_kfree_skb(skb);
+       dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
 }
 
@@ -1060,13 +1060,13 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&stats->syncp);
 
                        rx_packets = stats->rx_packets;
                        tx_packets = stats->tx_packets;
                        rx_bytes = stats->rx_bytes;
                        tx_bytes = stats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
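The conversion above moves the reader loop to u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq(); the underlying pattern is a sequence-counter retry read. The sketch below is a simplified, single-threaded model of that idea, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
	unsigned int seq;	/* even: idle, odd: write in flight */
	uint64_t rx_packets, tx_packets;
};

static unsigned int fetch_begin(const struct pcpu_stats *s)
{
	return s->seq;
}

static int fetch_retry(const struct pcpu_stats *s, unsigned int start)
{
	/* Retry if a write was in flight or completed in between. */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct pcpu_stats stats = {
		.seq = 2, .rx_packets = 10, .tx_packets = 4,
	};
	uint64_t rx, tx;
	unsigned int start;

	do {
		start = fetch_begin(&stats);
		rx = stats.rx_packets;
		tx = stats.tx_packets;
	} while (fetch_retry(&stats, start));

	printf("rx %llu tx %llu\n",
	       (unsigned long long)rx, (unsigned long long)tx);
	return 0;
}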
@@ -1282,16 +1282,10 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np->rx_refill_timer.function = rx_refill_timeout;
 
        err = -ENOMEM;
-       np->stats = alloc_percpu(struct netfront_stats);
+       np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
        if (np->stats == NULL)
                goto exit;
 
-       for_each_possible_cpu(i) {
-               struct netfront_stats *xen_nf_stats;
-               xen_nf_stats = per_cpu_ptr(np->stats, i);
-               u64_stats_init(&xen_nf_stats->syncp);
-       }
-
        /* Initialise tx_skbs as a free chain containing every entry. */
        np->tx_skb_freelist = 0;
        for (i = 0; i < NET_TX_RING_SIZE; i++) {
index fe20e1cc0545bf4bb9700f054bddea12fb1ec643..65d4ca19d1328ec737c68684c30de01fc8347b8d 100644 (file)
@@ -26,6 +26,18 @@ config NFC_WILINK
          Say Y here to compile support for Texas Instrument's NFC WiLink driver
          into the kernel or say M to compile it as module.
 
+config NFC_TRF7970A
+       tristate "Texas Instruments TRF7970a NFC driver"
+       depends on SPI && NFC_DIGITAL
+       help
+         This option enables the NFC driver for Texas Instruments' TRF7970a
+         device. This device supports 5 different protocols: ISO14443A,
+         ISO14443B, FeLiCa, ISO15693 and ISO18000-3.
+
+         Say Y here to compile support for TRF7970a into the kernel or
+         say M to compile it as a module. The module will be called
+         trf7970a.ko.
+
 config NFC_MEI_PHY
        tristate "MEI bus NFC device support"
        depends on INTEL_MEI && NFC_HCI
index 56ab822ba03d9f1be61a544f975368de086b75ba..ae42a3fa60c981b965bbb0cdb004b7c3c591d01d 100644 (file)
@@ -10,5 +10,6 @@ obj-$(CONFIG_NFC_MEI_PHY)     += mei_phy.o
 obj-$(CONFIG_NFC_SIM)          += nfcsim.o
 obj-$(CONFIG_NFC_PORT100)      += port100.o
 obj-$(CONFIG_NFC_MRVL)         += nfcmrvl/
+obj-$(CONFIG_NFC_TRF7970A)     += trf7970a.o
 
 ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
index cf1a87bb74f86b405d3edc73249155f51bfb661a..d46a700a9637a9152aee5bc5c41b58eacfbe8ec5 100644 (file)
                                   NFC_PROTO_NFC_DEP_MASK)
 
 static const struct usb_device_id pn533_table[] = {
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE,
-         .idVendor             = PN533_VENDOR_ID,
-         .idProduct            = PN533_PRODUCT_ID,
-         .driver_info          = PN533_DEVICE_STD,
-       },
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE,
-         .idVendor             = SCM_VENDOR_ID,
-         .idProduct            = SCL3711_PRODUCT_ID,
-         .driver_info          = PN533_DEVICE_STD,
-       },
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE,
-         .idVendor             = SONY_VENDOR_ID,
-         .idProduct            = PASORI_PRODUCT_ID,
-         .driver_info          = PN533_DEVICE_PASORI,
-       },
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE,
-         .idVendor             = ACS_VENDOR_ID,
-         .idProduct            = ACR122U_PRODUCT_ID,
-         .driver_info          = PN533_DEVICE_ACR122U,
-       },
+       { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
+         .driver_info = PN533_DEVICE_STD },
+       { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
+         .driver_info = PN533_DEVICE_STD },
+       { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
+         .driver_info = PN533_DEVICE_PASORI },
+       { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
+         .driver_info = PN533_DEVICE_ACR122U },
        { }
 };
 MODULE_DEVICE_TABLE(usb, pn533_table);
index d6185ff2f87b0aa80aba29a55a806057301e39d4..f2acd85be86ea1d2016562c38aa4f9b6767ae754 100644 (file)
@@ -58,8 +58,19 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
 
 #define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
 
+/*
+ * Exposed through the 4 most significant bits
+ * of the HCI SW_VERSION first byte, a.k.a.
+ * SW RomLib.
+ */
+#define PN544_HW_VARIANT_C2 0xa
+#define PN544_HW_VARIANT_C3 0xb
+
+#define PN544_FW_CMD_RESET 0x01
 #define PN544_FW_CMD_WRITE 0x08
 #define PN544_FW_CMD_CHECK 0x06
+#define PN544_FW_CMD_SECURE_WRITE 0x0C
+#define PN544_FW_CMD_SECURE_CHUNK_WRITE 0x0D
 
 struct pn544_i2c_fw_frame_write {
        u8 cmd;
@@ -88,13 +99,31 @@ struct pn544_i2c_fw_blob {
        u8 data[];
 };
 
+struct pn544_i2c_fw_secure_frame {
+       u8 cmd;
+       u16 be_datalen;
+       u8 data[];
+} __packed;
+
+struct pn544_i2c_fw_secure_blob {
+       u64 header;
+       u8 data[];
+};
+
 #define PN544_FW_CMD_RESULT_TIMEOUT 0x01
 #define PN544_FW_CMD_RESULT_BAD_CRC 0x02
 #define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
 #define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
 #define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
+#define PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND 0x13
 #define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
+#define PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR 0x19
+#define PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR 0x1D
+#define PN544_FW_CMD_RESULT_MEMORY_ERROR 0x20
+#define PN544_FW_CMD_RESULT_CHUNK_OK 0x21
 #define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
+#define PN544_FW_CMD_RESULT_COMMAND_REJECTED 0xE0
+#define PN544_FW_CMD_RESULT_CHUNK_ERROR 0xE6
 
 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
 
@@ -104,11 +133,17 @@ struct pn544_i2c_fw_blob {
 #define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
                                         PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
                                         PN544_FW_WRITE_BUFFER_MAX_LEN)
+#define PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN 3
+#define PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN (PN544_FW_I2C_MAX_PAYLOAD -\
+                       PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN)
+#define PN544_FW_SECURE_FRAME_HEADER_LEN 3
+#define PN544_FW_SECURE_BLOB_HEADER_LEN 8
 
 #define FW_WORK_STATE_IDLE 1
 #define FW_WORK_STATE_START 2
 #define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
 #define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
+#define FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER 5
 
 struct pn544_i2c_phy {
        struct i2c_client *i2c_dev;
@@ -119,6 +154,8 @@ struct pn544_i2c_phy {
        unsigned int gpio_fw;
        unsigned int en_polarity;
 
+       u8 hw_variant;
+
        struct work_struct fw_work;
        int fw_work_state;
        char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
@@ -127,6 +164,8 @@ struct pn544_i2c_phy {
        size_t fw_blob_size;
        const u8 *fw_blob_data;
        size_t fw_written;
+       size_t fw_size;
+
        int fw_cmd_result;
 
        int powered;
@@ -390,6 +429,8 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
        switch (response.status) {
        case 0:
                return 0;
+       case PN544_FW_CMD_RESULT_CHUNK_OK:
+               return response.status;
        case PN544_FW_CMD_RESULT_TIMEOUT:
                return -ETIMEDOUT;
        case PN544_FW_CMD_RESULT_BAD_CRC:
@@ -400,9 +441,20 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
                return -EPROTO;
        case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
                return -EINVAL;
+       case PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND:
+               return -ENOTSUPP;
        case PN544_FW_CMD_RESULT_INVALID_LENGTH:
                return -EBADMSG;
+       case PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR:
+               return -ENOKEY;
+       case PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR:
+               return -EINVAL;
+       case PN544_FW_CMD_RESULT_MEMORY_ERROR:
+               return -ENOMEM;
+       case PN544_FW_CMD_RESULT_COMMAND_REJECTED:
+               return -EACCES;
        case PN544_FW_CMD_RESULT_WRITE_FAILED:
+       case PN544_FW_CMD_RESULT_CHUNK_ERROR:
                return -EIO;
        default:
                return -EIO;
@@ -469,7 +521,8 @@ static struct nfc_phy_ops i2c_phy_ops = {
        .disable = pn544_hci_i2c_disable,
 };
 
-static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
+static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name,
+                                       u8 hw_variant)
 {
        struct pn544_i2c_phy *phy = phy_id;
 
@@ -477,6 +530,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
 
        strcpy(phy->firmware_name, firmware_name);
 
+       phy->hw_variant = hw_variant;
        phy->fw_work_state = FW_WORK_STATE_START;
 
        schedule_work(&phy->fw_work);
@@ -598,12 +652,93 @@ static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
        return 0;
 }
 
+static int pn544_hci_i2c_fw_secure_write_frame_cmd(struct pn544_i2c_phy *phy,
+                                       const u8 *data, u16 datalen)
+{
+       u8 buf[PN544_FW_I2C_MAX_PAYLOAD];
+       struct pn544_i2c_fw_secure_frame *chunk;
+       int chunklen;
+       int r;
+
+       if (datalen > PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN)
+               datalen = PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN;
+
+       chunk = (struct pn544_i2c_fw_secure_frame *) buf;
+
+       chunk->cmd = PN544_FW_CMD_SECURE_CHUNK_WRITE;
+
+       put_unaligned_be16(datalen, &chunk->be_datalen);
+
+       memcpy(chunk->data, data, datalen);
+
+       chunklen = sizeof(chunk->cmd) + sizeof(chunk->be_datalen) + datalen;
+
+       r = i2c_master_send(phy->i2c_dev, buf, chunklen);
+
+       if (r == chunklen)
+               return datalen;
+       else if (r < 0)
+               return r;
+       else
+               return -EIO;
+
+}
+
+static int pn544_hci_i2c_fw_secure_write_frame(struct pn544_i2c_phy *phy)
+{
+       struct pn544_i2c_fw_secure_frame *framep;
+       int r;
+
+       framep = (struct pn544_i2c_fw_secure_frame *) phy->fw_blob_data;
+       if (phy->fw_written == 0)
+               phy->fw_blob_size = get_unaligned_be16(&framep->be_datalen)
+                               + PN544_FW_SECURE_FRAME_HEADER_LEN;
+
+       /* Only the secure write command can be chunked */
+       if (phy->fw_blob_size > PN544_FW_I2C_MAX_PAYLOAD &&
+                       framep->cmd != PN544_FW_CMD_SECURE_WRITE)
+               return -EINVAL;
+
+       /* The firmware also has other commands; we just send them directly */
+       if (phy->fw_blob_size < PN544_FW_I2C_MAX_PAYLOAD) {
+               r = i2c_master_send(phy->i2c_dev,
+                       (const char *) phy->fw_blob_data, phy->fw_blob_size);
+
+               if (r == phy->fw_blob_size)
+                       goto exit;
+               else if (r < 0)
+                       return r;
+               else
+                       return -EIO;
+       }
+
+       r = pn544_hci_i2c_fw_secure_write_frame_cmd(phy,
+                                      phy->fw_blob_data + phy->fw_written,
+                                      phy->fw_blob_size - phy->fw_written);
+       if (r < 0)
+               return r;
+
+exit:
+       phy->fw_written += r;
+       phy->fw_work_state = FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER;
+
+       /* The SW reset command will not trigger a response from the PN544 */
+       if (framep->cmd == PN544_FW_CMD_RESET) {
+               pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
+               phy->fw_cmd_result = 0;
+               schedule_work(&phy->fw_work);
+       }
+
+       return 0;
+}
+
 static void pn544_hci_i2c_fw_work(struct work_struct *work)
 {
        struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
                                                fw_work);
        int r;
        struct pn544_i2c_fw_blob *blob;
+       struct pn544_i2c_fw_secure_blob *secure_blob;
 
        switch (phy->fw_work_state) {
        case FW_WORK_STATE_START:
@@ -614,13 +749,29 @@ static void pn544_hci_i2c_fw_work(struct work_struct *work)
                if (r < 0)
                        goto exit_state_start;
 
-               blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
-               phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
-               phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
-               phy->fw_blob_data = blob->data;
-
                phy->fw_written = 0;
-               r = pn544_hci_i2c_fw_write_chunk(phy);
+
+               switch (phy->hw_variant) {
+               case PN544_HW_VARIANT_C2:
+                       blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
+                       phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+                       phy->fw_blob_dest_addr = get_unaligned_be32(
+                                                       &blob->be_destaddr);
+                       phy->fw_blob_data = blob->data;
+
+                       r = pn544_hci_i2c_fw_write_chunk(phy);
+                       break;
+               case PN544_HW_VARIANT_C3:
+                       secure_blob = (struct pn544_i2c_fw_secure_blob *)
+                                                               phy->fw->data;
+                       phy->fw_blob_data = secure_blob->data;
+                       phy->fw_size = phy->fw->size;
+                       r = pn544_hci_i2c_fw_secure_write_frame(phy);
+                       break;
+               default:
+                       r = -ENOTSUPP;
+                       break;
+               }
 
 exit_state_start:
                if (r < 0)
@@ -672,6 +823,35 @@ exit_state_wait_check_answer:
                        pn544_hci_i2c_fw_work_complete(phy, r);
                break;
 
+       case FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER:
+               r = phy->fw_cmd_result;
+               if (r < 0)
+                       goto exit_state_wait_secure_write_answer;
+
+               if (r == PN544_FW_CMD_RESULT_CHUNK_OK) {
+                       r = pn544_hci_i2c_fw_secure_write_frame(phy);
+                       goto exit_state_wait_secure_write_answer;
+               }
+
+               if (phy->fw_written == phy->fw_blob_size) {
+                       secure_blob = (struct pn544_i2c_fw_secure_blob *)
+                               (phy->fw_blob_data + phy->fw_blob_size);
+                       phy->fw_size -= phy->fw_blob_size +
+                               PN544_FW_SECURE_BLOB_HEADER_LEN;
+                       if (phy->fw_size >= PN544_FW_SECURE_BLOB_HEADER_LEN
+                                       + PN544_FW_SECURE_FRAME_HEADER_LEN) {
+                               phy->fw_blob_data = secure_blob->data;
+
+                               phy->fw_written = 0;
+                               r = pn544_hci_i2c_fw_secure_write_frame(phy);
+                       }
+               }
+
+exit_state_wait_secure_write_answer:
+               if (r < 0 || phy->fw_size == 0)
+                       pn544_hci_i2c_fw_work_complete(phy, r);
+               break;
+
        default:
                break;
        }
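For the C3 variant, the FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER handling above treats the firmware image as a chain of secure blobs: an 8-byte blob header, then one frame whose total length is its 3-byte header plus the big-endian length field, repeated until fewer than PN544_FW_SECURE_BLOB_HEADER_LEN + PN544_FW_SECURE_FRAME_HEADER_LEN bytes remain.  A standalone user-space sketch of that accounting (hypothetical helper, illustration only; the driver does the same arithmetic with fw_size and fw_blob_size, one frame per work-queue pass):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BLOB_HEADER_LEN		8	/* PN544_FW_SECURE_BLOB_HEADER_LEN */
#define FRAME_HEADER_LEN	3	/* PN544_FW_SECURE_FRAME_HEADER_LEN */

static void walk_secure_image(const uint8_t *image, size_t size)
{
	while (size >= BLOB_HEADER_LEN + FRAME_HEADER_LEN) {
		const uint8_t *frame = image + BLOB_HEADER_LEN;
		size_t frame_len = FRAME_HEADER_LEN +
				   (((size_t)frame[1] << 8) | frame[2]);

		if (BLOB_HEADER_LEN + frame_len > size)
			break;	/* malformed image */

		printf("frame cmd 0x%02x, %zu bytes\n", frame[0], frame_len);

		image += BLOB_HEADER_LEN + frame_len;
		size -= BLOB_HEADER_LEN + frame_len;
	}
}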
index 3df4a109cfadfb36a5ccd08e788feef43da0048e..9c8051d20cea98fa4377edb79167ddef2b14cf24 100644 (file)
@@ -786,7 +786,7 @@ static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
        if (info->fw_download == NULL)
                return -ENOTSUPP;
 
-       return info->fw_download(info->phy_id, firmware_name);
+       return info->fw_download(info->phy_id, firmware_name, hdev->sw_romlib);
 }
 
 static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
index 491bf45da35822f706401bd25bbf9b9eaaa78e99..2aa9233e808647d9e7581aa9326b313696d85380 100644 (file)
@@ -25,7 +25,8 @@
 #define PN544_HCI_MODE 0
 #define PN544_FW_MODE 1
 
-typedef int (*fw_download_t)(void *context, const char *firmware_name);
+typedef int (*fw_download_t)(void *context, const char *firmware_name,
+                               u8 hw_variant);
 
 int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                    int phy_headroom, int phy_tailroom, int phy_payload,
index a8555f81cbbac2ce4dac42c509d898228cc451f3..b7a372af5eb75c7a99a108e76faedd8c7bb7c1c4 100644 (file)
@@ -27,7 +27,8 @@
 #define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK    | \
                           NFC_PROTO_MIFARE_MASK   | \
                           NFC_PROTO_FELICA_MASK   | \
-                          NFC_PROTO_NFC_DEP_MASK)
+                          NFC_PROTO_NFC_DEP_MASK  | \
+                          NFC_PROTO_ISO14443_MASK)
 
 #define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
                              NFC_DIGITAL_DRV_CAPS_TG_CRC)
@@ -139,6 +140,8 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
                .in_recv_set_number = 15,
                .in_recv_comm_type  = PORT100_COMM_TYPE_IN_106A,
        },
+       /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
+       [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
 };
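The [NFC_DIGITAL_RF_TECH_LAST] = { 0 } entry above (and the similar sentinels added to the other tables below) relies on how C designated initializers size an array of unspecified length: the array gets enough elements to cover the largest initialized index, so indexing it with any rf tech or framing value stays in bounds even though most entries are zero-filled.  A minimal standalone illustration with hypothetical names:

#include <stdio.h>

enum rf_tech { TECH_A, TECH_B, TECH_F, TECH_LAST };

struct setting { int reg; };

/* Only TECH_A is filled in explicitly; the [TECH_LAST] sentinel forces
 * the array to have TECH_LAST + 1 elements, so settings[TECH_F] is a
 * valid zeroed entry rather than an out-of-bounds access.
 */
static const struct setting settings[] = {
	[TECH_A]    = { .reg = 0x12 },
	[TECH_LAST] = { 0 },
};

int main(void)
{
	printf("%zu entries, TECH_F reg = %d\n",
	       sizeof(settings) / sizeof(settings[0]), settings[TECH_F].reg);
	return 0;
}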
 
 /**
@@ -174,6 +177,9 @@ static const struct port100_tg_rf_setting tg_rf_settings[] = {
                .tg_set_number = 8,
                .tg_comm_type = PORT100_COMM_TYPE_TG_424F,
        },
+       /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */
+       [NFC_DIGITAL_RF_TECH_LAST] = { 0 },
+
 };
 
 #define PORT100_IN_PROT_INITIAL_GUARD_TIME      0x00
@@ -293,6 +299,10 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
                { PORT100_IN_PROT_CHECK_CRC, 0 },
                { PORT100_IN_PROT_END,       0 },
        },
+       [NFC_DIGITAL_FRAMING_NFCA_T4T] = {
+               /* nfc_digital_framing_nfca_standard_with_crc_a */
+               { PORT100_IN_PROT_END,       0 },
+       },
        [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
                /* nfc_digital_framing_nfca_standard */
                { PORT100_IN_PROT_END, 0 },
@@ -330,6 +340,10 @@ in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
        [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
                { PORT100_IN_PROT_END, 0 },
        },
+       /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
+       [NFC_DIGITAL_FRAMING_LAST] = {
+               { PORT100_IN_PROT_END, 0 },
+       },
 };
 
 static struct port100_protocol
@@ -371,6 +385,10 @@ tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
                { PORT100_TG_PROT_RF_OFF, 1 },
                { PORT100_TG_PROT_END,    0 },
        },
+       /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */
+       [NFC_DIGITAL_FRAMING_LAST] = {
+               { PORT100_TG_PROT_END,    0 },
+       },
 };
 
 struct port100 {
@@ -1356,10 +1374,7 @@ static struct nfc_digital_ops port100_digital_ops = {
 };
 
 static const struct usb_device_id port100_table[] = {
-       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE,
-         .idVendor             = SONY_VENDOR_ID,
-         .idProduct            = RCS380_PRODUCT_ID,
-       },
+       { USB_DEVICE(SONY_VENDOR_ID, RCS380_PRODUCT_ID), },
        { }
 };
 MODULE_DEVICE_TABLE(usb, port100_table);
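The USB_DEVICE() helper initializes the same match_flags/idVendor/idProduct fields as the open-coded entry it replaces, so the table change is purely cosmetic.  A small standalone sketch, using placeholder IDs (0x1234/0x5678 stand in for SONY_VENDOR_ID and RCS380_PRODUCT_ID, whose values are not shown in this hunk):

#include <linux/mod_devicetable.h>
#include <linux/usb.h>

/* Both entries describe the same device; USB_DEVICE() is shorthand for
 * the three-field initializer above it.
 */
static const struct usb_device_id demo_table[] = {
	{ .match_flags	= USB_DEVICE_ID_MATCH_DEVICE,
	  .idVendor	= 0x1234,
	  .idProduct	= 0x5678,
	},
	{ USB_DEVICE(0x1234, 0x5678) },
	{ }
};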
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
new file mode 100644 (file)
index 0000000..d9babe9
--- /dev/null
@@ -0,0 +1,1370 @@
+/*
+ * TI TRF7970a RFID/NFC Transceiver Driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Erick Macias <emacias@ti.com>
+ * Author: Felipe Balbi <balbi@ti.com>
+ * Author: Mark A. Greer <mgreer@animalcreek.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/nfc.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
+
+/* There are 3 ways the host can communicate with the trf7970a:
+ * parallel mode, SPI with Slave Select (SS) mode, and SPI without
+ * SS mode.  The driver only supports the two SPI modes.
+ *
+ * The trf7970a is very timing sensitive and the VIN, EN2, and EN
+ * pins must be asserted in that order and with specific delays in between.
+ * The delays used in the driver were provided by TI and have been
+ * confirmed to work with this driver.
+ *
+ * Timeouts are implemented using the delayed workqueue kernel facility.
+ * Timeouts are required so things don't hang when there is no response
+ * from the trf7970a (or tag).  Using this mechanism creates a race with
+ * interrupts, however.  That is, an interrupt and a timeout could occur
+ * closely enough together that one is blocked by the mutex while the other
+ * executes.  When the timeout handler executes first and blocks the
+ * interrupt handler, it will eventually set the state to IDLE so the
+ * interrupt handler will check the state and exit with no harm done.
+ * When the interrupt handler executes first and blocks the timeout handler,
+ * the cancel_delayed_work() call will know that it didn't cancel the
+ * work item (i.e., timeout) and will return zero.  That return code is
+ * used by the timer handler to indicate that it should ignore the timeout
+ * once it is unblocked.
+ *
+ * Aborting an active command isn't as simple as it seems because the only
+ * way to abort a command that's already been sent to the tag is to turn
+ * off power to the tag.  If we do that, though, we'd have to go through
+ * the entire anticollision procedure again but the digital layer doesn't
+ * support that.  So, if an abort is received before trf7970a_in_send_cmd()
+ * has sent the command to the tag, it simply returns -ECANCELED.  If the
+ * command has already been sent to the tag, then the driver continues
+ * normally and receives the response data (or error) but just before
+ * sending the data upstream, it frees the rx_skb and sends -ECANCELED
+ * upstream instead.  If the command failed, that error will be sent
+ * upstream.
+ *
+ * When receiving data from a tag and the interrupt status register has
+ * only the SRX bit set, it means that all of the data has been received
+ * (once what's in the fifo has been read).  However, depending on timing
+ * an interrupt status with only the SRX bit set may not be received.  In
+ * those cases, the timeout mechanism is used to wait 5 ms in case more
+ * data arrives.  After 5 ms, it is assumed that all of the data has been
+ * received and the accumulated rx data is sent upstream.  The
+ * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose
+ * (i.e., it indicates that some data has been received but we're not sure
+ * if there is more coming so a timeout in this state means all data has
+ * been received and there isn't an error).  The delay is 5 ms since delays
+ * over 2 ms have been observed during testing (a little extra just in case).
+ *
+ * Type 2 write and sector select commands respond with a 4-bit ACK or NACK.
+ * Having only 4 bits in the FIFO won't normally generate an interrupt so
+ * the driver enables the '4_bit_RX' bit of the Special Functions register 1
+ * to cause an interrupt in that case.  Leaving that bit for a read command
+ * messes up the data returned so it is only enabled when the framing is
+ * 'NFC_DIGITAL_FRAMING_NFCA_T2T' and the command is not a read command.
+ * Unfortunately, that means that the driver has to peek into tx frames
+ * when the framing is 'NFC_DIGITAL_FRAMING_NFCA_T2T'.  This is done by
+ * the trf7970a_per_cmd_config() routine.
+ *
+ * ISO/IEC 15693 frames specify whether to use single or double sub-carrier
+ * frequencies and whether to use low or high data rates in the flags byte
+ * of the frame.  This means that the driver has to peek at all 15693 frames
+ * to determine what speed to set the communication to.  In addition, write
+ * and lock commands use the OPTION flag to indicate that an EOF must be
+ * sent to the tag before it will send its response.  So the driver has to
+ * examine all frames for that reason too.
+ *
+ * It is unclear how long to wait before sending the EOF.  According to the
+ * Note under Table 1-1 in section 1.6 of
+ * http://www.ti.com/lit/ug/scbu011/scbu011.pdf, that wait should be at least
+ * 10 ms for TI Tag-it HF-I tags; however testing has shown that is not long
+ * enough.  For this reason, the driver waits 20 ms which seems to work
+ * reliably.
+ */
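A condensed sketch of the interrupt/timeout race handling described above, with hypothetical names; in the driver the equivalent fields live in struct trf7970a and both paths run with trf->lock held:

#include <linux/types.h>
#include <linux/workqueue.h>

struct race_demo {
	struct delayed_work	timeout_work;
	bool			ignore_timeout;
};

static void race_demo_irq_path(struct race_demo *d)
{
	/* cancel_delayed_work() returns false when the timeout handler
	 * has already started (and is blocked on the mutex), so flag
	 * that pending run to be ignored instead.
	 */
	d->ignore_timeout = !cancel_delayed_work(&d->timeout_work);
}

static void race_demo_timeout_path(struct race_demo *d)
{
	if (d->ignore_timeout)
		d->ignore_timeout = false;	/* the interrupt won the race */
	/* else: genuine timeout, report -ETIMEDOUT upstream */
}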
+
+#define TRF7970A_SUPPORTED_PROTOCOLS \
+               (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK |      \
+                NFC_PROTO_ISO15693_MASK)
+
+/* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
+ * on what the current framing is, the address of the TX length byte 1
+ * register (0x1d), and the 2 byte length of the data to be transmitted.
+ * That totals 5 bytes.
+ */
+#define TRF7970A_TX_SKB_HEADROOM               5
+
+#define TRF7970A_RX_SKB_ALLOC_SIZE             256
+
+#define TRF7970A_FIFO_SIZE                     128
+
+/* TX length is 3 nibbles long ==> max of 4096 - 1 bytes */
+#define TRF7970A_TX_MAX                                (4096 - 1)
+
+#define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT      5
+#define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT   3
+#define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF    20
+
+/* Quirks */
+/* Erratum: When reading IRQ Status register on trf7970a, we must issue a
+ * read continuous command for IRQ Status and Collision Position registers.
+ */
+#define TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA  BIT(0)
+
+/* Direct commands */
+#define TRF7970A_CMD_IDLE                      0x00
+#define TRF7970A_CMD_SOFT_INIT                 0x03
+#define TRF7970A_CMD_RF_COLLISION              0x04
+#define TRF7970A_CMD_RF_COLLISION_RESPONSE_N   0x05
+#define TRF7970A_CMD_RF_COLLISION_RESPONSE_0   0x06
+#define TRF7970A_CMD_FIFO_RESET                        0x0f
+#define TRF7970A_CMD_TRANSMIT_NO_CRC           0x10
+#define TRF7970A_CMD_TRANSMIT                  0x11
+#define TRF7970A_CMD_DELAY_TRANSMIT_NO_CRC     0x12
+#define TRF7970A_CMD_DELAY_TRANSMIT            0x13
+#define TRF7970A_CMD_EOF                       0x14
+#define TRF7970A_CMD_CLOSE_SLOT                        0x15
+#define TRF7970A_CMD_BLOCK_RX                  0x16
+#define TRF7970A_CMD_ENABLE_RX                 0x17
+#define TRF7970A_CMD_TEST_EXT_RF               0x18
+#define TRF7970A_CMD_TEST_INT_RF               0x19
+#define TRF7970A_CMD_RX_GAIN_ADJUST            0x1a
+
+/* Bits determining whether it's a direct command or register R/W,
+ * whether to use a continuous SPI transaction or not, and the actual
+ * direct cmd opcode or register address.
+ */
+#define TRF7970A_CMD_BIT_CTRL                  BIT(7)
+#define TRF7970A_CMD_BIT_RW                    BIT(6)
+#define TRF7970A_CMD_BIT_CONTINUOUS            BIT(5)
+#define TRF7970A_CMD_BIT_OPCODE(opcode)                ((opcode) & 0x1f)
+
+/* Registers addresses */
+#define TRF7970A_CHIP_STATUS_CTRL              0x00
+#define TRF7970A_ISO_CTRL                      0x01
+#define TRF7970A_ISO14443B_TX_OPTIONS          0x02
+#define TRF7970A_ISO14443A_HIGH_BITRATE_OPTIONS        0x03
+#define TRF7970A_TX_TIMER_SETTING_H_BYTE       0x04
+#define TRF7970A_TX_TIMER_SETTING_L_BYTE       0x05
+#define TRF7970A_TX_PULSE_LENGTH_CTRL          0x06
+#define TRF7970A_RX_NO_RESPONSE_WAIT           0x07
+#define TRF7970A_RX_WAIT_TIME                  0x08
+#define TRF7970A_MODULATOR_SYS_CLK_CTRL                0x09
+#define TRF7970A_RX_SPECIAL_SETTINGS           0x0a
+#define TRF7970A_REG_IO_CTRL                   0x0b
+#define TRF7970A_IRQ_STATUS                    0x0c
+#define TRF7970A_COLLISION_IRQ_MASK            0x0d
+#define TRF7970A_COLLISION_POSITION            0x0e
+#define TRF7970A_RSSI_OSC_STATUS               0x0f
+#define TRF7970A_SPECIAL_FCN_REG1              0x10
+#define TRF7970A_SPECIAL_FCN_REG2              0x11
+#define TRF7970A_RAM1                          0x12
+#define TRF7970A_RAM2                          0x13
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS     0x14
+#define TRF7970A_NFC_LOW_FIELD_LEVEL           0x16
+#define TRF7970A_NFCID1                                0x17
+#define TRF7970A_NFC_TARGET_LEVEL              0x18
+#define TRF79070A_NFC_TARGET_PROTOCOL          0x19
+#define TRF7970A_TEST_REGISTER1                        0x1a
+#define TRF7970A_TEST_REGISTER2                        0x1b
+#define TRF7970A_FIFO_STATUS                   0x1c
+#define TRF7970A_TX_LENGTH_BYTE1               0x1d
+#define TRF7970A_TX_LENGTH_BYTE2               0x1e
+#define TRF7970A_FIFO_IO_REGISTER              0x1f
+
+/* Chip Status Control Register Bits */
+#define TRF7970A_CHIP_STATUS_VRS5_3            BIT(0)
+#define TRF7970A_CHIP_STATUS_REC_ON            BIT(1)
+#define TRF7970A_CHIP_STATUS_AGC_ON            BIT(2)
+#define TRF7970A_CHIP_STATUS_PM_ON             BIT(3)
+#define TRF7970A_CHIP_STATUS_RF_PWR            BIT(4)
+#define TRF7970A_CHIP_STATUS_RF_ON             BIT(5)
+#define TRF7970A_CHIP_STATUS_DIRECT            BIT(6)
+#define TRF7970A_CHIP_STATUS_STBY              BIT(7)
+
+/* ISO Control Register Bits */
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_662   0x00
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_662 0x01
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648  0x02
+#define TRF7970A_ISO_CTRL_15693_SGL_1OF256_2648        0x03
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a  0x04
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_667 0x05
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669  0x06
+#define TRF7970A_ISO_CTRL_15693_DBL_1OF256_2669        0x07
+#define TRF7970A_ISO_CTRL_14443A_106           0x08
+#define TRF7970A_ISO_CTRL_14443A_212           0x09
+#define TRF7970A_ISO_CTRL_14443A_424           0x0a
+#define TRF7970A_ISO_CTRL_14443A_848           0x0b
+#define TRF7970A_ISO_CTRL_14443B_106           0x0c
+#define TRF7970A_ISO_CTRL_14443B_212           0x0d
+#define TRF7970A_ISO_CTRL_14443B_424           0x0e
+#define TRF7970A_ISO_CTRL_14443B_848           0x0f
+#define TRF7970A_ISO_CTRL_FELICA_212           0x1a
+#define TRF7970A_ISO_CTRL_FELICA_424           0x1b
+#define TRF7970A_ISO_CTRL_RFID                 BIT(5)
+#define TRF7970A_ISO_CTRL_DIR_MODE             BIT(6)
+#define TRF7970A_ISO_CTRL_RX_CRC_N             BIT(7)  /* true == No CRC */
+
+#define TRF7970A_ISO_CTRL_RFID_SPEED_MASK      0x1f
+
+/* Modulator and SYS_CLK Control Register Bits */
+#define TRF7970A_MODULATOR_DEPTH(n)            ((n) & 0x7)
+#define TRF7970A_MODULATOR_DEPTH_ASK10         (TRF7970A_MODULATOR_DEPTH(0))
+#define TRF7970A_MODULATOR_DEPTH_OOK           (TRF7970A_MODULATOR_DEPTH(1))
+#define TRF7970A_MODULATOR_DEPTH_ASK7          (TRF7970A_MODULATOR_DEPTH(2))
+#define TRF7970A_MODULATOR_DEPTH_ASK8_5                (TRF7970A_MODULATOR_DEPTH(3))
+#define TRF7970A_MODULATOR_DEPTH_ASK13         (TRF7970A_MODULATOR_DEPTH(4))
+#define TRF7970A_MODULATOR_DEPTH_ASK16         (TRF7970A_MODULATOR_DEPTH(5))
+#define TRF7970A_MODULATOR_DEPTH_ASK22         (TRF7970A_MODULATOR_DEPTH(6))
+#define TRF7970A_MODULATOR_DEPTH_ASK30         (TRF7970A_MODULATOR_DEPTH(7))
+#define TRF7970A_MODULATOR_EN_ANA              BIT(3)
+#define TRF7970A_MODULATOR_CLK(n)              (((n) & 0x3) << 4)
+#define TRF7970A_MODULATOR_CLK_DISABLED                (TRF7970A_MODULATOR_CLK(0))
+#define TRF7970A_MODULATOR_CLK_3_6             (TRF7970A_MODULATOR_CLK(1))
+#define TRF7970A_MODULATOR_CLK_6_13            (TRF7970A_MODULATOR_CLK(2))
+#define TRF7970A_MODULATOR_CLK_13_27           (TRF7970A_MODULATOR_CLK(3))
+#define TRF7970A_MODULATOR_EN_OOK              BIT(6)
+#define TRF7970A_MODULATOR_27MHZ               BIT(7)
+
+/* IRQ Status Register Bits */
+#define TRF7970A_IRQ_STATUS_NORESP             BIT(0) /* ISO15693 only */
+#define TRF7970A_IRQ_STATUS_COL                        BIT(1)
+#define TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR  BIT(2)
+#define TRF7970A_IRQ_STATUS_PARITY_ERROR       BIT(3)
+#define TRF7970A_IRQ_STATUS_CRC_ERROR          BIT(4)
+#define TRF7970A_IRQ_STATUS_FIFO               BIT(5)
+#define TRF7970A_IRQ_STATUS_SRX                        BIT(6)
+#define TRF7970A_IRQ_STATUS_TX                 BIT(7)
+
+#define TRF7970A_IRQ_STATUS_ERROR                              \
+               (TRF7970A_IRQ_STATUS_COL |                      \
+                TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR |        \
+                TRF7970A_IRQ_STATUS_PARITY_ERROR |             \
+                TRF7970A_IRQ_STATUS_CRC_ERROR)
+
+#define TRF7970A_SPECIAL_FCN_REG1_COL_7_6              BIT(0)
+#define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL          BIT(1)
+#define TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX             BIT(2)
+#define TRF7970A_SPECIAL_FCN_REG1_SP_DIR_MODE          BIT(3)
+#define TRF7970A_SPECIAL_FCN_REG1_NEXT_SLOT_37US       BIT(4)
+#define TRF7970A_SPECIAL_FCN_REG1_PAR43                        BIT(5)
+
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_124     (0x0 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_120     (0x1 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_112     (0x2 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96      (0x3 << 2)
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_4       0x0
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_8       0x1
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_16      0x2
+#define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32      0x3
+
+#define TRF7970A_FIFO_STATUS_OVERFLOW          BIT(7)
+
+/* NFC (ISO/IEC 14443A) Type 2 Tag commands */
+#define NFC_T2T_CMD_READ                       0x30
+
+/* ISO 15693 commands codes */
+#define ISO15693_CMD_INVENTORY                 0x01
+#define ISO15693_CMD_READ_SINGLE_BLOCK         0x20
+#define ISO15693_CMD_WRITE_SINGLE_BLOCK                0x21
+#define ISO15693_CMD_LOCK_BLOCK                        0x22
+#define ISO15693_CMD_READ_MULTIPLE_BLOCK       0x23
+#define ISO15693_CMD_WRITE_MULTIPLE_BLOCK      0x24
+#define ISO15693_CMD_SELECT                    0x25
+#define ISO15693_CMD_RESET_TO_READY            0x26
+#define ISO15693_CMD_WRITE_AFI                 0x27
+#define ISO15693_CMD_LOCK_AFI                  0x28
+#define ISO15693_CMD_WRITE_DSFID               0x29
+#define ISO15693_CMD_LOCK_DSFID                        0x2a
+#define ISO15693_CMD_GET_SYSTEM_INFO           0x2b
+#define ISO15693_CMD_GET_MULTIPLE_BLOCK_SECURITY_STATUS        0x2c
+
+/* ISO 15693 request and response flags */
+#define ISO15693_REQ_FLAG_SUB_CARRIER          BIT(0)
+#define ISO15693_REQ_FLAG_DATA_RATE            BIT(1)
+#define ISO15693_REQ_FLAG_INVENTORY            BIT(2)
+#define ISO15693_REQ_FLAG_PROTOCOL_EXT         BIT(3)
+#define ISO15693_REQ_FLAG_SELECT               BIT(4)
+#define ISO15693_REQ_FLAG_AFI                  BIT(4)
+#define ISO15693_REQ_FLAG_ADDRESS              BIT(5)
+#define ISO15693_REQ_FLAG_NB_SLOTS             BIT(5)
+#define ISO15693_REQ_FLAG_OPTION               BIT(6)
+
+#define ISO15693_REQ_FLAG_SPEED_MASK \
+               (ISO15693_REQ_FLAG_SUB_CARRIER | ISO15693_REQ_FLAG_DATA_RATE)
+
+enum trf7970a_state {
+       TRF7970A_ST_OFF,
+       TRF7970A_ST_IDLE,
+       TRF7970A_ST_IDLE_RX_BLOCKED,
+       TRF7970A_ST_WAIT_FOR_TX_FIFO,
+       TRF7970A_ST_WAIT_FOR_RX_DATA,
+       TRF7970A_ST_WAIT_FOR_RX_DATA_CONT,
+       TRF7970A_ST_WAIT_TO_ISSUE_EOF,
+       TRF7970A_ST_MAX
+};
+
+struct trf7970a {
+       enum trf7970a_state             state;
+       struct device                   *dev;
+       struct spi_device               *spi;
+       struct regulator                *regulator;
+       struct nfc_digital_dev          *ddev;
+       u32                             quirks;
+       bool                            powering_up;
+       bool                            aborting;
+       struct sk_buff                  *tx_skb;
+       struct sk_buff                  *rx_skb;
+       nfc_digital_cmd_complete_t      cb;
+       void                            *cb_arg;
+       u8                              iso_ctrl;
+       u8                              special_fcn_reg1;
+       int                             technology;
+       int                             framing;
+       u8                              tx_cmd;
+       bool                            issue_eof;
+       int                             en2_gpio;
+       int                             en_gpio;
+       struct mutex                    lock;
+       unsigned int                    timeout;
+       bool                            ignore_timeout;
+       struct delayed_work             timeout_work;
+};
+
+
+static int trf7970a_cmd(struct trf7970a *trf, u8 opcode)
+{
+       u8 cmd = TRF7970A_CMD_BIT_CTRL | TRF7970A_CMD_BIT_OPCODE(opcode);
+       int ret;
+
+       dev_dbg(trf->dev, "cmd: 0x%x\n", cmd);
+
+       ret = spi_write(trf->spi, &cmd, 1);
+       if (ret)
+               dev_err(trf->dev, "%s - cmd: 0x%x, ret: %d\n", __func__, cmd,
+                               ret);
+       return ret;
+}
+
+static int trf7970a_read(struct trf7970a *trf, u8 reg, u8 *val)
+{
+       u8 addr = TRF7970A_CMD_BIT_RW | reg;
+       int ret;
+
+       ret = spi_write_then_read(trf->spi, &addr, 1, val, 1);
+       if (ret)
+               dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
+                               ret);
+
+       dev_dbg(trf->dev, "read(0x%x): 0x%x\n", addr, *val);
+
+       return ret;
+}
+
+static int trf7970a_read_cont(struct trf7970a *trf, u8 reg,
+               u8 *buf, size_t len)
+{
+       u8 addr = reg | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS;
+       int ret;
+
+       dev_dbg(trf->dev, "read_cont(0x%x, %zd)\n", addr, len);
+
+       ret = spi_write_then_read(trf->spi, &addr, 1, buf, len);
+       if (ret)
+               dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr,
+                               ret);
+       return ret;
+}
+
+static int trf7970a_write(struct trf7970a *trf, u8 reg, u8 val)
+{
+       u8 buf[2] = { reg, val };
+       int ret;
+
+       dev_dbg(trf->dev, "write(0x%x): 0x%x\n", reg, val);
+
+       ret = spi_write(trf->spi, buf, 2);
+       if (ret)
+               dev_err(trf->dev, "%s - write: 0x%x 0x%x, ret: %d\n", __func__,
+                               buf[0], buf[1], ret);
+
+       return ret;
+}
+
+static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status)
+{
+       int ret;
+       u8 buf[2];
+       u8 addr;
+
+       addr = TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW;
+
+       if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA) {
+               addr |= TRF7970A_CMD_BIT_CONTINUOUS;
+               ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2);
+       } else {
+               ret = spi_write_then_read(trf->spi, &addr, 1, buf, 1);
+       }
+
+       if (ret)
+               dev_err(trf->dev, "%s - irqstatus: Status read failed: %d\n",
+                               __func__, ret);
+       else
+               *status = buf[0];
+
+       return ret;
+}
+
+static void trf7970a_send_upstream(struct trf7970a *trf)
+{
+       u8 rssi;
+
+       dev_kfree_skb_any(trf->tx_skb);
+       trf->tx_skb = NULL;
+
+       if (trf->rx_skb && !IS_ERR(trf->rx_skb) && !trf->aborting)
+               print_hex_dump_debug("trf7970a rx data: ", DUMP_PREFIX_NONE,
+                               16, 1, trf->rx_skb->data, trf->rx_skb->len,
+                               false);
+
+       /* According to the manual it is "good form" to reset the fifo and
+        * read the RSSI levels & oscillator status register here.  It doesn't
+        * explain why.
+        */
+       trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+       trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi);
+
+       trf->state = TRF7970A_ST_IDLE;
+
+       if (trf->aborting) {
+               dev_dbg(trf->dev, "Abort process complete\n");
+
+               if (!IS_ERR(trf->rx_skb)) {
+                       kfree_skb(trf->rx_skb);
+                       trf->rx_skb = ERR_PTR(-ECANCELED);
+               }
+
+               trf->aborting = false;
+       }
+
+       trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb);
+
+       trf->rx_skb = NULL;
+}
+
+static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno)
+{
+       dev_dbg(trf->dev, "Error - state: %d, errno: %d\n", trf->state, errno);
+
+       kfree_skb(trf->rx_skb);
+       trf->rx_skb = ERR_PTR(errno);
+
+       trf7970a_send_upstream(trf);
+}
+
+static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb,
+               unsigned int len)
+{
+       unsigned int timeout;
+       int ret;
+
+       print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE,
+                       16, 1, skb->data, len, false);
+
+       ret = spi_write(trf->spi, skb->data, len);
+       if (ret) {
+               dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__,
+                               ret);
+               return ret;
+       }
+
+       skb_pull(skb, len);
+
+       if (skb->len > 0) {
+               trf->state = TRF7970A_ST_WAIT_FOR_TX_FIFO;
+               timeout = TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT;
+       } else {
+               if (trf->issue_eof) {
+                       trf->state = TRF7970A_ST_WAIT_TO_ISSUE_EOF;
+                       timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF;
+               } else {
+                       trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
+                       timeout = trf->timeout;
+               }
+       }
+
+       dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", timeout,
+                       trf->state);
+
+       schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout));
+
+       return 0;
+}
+
+static void trf7970a_fill_fifo(struct trf7970a *trf)
+{
+       struct sk_buff *skb = trf->tx_skb;
+       unsigned int len;
+       int ret;
+       u8 fifo_bytes;
+
+       ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
+       if (ret) {
+               trf7970a_send_err_upstream(trf, ret);
+               return;
+       }
+
+       dev_dbg(trf->dev, "Filling FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
+
+       if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) {
+               dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__,
+                               fifo_bytes);
+               trf7970a_send_err_upstream(trf, -EIO);
+               return;
+       }
+
+       /* Calculate how much more data can be written to the fifo */
+       len = TRF7970A_FIFO_SIZE - fifo_bytes;
+       len = min(skb->len, len);
+
+       ret = trf7970a_transmit(trf, skb, len);
+       if (ret)
+               trf7970a_send_err_upstream(trf, ret);
+}
+
+static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status)
+{
+       struct sk_buff *skb = trf->rx_skb;
+       int ret;
+       u8 fifo_bytes;
+
+       if (status & TRF7970A_IRQ_STATUS_ERROR) {
+               trf7970a_send_err_upstream(trf, -EIO);
+               return;
+       }
+
+       ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes);
+       if (ret) {
+               trf7970a_send_err_upstream(trf, ret);
+               return;
+       }
+
+       dev_dbg(trf->dev, "Draining FIFO - fifo_bytes: 0x%x\n", fifo_bytes);
+
+       if (!fifo_bytes)
+               goto no_rx_data;
+
+       if (fifo_bytes & TRF7970A_FIFO_STATUS_OVERFLOW) {
+               dev_err(trf->dev, "%s - fifo overflow: 0x%x\n", __func__,
+                               fifo_bytes);
+               trf7970a_send_err_upstream(trf, -EIO);
+               return;
+       }
+
+       if (fifo_bytes > skb_tailroom(skb)) {
+               skb = skb_copy_expand(skb, skb_headroom(skb),
+                               max_t(int, fifo_bytes,
+                                       TRF7970A_RX_SKB_ALLOC_SIZE),
+                               GFP_KERNEL);
+               if (!skb) {
+                       trf7970a_send_err_upstream(trf, -ENOMEM);
+                       return;
+               }
+
+               kfree_skb(trf->rx_skb);
+               trf->rx_skb = skb;
+       }
+
+       ret = trf7970a_read_cont(trf, TRF7970A_FIFO_IO_REGISTER,
+                       skb_put(skb, fifo_bytes), fifo_bytes);
+       if (ret) {
+               trf7970a_send_err_upstream(trf, ret);
+               return;
+       }
+
+       /* If we received a Type 2 ACK/NACK, shift right 4 bits and pass up */
+       if ((trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T) && (skb->len == 1) &&
+                       (trf->special_fcn_reg1 ==
+                                TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX)) {
+               skb->data[0] >>= 4;
+               status = TRF7970A_IRQ_STATUS_SRX;
+       } else {
+               trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA_CONT;
+       }
+
+no_rx_data:
+       if (status == TRF7970A_IRQ_STATUS_SRX) { /* Receive complete */
+               trf7970a_send_upstream(trf);
+               return;
+       }
+
+       dev_dbg(trf->dev, "Setting timeout for %d ms\n",
+                       TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT);
+
+       schedule_delayed_work(&trf->timeout_work,
+                       msecs_to_jiffies(TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT));
+}
+
+static irqreturn_t trf7970a_irq(int irq, void *dev_id)
+{
+       struct trf7970a *trf = dev_id;
+       int ret;
+       u8 status;
+
+       mutex_lock(&trf->lock);
+
+       if (trf->state == TRF7970A_ST_OFF) {
+               mutex_unlock(&trf->lock);
+               return IRQ_NONE;
+       }
+
+       ret = trf7970a_read_irqstatus(trf, &status);
+       if (ret) {
+               mutex_unlock(&trf->lock);
+               return IRQ_NONE;
+       }
+
+       dev_dbg(trf->dev, "IRQ - state: %d, status: 0x%x\n", trf->state,
+                       status);
+
+       if (!status) {
+               mutex_unlock(&trf->lock);
+               return IRQ_NONE;
+       }
+
+       switch (trf->state) {
+       case TRF7970A_ST_IDLE:
+       case TRF7970A_ST_IDLE_RX_BLOCKED:
+               /* If getting interrupts caused by RF noise, turn off the
+                * receiver to avoid unnecessary interrupts.  It will be
+                * turned back on in trf7970a_in_send_cmd() when the next
+                * command is issued.
+                */
+               if (status & TRF7970A_IRQ_STATUS_ERROR) {
+                       trf7970a_cmd(trf, TRF7970A_CMD_BLOCK_RX);
+                       trf->state = TRF7970A_ST_IDLE_RX_BLOCKED;
+               }
+
+               trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+               break;
+       case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+               if (status & TRF7970A_IRQ_STATUS_TX) {
+                       trf->ignore_timeout =
+                               !cancel_delayed_work(&trf->timeout_work);
+                       trf7970a_fill_fifo(trf);
+               } else {
+                       trf7970a_send_err_upstream(trf, -EIO);
+               }
+               break;
+       case TRF7970A_ST_WAIT_FOR_RX_DATA:
+       case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+               if (status & TRF7970A_IRQ_STATUS_SRX) {
+                       trf->ignore_timeout =
+                               !cancel_delayed_work(&trf->timeout_work);
+                       trf7970a_drain_fifo(trf, status);
+               } else if (!(status & TRF7970A_IRQ_STATUS_TX)) {
+                       trf7970a_send_err_upstream(trf, -EIO);
+               }
+               break;
+       case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+               if (status != TRF7970A_IRQ_STATUS_TX)
+                       trf7970a_send_err_upstream(trf, -EIO);
+               break;
+       default:
+               dev_err(trf->dev, "%s - Driver in invalid state: %d\n",
+                               __func__, trf->state);
+       }
+
+       mutex_unlock(&trf->lock);
+       return IRQ_HANDLED;
+}
+
+static void trf7970a_issue_eof(struct trf7970a *trf)
+{
+       int ret;
+
+       dev_dbg(trf->dev, "Issuing EOF\n");
+
+       ret = trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET);
+       if (ret)
+               trf7970a_send_err_upstream(trf, ret);
+
+       ret = trf7970a_cmd(trf, TRF7970A_CMD_EOF);
+       if (ret)
+               trf7970a_send_err_upstream(trf, ret);
+
+       trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA;
+
+       dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n",
+                       trf->timeout, trf->state);
+
+       schedule_delayed_work(&trf->timeout_work,
+                       msecs_to_jiffies(trf->timeout));
+}
+
+static void trf7970a_timeout_work_handler(struct work_struct *work)
+{
+       struct trf7970a *trf = container_of(work, struct trf7970a,
+                       timeout_work.work);
+
+       dev_dbg(trf->dev, "Timeout - state: %d, ignore_timeout: %d\n",
+                       trf->state, trf->ignore_timeout);
+
+       mutex_lock(&trf->lock);
+
+       if (trf->ignore_timeout)
+               trf->ignore_timeout = false;
+       else if (trf->state == TRF7970A_ST_WAIT_FOR_RX_DATA_CONT)
+               trf7970a_send_upstream(trf); /* No more rx data so send up */
+       else if (trf->state == TRF7970A_ST_WAIT_TO_ISSUE_EOF)
+               trf7970a_issue_eof(trf);
+       else
+               trf7970a_send_err_upstream(trf, -ETIMEDOUT);
+
+       mutex_unlock(&trf->lock);
+}
+
+static int trf7970a_init(struct trf7970a *trf)
+{
+       int ret;
+
+       dev_dbg(trf->dev, "Initializing device - state: %d\n", trf->state);
+
+       ret = trf7970a_cmd(trf, TRF7970A_CMD_SOFT_INIT);
+       if (ret)
+               goto err_out;
+
+       ret = trf7970a_cmd(trf, TRF7970A_CMD_IDLE);
+       if (ret)
+               goto err_out;
+
+       ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL,
+                       TRF7970A_MODULATOR_DEPTH_OOK);
+       if (ret)
+               goto err_out;
+
+       ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS,
+                       TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 |
+                       TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32);
+       if (ret)
+               goto err_out;
+
+       ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, 0);
+       if (ret)
+               goto err_out;
+
+       trf->special_fcn_reg1 = 0;
+
+       ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL,
+                       TRF7970A_CHIP_STATUS_RF_ON |
+                               TRF7970A_CHIP_STATUS_VRS5_3);
+       if (ret)
+               goto err_out;
+
+       return 0;
+
+err_out:
+       dev_dbg(trf->dev, "Couldn't init device: %d\n", ret);
+       return ret;
+}
+
+static void trf7970a_switch_rf_off(struct trf7970a *trf)
+{
+       dev_dbg(trf->dev, "Switching rf off\n");
+
+       gpio_set_value(trf->en_gpio, 0);
+       gpio_set_value(trf->en2_gpio, 0);
+
+       trf->aborting = false;
+       trf->state = TRF7970A_ST_OFF;
+}
+
+static int trf7970a_switch_rf_on(struct trf7970a *trf)
+{
+       unsigned long delay;
+       int ret;
+
+       dev_dbg(trf->dev, "Switching rf on\n");
+
+       if (trf->powering_up)
+               usleep_range(5000, 6000);
+
+       gpio_set_value(trf->en2_gpio, 1);
+       usleep_range(1000, 2000);
+       gpio_set_value(trf->en_gpio, 1);
+
+       /* The delay between enabling the trf7970a and issuing the first
+        * command is significantly longer the very first time after powering
+        * up.  Make sure the longer delay is only done the first time.
+        */
+       if (trf->powering_up) {
+               delay = 20000;
+               trf->powering_up = false;
+       } else {
+               delay = 5000;
+       }
+
+       usleep_range(delay, delay + 1000);
+
+       ret = trf7970a_init(trf);
+       if (ret)
+               trf7970a_switch_rf_off(trf);
+       else
+               trf->state = TRF7970A_ST_IDLE;
+
+       return ret;
+}
+
+static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+       int ret = 0;
+
+       dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on);
+
+       mutex_lock(&trf->lock);
+
+       if (on) {
+               switch (trf->state) {
+               case TRF7970A_ST_OFF:
+                       ret = trf7970a_switch_rf_on(trf);
+                       break;
+               case TRF7970A_ST_IDLE:
+               case TRF7970A_ST_IDLE_RX_BLOCKED:
+                       break;
+               default:
+                       dev_err(trf->dev, "%s - Invalid request: %d %d\n",
+                                       __func__, trf->state, on);
+                       trf7970a_switch_rf_off(trf);
+               }
+       } else {
+               switch (trf->state) {
+               case TRF7970A_ST_OFF:
+                       break;
+               default:
+                       dev_err(trf->dev, "%s - Invalid request: %d %d\n",
+                                       __func__, trf->state, on);
+                       /* FALLTHROUGH */
+               case TRF7970A_ST_IDLE:
+               case TRF7970A_ST_IDLE_RX_BLOCKED:
+                       trf7970a_switch_rf_off(trf);
+               }
+       }
+
+       mutex_unlock(&trf->lock);
+       return ret;
+}
+
+static int trf7970a_config_rf_tech(struct trf7970a *trf, int tech)
+{
+       int ret = 0;
+
+       dev_dbg(trf->dev, "rf technology: %d\n", tech);
+
+       switch (tech) {
+       case NFC_DIGITAL_RF_TECH_106A:
+               trf->iso_ctrl = TRF7970A_ISO_CTRL_14443A_106;
+               break;
+       case NFC_DIGITAL_RF_TECH_ISO15693:
+               trf->iso_ctrl = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+               break;
+       default:
+               dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
+               return -EINVAL;
+       }
+
+       trf->technology = tech;
+
+       return ret;
+}
+
+static int trf7970a_config_framing(struct trf7970a *trf, int framing)
+{
+       dev_dbg(trf->dev, "framing: %d\n", framing);
+
+       switch (framing) {
+       case NFC_DIGITAL_FRAMING_NFCA_SHORT:
+       case NFC_DIGITAL_FRAMING_NFCA_STANDARD:
+               trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC;
+               trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+               break;
+       case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A:
+       case NFC_DIGITAL_FRAMING_NFCA_T4T:
+       case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY:
+       case NFC_DIGITAL_FRAMING_ISO15693_T5T:
+               trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
+               trf->iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N;
+               break;
+       case NFC_DIGITAL_FRAMING_NFCA_T2T:
+               trf->tx_cmd = TRF7970A_CMD_TRANSMIT;
+               trf->iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N;
+               break;
+       default:
+               dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing);
+               return -EINVAL;
+       }
+
+       trf->framing = framing;
+
+       return trf7970a_write(trf, TRF7970A_ISO_CTRL, trf->iso_ctrl);
+}
+
+static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+               int param)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+       int ret = 0;
+
+       dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param);
+
+       mutex_lock(&trf->lock);
+
+       if (trf->state == TRF7970A_ST_OFF) {
+               ret = trf7970a_switch_rf_on(trf);
+               if (ret)
+                       goto err_out;
+       }
+
+       switch (type) {
+       case NFC_DIGITAL_CONFIG_RF_TECH:
+               ret = trf7970a_config_rf_tech(trf, param);
+               break;
+       case NFC_DIGITAL_CONFIG_FRAMING:
+               ret = trf7970a_config_framing(trf, param);
+               break;
+       default:
+               dev_dbg(trf->dev, "Unknown type: %d\n", type);
+               ret = -EINVAL;
+       }
+
+err_out:
+       mutex_unlock(&trf->lock);
+       return ret;
+}
+
+static int trf7970a_is_iso15693_write_or_lock(u8 cmd)
+{
+       switch (cmd) {
+       case ISO15693_CMD_WRITE_SINGLE_BLOCK:
+       case ISO15693_CMD_LOCK_BLOCK:
+       case ISO15693_CMD_WRITE_MULTIPLE_BLOCK:
+       case ISO15693_CMD_WRITE_AFI:
+       case ISO15693_CMD_LOCK_AFI:
+       case ISO15693_CMD_WRITE_DSFID:
+       case ISO15693_CMD_LOCK_DSFID:
+               return 1;
+               break;
+       default:
+               return 0;
+       }
+}
+
+static int trf7970a_per_cmd_config(struct trf7970a *trf, struct sk_buff *skb)
+{
+       u8 *req = skb->data;
+       u8 special_fcn_reg1, iso_ctrl;
+       int ret;
+
+       trf->issue_eof = false;
+
+       /* When issuing Type 2 read command, make sure the '4_bit_RX' bit in
+        * special functions register 1 is cleared; otherwise, it's a write or
+        * sector select command and '4_bit_RX' must be set.
+        *
+        * When issuing an ISO 15693 command, inspect the flags byte to see
+        * what speed to use.  Also, remember if the OPTION flag is set on
+        * a Type 5 write or lock command so the driver will know that it
+        * has to send an EOF in order to get a response.
+        */
+       if ((trf->technology == NFC_DIGITAL_RF_TECH_106A) &&
+                       (trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T)) {
+               if (req[0] == NFC_T2T_CMD_READ)
+                       special_fcn_reg1 = 0;
+               else
+                       special_fcn_reg1 = TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX;
+
+               if (special_fcn_reg1 != trf->special_fcn_reg1) {
+                       ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1,
+                                       special_fcn_reg1);
+                       if (ret)
+                               return ret;
+
+                       trf->special_fcn_reg1 = special_fcn_reg1;
+               }
+       } else if (trf->technology == NFC_DIGITAL_RF_TECH_ISO15693) {
+               iso_ctrl = trf->iso_ctrl & ~TRF7970A_ISO_CTRL_RFID_SPEED_MASK;
+
+               switch (req[0] & ISO15693_REQ_FLAG_SPEED_MASK) {
+               case 0x00:
+                       iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_662;
+                       break;
+               case ISO15693_REQ_FLAG_SUB_CARRIER:
+                       iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a;
+                       break;
+               case ISO15693_REQ_FLAG_DATA_RATE:
+                       iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
+                       break;
+               case (ISO15693_REQ_FLAG_SUB_CARRIER |
+                               ISO15693_REQ_FLAG_DATA_RATE):
+                       iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669;
+                       break;
+               }
+
+               if (iso_ctrl != trf->iso_ctrl) {
+                       ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl);
+                       if (ret)
+                               return ret;
+
+                       trf->iso_ctrl = iso_ctrl;
+               }
+
+               if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) &&
+                               trf7970a_is_iso15693_write_or_lock(req[1]) &&
+                               (req[0] & ISO15693_REQ_FLAG_OPTION))
+                       trf->issue_eof = true;
+       }
+
+       return 0;
+}
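A worked example of the ISO 15693 speed selection above: a request whose flags byte is 0x22 (ISO15693_REQ_FLAG_ADDRESS | ISO15693_REQ_FLAG_DATA_RATE) masks down to ISO15693_REQ_FLAG_DATA_RATE, i.e. single sub-carrier at the high data rate.  A sketch restating just that mapping with a hypothetical helper (the driver ORs the result into iso_ctrl instead of returning it):

/* Flags-byte speed bits to ISO Control value, as selected in
 * trf7970a_per_cmd_config() above; e.g. flags 0x22 selects the single
 * sub-carrier, high data rate, 1-out-of-4 setting.
 */
static u8 demo_iso15693_speed(u8 flags)
{
	switch (flags & ISO15693_REQ_FLAG_SPEED_MASK) {
	case 0:
		return TRF7970A_ISO_CTRL_15693_SGL_1OF4_662;
	case ISO15693_REQ_FLAG_DATA_RATE:
		return TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648;
	case ISO15693_REQ_FLAG_SUB_CARRIER:
		return TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a;
	default:
		return TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669;
	}
}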
+
+static int trf7970a_in_send_cmd(struct nfc_digital_dev *ddev,
+               struct sk_buff *skb, u16 timeout,
+               nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+       char *prefix;
+       unsigned int len;
+       int ret;
+
+       dev_dbg(trf->dev, "New request - state: %d, timeout: %d ms, len: %d\n",
+                       trf->state, timeout, skb->len);
+
+       if (skb->len > TRF7970A_TX_MAX)
+               return -EINVAL;
+
+       mutex_lock(&trf->lock);
+
+       if ((trf->state != TRF7970A_ST_IDLE) &&
+                       (trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) {
+               dev_err(trf->dev, "%s - Bogus state: %d\n", __func__,
+                               trf->state);
+               ret = -EIO;
+               goto out_err;
+       }
+
+       if (trf->aborting) {
+               dev_dbg(trf->dev, "Abort process complete\n");
+               trf->aborting = false;
+               ret = -ECANCELED;
+               goto out_err;
+       }
+
+       trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE,
+                       GFP_KERNEL);
+       if (!trf->rx_skb) {
+               dev_dbg(trf->dev, "Can't alloc rx_skb\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       if (trf->state == TRF7970A_ST_IDLE_RX_BLOCKED) {
+               ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX);
+               if (ret)
+                       goto out_err;
+
+               trf->state = TRF7970A_ST_IDLE;
+       }
+
+       ret = trf7970a_per_cmd_config(trf, skb);
+       if (ret)
+               goto out_err;
+
+       trf->ddev = ddev;
+       trf->tx_skb = skb;
+       trf->cb = cb;
+       trf->cb_arg = arg;
+       trf->timeout = timeout;
+       trf->ignore_timeout = false;
+
+       len = skb->len;
+       prefix = skb_push(skb, TRF7970A_TX_SKB_HEADROOM);
+
+       /* TX data must be prefixed with a FIFO reset cmd, a cmd that depends
+        * on what the current framing is, the address of the TX length byte 1
+        * register (0x1d), and the 2 byte length of the data to be transmitted.
+        */
+       prefix[0] = TRF7970A_CMD_BIT_CTRL |
+                       TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_FIFO_RESET);
+       prefix[1] = TRF7970A_CMD_BIT_CTRL |
+                       TRF7970A_CMD_BIT_OPCODE(trf->tx_cmd);
+       prefix[2] = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_TX_LENGTH_BYTE1;
+
+       if (trf->framing == NFC_DIGITAL_FRAMING_NFCA_SHORT) {
+               prefix[3] = 0x00;
+               prefix[4] = 0x0f; /* 7 bits */
+       } else {
+               prefix[3] = (len & 0xf00) >> 4;
+               prefix[3] |= ((len & 0xf0) >> 4);
+               prefix[4] = ((len & 0x0f) << 4);
+       }
+
+       len = min_t(int, skb->len, TRF7970A_FIFO_SIZE);
+
+       usleep_range(1000, 2000);
+
+       ret = trf7970a_transmit(trf, skb, len);
+       if (ret) {
+               kfree_skb(trf->rx_skb);
+               trf->rx_skb = NULL;
+       }
+
+out_err:
+       mutex_unlock(&trf->lock);
+       return ret;
+}
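A worked example of the prefix length packing above, assuming standard framing and a 37-byte (0x025) payload: the three length nibbles 0, 2 and 5 are spread across TX length bytes 1 and 2, and the low nibble of prefix[4] (used above for the 7-bit short-frame case) stays zero.  A sketch of just the packing step, with a hypothetical helper name:

/* Same packing as trf7970a_in_send_cmd(): a 12-bit length split into
 * three nibbles, e.g. len = 0x025 gives prefix[3] = 0x02, prefix[4] = 0x50.
 */
static void demo_pack_tx_len(u8 *prefix, unsigned int len)
{
	prefix[3] = ((len & 0xf00) >> 4) | ((len & 0x0f0) >> 4);
	prefix[4] = (len & 0x00f) << 4;
}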
+
+static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev,
+               int type, int param)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+       dev_dbg(trf->dev, "Unsupported interface\n");
+
+       return -EINVAL;
+}
+
+static int trf7970a_tg_send_cmd(struct nfc_digital_dev *ddev,
+               struct sk_buff *skb, u16 timeout,
+               nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+       dev_dbg(trf->dev, "Unsupported interface\n");
+
+       return -EINVAL;
+}
+
+static int trf7970a_tg_listen(struct nfc_digital_dev *ddev,
+               u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+       dev_dbg(trf->dev, "Unsupported interface\n");
+
+       return -EINVAL;
+}
+
+static int trf7970a_tg_listen_mdaa(struct nfc_digital_dev *ddev,
+               struct digital_tg_mdaa_params *mdaa_params,
+               u16 timeout, nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+       dev_dbg(trf->dev, "Unsupported interface\n");
+
+       return -EINVAL;
+}
+
+static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev)
+{
+       struct trf7970a *trf = nfc_digital_get_drvdata(ddev);
+
+       dev_dbg(trf->dev, "Abort process initiated\n");
+
+       mutex_lock(&trf->lock);
+       trf->aborting = true;
+       mutex_unlock(&trf->lock);
+}
+
+static struct nfc_digital_ops trf7970a_nfc_ops = {
+       .in_configure_hw        = trf7970a_in_configure_hw,
+       .in_send_cmd            = trf7970a_in_send_cmd,
+       .tg_configure_hw        = trf7970a_tg_configure_hw,
+       .tg_send_cmd            = trf7970a_tg_send_cmd,
+       .tg_listen              = trf7970a_tg_listen,
+       .tg_listen_mdaa         = trf7970a_tg_listen_mdaa,
+       .switch_rf              = trf7970a_switch_rf,
+       .abort_cmd              = trf7970a_abort_cmd,
+};
+
+static int trf7970a_probe(struct spi_device *spi)
+{
+       struct device_node *np = spi->dev.of_node;
+       const struct spi_device_id *id = spi_get_device_id(spi);
+       struct trf7970a *trf;
+       int ret;
+
+       if (!np) {
+               dev_err(&spi->dev, "No Device Tree entry\n");
+               return -EINVAL;
+       }
+
+       trf = devm_kzalloc(&spi->dev, sizeof(*trf), GFP_KERNEL);
+       if (!trf)
+               return -ENOMEM;
+
+       trf->state = TRF7970A_ST_OFF;
+       trf->dev = &spi->dev;
+       trf->spi = spi;
+       trf->quirks = id->driver_data;
+
+       spi->mode = SPI_MODE_1;
+       spi->bits_per_word = 8;
+
+       /* There are two enable pins - both must be present */
+       trf->en_gpio = of_get_named_gpio(np, "ti,enable-gpios", 0);
+       if (!gpio_is_valid(trf->en_gpio)) {
+               dev_err(trf->dev, "No EN GPIO property\n");
+               return trf->en_gpio;
+       }
+
+       ret = devm_gpio_request_one(trf->dev, trf->en_gpio,
+                       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN");
+       if (ret) {
+               dev_err(trf->dev, "Can't request EN GPIO: %d\n", ret);
+               return ret;
+       }
+
+       trf->en2_gpio = of_get_named_gpio(np, "ti,enable-gpios", 1);
+       if (!gpio_is_valid(trf->en2_gpio)) {
+               dev_err(trf->dev, "No EN2 GPIO property\n");
+               return trf->en2_gpio;
+       }
+
+       ret = devm_gpio_request_one(trf->dev, trf->en2_gpio,
+                       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "EN2");
+       if (ret) {
+               dev_err(trf->dev, "Can't request EN2 GPIO: %d\n", ret);
+               return ret;
+       }
+
+       ret = devm_request_threaded_irq(trf->dev, spi->irq, NULL,
+                       trf7970a_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                       "trf7970a", trf);
+       if (ret) {
+               dev_err(trf->dev, "Can't request IRQ#%d: %d\n", spi->irq, ret);
+               return ret;
+       }
+
+       mutex_init(&trf->lock);
+       INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
+
+       trf->regulator = devm_regulator_get(&spi->dev, "vin");
+       if (IS_ERR(trf->regulator)) {
+               ret = PTR_ERR(trf->regulator);
+               dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
+               goto err_destroy_lock;
+       }
+
+       ret = regulator_enable(trf->regulator);
+       if (ret) {
+               dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
+               goto err_destroy_lock;
+       }
+
+       trf->powering_up = true;
+
+       trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops,
+                       TRF7970A_SUPPORTED_PROTOCOLS,
+                       NFC_DIGITAL_DRV_CAPS_IN_CRC, TRF7970A_TX_SKB_HEADROOM,
+                       0);
+       if (!trf->ddev) {
+               dev_err(trf->dev, "Can't allocate NFC digital device\n");
+               ret = -ENOMEM;
+               goto err_disable_regulator;
+       }
+
+       nfc_digital_set_parent_dev(trf->ddev, trf->dev);
+       nfc_digital_set_drvdata(trf->ddev, trf);
+       spi_set_drvdata(spi, trf);
+
+       ret = nfc_digital_register_device(trf->ddev);
+       if (ret) {
+               dev_err(trf->dev, "Can't register NFC digital device: %d\n",
+                               ret);
+               goto err_free_ddev;
+       }
+
+       return 0;
+
+err_free_ddev:
+       nfc_digital_free_device(trf->ddev);
+err_disable_regulator:
+       regulator_disable(trf->regulator);
+err_destroy_lock:
+       mutex_destroy(&trf->lock);
+       return ret;
+}
+
+static int trf7970a_remove(struct spi_device *spi)
+{
+       struct trf7970a *trf = spi_get_drvdata(spi);
+
+       mutex_lock(&trf->lock);
+
+       trf7970a_switch_rf_off(trf);
+       trf7970a_init(trf);
+
+       switch (trf->state) {
+       case TRF7970A_ST_WAIT_FOR_TX_FIFO:
+       case TRF7970A_ST_WAIT_FOR_RX_DATA:
+       case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT:
+       case TRF7970A_ST_WAIT_TO_ISSUE_EOF:
+               trf7970a_send_err_upstream(trf, -ECANCELED);
+               break;
+       default:
+               break;
+       }
+
+       mutex_unlock(&trf->lock);
+
+       nfc_digital_unregister_device(trf->ddev);
+       nfc_digital_free_device(trf->ddev);
+
+       regulator_disable(trf->regulator);
+
+       mutex_destroy(&trf->lock);
+
+       return 0;
+}
+
+static const struct spi_device_id trf7970a_id_table[] = {
+       { "trf7970a", TRF7970A_QUIRK_IRQ_STATUS_READ_ERRATA },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, trf7970a_id_table);
+
+static struct spi_driver trf7970a_spi_driver = {
+       .probe          = trf7970a_probe,
+       .remove         = trf7970a_remove,
+       .id_table       = trf7970a_id_table,
+       .driver         = {
+               .name   = "trf7970a",
+               .owner  = THIS_MODULE,
+       },
+};
+
+module_spi_driver(trf7970a_spi_driver);
+
+MODULE_AUTHOR("Mark A. Greer <mgreer@animalcreek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI trf7970a RFID/NFC Transceiver Driver");
index 5b3c24f3cde57e8f0c26f578c1ef12fd0d58d650..9a95831bd065c2ba1c5af83f6a73927a3b9d8181 100644 (file)
@@ -43,6 +43,23 @@ static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
        }
 }
 
+/* Extract the clause 22 phy ID from the compatible string of the form
+ * ethernet-phy-idAAAA.BBBB */
+static int of_get_phy_id(struct device_node *device, u32 *phy_id)
+{
+       struct property *prop;
+       const char *cp;
+       unsigned int upper, lower;
+
+       of_property_for_each_string(device, "compatible", prop, cp) {
+               if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) == 2) {
+                       *phy_id = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);
+                       return 0;
+               }
+       }
+       return -EINVAL;
+}
+
 static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
                                   u32 addr)
 {
@@ -50,11 +67,15 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
        bool is_c45;
        int rc;
        u32 max_speed = 0;
+       u32 phy_id;
 
        is_c45 = of_device_is_compatible(child,
                                         "ethernet-phy-ieee802.3-c45");
 
-       phy = get_phy_device(mdio, addr, is_c45);
+       if (!is_c45 && !of_get_phy_id(child, &phy_id))
+               phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
+       else
+               phy = get_phy_device(mdio, addr, is_c45);
        if (!phy || IS_ERR(phy))
                return 1;
 
index a208a457558c758a47ac7c0a2c43ca6e6426c85c..84215c1929c400d53af1ce0c1938fedd87d045b5 100644 (file)
 #include <linux/phy.h>
 #include <linux/export.h>
 
-/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
- * into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
- */
-static const char *phy_modes[] = {
-       [PHY_INTERFACE_MODE_NA]         = "",
-       [PHY_INTERFACE_MODE_MII]        = "mii",
-       [PHY_INTERFACE_MODE_GMII]       = "gmii",
-       [PHY_INTERFACE_MODE_SGMII]      = "sgmii",
-       [PHY_INTERFACE_MODE_TBI]        = "tbi",
-       [PHY_INTERFACE_MODE_REVMII]     = "rev-mii",
-       [PHY_INTERFACE_MODE_RMII]       = "rmii",
-       [PHY_INTERFACE_MODE_RGMII]      = "rgmii",
-       [PHY_INTERFACE_MODE_RGMII_ID]   = "rgmii-id",
-       [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
-       [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
-       [PHY_INTERFACE_MODE_RTBI]       = "rtbi",
-       [PHY_INTERFACE_MODE_SMII]       = "smii",
-       [PHY_INTERFACE_MODE_XGMII]      = "xgmii",
-};
-
 /**
  * of_get_phy_mode - Get phy mode for given device_node
  * @np:        Pointer to the given device_node
@@ -49,8 +27,8 @@ int of_get_phy_mode(struct device_node *np)
        if (err < 0)
                return err;
 
-       for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
-               if (!strcasecmp(pm, phy_modes[i]))
+       for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+               if (!strcasecmp(pm, phy_modes(i)))
                        return i;
 
        return -ENODEV;
index 34a0c607318eb2173e4d1e44cb16aff2c889164d..419056d7887ec9f516e46699bdc0f46dfe85eeb7 100644 (file)
 
 #include "ptp_private.h"
 
+static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
+                              enum ptp_pin_function func, unsigned int chan)
+{
+       struct ptp_clock_request rq;
+       int err = 0;
+
+       memset(&rq, 0, sizeof(rq));
+
+       switch (func) {
+       case PTP_PF_NONE:
+               break;
+       case PTP_PF_EXTTS:
+               rq.type = PTP_CLK_REQ_EXTTS;
+               rq.extts.index = chan;
+               err = ops->enable(ops, &rq, 0);
+               break;
+       case PTP_PF_PEROUT:
+               rq.type = PTP_CLK_REQ_PEROUT;
+               rq.perout.index = chan;
+               err = ops->enable(ops, &rq, 0);
+               break;
+       case PTP_PF_PHYSYNC:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return err;
+}
+
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+                   enum ptp_pin_function func, unsigned int chan)
+{
+       struct ptp_clock_info *info = ptp->info;
+       struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
+       unsigned int i;
+
+       /* Check to see if any other pin previously had this function. */
+       for (i = 0; i < info->n_pins; i++) {
+               if (info->pin_config[i].func == func &&
+                   info->pin_config[i].chan == chan) {
+                       pin1 = &info->pin_config[i];
+                       break;
+               }
+       }
+       if (pin1 && i == pin)
+               return 0;
+
+       /* Check the desired function and channel. */
+       switch (func) {
+       case PTP_PF_NONE:
+               break;
+       case PTP_PF_EXTTS:
+               if (chan >= info->n_ext_ts)
+                       return -EINVAL;
+               break;
+       case PTP_PF_PEROUT:
+               if (chan >= info->n_per_out)
+                       return -EINVAL;
+               break;
+       case PTP_PF_PHYSYNC:
+               pr_err("sorry, cannot reassign the calibration pin\n");
+               return -EINVAL;
+       default:
+               return -EINVAL;
+       }
+
+       if (pin2->func == PTP_PF_PHYSYNC) {
+               pr_err("sorry, cannot reprogram the calibration pin\n");
+               return -EINVAL;
+       }
+
+       if (info->verify(info, pin, func, chan)) {
+               pr_err("driver cannot use function %u on pin %u\n", func, chan);
+               return -EOPNOTSUPP;
+       }
+
+       /* Disable whatever function was previously assigned. */
+       if (pin1) {
+               ptp_disable_pinfunc(info, func, chan);
+               pin1->func = PTP_PF_NONE;
+               pin1->chan = 0;
+       }
+       ptp_disable_pinfunc(info, pin2->func, pin2->chan);
+       pin2->func = func;
+       pin2->chan = chan;
+
+       return 0;
+}
+
 int ptp_open(struct posix_clock *pc, fmode_t fmode)
 {
        return 0;
@@ -35,12 +125,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
        struct ptp_clock_caps caps;
        struct ptp_clock_request req;
        struct ptp_sys_offset *sysoff = NULL;
+       struct ptp_pin_desc pd;
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        struct ptp_clock_info *ops = ptp->info;
        struct ptp_clock_time *pct;
        struct timespec ts;
        int enable, err = 0;
-       unsigned int i;
+       unsigned int i, pin_index;
 
        switch (cmd) {
 
@@ -51,6 +142,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                caps.n_ext_ts = ptp->info->n_ext_ts;
                caps.n_per_out = ptp->info->n_per_out;
                caps.pps = ptp->info->pps;
+               caps.n_pins = ptp->info->n_pins;
                if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
                        err = -EFAULT;
                break;
@@ -126,6 +218,40 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EFAULT;
                break;
 
+       case PTP_PIN_GETFUNC:
+               if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+                       err = -EFAULT;
+                       break;
+               }
+               pin_index = pd.index;
+               if (pin_index >= ops->n_pins) {
+                       err = -EINVAL;
+                       break;
+               }
+               if (mutex_lock_interruptible(&ptp->pincfg_mux))
+                       return -ERESTARTSYS;
+               pd = ops->pin_config[pin_index];
+               mutex_unlock(&ptp->pincfg_mux);
+               if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
+                       err = -EFAULT;
+               break;
+
+       case PTP_PIN_SETFUNC:
+               if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
+                       err = -EFAULT;
+                       break;
+               }
+               pin_index = pd.index;
+               if (pin_index >= ops->n_pins) {
+                       err = -EINVAL;
+                       break;
+               }
+               if (mutex_lock_interruptible(&ptp->pincfg_mux))
+                       return -ERESTARTSYS;
+               err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
+               mutex_unlock(&ptp->pincfg_mux);
+               break;
+
        default:
                err = -ENOTTY;
                break;
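
Editorial illustration, not part of the patch: the PTP_PIN_GETFUNC/PTP_PIN_SETFUNC handlers above bounds-check pd.index against n_pins and then call ptp_set_pinfunc() under pincfg_mux. A hedged user-space sketch of the SETFUNC path follows; the device node /dev/ptp0 and the pin/channel indices are assumptions for the example, not values taken from the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_pin_desc desc;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed clock device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&desc, 0, sizeof(desc));
	desc.index = 0;			/* pin number, checked against n_pins */
	desc.func = PTP_PF_EXTTS;	/* request the external-timestamp function */
	desc.chan = 0;			/* channel, checked against n_ext_ts */

	if (ioctl(fd, PTP_PIN_SETFUNC, &desc))
		perror("PTP_PIN_SETFUNC");

	close(fd);
	return 0;
}
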
index a8319b26664312eed6ab4735fd3f437cc2186ddd..e25d2bc898e5b6e4eb7b43e6df87129a2a19781e 100644 (file)
@@ -169,6 +169,7 @@ static void delete_ptp_clock(struct posix_clock *pc)
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
 
        mutex_destroy(&ptp->tsevq_mux);
+       mutex_destroy(&ptp->pincfg_mux);
        ida_simple_remove(&ptp_clocks_map, ptp->index);
        kfree(ptp);
 }
@@ -203,6 +204,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        ptp->index = index;
        spin_lock_init(&ptp->tsevq.lock);
        mutex_init(&ptp->tsevq_mux);
+       mutex_init(&ptp->pincfg_mux);
        init_waitqueue_head(&ptp->tsev_wq);
 
        /* Create a new device in our class. */
@@ -249,6 +251,7 @@ no_sysfs:
        device_destroy(ptp_class, ptp->devid);
 no_device:
        mutex_destroy(&ptp->tsevq_mux);
+       mutex_destroy(&ptp->pincfg_mux);
 no_slot:
        kfree(ptp);
 no_memory:
@@ -305,6 +308,26 @@ int ptp_clock_index(struct ptp_clock *ptp)
 }
 EXPORT_SYMBOL(ptp_clock_index);
 
+int ptp_find_pin(struct ptp_clock *ptp,
+                enum ptp_pin_function func, unsigned int chan)
+{
+       struct ptp_pin_desc *pin = NULL;
+       int i;
+
+       mutex_lock(&ptp->pincfg_mux);
+       for (i = 0; i < ptp->info->n_pins; i++) {
+               if (ptp->info->pin_config[i].func == func &&
+                   ptp->info->pin_config[i].chan == chan) {
+                       pin = &ptp->info->pin_config[i];
+                       break;
+               }
+       }
+       mutex_unlock(&ptp->pincfg_mux);
+
+       return pin ? i : -1;
+}
+EXPORT_SYMBOL(ptp_find_pin);
+
 /* module operations */
 
 static void __exit ptp_exit(void)
index 4a08727fcaf39b55f409c005863603215c9b27cb..604d340f20956bc1d0df55cc692d76d0f313df02 100644 (file)
@@ -244,6 +244,7 @@ static struct ptp_clock_info ptp_ixp_caps = {
        .name           = "IXP46X timer",
        .max_adj        = 66666655,
        .n_ext_ts       = N_EXT_TS,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = ptp_ixp_adjfreq,
        .adjtime        = ptp_ixp_adjtime,
index 71a2559278d7a0d41bf773edcf5219f35462ddb1..90a106308c4f7ee21fd12565cbc1ff429f2eebde 100644 (file)
@@ -514,6 +514,7 @@ static struct ptp_clock_info ptp_pch_caps = {
        .name           = "PCH timer",
        .max_adj        = 50000000,
        .n_ext_ts       = N_EXT_TS,
+       .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = ptp_pch_adjfreq,
        .adjtime        = ptp_pch_adjtime,
index df03f2e30ad91068b0beb63d34e742eeff4f0404..9c5d41421b6510f55298b70020f33578e82e97e7 100644 (file)
@@ -48,8 +48,12 @@ struct ptp_clock {
        long dialed_frequency; /* remembers the frequency adjustment */
        struct timestamp_event_queue tsevq; /* simple fifo for time stamps */
        struct mutex tsevq_mux; /* one process at a time reading the fifo */
+       struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
        wait_queue_head_t tsev_wq;
        int defunct; /* tells readers to go away when clock is being removed */
+       struct device_attribute *pin_dev_attr;
+       struct attribute **pin_attr;
+       struct attribute_group pin_attr_group;
 };
 
 /*
@@ -69,6 +73,10 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
  * see ptp_chardev.c
  */
 
+/* caller must hold pincfg_mux */
+int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
+                   enum ptp_pin_function func, unsigned int chan);
+
 long ptp_ioctl(struct posix_clock *pc,
               unsigned int cmd, unsigned long arg);
 
index 13ec5311746a01948e7c09dcf7c1db7bed421f47..302e626fe6b01777523c371e2760ee8f48acc68b 100644 (file)
@@ -18,6 +18,7 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/capability.h>
+#include <linux/slab.h>
 
 #include "ptp_private.h"
 
@@ -42,6 +43,7 @@ PTP_SHOW_INT(max_adjustment, max_adj);
 PTP_SHOW_INT(n_alarms, n_alarm);
 PTP_SHOW_INT(n_external_timestamps, n_ext_ts);
 PTP_SHOW_INT(n_periodic_outputs, n_per_out);
+PTP_SHOW_INT(n_programmable_pins, n_pins);
 PTP_SHOW_INT(pps_available, pps);
 
 static struct attribute *ptp_attrs[] = {
@@ -50,6 +52,7 @@ static struct attribute *ptp_attrs[] = {
        &dev_attr_n_alarms.attr,
        &dev_attr_n_external_timestamps.attr,
        &dev_attr_n_periodic_outputs.attr,
+       &dev_attr_n_programmable_pins.attr,
        &dev_attr_pps_available.attr,
        NULL,
 };
@@ -175,6 +178,63 @@ out:
        return err;
 }
 
+static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
+{
+       int i;
+       for (i = 0; i < ptp->info->n_pins; i++) {
+               if (!strcmp(ptp->info->pin_config[i].name, name))
+                       return i;
+       }
+       return -1;
+}
+
+static ssize_t ptp_pin_show(struct device *dev, struct device_attribute *attr,
+                           char *page)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       unsigned int func, chan;
+       int index;
+
+       index = ptp_pin_name2index(ptp, attr->attr.name);
+       if (index < 0)
+               return -EINVAL;
+
+       if (mutex_lock_interruptible(&ptp->pincfg_mux))
+               return -ERESTARTSYS;
+
+       func = ptp->info->pin_config[index].func;
+       chan = ptp->info->pin_config[index].chan;
+
+       mutex_unlock(&ptp->pincfg_mux);
+
+       return snprintf(page, PAGE_SIZE, "%u %u\n", func, chan);
+}
+
+static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
+{
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       unsigned int func, chan;
+       int cnt, err, index;
+
+       cnt = sscanf(buf, "%u %u", &func, &chan);
+       if (cnt != 2)
+               return -EINVAL;
+
+       index = ptp_pin_name2index(ptp, attr->attr.name);
+       if (index < 0)
+               return -EINVAL;
+
+       if (mutex_lock_interruptible(&ptp->pincfg_mux))
+               return -ERESTARTSYS;
+       err = ptp_set_pinfunc(ptp, index, func, chan);
+       mutex_unlock(&ptp->pincfg_mux);
+       if (err)
+               return err;
+
+       return count;
+}
+
 static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
 static DEVICE_ATTR(fifo,         0444, extts_fifo_show, NULL);
 static DEVICE_ATTR(period,       0220, NULL, period_store);
@@ -195,9 +255,56 @@ int ptp_cleanup_sysfs(struct ptp_clock *ptp)
        if (info->pps)
                device_remove_file(dev, &dev_attr_pps_enable);
 
+       if (info->n_pins) {
+               sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group);
+               kfree(ptp->pin_attr);
+               kfree(ptp->pin_dev_attr);
+       }
        return 0;
 }
 
+static int ptp_populate_pins(struct ptp_clock *ptp)
+{
+       struct device *dev = ptp->dev;
+       struct ptp_clock_info *info = ptp->info;
+       int err = -ENOMEM, i, n_pins = info->n_pins;
+
+       ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr),
+                                   GFP_KERNEL);
+       if (!ptp->pin_dev_attr)
+               goto no_dev_attr;
+
+       ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *),
+                               GFP_KERNEL);
+       if (!ptp->pin_attr)
+               goto no_pin_attr;
+
+       for (i = 0; i < n_pins; i++) {
+               struct device_attribute *da = &ptp->pin_dev_attr[i];
+               sysfs_attr_init(&da->attr);
+               da->attr.name = info->pin_config[i].name;
+               da->attr.mode = 0644;
+               da->show = ptp_pin_show;
+               da->store = ptp_pin_store;
+               ptp->pin_attr[i] = &da->attr;
+       }
+
+       ptp->pin_attr_group.name = "pins";
+       ptp->pin_attr_group.attrs = ptp->pin_attr;
+
+       err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group);
+       if (err)
+               goto no_group;
+       return 0;
+
+no_group:
+       kfree(ptp->pin_attr);
+no_pin_attr:
+       kfree(ptp->pin_dev_attr);
+no_dev_attr:
+       return err;
+}
+
 int ptp_populate_sysfs(struct ptp_clock *ptp)
 {
        struct device *dev = ptp->dev;
@@ -222,7 +329,15 @@ int ptp_populate_sysfs(struct ptp_clock *ptp)
                if (err)
                        goto out4;
        }
+       if (info->n_pins) {
+               err = ptp_populate_pins(ptp);
+               if (err)
+                       goto out5;
+       }
        return 0;
+out5:
+       if (info->pps)
+               device_remove_file(dev, &dev_attr_pps_enable);
 out4:
        if (info->n_per_out)
                device_remove_file(dev, &dev_attr_period);
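
Editorial illustration, not part of the patch: ptp_populate_pins() above exposes each pin as an attribute in a "pins" group on the PTP class device, and ptp_pin_store() parses a "<func> <chan>" pair. A small user-space sketch of that interface; the clock index, the pin name "SDP0" and the assumption that PTP_PF_EXTTS is numeric value 1 are all illustrative, not taken from the patch.

#include <stdio.h>

int main(void)
{
	/* Path and pin name are assumed; the group name "pins" comes from
	 * ptp_populate_pins() above, the "%u %u" format from ptp_pin_store().
	 */
	FILE *f = fopen("/sys/class/ptp/ptp0/pins/SDP0", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%u %u\n", 1, 0);	/* assumed: func 1 = PTP_PF_EXTTS, channel 0 */
	fclose(f);
	return 0;
}
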
index a0de045eb227d5eeb845bf9f6c925bbfcfe73634..5333b2c018e781541905e855c7cf3ff0a5d84a9e 100644 (file)
@@ -854,8 +854,11 @@ static inline int qeth_get_micros(void)
 
 static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
-       struct ethhdr *ehdr = (struct ethhdr *)skb->data;
-       switch (ehdr->h_proto) {
+       __be16 *p = &((struct ethhdr *)skb->data)->h_proto;
+
+       if (*p == ETH_P_8021Q)
+               p += 2;
+       switch (*p) {
        case ETH_P_IPV6:
                return 6;
        case ETH_P_IP:
index 795ed61a549632adc16b808622f75c911f66634d..e1c3a3828cb1f788118553b6bab9de5ab4aee14a 100644 (file)
@@ -4610,8 +4610,8 @@ out:
 }
 EXPORT_SYMBOL_GPL(qeth_query_oat_command);
 
-int qeth_query_card_info_cb(struct qeth_card *card,
-                       struct qeth_reply *reply, unsigned long data)
+static int qeth_query_card_info_cb(struct qeth_card *card,
+                                  struct qeth_reply *reply, unsigned long data)
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_query_card_info *card_info;
index 908d82529ee9c3e04a42caf92b9e0787836ef7d0..8dea3f12ccc1714defe7d4d65869817dd6b69135 100644 (file)
@@ -241,7 +241,7 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card,
 }
 
 static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
-                       struct sk_buff *skb, int ipv, int cast_type)
+                       struct sk_buff *skb, int cast_type)
 {
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
 
@@ -762,7 +762,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                goto tx_drop;
                        elements_needed++;
                        skb_reset_mac_header(new_skb);
-                       qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+                       qeth_l2_fill_header(card, hdr, new_skb, cast_type);
                        hdr->hdr.l2.pkt_length = new_skb->len;
                        memcpy(((char *)hdr) + sizeof(struct qeth_hdr),
                                skb_mac_header(new_skb), ETH_HLEN);
@@ -775,7 +775,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        hdr = (struct qeth_hdr *)skb_push(new_skb,
                                                sizeof(struct qeth_hdr));
                        skb_set_mac_header(new_skb, sizeof(struct qeth_hdr));
-                       qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
+                       qeth_l2_fill_header(card, hdr, new_skb, cast_type);
                }
        }
 
index 47541e1608f35404cd80d93f768cecf67e549ba2..ebb3ebc7176b1e0fdc100329fbc5127ca48d6499 100644 (file)
@@ -554,7 +554,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
                printk_ratelimited("%s: Failed to allocate a work queue entry\n",
                                   dev->name);
                priv->stats.tx_dropped++;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return 0;
        }
 
@@ -565,7 +565,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
                                   dev->name);
                cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
                priv->stats.tx_dropped++;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return 0;
        }
 
@@ -682,7 +682,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
                             work->grp);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += skb->len;
-       dev_kfree_skb(skb);
+       dev_consume_skb_any(skb);
        return 0;
 }
 
index d387f13ea7dc158e9034d843a2fcf2aeb0e7dfa3..0cc32c60ddee74e4ff83c488a3ed6f39cfb99ba7 100644 (file)
@@ -286,7 +286,6 @@ static void rtl_rate_free_sta(void *rtlpriv,
 }
 
 static struct rate_control_ops rtl_rate_ops = {
-       .module = NULL,
        .name = "rtl_rc",
        .alloc = rtl_rate_alloc,
        .free = rtl_rate_free,
index 75ae4387fe19ff8f34cb828118dba6bab5cae1a4..963b55f661c8d370a05c09be7bbfca2e394c369e 100644 (file)
@@ -616,7 +616,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
                                return false;
                }
 
-               if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+               if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
                        (ieee80211_has_protected(hdr->frame_control)))
                        rx_status->flag &= ~RX_FLAG_DECRYPTED;
                else
index 965b1c0a475325d2ce79c9a4760328e1c20fd162..69bc0a01ae14e07612c970870b2d7e12b7f655eb 100644 (file)
@@ -715,7 +715,7 @@ int wl_send( struct wl_private *lp )
 
         /* Free the skb and perform queue cleanup, as the buffer was
             transmitted successfully */
-        dev_kfree_skb( lp->txF.skb );
+        dev_consume_skb_any( lp->txF.skb );
 
         lp->txF.skb = NULL;
         lp->txF.port = 0;
@@ -1730,7 +1730,7 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
             WL_WDS_NETIF_STOP_QUEUE( lp );
             lp->netif_queue_on = FALSE;
 
-            dev_kfree_skb( skb );
+            dev_kfree_skb_any( skb );
             return 0;
         }
     }
@@ -1755,7 +1755,7 @@ int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
 
     /* Free the skb and perform queue cleanup, as the buffer was
             transmitted successfully */
-    dev_kfree_skb( skb );
+    dev_consume_skb_any( skb );
 
     return TRUE;
 } // wl_send_dma
index a7d24c95191d15826ac808b226c26ae71e12ac98..7dd2b95416e87975f01670ba955865e89390c0b2 100644 (file)
@@ -416,7 +416,7 @@ static int prism2_scan(struct wiphy *wiphy,
                memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
                bss = cfg80211_inform_bss(wiphy,
                        ieee80211_get_channel(wiphy,
-                             ieee80211_dsss_chan_to_freq(msg2.dschannel.data)),
+                             ieee80211_channel_to_frequency(msg2.dschannel.data, IEEE80211_BAND_2GHZ)),
                        (const u8 *) &(msg2.bssid.data.data),
                        msg2.timestamp.data, msg2.capinfo.data,
                        msg2.beaconperiod.data,
index 677b4f01b2d0bfd2dd1619d930ec92992bcd1d7d..6f76277baf391733a3f9303f4f740b7151c5e96b 100644 (file)
 #define PHY_ID_BCM5461                 0x002060c0
 #define PHY_ID_BCM57780                        0x03625d90
 
+#define PHY_ID_BCM7366                 0x600d8490
+#define PHY_ID_BCM7439                 0x600d8480
+#define PHY_ID_BCM7445                 0x600d8510
+#define PHY_ID_BCM7XXX_28              0x600d8400
+
 #define PHY_BCM_OUI_MASK               0xfffffc00
 #define PHY_BCM_OUI_1                  0x00206000
 #define PHY_BCM_OUI_2                  0x0143bc00
 #define PHY_BCM_OUI_3                  0x03625c00
+#define PHY_BCM_OUI_4                  0x600d0000
+#define PHY_BCM_OUI_5                  0x03625e00
 
 
 #define PHY_BCM_FLAGS_MODE_COPPER      0x00000001
 #define PHY_BRCM_EXT_IBND_TX_ENABLE    0x00002000
 #define PHY_BRCM_CLEAR_RGMII_MODE      0x00004000
 #define PHY_BRCM_DIS_TXCRXC_NOENRGY    0x00008000
+/* Broadcom BCM7xxx specific workarounds */
+#define PHY_BRCM_100MBPS_WAR           0x00010000
 #define PHY_BCM_FLAGS_VALID            0x80000000
 
+/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
+#define MII_BCM54XX_ECR                0x10    /* BCM54xx extended control register */
+#define MII_BCM54XX_ECR_IM     0x1000  /* Interrupt mask */
+#define MII_BCM54XX_ECR_IF     0x0800  /* Interrupt force */
+
+#define MII_BCM54XX_ESR                0x11    /* BCM54xx extended status register */
+#define MII_BCM54XX_ESR_IS     0x1000  /* Interrupt status */
+
+#define MII_BCM54XX_EXP_DATA   0x15    /* Expansion register data */
+#define MII_BCM54XX_EXP_SEL    0x17    /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_SSD        0x0e00  /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_ER 0x0f00  /* Expansion register select */
+
+#define MII_BCM54XX_AUX_CTL    0x18    /* Auxiliary control register */
+#define MII_BCM54XX_ISR                0x1a    /* BCM54xx interrupt status register */
+#define MII_BCM54XX_IMR                0x1b    /* BCM54xx interrupt mask register */
+#define MII_BCM54XX_INT_CRCERR 0x0001  /* CRC error */
+#define MII_BCM54XX_INT_LINK   0x0002  /* Link status changed */
+#define MII_BCM54XX_INT_SPEED  0x0004  /* Link speed change */
+#define MII_BCM54XX_INT_DUPLEX 0x0008  /* Duplex mode changed */
+#define MII_BCM54XX_INT_LRS    0x0010  /* Local receiver status changed */
+#define MII_BCM54XX_INT_RRS    0x0020  /* Remote receiver status changed */
+#define MII_BCM54XX_INT_SSERR  0x0040  /* Scrambler synchronization error */
+#define MII_BCM54XX_INT_UHCD   0x0080  /* Unsupported HCD negotiated */
+#define MII_BCM54XX_INT_NHCD   0x0100  /* No HCD */
+#define MII_BCM54XX_INT_NHCDL  0x0200  /* No HCD link */
+#define MII_BCM54XX_INT_ANPR   0x0400  /* Auto-negotiation page received */
+#define MII_BCM54XX_INT_LC     0x0800  /* All counters below 128 */
+#define MII_BCM54XX_INT_HC     0x1000  /* Counter above 32768 */
+#define MII_BCM54XX_INT_MDIX   0x2000  /* MDIX status change */
+#define MII_BCM54XX_INT_PSERR  0x4000  /* Pair swap error */
+
+#define MII_BCM54XX_SHD                0x1c    /* 0x1c shadow registers */
+#define MII_BCM54XX_SHD_WRITE  0x8000
+#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
+#define MII_BCM54XX_SHD_DATA(x)        ((x & 0x3ff) << 0)
+
+/*
+ * AUXILIARY CONTROL SHADOW ACCESS REGISTERS.  (PHY REG 0x18)
+ */
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL      0x0000
+#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB         0x0400
+#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA      0x0800
+
+#define MII_BCM54XX_AUXCTL_MISC_WREN   0x8000
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX    0x0200
+#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC     0x7000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC        0x0007
+
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL      0x0000
+
 #endif /* _LINUX_BRCMPHY_H */
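
For illustration only (not from the patch): the MII_BCM54XX_SHD_WRITE / _SHD_VAL() / _SHD_DATA() macros added above describe how a 0x1c shadow-register access word is composed. A sketch of a write helper built from those macros and the standard phylib phy_write() accessor; the helper name is made up.

#include <linux/phy.h>
#include <linux/brcmphy.h>

/* Compose bit 15 (write), the 5-bit shadow selector and the 10-bit data
 * field into a single MII write to register 0x1c.
 */
static int bcm54xx_shadow_write_sketch(struct phy_device *phydev,
				       u16 shadow, u16 val)
{
	return phy_write(phydev, MII_BCM54XX_SHD,
			 MII_BCM54XX_SHD_WRITE |
			 MII_BCM54XX_SHD_VAL(shadow) |
			 MII_BCM54XX_SHD_DATA(val));
}
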
index fb0ab651a04196ac247b718221cd857de0cc5365..3ce5e526525f852f37ea242700363036c85068ca 100644 (file)
@@ -33,8 +33,9 @@ enum can_mode {
 struct can_priv {
        struct can_device_stats can_stats;
 
-       struct can_bittiming bittiming;
-       const struct can_bittiming_const *bittiming_const;
+       struct can_bittiming bittiming, data_bittiming;
+       const struct can_bittiming_const *bittiming_const,
+               *data_bittiming_const;
        struct can_clock clock;
 
        enum can_state state;
@@ -45,6 +46,7 @@ struct can_priv {
        struct timer_list restart_timer;
 
        int (*do_set_bittiming)(struct net_device *dev);
+       int (*do_set_data_bittiming)(struct net_device *dev);
        int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
        int (*do_get_state)(const struct net_device *dev,
                            enum can_state *state);
@@ -111,6 +113,7 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
 
 int open_candev(struct net_device *dev);
 void close_candev(struct net_device *dev);
+int can_change_mtu(struct net_device *dev, int new_mtu);
 
 int register_candev(struct net_device *dev);
 void unregister_candev(struct net_device *dev);
@@ -124,6 +127,8 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
 
 struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+                               struct canfd_frame **cfd);
 struct sk_buff *alloc_can_err_skb(struct net_device *dev,
                                  struct can_frame **cf);
 
index c8e3e7e39c6bf26ffbfe534cdfa83f59ad561372..0a114d05f68d35bd275924a7290a6c3b75ef17fd 100644 (file)
@@ -183,6 +183,9 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
  * hold the RTNL lock.
  *
  * See the structures used by these operations for further documentation.
+ * Note that for all operations using a structure ending with a zero-
+ * length array, the array is allocated separately in the kernel and
+ * is passed to the driver as an additional parameter.
  *
  * See &struct net_device and &struct net_device_ops for documentation
  * of the generic netdev features interface.
index e568c8ef896bf170de5578d7d898343a457b7724..262dcbb75ffe3b343ae1513d715e2eae5fd1d902 100644 (file)
@@ -9,28 +9,81 @@
 #include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
-#ifdef CONFIG_COMPAT
-/*
- * A struct sock_filter is architecture independent.
+/* Internally used and optimized filter representation with extended
+ * instruction set based on top of classic BPF.
  */
+
+/* instruction classes */
+#define BPF_ALU64      0x07    /* alu mode in double word width */
+
+/* ld/ldx fields */
+#define BPF_DW         0x18    /* double word */
+#define BPF_XADD       0xc0    /* exclusive add */
+
+/* alu/jmp fields */
+#define BPF_MOV                0xb0    /* mov reg to reg */
+#define BPF_ARSH       0xc0    /* sign extending arithmetic shift right */
+
+/* change endianness of a register */
+#define BPF_END                0xd0    /* flags for endianness conversion: */
+#define BPF_TO_LE      0x00    /* convert to little-endian */
+#define BPF_TO_BE      0x08    /* convert to big-endian */
+#define BPF_FROM_LE    BPF_TO_LE
+#define BPF_FROM_BE    BPF_TO_BE
+
+#define BPF_JNE                0x50    /* jump != */
+#define BPF_JSGT       0x60    /* SGT is signed '>', GT in x86 */
+#define BPF_JSGE       0x70    /* SGE is signed '>=', GE in x86 */
+#define BPF_CALL       0x80    /* function call */
+#define BPF_EXIT       0x90    /* function return */
+
+/* BPF has 10 general purpose 64-bit registers and stack frame. */
+#define MAX_BPF_REG    11
+
+/* BPF program can access up to 512 bytes of stack space. */
+#define MAX_BPF_STACK  512
+
+/* Arg1, context and stack frame pointer register positions. */
+#define ARG1_REG       1
+#define CTX_REG                6
+#define FP_REG         10
+
+struct sock_filter_int {
+       __u8    code;           /* opcode */
+       __u8    a_reg:4;        /* dest register */
+       __u8    x_reg:4;        /* source register */
+       __s16   off;            /* signed offset */
+       __s32   imm;            /* signed immediate constant */
+};
+
+#ifdef CONFIG_COMPAT
+/* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
        u16             len;
-       compat_uptr_t   filter;         /* struct sock_filter * */
+       compat_uptr_t   filter; /* struct sock_filter * */
 };
 #endif
 
+struct sock_fprog_kern {
+       u16                     len;
+       struct sock_filter      *filter;
+};
+
 struct sk_buff;
 struct sock;
+struct seccomp_data;
 
-struct sk_filter
-{
+struct sk_filter {
        atomic_t                refcnt;
-       unsigned int            len;    /* Number of filter blocks */
+       u32                     jited:1,        /* Is our filter JIT'ed? */
+                               len:31;         /* Number of filter blocks */
+       struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        struct rcu_head         rcu;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
-                                           const struct sock_filter *filter);
+                                           const struct sock_filter_int *filter);
        union {
-               struct sock_filter      insns[0];
+               struct sock_filter      insns[0];
+               struct sock_filter_int  insnsi[0];
                struct work_struct      work;
        };
 };
@@ -41,25 +94,44 @@ static inline unsigned int sk_filter_size(unsigned int proglen)
                   offsetof(struct sk_filter, insns[proglen]));
 }
 
-extern int sk_filter(struct sock *sk, struct sk_buff *skb);
-extern unsigned int sk_run_filter(const struct sk_buff *skb,
-                                 const struct sock_filter *filter);
-extern int sk_unattached_filter_create(struct sk_filter **pfp,
-                                      struct sock_fprog *fprog);
-extern void sk_unattached_filter_destroy(struct sk_filter *fp);
-extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
-extern int sk_detach_filter(struct sock *sk);
-extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
-extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
-extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
+#define sk_filter_proglen(fprog)                       \
+               (fprog->len * sizeof(fprog->filter[0]))
+
+#define SK_RUN_FILTER(filter, ctx)                     \
+               (*filter->bpf_func)(ctx, filter->insnsi)
+
+int sk_filter(struct sock *sk, struct sk_buff *skb);
+
+u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
+                             const struct sock_filter_int *insni);
+u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
+                         const struct sock_filter_int *insni);
+
+int sk_convert_filter(struct sock_filter *prog, int len,
+                     struct sock_filter_int *new_prog, int *new_len);
+
+int sk_unattached_filter_create(struct sk_filter **pfp,
+                               struct sock_fprog *fprog);
+void sk_unattached_filter_destroy(struct sk_filter *fp);
+
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int sk_detach_filter(struct sock *sk);
+
+int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
+int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
+                 unsigned int len);
+void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
+
+void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
+void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
 #include <linux/printk.h>
 
-extern void bpf_jit_compile(struct sk_filter *fp);
-extern void bpf_jit_free(struct sk_filter *fp);
+void bpf_jit_compile(struct sk_filter *fp);
+void bpf_jit_free(struct sk_filter *fp);
 
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
@@ -70,7 +142,6 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
 }
-#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
 #else
 #include <linux/slab.h>
 static inline void bpf_jit_compile(struct sk_filter *fp)
@@ -80,7 +151,6 @@ static inline void bpf_jit_free(struct sk_filter *fp)
 {
        kfree(fp);
 }
-#define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
 #endif
 
 static inline int bpf_tell_extensions(void)
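
For illustration only (not from the patch): struct sock_filter_int above fixes the internal instruction layout as an 8-bit opcode, 4-bit destination and source register fields, a signed 16-bit offset and a 32-bit immediate. A kernel-context sketch hand-encoding the two-instruction program "r0 = 0; return r0"; treating register 0 as the return-value register is an assumption here, and the classic BPF_JMP/BPF_K field macros come from <uapi/linux/filter.h>.

#include <linux/filter.h>

/* Field order: { code, a_reg (dst), x_reg (src), off, imm } */
static const struct sock_filter_int example_prog[] = {
	{ BPF_ALU64 | BPF_MOV | BPF_K, 0, 0, 0, 0 },	/* r0 = 0 */
	{ BPF_JMP | BPF_EXIT, 0, 0, 0, 0 },		/* return r0 */
};
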
index e526a8cecb70a90533843bc26c90eaa2a7e23e55..f194ccb8539c9b7d95af35b00a426ce2a06b61a6 100644 (file)
@@ -154,6 +154,10 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
    802.11e clarifies the figure in section 7.1.2. The frame body is
    up to 2304 octets long (maximum MSDU size) plus any crypt overhead. */
 #define IEEE80211_MAX_DATA_LEN         2304
+/* 802.11ad extends maximum MSDU size for DMG (freq > 40Ghz) networks
+ * to 7920 bytes, see 8.2.3 General frame format
+ */
+#define IEEE80211_MAX_DATA_LEN_DMG     7920
 /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
 #define IEEE80211_MAX_FRAME_LEN                2352
 
@@ -596,6 +600,20 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc)
               cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
 }
 
+/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @fc: frame control field in little-endian byteorder
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+{
+       /* IEEE 802.11-2012, definition of "bufferable management frame";
+        * note that this ignores the IBSS special case. */
+       return ieee80211_is_mgmt(fc) &&
+              (ieee80211_is_action(fc) ||
+               ieee80211_is_disassoc(fc) ||
+               ieee80211_is_deauth(fc));
+}
+
 /**
  * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
  * @seq_ctrl: frame sequence control bytes in little-endian byteorder
@@ -1636,51 +1654,22 @@ enum ieee80211_reasoncode {
 enum ieee80211_eid {
        WLAN_EID_SSID = 0,
        WLAN_EID_SUPP_RATES = 1,
-       WLAN_EID_FH_PARAMS = 2,
+       WLAN_EID_FH_PARAMS = 2, /* reserved now */
        WLAN_EID_DS_PARAMS = 3,
        WLAN_EID_CF_PARAMS = 4,
        WLAN_EID_TIM = 5,
        WLAN_EID_IBSS_PARAMS = 6,
-       WLAN_EID_CHALLENGE = 16,
-
        WLAN_EID_COUNTRY = 7,
        WLAN_EID_HP_PARAMS = 8,
        WLAN_EID_HP_TABLE = 9,
        WLAN_EID_REQUEST = 10,
-
        WLAN_EID_QBSS_LOAD = 11,
        WLAN_EID_EDCA_PARAM_SET = 12,
        WLAN_EID_TSPEC = 13,
        WLAN_EID_TCLAS = 14,
        WLAN_EID_SCHEDULE = 15,
-       WLAN_EID_TS_DELAY = 43,
-       WLAN_EID_TCLAS_PROCESSING = 44,
-       WLAN_EID_QOS_CAPA = 46,
-       /* 802.11z */
-       WLAN_EID_LINK_ID = 101,
-       /* 802.11s */
-       WLAN_EID_MESH_CONFIG = 113,
-       WLAN_EID_MESH_ID = 114,
-       WLAN_EID_LINK_METRIC_REPORT = 115,
-       WLAN_EID_CONGESTION_NOTIFICATION = 116,
-       WLAN_EID_PEER_MGMT = 117,
-       WLAN_EID_CHAN_SWITCH_PARAM = 118,
-       WLAN_EID_MESH_AWAKE_WINDOW = 119,
-       WLAN_EID_BEACON_TIMING = 120,
-       WLAN_EID_MCCAOP_SETUP_REQ = 121,
-       WLAN_EID_MCCAOP_SETUP_RESP = 122,
-       WLAN_EID_MCCAOP_ADVERT = 123,
-       WLAN_EID_MCCAOP_TEARDOWN = 124,
-       WLAN_EID_GANN = 125,
-       WLAN_EID_RANN = 126,
-       WLAN_EID_PREQ = 130,
-       WLAN_EID_PREP = 131,
-       WLAN_EID_PERR = 132,
-       WLAN_EID_PXU = 137,
-       WLAN_EID_PXUC = 138,
-       WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
-       WLAN_EID_MIC = 140,
-
+       WLAN_EID_CHALLENGE = 16,
+       /* 17-31 reserved for challenge text extension */
        WLAN_EID_PWR_CONSTRAINT = 32,
        WLAN_EID_PWR_CAPABILITY = 33,
        WLAN_EID_TPC_REQUEST = 34,
@@ -1691,66 +1680,114 @@ enum ieee80211_eid {
        WLAN_EID_MEASURE_REPORT = 39,
        WLAN_EID_QUIET = 40,
        WLAN_EID_IBSS_DFS = 41,
-
        WLAN_EID_ERP_INFO = 42,
-       WLAN_EID_EXT_SUPP_RATES = 50,
-
+       WLAN_EID_TS_DELAY = 43,
+       WLAN_EID_TCLAS_PROCESSING = 44,
        WLAN_EID_HT_CAPABILITY = 45,
-       WLAN_EID_HT_OPERATION = 61,
-       WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
-
+       WLAN_EID_QOS_CAPA = 46,
+       /* 47 reserved for Broadcom */
        WLAN_EID_RSN = 48,
-       WLAN_EID_MMIE = 76,
-       WLAN_EID_VENDOR_SPECIFIC = 221,
-       WLAN_EID_QOS_PARAMETER = 222,
-
+       WLAN_EID_802_15_COEX = 49,
+       WLAN_EID_EXT_SUPP_RATES = 50,
        WLAN_EID_AP_CHAN_REPORT = 51,
        WLAN_EID_NEIGHBOR_REPORT = 52,
        WLAN_EID_RCPI = 53,
+       WLAN_EID_MOBILITY_DOMAIN = 54,
+       WLAN_EID_FAST_BSS_TRANSITION = 55,
+       WLAN_EID_TIMEOUT_INTERVAL = 56,
+       WLAN_EID_RIC_DATA = 57,
+       WLAN_EID_DSE_REGISTERED_LOCATION = 58,
+       WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
+       WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+       WLAN_EID_HT_OPERATION = 61,
+       WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
        WLAN_EID_BSS_AVG_ACCESS_DELAY = 63,
        WLAN_EID_ANTENNA_INFO = 64,
        WLAN_EID_RSNI = 65,
        WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66,
        WLAN_EID_BSS_AVAILABLE_CAPACITY = 67,
        WLAN_EID_BSS_AC_ACCESS_DELAY = 68,
+       WLAN_EID_TIME_ADVERTISEMENT = 69,
        WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
        WLAN_EID_MULTIPLE_BSSID = 71,
        WLAN_EID_BSS_COEX_2040 = 72,
        WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74,
-       WLAN_EID_EXT_CAPABILITY = 127,
-
-       WLAN_EID_MOBILITY_DOMAIN = 54,
-       WLAN_EID_FAST_BSS_TRANSITION = 55,
-       WLAN_EID_TIMEOUT_INTERVAL = 56,
-       WLAN_EID_RIC_DATA = 57,
        WLAN_EID_RIC_DESCRIPTOR = 75,
-
-       WLAN_EID_DSE_REGISTERED_LOCATION = 58,
-       WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
-       WLAN_EID_EXT_CHANSWITCH_ANN = 60,
-
-       WLAN_EID_VHT_CAPABILITY = 191,
-       WLAN_EID_VHT_OPERATION = 192,
-       WLAN_EID_OPMODE_NOTIF = 199,
-       WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
-       WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
-       WLAN_EID_EXTENDED_BSS_LOAD = 193,
-       WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
-       WLAN_EID_AID = 197,
-       WLAN_EID_QUIET_CHANNEL = 198,
-
-       /* 802.11ad */
+       WLAN_EID_MMIE = 76,
+       WLAN_EID_ASSOC_COMEBACK_TIME = 77,
+       WLAN_EID_EVENT_REQUEST = 78,
+       WLAN_EID_EVENT_REPORT = 79,
+       WLAN_EID_DIAGNOSTIC_REQUEST = 80,
+       WLAN_EID_DIAGNOSTIC_REPORT = 81,
+       WLAN_EID_LOCATION_PARAMS = 82,
        WLAN_EID_NON_TX_BSSID_CAP =  83,
+       WLAN_EID_SSID_LIST = 84,
+       WLAN_EID_MULTI_BSSID_IDX = 85,
+       WLAN_EID_FMS_DESCRIPTOR = 86,
+       WLAN_EID_FMS_REQUEST = 87,
+       WLAN_EID_FMS_RESPONSE = 88,
+       WLAN_EID_QOS_TRAFFIC_CAPA = 89,
+       WLAN_EID_BSS_MAX_IDLE_PERIOD = 90,
+       WLAN_EID_TSF_REQUEST = 91,
+       WLAN_EID_TSF_RESPOSNE = 92,
+       WLAN_EID_WNM_SLEEP_MODE = 93,
+       WLAN_EID_TIM_BCAST_REQ = 94,
+       WLAN_EID_TIM_BCAST_RESP = 95,
+       WLAN_EID_COLL_IF_REPORT = 96,
+       WLAN_EID_CHANNEL_USAGE = 97,
+       WLAN_EID_TIME_ZONE = 98,
+       WLAN_EID_DMS_REQUEST = 99,
+       WLAN_EID_DMS_RESPONSE = 100,
+       WLAN_EID_LINK_ID = 101,
+       WLAN_EID_WAKEUP_SCHEDUL = 102,
+       /* 103 reserved */
+       WLAN_EID_CHAN_SWITCH_TIMING = 104,
+       WLAN_EID_PTI_CONTROL = 105,
+       WLAN_EID_PU_BUFFER_STATUS = 106,
+       WLAN_EID_INTERWORKING = 107,
+       WLAN_EID_ADVERTISEMENT_PROTOCOL = 108,
+       WLAN_EID_EXPEDITED_BW_REQ = 109,
+       WLAN_EID_QOS_MAP_SET = 110,
+       WLAN_EID_ROAMING_CONSORTIUM = 111,
+       WLAN_EID_EMERGENCY_ALERT = 112,
+       WLAN_EID_MESH_CONFIG = 113,
+       WLAN_EID_MESH_ID = 114,
+       WLAN_EID_LINK_METRIC_REPORT = 115,
+       WLAN_EID_CONGESTION_NOTIFICATION = 116,
+       WLAN_EID_PEER_MGMT = 117,
+       WLAN_EID_CHAN_SWITCH_PARAM = 118,
+       WLAN_EID_MESH_AWAKE_WINDOW = 119,
+       WLAN_EID_BEACON_TIMING = 120,
+       WLAN_EID_MCCAOP_SETUP_REQ = 121,
+       WLAN_EID_MCCAOP_SETUP_RESP = 122,
+       WLAN_EID_MCCAOP_ADVERT = 123,
+       WLAN_EID_MCCAOP_TEARDOWN = 124,
+       WLAN_EID_GANN = 125,
+       WLAN_EID_RANN = 126,
+       WLAN_EID_EXT_CAPABILITY = 127,
+       /* 128, 129 reserved for Agere */
+       WLAN_EID_PREQ = 130,
+       WLAN_EID_PREP = 131,
+       WLAN_EID_PERR = 132,
+       /* 133-136 reserved for Cisco */
+       WLAN_EID_PXU = 137,
+       WLAN_EID_PXUC = 138,
+       WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
+       WLAN_EID_MIC = 140,
+       WLAN_EID_DESTINATION_URI = 141,
+       WLAN_EID_UAPSD_COEX = 142,
        WLAN_EID_WAKEUP_SCHEDULE = 143,
        WLAN_EID_EXT_SCHEDULE = 144,
        WLAN_EID_STA_AVAILABILITY = 145,
        WLAN_EID_DMG_TSPEC = 146,
        WLAN_EID_DMG_AT = 147,
        WLAN_EID_DMG_CAP = 148,
+       /* 149-150 reserved for Cisco */
        WLAN_EID_DMG_OPERATION = 151,
        WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
        WLAN_EID_DMG_BEAM_REFINEMENT = 153,
        WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154,
+       /* 155-156 reserved for Cisco */
        WLAN_EID_AWAKE_WINDOW = 157,
        WLAN_EID_MULTI_BAND = 158,
        WLAN_EID_ADDBA_EXT = 159,
@@ -1767,11 +1804,34 @@ enum ieee80211_eid {
        WLAN_EID_MULTIPLE_MAC_ADDR = 170,
        WLAN_EID_U_PID = 171,
        WLAN_EID_DMG_LINK_ADAPT_ACK = 172,
+       /* 173 reserved for Symbol */
+       WLAN_EID_MCCAOP_ADV_OVERVIEW = 174,
        WLAN_EID_QUIET_PERIOD_REQ = 175,
+       /* 176 reserved for Symbol */
        WLAN_EID_QUIET_PERIOD_RESP = 177,
+       /* 178-179 reserved for Symbol */
+       /* 180 reserved for ISO/IEC 20011 */
        WLAN_EID_EPAC_POLICY = 182,
        WLAN_EID_CLISTER_TIME_OFF = 183,
+       WLAN_EID_INTER_AC_PRIO = 184,
+       WLAN_EID_SCS_DESCRIPTOR = 185,
+       WLAN_EID_QLOAD_REPORT = 186,
+       WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187,
+       WLAN_EID_HL_STREAM_ID = 188,
+       WLAN_EID_GCR_GROUP_ADDR = 189,
        WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190,
+       WLAN_EID_VHT_CAPABILITY = 191,
+       WLAN_EID_VHT_OPERATION = 192,
+       WLAN_EID_EXTENDED_BSS_LOAD = 193,
+       WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
+       WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+       WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+       WLAN_EID_AID = 197,
+       WLAN_EID_QUIET_CHANNEL = 198,
+       WLAN_EID_OPMODE_NOTIF = 199,
+
+       WLAN_EID_VENDOR_SPECIFIC = 221,
+       WLAN_EID_QOS_PARAMETER = 222,
 };
 
 /* Action category code */
@@ -2192,10 +2252,10 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
 }
 
 /**
- * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
+ * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
  * @hdr: the frame (buffer must include at least the first octet of payload)
  */
-static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
+static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
 {
        if (ieee80211_is_disassoc(hdr->frame_control) ||
            ieee80211_is_deauth(hdr->frame_control))
@@ -2223,6 +2283,17 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
        return false;
 }
 
+/**
+ * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
+ * @skb: the skb containing the frame, length will be checked
+ */
+static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
+{
+       if (skb->len < 25)
+               return false;
+       return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
+}
+
 /**
  * ieee80211_is_public_action - check if frame is a public action frame
  * @hdr: the frame
@@ -2240,42 +2311,6 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
        return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
 }
 
-/**
- * ieee80211_dsss_chan_to_freq - get channel center frequency
- * @channel: the DSSS channel
- *
- * Convert IEEE802.11 DSSS channel to the center frequency (MHz).
- * Ref IEEE 802.11-2007 section 15.6
- */
-static inline int ieee80211_dsss_chan_to_freq(int channel)
-{
-       if ((channel > 0) && (channel < 14))
-               return 2407 + (channel * 5);
-       else if (channel == 14)
-               return 2484;
-       else
-               return -1;
-}
-
-/**
- * ieee80211_freq_to_dsss_chan - get channel
- * @freq: the frequency
- *
- * Convert frequency (MHz) to IEEE802.11 DSSS channel
- * Ref IEEE 802.11-2007 section 15.6
- *
- * This routine selects the channel with the closest center frequency.
- */
-static inline int ieee80211_freq_to_dsss_chan(int freq)
-{
-       if ((freq >= 2410) && (freq < 2475))
-               return (freq - 2405) / 5;
-       else if ((freq >= 2482) && (freq < 2487))
-               return 14;
-       else
-               return -1;
-}
-
 /**
  * ieee80211_tu_to_usec - convert time units (TU) to microseconds
  * @tu: the TUs
index bbedfb56bd66074545503f12e161ee2cc640eb58..13bbbde00e68de454c8cf30f0581796d55eb0a40 100644 (file)
@@ -110,6 +110,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
                                               __be16 vlan_proto, u16 vlan_id);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
 
 /**
  *     struct vlan_priority_tci_mapping - vlan egress priority mappings
@@ -216,6 +217,12 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
        return 0;
 }
 
+static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
+{
+       BUG();
+       return 0;
+}
+
 static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
                                               u32 skprio)
 {
@@ -288,7 +295,7 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
        struct vlan_ethhdr *veth;
 
        if (skb_cow_head(skb, VLAN_HLEN) < 0) {
-               kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NULL;
        }
        veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
index d5f62bc5f4beb8499983d3c3a682b9c468c74b9e..8e10f57f109f5a9eff472fe7a90dbce48e4a20dc 100644 (file)
@@ -180,9 +180,8 @@ struct ippp_struct {
   struct slcompress *slcomp;
 #endif
 #ifdef CONFIG_IPPP_FILTER
-  struct sock_filter *pass_filter;     /* filter for packets to pass */
-  struct sock_filter *active_filter;   /* filter for pkts to reset idle */
-  unsigned pass_len, active_len;
+  struct sk_filter *pass_filter;   /* filter for packets to pass */
+  struct sk_filter *active_filter; /* filter for pkts to reset idle */
 #endif
   unsigned long debug;
   struct isdn_ppp_compressor *compressor,*decompressor;
index 3737f7218f51362ea9cf1991a66c0328143741b8..7bb6148d990fbdd89303d6edd5e5491c02095f39 100644 (file)
@@ -23,6 +23,7 @@
 #define TEMP_MINOR             131     /* Temperature Sensor */
 #define RTC_MINOR              135
 #define EFI_RTC_MINOR          136     /* EFI Time services */
+#define VHCI_MINOR             137
 #define SUN_OPENPROM_MINOR     139
 #define DMAPI_MINOR            140     /* DMAPI */
 #define NVRAM_MINOR            144
index 79a34723816895e59c794872f94d03893920c15b..c8450366c13019fe0ec0343487cd5ef6a8e2855f 100644 (file)
@@ -125,6 +125,7 @@ enum {
        /* miscellaneous commands */
        MLX4_CMD_DIAG_RPRT       = 0x30,
        MLX4_CMD_NOP             = 0x31,
+       MLX4_CMD_CONFIG_DEV      = 0x3a,
        MLX4_CMD_ACCESS_MEM      = 0x2e,
        MLX4_CMD_SET_VEP         = 0x52,
 
@@ -240,6 +241,13 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
 int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
+/*
+ * mlx4_get_slave_default_vlan -
+ * return true if VST ( default vlan)
+ * if VST, will return vlan & qos (if not NULL)
+ */
+bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
+                                u16 *vlan, u8 *qos);
 
 #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
 
index 5edd2c68274dd449f34f557296ce789e38578dad..ba87bd21295a533c8d6941bc4c11d29bceba0112 100644 (file)
@@ -48,6 +48,9 @@
 #define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
 
+#define MLX4_ROCE_MAX_GIDS     128
+#define MLX4_ROCE_PF_GIDS      16
+
 enum {
        MLX4_FLAG_MSI_X         = 1 << 0,
        MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
@@ -81,6 +84,7 @@ enum {
 enum {
        MLX4_MAX_NUM_PF         = 16,
        MLX4_MAX_NUM_VF         = 64,
+       MLX4_MAX_NUM_VF_P_PORT  = 64,
        MLX4_MFUNC_MAX          = 80,
        MLX4_MAX_EQ_NUM         = 1024,
        MLX4_MFUNC_EQ_NUM       = 4,
@@ -629,7 +633,8 @@ struct mlx4_eth_av {
        u8              hop_limit;
        __be32          sl_tclass_flowlabel;
        u8              dgid[16];
-       u32             reserved4[2];
+       u8              s_mac[6];
+       u8              reserved4[2];
        __be16          vlan;
        u8              mac[ETH_ALEN];
 };
@@ -660,6 +665,11 @@ struct mlx4_quotas {
        int xrcd;
 };
 
+struct mlx4_vf_dev {
+       u8                      min_port;
+       u8                      n_ports;
+};
+
 struct mlx4_dev {
        struct pci_dev         *pdev;
        unsigned long           flags;
@@ -675,6 +685,7 @@ struct mlx4_dev {
        int                     oper_log_mgm_entry_size;
        u64                     regid_promisc_array[MLX4_MAX_PORTS + 1];
        u64                     regid_allmulti_array[MLX4_MAX_PORTS + 1];
+       struct mlx4_vf_dev     *dev_vfs;
 };
 
 struct mlx4_eqe {
@@ -1131,7 +1142,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
                u8 *pg, u16 *ratelimit);
-int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering);
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -1183,9 +1194,44 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int
 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
 
+int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
+                                int *slave_id);
+int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
+                                u8 *gid);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
                                      u32 max_range_qpn);
 
 cycle_t mlx4_read_clock(struct mlx4_dev *dev);
 
+struct mlx4_active_ports {
+       DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
+};
+/* Returns a bitmap of the physical ports which are assigned to slave */
+struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave);
+
+/* Returns the physical port that represents the virtual port of the slave, */
+/* or a value < 0 in case of an error. If a slave has 2 ports, the identity */
+/* mapping is returned.                                                            */
+int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port);
+
+struct mlx4_slaves_pport {
+       DECLARE_BITMAP(slaves, MLX4_MFUNC_MAX);
+};
+/* Returns a bitmap of all slaves that are assigned to port. */
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
+                                                  int port);
+
+/* Returns a bitmap of all slaves that are assigned exactly to all the */
+/* ports that are set in crit_ports.                                  */
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
+               struct mlx4_dev *dev,
+               const struct mlx4_active_ports *crit_ports);
+
+/* Returns the slave's virtual port that represents the physical port. */
+int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
+
+int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
+
+int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
 #endif /* MLX4_DEVICE_H */
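For illustration, the new per-port helpers above can be used to walk every function (slave) bound to a physical port; a minimal sketch, assuming 'dev' and 'port' come from the caller and using the generic for_each_set_bit() bitmap iterator (the wrapper name is made up):

static void foo_for_each_port_slave(struct mlx4_dev *dev, int port)
{
	struct mlx4_slaves_pport slaves_pport =
		mlx4_phys_to_slaves_pport(dev, port);
	int slave;

	/* iterate over every slave whose bit is set for this physical port */
	for_each_set_bit(slave, slaves_pport.slaves, MLX4_MFUNC_MAX)
		pr_debug("port %d serves slave %d\n", port, slave);
}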
index c257e1b211be813989d1948906a637e358b0910c..022055c8fb2649456b19197f8417f8011ee2dc16 100644 (file)
@@ -64,4 +64,16 @@ void mlx4_unregister_interface(struct mlx4_interface *intf);
 
 void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
 
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+       u64 mac = 0;
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               mac <<= 8;
+               mac |= addr[i];
+       }
+       return mac;
+}
+
 #endif /* MLX4_DRIVER_H */
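A minimal usage sketch of the helper just added: packing a netdevice's hardware address into the 64-bit form used elsewhere in the driver (the wrapper name is hypothetical):

static u64 foo_netdev_mac_as_u64(struct net_device *netdev)
{
	/* dev_addr is the ETH_ALEN-byte station address */
	return mlx4_mac_to_u64(netdev->dev_addr);
}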
index 59f8ba84568bef4a4e17ed21240c163d6953a912..b66e7610d4eec9f4d67e5f8bbd745bb6cbd3c99a 100644 (file)
@@ -270,9 +270,14 @@ enum {
 
 struct mlx4_wqe_ctrl_seg {
        __be32                  owner_opcode;
-       __be16                  vlan_tag;
-       u8                      ins_vlan;
-       u8                      fence_size;
+       union {
+               struct {
+                       __be16                  vlan_tag;
+                       u8                      ins_vlan;
+                       u8                      fence_size;
+               };
+               __be32                  bf_qpn;
+       };
        /*
         * High 24 bits are SRC remote buffer; low 8 bits are flags:
         * [7]   SO (strong ordering)
index d8836623f36afcbd0aa88949c27a9f3c7000fa38..0f01fe065424d0fcddd53acec02697d89d1ba3d7 100644 (file)
@@ -31,6 +31,7 @@
 #define SDIO_DEVICE_ID_BROADCOM_4334           0x4334
 #define SDIO_DEVICE_ID_BROADCOM_4335_4339      0x4335
 #define SDIO_DEVICE_ID_BROADCOM_43362          43362
+#define SDIO_DEVICE_ID_BROADCOM_4354           0x4354
 
 #define SDIO_VENDOR_ID_INTEL                   0x0089
 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX     0x1402
diff --git a/include/linux/mpls.h b/include/linux/mpls.h
new file mode 100644 (file)
index 0000000..9999145
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_H
+#define _LINUX_MPLS_H
+
+#include <uapi/linux/mpls.h>
+
+#endif  /* _LINUX_MPLS_H */
index daafd9561cbca2335c1f0a3f20cd593e30fa5206..775cc956ff7874c5949ec707357341137c6fdb1f 100644 (file)
@@ -63,13 +63,6 @@ struct wireless_dev;
 void netdev_set_default_ethtool_ops(struct net_device *dev,
                                    const struct ethtool_ops *ops);
 
-/* hardware address assignment types */
-#define NET_ADDR_PERM          0       /* address is permanent (default) */
-#define NET_ADDR_RANDOM                1       /* address is generated randomly */
-#define NET_ADDR_STOLEN                2       /* address is stolen from other device */
-#define NET_ADDR_SET           3       /* address is set using
-                                        * dev_set_mac_address() */
-
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS         0       /* keep 'em coming, baby */
 #define NET_RX_DROP            1       /* packet dropped */
@@ -1037,8 +1030,7 @@ struct net_device_ops {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        void                    (*ndo_poll_controller)(struct net_device *dev);
        int                     (*ndo_netpoll_setup)(struct net_device *dev,
-                                                    struct netpoll_info *info,
-                                                    gfp_t gfp);
+                                                    struct netpoll_info *info);
        void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1147,6 +1139,89 @@ struct net_device_ops {
                                                        void *priv);
 };
 
+/**
+ * enum net_device_priv_flags - &struct net_device priv_flags
+ *
+ * These are the &struct net_device flags; they are only set internally
+ * by drivers and used in the kernel. These flags are invisible to
+ * userspace; this means that the order of these flags can change
+ * during any kernel release.
+ *
+ * You should have a pretty good reason to be extending these flags.
+ *
+ * @IFF_802_1Q_VLAN: 802.1Q VLAN device
+ * @IFF_EBRIDGE: Ethernet bridging device
+ * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
+ * @IFF_MASTER_8023AD: bonding master, 802.3ad
+ * @IFF_MASTER_ALB: bonding master, balance-alb
+ * @IFF_BONDING: bonding master or slave
+ * @IFF_SLAVE_NEEDARP: need ARPs for validation
+ * @IFF_ISATAP: ISATAP interface (RFC4214)
+ * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
+ * @IFF_WAN_HDLC: WAN HDLC device
+ * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
+ *     release skb->dst
+ * @IFF_DONT_BRIDGE: disallow bridging this ether dev
+ * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
+ * @IFF_MACVLAN_PORT: device used as macvlan port
+ * @IFF_BRIDGE_PORT: device used as bridge port
+ * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
+ * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
+ * @IFF_UNICAST_FLT: Supports unicast filtering
+ * @IFF_TEAM_PORT: device used as team port
+ * @IFF_SUPP_NOFCS: device supports sending custom FCS
+ * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
+ *     change when it's running
+ * @IFF_MACVLAN: Macvlan device
+ */
+enum netdev_priv_flags {
+       IFF_802_1Q_VLAN                 = 1<<0,
+       IFF_EBRIDGE                     = 1<<1,
+       IFF_SLAVE_INACTIVE              = 1<<2,
+       IFF_MASTER_8023AD               = 1<<3,
+       IFF_MASTER_ALB                  = 1<<4,
+       IFF_BONDING                     = 1<<5,
+       IFF_SLAVE_NEEDARP               = 1<<6,
+       IFF_ISATAP                      = 1<<7,
+       IFF_MASTER_ARPMON               = 1<<8,
+       IFF_WAN_HDLC                    = 1<<9,
+       IFF_XMIT_DST_RELEASE            = 1<<10,
+       IFF_DONT_BRIDGE                 = 1<<11,
+       IFF_DISABLE_NETPOLL             = 1<<12,
+       IFF_MACVLAN_PORT                = 1<<13,
+       IFF_BRIDGE_PORT                 = 1<<14,
+       IFF_OVS_DATAPATH                = 1<<15,
+       IFF_TX_SKB_SHARING              = 1<<16,
+       IFF_UNICAST_FLT                 = 1<<17,
+       IFF_TEAM_PORT                   = 1<<18,
+       IFF_SUPP_NOFCS                  = 1<<19,
+       IFF_LIVE_ADDR_CHANGE            = 1<<20,
+       IFF_MACVLAN                     = 1<<21,
+};
+
+#define IFF_802_1Q_VLAN                        IFF_802_1Q_VLAN
+#define IFF_EBRIDGE                    IFF_EBRIDGE
+#define IFF_SLAVE_INACTIVE             IFF_SLAVE_INACTIVE
+#define IFF_MASTER_8023AD              IFF_MASTER_8023AD
+#define IFF_MASTER_ALB                 IFF_MASTER_ALB
+#define IFF_BONDING                    IFF_BONDING
+#define IFF_SLAVE_NEEDARP              IFF_SLAVE_NEEDARP
+#define IFF_ISATAP                     IFF_ISATAP
+#define IFF_MASTER_ARPMON              IFF_MASTER_ARPMON
+#define IFF_WAN_HDLC                   IFF_WAN_HDLC
+#define IFF_XMIT_DST_RELEASE           IFF_XMIT_DST_RELEASE
+#define IFF_DONT_BRIDGE                        IFF_DONT_BRIDGE
+#define IFF_DISABLE_NETPOLL            IFF_DISABLE_NETPOLL
+#define IFF_MACVLAN_PORT               IFF_MACVLAN_PORT
+#define IFF_BRIDGE_PORT                        IFF_BRIDGE_PORT
+#define IFF_OVS_DATAPATH               IFF_OVS_DATAPATH
+#define IFF_TX_SKB_SHARING             IFF_TX_SKB_SHARING
+#define IFF_UNICAST_FLT                        IFF_UNICAST_FLT
+#define IFF_TEAM_PORT                  IFF_TEAM_PORT
+#define IFF_SUPP_NOFCS                 IFF_SUPP_NOFCS
+#define IFF_LIVE_ADDR_CHANGE           IFF_LIVE_ADDR_CHANGE
+#define IFF_MACVLAN                    IFF_MACVLAN
+
 /*
  *     The DEVICE structure.
  *     Actually, this whole structure is a big mistake.  It mixes I/O
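Because each enum value is also #defined to itself above, existing flag tests keep compiling unchanged; a sketch of a typical check (the helper name is hypothetical):

static bool foo_dev_does_unicast_filtering(const struct net_device *dev)
{
	/* priv_flags holds the netdev_priv_flags bits declared above */
	return !!(dev->priv_flags & IFF_UNICAST_FLT);
}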
@@ -1228,9 +1303,13 @@ struct net_device {
        int                     iflink;
 
        struct net_device_stats stats;
-       atomic_long_t           rx_dropped; /* dropped packets by core network
-                                            * Do not use this in drivers.
-                                            */
+
+       /* dropped packets by core network, Do not use this in drivers */
+       atomic_long_t           rx_dropped;
+       atomic_long_t           tx_dropped;
+
+       /* Stats to monitor carrier on<->off transitions */
+       atomic_t                carrier_changes;
 
 #ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
@@ -1279,6 +1358,10 @@ struct net_device {
                                                 * that share the same link
                                                 * layer address
                                                 */
+       unsigned short          dev_port;       /* Used to differentiate
+                                                * devices that share the same
+                                                * function
+                                                */
        spinlock_t              addr_list_lock;
        struct netdev_hw_addr_list      uc;     /* Unicast mac addresses */
        struct netdev_hw_addr_list      mc;     /* Multicast mac addresses */
@@ -1316,13 +1399,7 @@ struct net_device {
 /*
  * Cache lines mostly used on receive path (including eth_type_trans())
  */
-       unsigned long           last_rx;        /* Time of last Rx
-                                                * This should not be set in
-                                                * drivers, unless really needed,
-                                                * because network stack (bonding)
-                                                * use it if/when necessary, to
-                                                * avoid dirtying this cache line.
-                                                */
+       unsigned long           last_rx;        /* Time of last Rx */
 
        /* Interface address info used in eth_type_trans() */
        unsigned char           *dev_addr;      /* hw address, (before bcast
@@ -1729,6 +1806,20 @@ struct pcpu_sw_netstats {
        struct u64_stats_sync   syncp;
 };
 
+#define netdev_alloc_pcpu_stats(type)                          \
+({                                                             \
+       typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
+       if (pcpu_stats) {                                       \
+               int i;                                          \
+               for_each_possible_cpu(i) {                      \
+                       typeof(type) *stat;                     \
+                       stat = per_cpu_ptr(pcpu_stats, i);      \
+                       u64_stats_init(&stat->syncp);           \
+               }                                               \
+       }                                                       \
+       pcpu_stats;                                             \
+})
+
 #include <linux/notifier.h>
 
 /* netdevice notifier chain. Please remember to update the rtnetlink
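A sketch of how a driver would use the new allocation macro during setup; 'foo_priv' and its percpu stats pointer are hypothetical, the stats type is the pcpu_sw_netstats shown above:

struct foo_priv {
	struct pcpu_sw_netstats __percpu *stats;
};

static int foo_dev_init(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* allocates the percpu counters and initializes each syncp */
	priv->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!priv->stats)
		return -ENOMEM;
	return 0;
}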
@@ -1884,9 +1975,6 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-#ifdef CONFIG_NETPOLL_TRAP
-int netpoll_trap(void);
-#endif
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
@@ -1926,11 +2014,6 @@ static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
        return skb->data + offset;
 }
 
-static inline void *skb_gro_mac_header(struct sk_buff *skb)
-{
-       return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
-}
-
 static inline void *skb_gro_network_header(struct sk_buff *skb)
 {
        return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
@@ -2091,12 +2174,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
 
 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
-#ifdef CONFIG_NETPOLL_TRAP
-       if (netpoll_trap()) {
-               netif_tx_start_queue(dev_queue);
-               return;
-       }
-#endif
        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
                __netif_schedule(dev_queue->qdisc);
 }
@@ -2340,10 +2417,6 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-#ifdef CONFIG_NETPOLL_TRAP
-       if (netpoll_trap())
-               return;
-#endif
        netif_tx_stop_queue(txq);
 }
 
@@ -2378,10 +2451,6 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-#ifdef CONFIG_NETPOLL_TRAP
-       if (netpoll_trap())
-               return;
-#endif
        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
                __netif_schedule(txq->qdisc);
 }
@@ -2551,6 +2620,7 @@ int dev_get_phys_port_id(struct net_device *dev,
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                        struct netdev_queue *txq);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
 
 extern int             netdev_budget;
 
@@ -2831,6 +2901,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
        }                                               \
 }
 
+#define HARD_TX_TRYLOCK(dev, txq)                      \
+       (((dev->features & NETIF_F_LLTX) == 0) ?        \
+               __netif_tx_trylock(txq) :               \
+               true )
+
 #define HARD_TX_UNLOCK(dev, txq) {                     \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
                __netif_tx_unlock(txq);                 \
index 0c7d01eae56cf8626d2d1fc55242b3cbae0ba1ad..96afc29184bee810a1ec550933cfb15998f42efb 100644 (file)
@@ -39,11 +39,13 @@ enum ip_set_feature {
        IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
        IPSET_TYPE_IFACE_FLAG = 5,
        IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
-       IPSET_TYPE_NOMATCH_FLAG = 6,
+       IPSET_TYPE_MARK_FLAG = 6,
+       IPSET_TYPE_MARK = (1 << IPSET_TYPE_MARK_FLAG),
+       IPSET_TYPE_NOMATCH_FLAG = 7,
        IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
        /* Strictly speaking not a feature, but a flag for dumping:
         * this settype must be dumped last */
-       IPSET_DUMP_LAST_FLAG = 7,
+       IPSET_DUMP_LAST_FLAG = 8,
        IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
 };
 
@@ -63,6 +65,7 @@ enum ip_set_extension {
 #define SET_WITH_TIMEOUT(s)    ((s)->extensions & IPSET_EXT_TIMEOUT)
 #define SET_WITH_COUNTER(s)    ((s)->extensions & IPSET_EXT_COUNTER)
 #define SET_WITH_COMMENT(s)    ((s)->extensions & IPSET_EXT_COMMENT)
+#define SET_WITH_FORCEADD(s)   ((s)->flags & IPSET_CREATE_FLAG_FORCEADD)
 
 /* Extension id, in size order */
 enum ip_set_ext_id {
@@ -171,8 +174,6 @@ struct ip_set_type {
        char name[IPSET_MAXNAMELEN];
        /* Protocol version */
        u8 protocol;
-       /* Set features to control swapping */
-       u8 features;
        /* Set type dimension */
        u8 dimension;
        /*
@@ -182,6 +183,8 @@ struct ip_set_type {
        u8 family;
        /* Type revisions */
        u8 revision_min, revision_max;
+       /* Set features to control swapping */
+       u16 features;
 
        /* Create set */
        int (*create)(struct net *net, struct ip_set *set,
@@ -217,6 +220,8 @@ struct ip_set {
        u8 revision;
        /* Extensions */
        u8 extensions;
+       /* Create flags */
+       u8 flags;
        /* Default timeout value, if enabled */
        u32 timeout;
        /* Element data size */
@@ -251,6 +256,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
                cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
        if (SET_WITH_COMMENT(set))
                cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+       if (SET_WITH_FORCEADD(set))
+               cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
 
        if (!cadt_flags)
                return 0;
index 28c74367e900ac679aa5feba98b7629ea6338af3..e955d47306259c5867d80831bd78d99490849b0b 100644 (file)
@@ -44,6 +44,27 @@ int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
 
 void nfnl_lock(__u8 subsys_id);
 void nfnl_unlock(__u8 subsys_id);
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_nfnl_is_held(__u8 subsys_id);
+#else
+static inline int lockdep_nfnl_is_held(__u8 subsys_id)
+{
+       return 1;
+}
+#endif /* CONFIG_PROVE_LOCKING */
+
+/*
+ * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
+ *
+ * @p: The pointer to read, prior to dereferencing
+ * @ss: The nfnetlink subsystem ID
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds the NFNL subsystem mutex.
+ */
+#define nfnl_dereference(p, ss)                                        \
+       rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
 
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
        MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
index fbfdb9d8d3a7f59b4788bdfd1cdb26c86f0a81ca..b25ee9ffdbe67e06a5c70305360cceea3de43aef 100644 (file)
@@ -24,27 +24,20 @@ struct netpoll {
        struct net_device *dev;
        char dev_name[IFNAMSIZ];
        const char *name;
-       void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
-                           int offset, int len);
 
        union inet_addr local_ip, remote_ip;
        bool ipv6;
        u16 local_port, remote_port;
        u8 remote_mac[ETH_ALEN];
 
-       struct list_head rx; /* rx_np list element */
        struct work_struct cleanup_work;
 };
 
 struct netpoll_info {
        atomic_t refcnt;
 
-       unsigned long rx_flags;
-       spinlock_t rx_lock;
        struct semaphore dev_lock;
-       struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
 
-       struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
        struct sk_buff_head txq;
 
        struct delayed_work tx_work;
@@ -54,24 +47,21 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_rx_disable(struct net_device *dev);
-extern void netpoll_rx_enable(struct net_device *dev);
+extern void netpoll_poll_disable(struct net_device *dev);
+extern void netpoll_poll_enable(struct net_device *dev);
 #else
-static inline void netpoll_rx_disable(struct net_device *dev) { return; }
-static inline void netpoll_rx_enable(struct net_device *dev) { return; }
+static inline void netpoll_poll_disable(struct net_device *dev) { return; }
+static inline void netpoll_poll_enable(struct net_device *dev) { return; }
 #endif
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
-int netpoll_trap(void);
-void netpoll_set_trap(int trap);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free_async(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                             struct net_device *dev);
 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
@@ -82,46 +72,7 @@ static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
        local_irq_restore(flags);
 }
 
-
-
 #ifdef CONFIG_NETPOLL
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-       struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
-
-       return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
-}
-
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-       struct netpoll_info *npinfo;
-       unsigned long flags;
-       bool ret = false;
-
-       local_irq_save(flags);
-
-       if (!netpoll_rx_on(skb))
-               goto out;
-
-       npinfo = rcu_dereference_bh(skb->dev->npinfo);
-       spin_lock(&npinfo->rx_lock);
-       /* check rx_flags again with the lock held */
-       if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
-               ret = true;
-       spin_unlock(&npinfo->rx_lock);
-
-out:
-       local_irq_restore(flags);
-       return ret;
-}
-
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-       if (!list_empty(&skb->dev->napi_list))
-               return netpoll_rx(skb);
-       return 0;
-}
-
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
        struct net_device *dev = napi->dev;
@@ -150,18 +101,6 @@ static inline bool netpoll_tx_running(struct net_device *dev)
 }
 
 #else
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-       return false;
-}
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-       return false;
-}
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-       return 0;
-}
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
        return NULL;
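With the receive-side hooks removed, the remaining pair simply fences netpoll's device polling around reconfiguration; a minimal sketch (the wrapper is hypothetical):

static void foo_reconfigure(struct net_device *dev)
{
	netpoll_poll_disable(dev);
	/* ... change state the netpoll poll handler must not race with ... */
	netpoll_poll_enable(dev);
}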
index fd4f2d1cdf6cbfd95a3dd9eba2c10b27eca9d062..e110b8c266f547a884bca0040dd15e3be03f1485 100644 (file)
@@ -70,6 +70,16 @@ enum {
        IEEE802154_ATTR_PHY_NAME,
        IEEE802154_ATTR_DEV_TYPE,
 
+       IEEE802154_ATTR_TXPOWER,
+       IEEE802154_ATTR_LBT_ENABLED,
+       IEEE802154_ATTR_CCA_MODE,
+       IEEE802154_ATTR_CCA_ED_LEVEL,
+       IEEE802154_ATTR_CSMA_RETRIES,
+       IEEE802154_ATTR_CSMA_MIN_BE,
+       IEEE802154_ATTR_CSMA_MAX_BE,
+
+       IEEE802154_ATTR_FRAME_RETRIES,
+
        __IEEE802154_ATTR_MAX,
 };
 
@@ -122,6 +132,8 @@ enum {
        IEEE802154_ADD_IFACE,
        IEEE802154_DEL_IFACE,
 
+       IEEE802154_SET_PHYPARAMS,
+
        __IEEE802154_CMD_MAX,
 };
 
index 565188ca328f31841a4c8b718db2c4fd08238278..24126c4b27b58e443fe3569fa7143e256466ce84 100644 (file)
@@ -74,8 +74,53 @@ typedef enum {
        PHY_INTERFACE_MODE_RTBI,
        PHY_INTERFACE_MODE_SMII,
        PHY_INTERFACE_MODE_XGMII,
+       PHY_INTERFACE_MODE_MOCA,
+       PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;
 
+/**
+ * It maps 'enum phy_interface_t' found in include/linux/phy.h
+ * into the device tree binding of 'phy-mode', so that an Ethernet
+ * device driver can get the PHY interface mode from the device tree.
+ */
+static inline const char *phy_modes(phy_interface_t interface)
+{
+       switch (interface) {
+       case PHY_INTERFACE_MODE_NA:
+               return "";
+       case PHY_INTERFACE_MODE_MII:
+               return "mii";
+       case PHY_INTERFACE_MODE_GMII:
+               return "gmii";
+       case PHY_INTERFACE_MODE_SGMII:
+               return "sgmii";
+       case PHY_INTERFACE_MODE_TBI:
+               return "tbi";
+       case PHY_INTERFACE_MODE_REVMII:
+               return "rev-mii";
+       case PHY_INTERFACE_MODE_RMII:
+               return "rmii";
+       case PHY_INTERFACE_MODE_RGMII:
+               return "rgmii";
+       case PHY_INTERFACE_MODE_RGMII_ID:
+               return "rgmii-id";
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+               return "rgmii-rxid";
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               return "rgmii-txid";
+       case PHY_INTERFACE_MODE_RTBI:
+               return "rtbi";
+       case PHY_INTERFACE_MODE_SMII:
+               return "smii";
+       case PHY_INTERFACE_MODE_XGMII:
+               return "xgmii";
+       case PHY_INTERFACE_MODE_MOCA:
+               return "moca";
+       default:
+               return "unknown";
+       }
+}
+
 
 #define PHY_INIT_TIMEOUT       100000
 #define PHY_STATE_TIME         1
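A sketch of what the string mapping is for: reporting the mode a MAC driver read from the 'phy-mode' device-tree property (of_get_phy_mode() is the existing OF helper; 'pdev' and the function name are assumed from a typical probe context):

static void foo_report_phy_mode(struct platform_device *pdev)
{
	int iface = of_get_phy_mode(pdev->dev.of_node);

	if (iface < 0)
		iface = PHY_INTERFACE_MODE_NA;
	dev_info(&pdev->dev, "phy-mode: %s\n", phy_modes(iface));
}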
@@ -308,6 +353,7 @@ struct phy_device {
        struct phy_c45_device_ids c45_ids;
        bool is_c45;
        bool is_internal;
+       bool has_fixups;
 
        enum phy_state state;
 
@@ -393,6 +439,11 @@ struct phy_driver {
        u32 features;
        u32 flags;
 
+       /*
+        * Called to issue a PHY software reset
+        */
+       int (*soft_reset)(struct phy_device *phydev);
+
        /*
         * Called to initialize the PHY,
         * including after a reset
@@ -417,6 +468,9 @@ struct phy_driver {
         */
        int (*config_aneg)(struct phy_device *phydev);
 
+       /* Determines the auto negotiation result */
+       int (*aneg_done)(struct phy_device *phydev);
+
        /* Determines the negotiated speed and duplex */
        int (*read_status)(struct phy_device *phydev);
 
@@ -612,10 +666,12 @@ static inline int phy_read_status(struct phy_device *phydev)
 int genphy_setup_forced(struct phy_device *phydev);
 int genphy_restart_aneg(struct phy_device *phydev);
 int genphy_config_aneg(struct phy_device *phydev);
+int genphy_aneg_done(struct phy_device *phydev);
 int genphy_update_link(struct phy_device *phydev);
 int genphy_read_status(struct phy_device *phydev);
 int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
+int genphy_soft_reset(struct phy_device *phydev);
 void phy_driver_unregister(struct phy_driver *drv);
 void phy_drivers_unregister(struct phy_driver *drv, int n);
 int phy_driver_register(struct phy_driver *new_driver);
index 1dc420ba213a52624920184c620441516270c6bf..6d3b0a2ef9ce851f299c03b3f83fcd5a8e4d4952 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/filter.h>
-#ifdef __KERNEL__
 #include <linux/in.h>
-#else
-#include <netinet/in.h>
-#endif
 
 #define PTP_CLASS_NONE  0x00 /* not a PTP event message */
 #define PTP_CLASS_V1    0x01 /* protocol version 1 */
 #define OP_RETA        (BPF_RET | BPF_A)
 #define OP_RETK        (BPF_RET | BPF_K)
 
-static inline int ptp_filter_init(struct sock_filter *f, int len)
-{
-       if (OP_LDH == f[0].code)
-               return sk_chk_filter(f, len);
-       else
-               return 0;
-}
-
 #define PTP_FILTER \
        {OP_LDH,        0,   0, OFF_ETYPE               }, /*              */ \
        {OP_JEQ,        0,  12, ETH_P_IP                }, /* f goto L20   */ \
@@ -137,4 +125,6 @@ static inline int ptp_filter_init(struct sock_filter *f, int len)
        {OP_RETA,       0,   0, 0                       }, /*              */ \
 /*L6x*/        {OP_RETK,       0,   0, PTP_CLASS_NONE          },
 
+unsigned int ptp_classify_raw(const struct sk_buff *skb);
+
 #endif
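A sketch of the exported classifier's intended use, e.g. in a transmit path that only requests hardware timestamps for PTP event frames (the helper name is made up):

static bool foo_skb_is_ptp_event(const struct sk_buff *skb)
{
	return ptp_classify_raw(skb) != PTP_CLASS_NONE;
}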
index 38a99350832744231042de3d1d45f0fc11f3471f..0d8ff3fb84baf1778fccd81cf4caa7c4a87ac2d8 100644 (file)
@@ -49,7 +49,11 @@ struct ptp_clock_request {
  * @n_alarm:   The number of programmable alarms.
  * @n_ext_ts:  The number of external time stamp channels.
  * @n_per_out: The number of programmable periodic signals.
+ * @n_pins:    The number of programmable pins.
  * @pps:       Indicates whether the clock supports a PPS callback.
+ * @pin_config: Array of length 'n_pins'. If the number of
+ *              programmable pins is nonzero, then drivers must
+ *              allocate and initialize this array.
  *
  * clock operations
  *
@@ -70,6 +74,18 @@ struct ptp_clock_request {
  *            parameter request: Desired resource to enable or disable.
  *            parameter on: Caller passes one to enable or zero to disable.
  *
+ * @verify:   Confirm that a pin can perform a given function. The PTP
+ *            Hardware Clock subsystem maintains the 'pin_config'
+ *            array on behalf of the drivers, but the PHC subsystem
+ *            assumes that every pin can perform every function. This
+ *            hook gives drivers a way of telling the core about
+ *            limitations on specific pins. This function must return
+ *            zero if the function can be assigned to this pin, and
+ *            nonzero otherwise.
+ *            parameter pin: index of the pin in question.
+ *            parameter func: the desired function to use.
+ *            parameter chan: the function channel index to use.
+ *
  * Drivers should embed their ptp_clock_info within a private
  * structure, obtaining a reference to it using container_of().
  *
@@ -83,13 +99,17 @@ struct ptp_clock_info {
        int n_alarm;
        int n_ext_ts;
        int n_per_out;
+       int n_pins;
        int pps;
+       struct ptp_pin_desc *pin_config;
        int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
        int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
        int (*gettime)(struct ptp_clock_info *ptp, struct timespec *ts);
        int (*settime)(struct ptp_clock_info *ptp, const struct timespec *ts);
        int (*enable)(struct ptp_clock_info *ptp,
                      struct ptp_clock_request *request, int on);
+       int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
+                     enum ptp_pin_function func, unsigned int chan);
 };
 
 struct ptp_clock;
@@ -156,4 +176,17 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
 
 extern int ptp_clock_index(struct ptp_clock *ptp);
 
+/**
+ * ptp_find_pin() - obtain the pin index of a given auxiliary function
+ *
+ * @ptp:    The clock obtained from ptp_clock_register().
+ * @func:   One of the ptp_pin_function enumerated values.
+ * @chan:   The particular functional channel to find.
+ * Return:  Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
+ *          or -1 if the auxiliary function cannot be found.
+ */
+
+int ptp_find_pin(struct ptp_clock *ptp,
+                enum ptp_pin_function func, unsigned int chan);
+
 #endif
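To illustrate the new pin API, a driver exposing programmable pins supplies a pin_config array and may add a verify() hook such as the following sketch (the single-channel limitation is invented for the example):

static int foo_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
			  enum ptp_pin_function func, unsigned int chan)
{
	/* hypothetical hardware: each auxiliary function only has channel 0 */
	if (chan != 0)
		return -1;
	return 0;
}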
index 6f19cfd1840e4adea37b84dea76175361d3187bc..4054b0994071cada2d95bdbe0964a3b22cfe2590 100644 (file)
@@ -76,7 +76,6 @@ static inline int seccomp_mode(struct seccomp *s)
 #ifdef CONFIG_SECCOMP_FILTER
 extern void put_seccomp_filter(struct task_struct *tsk);
 extern void get_seccomp_filter(struct task_struct *tsk);
-extern u32 seccomp_bpf_load(int off);
 #else  /* CONFIG_SECCOMP_FILTER */
 static inline void put_seccomp_filter(struct task_struct *tsk)
 {
index 15ede6a823a6e2daa366551cd7bad13ac3dd5c03..18ef0224fb6a353a6cc584c3a4f38d5f1ccc4301 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/hrtimer.h>
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
+#include <linux/sched.h>
 #include <net/flow_keys.h>
 
 /* A. Checksumming of received packets by device.
@@ -356,11 +357,62 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+/**
+ * struct skb_mstamp - multi resolution time stamps
+ * @stamp_us: timestamp in us resolution
+ * @stamp_jiffies: timestamp in jiffies
+ */
+struct skb_mstamp {
+       union {
+               u64             v64;
+               struct {
+                       u32     stamp_us;
+                       u32     stamp_jiffies;
+               };
+       };
+};
+
+/**
+ * skb_mstamp_get - get current timestamp
+ * @cl: place to store timestamps
+ */
+static inline void skb_mstamp_get(struct skb_mstamp *cl)
+{
+       u64 val = local_clock();
+
+       do_div(val, NSEC_PER_USEC);
+       cl->stamp_us = (u32)val;
+       cl->stamp_jiffies = (u32)jiffies;
+}
+
+/**
+ * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
+ * @t1: pointer to newest sample
+ * @t0: pointer to oldest sample
+ */
+static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
+                                     const struct skb_mstamp *t0)
+{
+       s32 delta_us = t1->stamp_us - t0->stamp_us;
+       u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
+
+       /* If delta_us is negative, this might be because interval is too big,
+        * or local_clock() drift is too big: fall back to using jiffies.
+        */
+       if (delta_us <= 0 ||
+           delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
+
+               delta_us = jiffies_to_usecs(delta_jiffies);
+
+       return delta_us;
+}
+
+
 /** 
  *     struct sk_buff - socket buffer
  *     @next: Next buffer in list
  *     @prev: Previous buffer in list
- *     @tstamp: Time we arrived
+ *     @tstamp: Time we arrived/left
  *     @sk: Socket we are owned by
  *     @dev: Device we arrived on/are leaving by
  *     @cb: Control buffer. Free for use by every layer. Put private vars here
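A sketch of how the new multi-resolution stamps are consumed, e.g. for an elapsed-time measurement in microseconds (the wrapper is illustrative only):

static u32 foo_elapsed_us(const struct skb_mstamp *start)
{
	struct skb_mstamp now;

	skb_mstamp_get(&now);
	/* uses the us counters, falling back to jiffies on large drift */
	return skb_mstamp_us_delta(&now, start);
}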
@@ -392,11 +444,11 @@ typedef unsigned char *sk_buff_data_t;
  *     @skb_iif: ifindex of device we arrived on
  *     @tc_index: Traffic control index
  *     @tc_verd: traffic control verdict
- *     @rxhash: the packet hash computed on receive
+ *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @ndisc_nodetype: router type (from link layer)
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
- *     @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
+ *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
  *             ports.
  *     @wifi_acked_valid: wifi_acked was set
  *     @wifi_acked: whether frame was acked on wifi or not
@@ -429,7 +481,10 @@ struct sk_buff {
        struct sk_buff          *next;
        struct sk_buff          *prev;
 
-       ktime_t                 tstamp;
+       union {
+               ktime_t         tstamp;
+               struct skb_mstamp skb_mstamp;
+       };
 
        struct sock             *sk;
        struct net_device       *dev;
@@ -482,7 +537,7 @@ struct sk_buff {
 
        int                     skb_iif;
 
-       __u32                   rxhash;
+       __u32                   hash;
 
        __be16                  vlan_proto;
        __u16                   vlan_tci;
@@ -501,7 +556,7 @@ struct sk_buff {
 #endif
        __u8                    pfmemalloc:1;
        __u8                    ooo_okay:1;
-       __u8                    l4_rxhash:1;
+       __u8                    l4_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
        __u8                    no_fcs:1;
@@ -691,6 +746,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
                                     unsigned int headroom);
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
                                int newtailroom, gfp_t priority);
+int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+                       int offset, int len);
 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
                 int len);
 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
@@ -758,40 +815,40 @@ enum pkt_hash_types {
 static inline void
 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
 {
-       skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
-       skb->rxhash = hash;
+       skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+       skb->hash = hash;
 }
 
 void __skb_get_hash(struct sk_buff *skb);
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
-       if (!skb->l4_rxhash)
+       if (!skb->l4_hash)
                __skb_get_hash(skb);
 
-       return skb->rxhash;
+       return skb->hash;
 }
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
-       return skb->rxhash;
+       return skb->hash;
 }
 
 static inline void skb_clear_hash(struct sk_buff *skb)
 {
-       skb->rxhash = 0;
-       skb->l4_rxhash = 0;
+       skb->hash = 0;
+       skb->l4_hash = 0;
 }
 
 static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
 {
-       if (!skb->l4_rxhash)
+       if (!skb->l4_hash)
                skb_clear_hash(skb);
 }
 
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 {
-       to->rxhash = from->rxhash;
-       to->l4_rxhash = from->l4_rxhash;
+       to->hash = from->hash;
+       to->l4_hash = from->l4_hash;
 };
 
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
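With the rename, a driver's receive path still records the hardware-provided hash through the same helper, now backed by skb->hash and skb->l4_hash; a sketch (the 'is_l4' flag stands in for whatever the hardware descriptor reports):

static void foo_rx_record_hash(struct sk_buff *skb, u32 hw_hash, bool is_l4)
{
	skb_set_hash(skb, hw_hash,
		     is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}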
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
new file mode 100644 (file)
index 0000000..a62442c
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * 10G controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_PLATFORM_H__
+#define __SXGBE_PLATFORM_H__
+
+/* MDC Clock Selection defines */
+#define SXGBE_CSR_100_150M     0x0     /* MDC = clk_scr_i/62 */
+#define SXGBE_CSR_150_250M     0x1     /* MDC = clk_scr_i/102 */
+#define SXGBE_CSR_250_300M     0x2     /* MDC = clk_scr_i/122 */
+#define SXGBE_CSR_300_350M     0x3     /* MDC = clk_scr_i/142 */
+#define SXGBE_CSR_350_400M     0x4     /* MDC = clk_scr_i/162 */
+#define SXGBE_CSR_400_500M     0x5     /* MDC = clk_scr_i/202 */
+
+/* Platform data for the platform device structure's
+ * platform_data field
+ */
+struct sxgbe_mdio_bus_data {
+       unsigned int phy_mask;
+       int *irqs;
+       int probed_phy_irq;
+};
+
+struct sxgbe_dma_cfg {
+       int pbl;
+       int fixed_burst;
+       int burst_map;
+       int adv_addr_mode;
+};
+
+struct sxgbe_plat_data {
+       char *phy_bus_name;
+       int bus_id;
+       int phy_addr;
+       int interface;
+       struct sxgbe_mdio_bus_data *mdio_bus_data;
+       struct sxgbe_dma_cfg *dma_cfg;
+       int clk_csr;
+       int pmt;
+       int force_sf_dma_mode;
+       int force_thresh_dma_mode;
+       int riwt_off;
+};
+
+#endif /* __SXGBE_PLATFORM_H__ */
index 4ad0706d40ebecf98e1ca95483a79f9dd44e903c..239946868142cec2893e89259555d3b86884d616 100644 (file)
@@ -201,10 +201,10 @@ struct tcp_sock {
        u32     tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
 
 /* RTT measurement */
-       u32     srtt;           /* smoothed round trip time << 3        */
-       u32     mdev;           /* medium deviation                     */
-       u32     mdev_max;       /* maximal mdev for the last rtt period */
-       u32     rttvar;         /* smoothed mdev_max                    */
+       u32     srtt_us;        /* smoothed round trip time << 3 in usecs */
+       u32     mdev_us;        /* medium deviation                     */
+       u32     mdev_max_us;    /* maximal mdev for the last rtt period */
+       u32     rttvar_us;      /* smoothed mdev_max                    */
        u32     rtt_seq;        /* sequence number to update rttvar     */
 
        u32     packets_out;    /* Packets which are "in flight"        */
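The RTT state now lives in microseconds (still stored left-shifted by 3, as before); code that used to report jiffies-based values converts roughly as in this sketch:

static u32 foo_srtt_msec(const struct tcp_sock *tp)
{
	/* srtt_us holds the smoothed RTT << 3, in microseconds */
	return (tp->srtt_us >> 3) / USEC_PER_MSEC;
}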
index 90b4fdc8a61f14fcd8753bc5d7d55d89924e9302..4781d7b27dd39d9930164592d008aa1ac22772c3 100644 (file)
@@ -518,9 +518,9 @@ extern void tty_port_put(struct tty_port *port);
 
 static inline struct tty_port *tty_port_get(struct tty_port *port)
 {
-       if (port)
-               kref_get(&port->kref);
-       return port;
+       if (port && kref_get_unless_zero(&port->kref))
+               return port;
+       return NULL;
 }
 
 /* If the cts flow control is enabled, return true. */
index 7bfabd20204c150afa40a7249932519c0e770267..4b4439e75f45f8e915f0ffb6b855be5f1113a04f 100644 (file)
@@ -27,8 +27,8 @@
  *    (On UP, there is no seqcount_t protection, a reader allowing interrupts could
  *     read partial values)
  *
- * 7) For softirq uses, readers can use u64_stats_fetch_begin_bh() and
- *    u64_stats_fetch_retry_bh() helpers
+ * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ *    u64_stats_fetch_retry_irq() helpers
  *
  * Usage :
  *
@@ -114,31 +114,31 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 }
 
 /*
- * In case softirq handlers can update u64 counters, readers can use following helpers
+ * In case irq handlers can update u64 counters, readers can use the following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
- * - UP 32bit must disable BH.
+ * - UP 32bit must disable irqs.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_begin(&syncp->seq);
 #else
 #if BITS_PER_LONG==32
-       local_bh_disable();
+       local_irq_disable();
 #endif
        return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
                                         unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
        return read_seqcount_retry(&syncp->seq, start);
 #else
 #if BITS_PER_LONG==32
-       local_bh_enable();
+       local_irq_enable();
 #endif
        return false;
 #endif
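The renamed readers are used exactly like the _bh variants they replace; a sketch of a retry loop over one per-cpu stats block (pcpu_sw_netstats is the structure from netdevice.h):

static u64 foo_read_rx_packets(const struct pcpu_sw_netstats *stats)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		packets = stats->rx_packets;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	return packets;
}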
index 2c14d9cdd57aac88eb3121a4271b7c5fe3909a56..44b38b92236a5834f022185e99707ea9ac6edc39 100644 (file)
@@ -94,6 +94,7 @@ struct cdc_ncm_ctx {
 
        const struct usb_cdc_ncm_desc *func_desc;
        const struct usb_cdc_mbim_desc *mbim_desc;
+       const struct usb_cdc_mbim_extended_desc *mbim_extended_desc;
        const struct usb_cdc_ether_desc *ether_desc;
 
        struct usb_interface *control;
index a54fe82e704bc09c5acd6a5f1664a6dfbc982b64..a9c723be1acfca6cd6d6bbfa37f453404aadb83d 100644 (file)
@@ -48,11 +48,15 @@ enum {
        WL12XX_TCXOCLOCK_33_6   = 7, /* 33.6 MHz */
 };
 
-struct wl12xx_platform_data {
-       void (*set_power)(bool enable);
+struct wl1251_platform_data {
+       int power_gpio;
        /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
        int irq;
        bool use_eeprom;
+};
+
+struct wl12xx_platform_data {
+       int irq;
        int board_ref_clock;
        int board_tcxo_clock;
        unsigned long platform_quirks;
@@ -68,6 +72,10 @@ int wl12xx_set_platform_data(const struct wl12xx_platform_data *data);
 
 struct wl12xx_platform_data *wl12xx_get_platform_data(void);
 
+int wl1251_set_platform_data(const struct wl1251_platform_data *data);
+
+struct wl1251_platform_data *wl1251_get_platform_data(void);
+
 #else
 
 static inline
@@ -82,6 +90,18 @@ struct wl12xx_platform_data *wl12xx_get_platform_data(void)
        return ERR_PTR(-ENODATA);
 }
 
+static inline
+int wl1251_set_platform_data(const struct wl1251_platform_data *data)
+{
+       return -ENOSYS;
+}
+
+static inline
+struct wl1251_platform_data *wl1251_get_platform_data(void)
+{
+       return ERR_PTR(-ENODATA);
+}
+
 #endif
 
 #endif
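A board-file sketch of the split-out wl1251 platform data; the GPIO number is a placeholder, not taken from any real board:

static struct wl1251_platform_data foo_wl1251_pdata = {
	.power_gpio = 87,	/* placeholder GPIO */
	.irq        = 0,	/* 0: use SDIO in-band interrupts */
	.use_eeprom = false,
};

static int __init foo_wifi_init(void)
{
	return wl1251_set_platform_data(&foo_wl1251_pdata);
}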
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
new file mode 100644 (file)
index 0000000..f7d372b
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __6LOWPAN_H__
+#define __6LOWPAN_H__
+
+#include <net/ipv6.h>
+
+#define UIP_802154_SHORTADDR_LEN       2  /* compressed ipv6 address length */
+#define UIP_IPH_LEN                    40 /* ipv6 fixed header size */
+#define UIP_PROTO_UDP                  17 /* ipv6 next header value for UDP */
+#define UIP_FRAGH_LEN                  8  /* ipv6 fragment header size */
+
+/*
+ * ipv6 address based on mac
+ * second bit-flip (Universal/Local) is done according to RFC 2464
+ */
+#define is_addr_mac_addr_based(a, m) \
+       ((((a)->s6_addr[8])  == (((m)[0]) ^ 0x02)) &&   \
+        (((a)->s6_addr[9])  == (m)[1]) &&              \
+        (((a)->s6_addr[10]) == (m)[2]) &&              \
+        (((a)->s6_addr[11]) == (m)[3]) &&              \
+        (((a)->s6_addr[12]) == (m)[4]) &&              \
+        (((a)->s6_addr[13]) == (m)[5]) &&              \
+        (((a)->s6_addr[14]) == (m)[6]) &&              \
+        (((a)->s6_addr[15]) == (m)[7]))
+
+/* ipv6 address is unspecified */
+#define is_addr_unspecified(a)         \
+       ((((a)->s6_addr32[0]) == 0) &&  \
+        (((a)->s6_addr32[1]) == 0) &&  \
+        (((a)->s6_addr32[2]) == 0) &&  \
+        (((a)->s6_addr32[3]) == 0))
+
+/* compare ipv6 addresses prefixes */
+#define ipaddr_prefixcmp(addr1, addr2, length) \
+       (memcmp(addr1, addr2, length >> 3) == 0)
+
+/* local link, i.e. FE80::/10 */
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
+
+/*
+ * check whether we can compress the IID to 16 bits,
+ * it's only possible for unicast addresses whose first 49 bits are zero.
+ */
+#define lowpan_is_iid_16_bit_compressable(a)   \
+       ((((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr[10]) == 0) &&           \
+        (((a)->s6_addr[11]) == 0xff) &&        \
+        (((a)->s6_addr[12]) == 0xfe) &&        \
+        (((a)->s6_addr[13]) == 0))
+
+/* multicast address */
+#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
+
+/* check whether the 112-bit gid of the multicast address is mappable to: */
+
+/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
+#define lowpan_is_mcast_addr_compressable(a)   \
+       ((((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr16[5]) == 0) &&          \
+        (((a)->s6_addr16[6]) == 0) &&          \
+        (((a)->s6_addr[14])  == 0) &&          \
+        ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
+
+/* 48 bits, FFXX::00XX:XXXX:XXXX */
+#define lowpan_is_mcast_addr_compressable48(a) \
+       ((((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr[10]) == 0))
+
+/* 32 bits, FFXX::00XX:XXXX */
+#define lowpan_is_mcast_addr_compressable32(a) \
+       ((((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr16[5]) == 0) &&          \
+        (((a)->s6_addr[12]) == 0))
+
+/* 8 bits, FF02::00XX */
+#define lowpan_is_mcast_addr_compressable8(a)  \
+       ((((a)->s6_addr[1])  == 2) &&           \
+        (((a)->s6_addr16[1]) == 0) &&          \
+        (((a)->s6_addr16[2]) == 0) &&          \
+        (((a)->s6_addr16[3]) == 0) &&          \
+        (((a)->s6_addr16[4]) == 0) &&          \
+        (((a)->s6_addr16[5]) == 0) &&          \
+        (((a)->s6_addr16[6]) == 0) &&          \
+        (((a)->s6_addr[14]) == 0))
+
+#define lowpan_is_addr_broadcast(a)    \
+       ((((a)[0]) == 0xFF) &&  \
+        (((a)[1]) == 0xFF) &&  \
+        (((a)[2]) == 0xFF) &&  \
+        (((a)[3]) == 0xFF) &&  \
+        (((a)[4]) == 0xFF) &&  \
+        (((a)[5]) == 0xFF) &&  \
+        (((a)[6]) == 0xFF) &&  \
+        (((a)[7]) == 0xFF))
+
+#define LOWPAN_DISPATCH_IPV6   0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_HC1    0x42 /* 01000010 = 66 */
+#define LOWPAN_DISPATCH_IPHC   0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_FRAG1  0xc0 /* 11000xxx */
+#define LOWPAN_DISPATCH_FRAGN  0xe0 /* 11100xxx */
+
+#define LOWPAN_DISPATCH_MASK   0xf8 /* 11111000 */
+
+#define LOWPAN_FRAG_TIMEOUT    (HZ * 60)       /* time-out 60 sec */
+
+#define LOWPAN_FRAG1_HEAD_SIZE 0x4
+#define LOWPAN_FRAGN_HEAD_SIZE 0x5
+
+/*
+ * According to the IEEE 802.15.4 standard:
+ *   - MTU is 127 octets
+ *   - maximum MHR size is 37 octets
+ *   - MFR size is 2 octets
+ *
+ * so the minimal payload size that we can guarantee is:
+ *   MTU - MHR - MFR = 88 octets
+ */
+#define LOWPAN_FRAG_SIZE       88
+
+/*
+ * Values of fields within the IPHC encoding first byte
+ * (C stands for compressed and I for inline)
+ */
+#define LOWPAN_IPHC_TF         0x18
+
+#define LOWPAN_IPHC_FL_C       0x10
+#define LOWPAN_IPHC_TC_C       0x08
+#define LOWPAN_IPHC_NH_C       0x04
+#define LOWPAN_IPHC_TTL_1      0x01
+#define LOWPAN_IPHC_TTL_64     0x02
+#define LOWPAN_IPHC_TTL_255    0x03
+#define LOWPAN_IPHC_TTL_I      0x00
+
+
+/* Values of fields within the IPHC encoding second byte */
+#define LOWPAN_IPHC_CID                0x80
+
+#define LOWPAN_IPHC_ADDR_00    0x00
+#define LOWPAN_IPHC_ADDR_01    0x01
+#define LOWPAN_IPHC_ADDR_02    0x02
+#define LOWPAN_IPHC_ADDR_03    0x03
+
+#define LOWPAN_IPHC_SAC                0x40
+#define LOWPAN_IPHC_SAM                0x30
+
+#define LOWPAN_IPHC_SAM_BIT    4
+
+#define LOWPAN_IPHC_M          0x08
+#define LOWPAN_IPHC_DAC                0x04
+#define LOWPAN_IPHC_DAM_00     0x00
+#define LOWPAN_IPHC_DAM_01     0x01
+#define LOWPAN_IPHC_DAM_10     0x02
+#define LOWPAN_IPHC_DAM_11     0x03
+
+#define LOWPAN_IPHC_DAM_BIT    0
+/*
+ * LOWPAN_UDP encoding (works together with IPHC)
+ */
+#define LOWPAN_NHC_UDP_MASK            0xF8
+#define LOWPAN_NHC_UDP_ID              0xF0
+#define LOWPAN_NHC_UDP_CHECKSUMC       0x04
+#define LOWPAN_NHC_UDP_CHECKSUMI       0x00
+
+#define LOWPAN_NHC_UDP_4BIT_PORT       0xF0B0
+#define LOWPAN_NHC_UDP_4BIT_MASK       0xFFF0
+#define LOWPAN_NHC_UDP_8BIT_PORT       0xF000
+#define LOWPAN_NHC_UDP_8BIT_MASK       0xFF00
+
+/* values for port compression, _with checksum_ ie bit 5 set to 0 */
+#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
+#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
+                                       dest = 0xF0 + 8 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
+                                       dest = 16 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+#define LOWPAN_NHC_UDP_CS_C    0x04 /* checksum elided */
+
+#ifdef DEBUG
+/* print data in line */
+static inline void raw_dump_inline(const char *caller, char *msg,
+                                  unsigned char *buf, int len)
+{
+       if (msg)
+               pr_debug("%s():%s: ", caller, msg);
+
+       print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false);
+}
+
+/* print data in a table format:
+ *
+ * addr: xx xx xx xx xx xx
+ * addr: xx xx xx xx xx xx
+ * ...
+ */
+static inline void raw_dump_table(const char *caller, char *msg,
+                                 unsigned char *buf, int len)
+{
+       if (msg)
+               pr_debug("%s():%s:\n", caller, msg);
+
+       print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
+}
+#else
+static inline void raw_dump_table(const char *caller, char *msg,
+                                 unsigned char *buf, int len) { }
+static inline void raw_dump_inline(const char *caller, char *msg,
+                                  unsigned char *buf, int len) { }
+#endif
+
+static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
+{
+       if (unlikely(!pskb_may_pull(skb, 1)))
+               return -EINVAL;
+
+       *val = skb->data[0];
+       skb_pull(skb, 1);
+
+       return 0;
+}
+
+static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
+{
+       if (unlikely(!pskb_may_pull(skb, 2)))
+               return -EINVAL;
+
+       *val = (skb->data[0] << 8) | skb->data[1];
+       skb_pull(skb, 2);
+
+       return 0;
+}
+
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+               void *data, const unsigned int len)
+{
+       if (unlikely(!pskb_may_pull(skb, len)))
+               return true;
+
+       skb_copy_from_linear_data(skb, data, len);
+       skb_pull(skb, len);
+
+       return false;
+}
+
+static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
+                                      const size_t len)
+{
+       memcpy(*hc_ptr, data, len);
+       *hc_ptr += len;
+}
+
+static inline u8 lowpan_addr_mode_size(const u8 addr_mode)
+{
+       static const u8 addr_sizes[] = {
+               [LOWPAN_IPHC_ADDR_00] = 16,
+               [LOWPAN_IPHC_ADDR_01] = 8,
+               [LOWPAN_IPHC_ADDR_02] = 2,
+               [LOWPAN_IPHC_ADDR_03] = 0,
+       };
+       return addr_sizes[addr_mode];
+}
+
+static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header)
+{
+       u8 ret = 1;
+
+       if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+               *uncomp_header += sizeof(struct udphdr);
+
+               switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) {
+               case LOWPAN_NHC_UDP_CS_P_00:
+                       ret += 4;
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_01:
+               case LOWPAN_NHC_UDP_CS_P_10:
+                       ret += 3;
+                       break;
+               case LOWPAN_NHC_UDP_CS_P_11:
+                       ret++;
+                       break;
+               default:
+                       break;
+               }
+
+               if (!(h_enc & LOWPAN_NHC_UDP_CS_C))
+                       ret += 2;
+       }
+
+       return ret;
+}
+
+/**
+ *     lowpan_uncompress_size - returns skb->len as if the header were uncompressed
+ *     @skb: sk_buff with the 6lowpan header inside
+ *     @dgram_offset: optional pointer that receives the datagram offset
+ *                    (size of the uncompressed headers)
+ *
+ *     Returns skb->len adjusted for the uncompressed header size
+ */
+static inline u16
+lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
+{
+       u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr);
+       u8 iphc0, iphc1, h_enc;
+
+       iphc0 = skb_network_header(skb)[0];
+       iphc1 = skb_network_header(skb)[1];
+
+       switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
+       case 0:
+               ret += 4;
+               break;
+       case 1:
+               ret += 3;
+               break;
+       case 2:
+               ret++;
+               break;
+       default:
+               break;
+       }
+
+       if (!(iphc0 & LOWPAN_IPHC_NH_C))
+               ret++;
+
+       if (!(iphc0 & 0x03))
+               ret++;
+
+       ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >>
+                                    LOWPAN_IPHC_SAM_BIT);
+
+       if (iphc1 & LOWPAN_IPHC_M) {
+               switch ((iphc1 & LOWPAN_IPHC_DAM_11) >>
+                       LOWPAN_IPHC_DAM_BIT) {
+               case LOWPAN_IPHC_DAM_00:
+                       ret += 16;
+                       break;
+               case LOWPAN_IPHC_DAM_01:
+                       ret += 6;
+                       break;
+               case LOWPAN_IPHC_DAM_10:
+                       ret += 4;
+                       break;
+               case LOWPAN_IPHC_DAM_11:
+                       ret++;
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >>
+                                            LOWPAN_IPHC_DAM_BIT);
+       }
+
+       if (iphc0 & LOWPAN_IPHC_NH_C) {
+               h_enc = skb_network_header(skb)[ret];
+               ret += lowpan_next_hdr_size(h_enc, &uncomp_header);
+       }
+
+       if (dgram_offset)
+               *dgram_offset = uncomp_header;
+
+       return skb->len + uncomp_header - ret;
+}
+
+typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
+
+int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
+               const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
+               const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
+               u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
+int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
+                       unsigned short type, const void *_daddr,
+                       const void *_saddr, unsigned int len);
+
+#endif /* __6LOWPAN_H__ */
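
For orientation only, and not part of the patch itself: a minimal userspace sketch of how a
receiver expands the LOWPAN_NHC_UDP_CS_P_11 encoding defined above, where one inline byte
carries both UDP ports as 4-bit offsets from LOWPAN_NHC_UDP_4BIT_PORT (0xF0B0). The function
name is made up for illustration.

    #include <stdint.h>

    /* Hypothetical helper: expand the P=11 case (src nibble high, dst nibble low). */
    static void nhc_udp_expand_4bit_ports(uint8_t inline_byte,
                                          uint16_t *sport, uint16_t *dport)
    {
            *sport = 0xF0B0 + (inline_byte >> 4);   /* LOWPAN_NHC_UDP_4BIT_PORT + high nibble */
            *dport = 0xF0B0 + (inline_byte & 0x0F); /* LOWPAN_NHC_UDP_4BIT_PORT + low nibble */
    }
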
index 788d8378e587f00bbf2a61bd39665370b17f7a41..3ee4c92afd1bd2baf2b90201a9b4af896d020b5f 100644 (file)
@@ -89,7 +89,7 @@ struct tc_action_ops {
        struct module           *owner;
        int     (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
        int     (*dump)(struct sk_buff *, struct tc_action *, int, int);
-       int     (*cleanup)(struct tc_action *, int bind);
+       void    (*cleanup)(struct tc_action *, int bind);
        int     (*lookup)(struct tc_action *, u32);
        int     (*init)(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action *act, int ovr,
@@ -98,20 +98,18 @@ struct tc_action_ops {
 };
 
 int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-int tcf_hash_release(struct tcf_common *p, int bind,
-                    struct tcf_hashinfo *hinfo);
+void tcf_hash_destroy(struct tc_action *a);
+int tcf_hash_release(struct tc_action *a, int bind);
 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
-                                 int bind);
-struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
-                                  struct tc_action *a, int size,
-                                  int bind);
-void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_check(u32 index, struct tc_action *a, int bind);
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+                   int size, int bind);
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
+void tcf_hash_insert(struct tc_action *a);
 
-int tcf_register_action(struct tc_action_ops *a);
+int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
 int tcf_unregister_action(struct tc_action_ops *a);
-void tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct list_head *actions, int bind);
 int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
                    struct tcf_result *res);
 int tcf_action_init(struct net *net, struct nlattr *nla,
index 50e39a8822b49ab5ce6b16b5e17c5e875e5d1abe..933a9f22a05ff63abb4379f94cb907c95f6beac4 100644 (file)
@@ -314,7 +314,7 @@ static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
 static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-       __u64 *p = (__u64 *)addr;
+       __be64 *p = (__be64 *)addr;
        return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
 #else
        return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -326,7 +326,7 @@ static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
 static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-       __u64 *p = (__u64 *)addr;
+       __be64 *p = (__be64 *)addr;
        return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
 #else
        return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -343,7 +343,7 @@ static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
 static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-       __u64 *p = (__u64 *)addr;
+       __be64 *p = (__be64 *)addr;
        return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
                ((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
                 cpu_to_be64(0xffffffffff000000UL))) == 0UL;
index 75e64c7a2960a17a0f3c7f6ce62553c6c874f0c9..f79ae2aa76d6a45fb9a1e452d53f82d05e101717 100644 (file)
@@ -36,7 +36,7 @@ enum {
 /* address length, octets */
 #define IEEE802154_ADDR_LEN    8
 
-struct ieee802154_addr {
+struct ieee802154_addr_sa {
        int addr_type;
        u16 pan_id;
        union {
@@ -51,7 +51,7 @@ struct ieee802154_addr {
 
 struct sockaddr_ieee802154 {
        sa_family_t family; /* AF_IEEE802154 */
-       struct ieee802154_addr addr;
+       struct ieee802154_addr_sa addr;
 };
 
 /* get/setsockopt */
index f4f9ee466791a9b9fc0b0d7f74f22e2cf3660ac2..904777c1cd2420486a3df25636f61811ab35c29e 100644 (file)
@@ -65,6 +65,7 @@ struct bt_security {
 #define BT_SECURITY_LOW                1
 #define BT_SECURITY_MEDIUM     2
 #define BT_SECURITY_HIGH       3
+#define BT_SECURITY_FIPS       4
 
 #define BT_DEFER_SETUP 7
 
index 66c1cd87bfe7f9b9d117db340f736884f9d0c4c1..be150cf8cd432298d47860268a1d4566af74481d 100644 (file)
@@ -117,11 +117,18 @@ enum {
        HCI_SERVICE_CACHE,
        HCI_DEBUG_KEYS,
        HCI_DUT_MODE,
+       HCI_FORCE_SC,
+       HCI_FORCE_STATIC_ADDR,
        HCI_UNREGISTER,
        HCI_USER_CHANNEL,
 
        HCI_LE_SCAN,
        HCI_SSP_ENABLED,
+       HCI_SC_ENABLED,
+       HCI_SC_ONLY,
+       HCI_PRIVACY,
+       HCI_RPA_EXPIRED,
+       HCI_RPA_RESOLVING,
        HCI_HS_ENABLED,
        HCI_LE_ENABLED,
        HCI_ADVERTISING,
@@ -133,6 +140,7 @@ enum {
        HCI_FAST_CONNECTABLE,
        HCI_BREDR_ENABLED,
        HCI_6LOWPAN_ENABLED,
+       HCI_LE_SCAN_INTERRUPTED,
 };
 
 /* A mask for the flags that are supposed to remain when a reset happens
@@ -175,6 +183,8 @@ enum {
 #define HCI_CMD_TIMEOUT                msecs_to_jiffies(2000)  /* 2 seconds */
 #define HCI_ACL_TX_TIMEOUT     msecs_to_jiffies(45000) /* 45 seconds */
 #define HCI_AUTO_OFF_TIMEOUT   msecs_to_jiffies(2000)  /* 2 seconds */
+#define HCI_POWER_OFF_TIMEOUT  msecs_to_jiffies(5000)  /* 5 seconds */
+#define HCI_LE_CONN_TIMEOUT    msecs_to_jiffies(20000) /* 20 seconds */
 
 /* HCI data types */
 #define HCI_COMMAND_PKT                0x01
@@ -282,10 +292,14 @@ enum {
 #define LMP_SYNC_TRAIN 0x04
 #define LMP_SYNC_SCAN  0x08
 
+#define LMP_SC         0x01
+#define LMP_PING       0x02
+
 /* Host features */
 #define LMP_HOST_SSP           0x01
 #define LMP_HOST_LE            0x02
 #define LMP_HOST_LE_BREDR      0x04
+#define LMP_HOST_SC            0x08
 
 /* Connection modes */
 #define HCI_CM_ACTIVE  0x0000
@@ -307,6 +321,7 @@ enum {
 #define HCI_LM_TRUSTED 0x0008
 #define HCI_LM_RELIABLE        0x0010
 #define HCI_LM_SECURE  0x0020
+#define HCI_LM_FIPS    0x0040
 
 /* Authentication types */
 #define HCI_AT_NO_BONDING              0x00
@@ -327,17 +342,24 @@ enum {
 #define HCI_LK_LOCAL_UNIT              0x01
 #define HCI_LK_REMOTE_UNIT             0x02
 #define HCI_LK_DEBUG_COMBINATION       0x03
-#define HCI_LK_UNAUTH_COMBINATION      0x04
-#define HCI_LK_AUTH_COMBINATION                0x05
+#define HCI_LK_UNAUTH_COMBINATION_P192 0x04
+#define HCI_LK_AUTH_COMBINATION_P192   0x05
 #define HCI_LK_CHANGED_COMBINATION     0x06
+#define HCI_LK_UNAUTH_COMBINATION_P256 0x07
+#define HCI_LK_AUTH_COMBINATION_P256   0x08
 /* The spec doesn't define types for SMP keys, the _MASTER suffix is implied */
 #define HCI_SMP_STK                    0x80
 #define HCI_SMP_STK_SLAVE              0x81
 #define HCI_SMP_LTK                    0x82
 #define HCI_SMP_LTK_SLAVE              0x83
 
+/* Long Term Key types */
+#define HCI_LTK_UNAUTH                 0x00
+#define HCI_LTK_AUTH                   0x01
+
 /* ---- HCI Error Codes ---- */
 #define HCI_ERROR_AUTH_FAILURE         0x05
+#define HCI_ERROR_MEMORY_EXCEEDED      0x07
 #define HCI_ERROR_CONNECTION_TIMEOUT   0x08
 #define HCI_ERROR_REJ_BAD_ADDR         0x0f
 #define HCI_ERROR_REMOTE_USER_TERM     0x13
@@ -660,6 +682,15 @@ struct hci_rp_set_csb {
 
 #define HCI_OP_START_SYNC_TRAIN                0x0443
 
+#define HCI_OP_REMOTE_OOB_EXT_DATA_REPLY       0x0445
+struct hci_cp_remote_oob_ext_data_reply {
+       bdaddr_t bdaddr;
+       __u8     hash192[16];
+       __u8     randomizer192[16];
+       __u8     hash256[16];
+       __u8     randomizer256[16];
+} __packed;
+
 #define HCI_OP_SNIFF_MODE              0x0803
 struct hci_cp_sniff_mode {
        __le16   handle;
@@ -933,6 +964,26 @@ struct hci_rp_write_sync_train_params {
        __le16  sync_train_int;
 } __packed;
 
+#define HCI_OP_READ_SC_SUPPORT         0x0c79
+struct hci_rp_read_sc_support {
+       __u8    status;
+       __u8    support;
+} __packed;
+
+#define HCI_OP_WRITE_SC_SUPPORT                0x0c7a
+struct hci_cp_write_sc_support {
+       __u8    support;
+} __packed;
+
+#define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d
+struct hci_rp_read_local_oob_ext_data {
+       __u8     status;
+       __u8     hash192[16];
+       __u8     randomizer192[16];
+       __u8     hash256[16];
+       __u8     randomizer256[16];
+} __packed;
+
 #define HCI_OP_READ_LOCAL_VERSION      0x1001
 struct hci_rp_read_local_version {
        __u8     status;
@@ -1133,6 +1184,9 @@ struct hci_cp_le_set_scan_enable {
        __u8     filter_dup;
 } __packed;
 
+#define HCI_LE_USE_PEER_ADDR           0x00
+#define HCI_LE_USE_WHITELIST           0x01
+
 #define HCI_OP_LE_CREATE_CONN          0x200d
 struct hci_cp_le_create_conn {
        __le16   scan_interval;
@@ -1157,6 +1211,20 @@ struct hci_rp_le_read_white_list_size {
        __u8    size;
 } __packed;
 
+#define HCI_OP_LE_CLEAR_WHITE_LIST     0x2010
+
+#define HCI_OP_LE_ADD_TO_WHITE_LIST    0x2011
+struct hci_cp_le_add_to_white_list {
+       __u8     bdaddr_type;
+       bdaddr_t bdaddr;
+} __packed;
+
+#define HCI_OP_LE_DEL_FROM_WHITE_LIST  0x2012
+struct hci_cp_le_del_from_white_list {
+       __u8     bdaddr_type;
+       bdaddr_t bdaddr;
+} __packed;
+
 #define HCI_OP_LE_CONN_UPDATE          0x2013
 struct hci_cp_le_conn_update {
        __le16   handle;
@@ -1171,7 +1239,7 @@ struct hci_cp_le_conn_update {
 #define HCI_OP_LE_START_ENC            0x2019
 struct hci_cp_le_start_enc {
        __le16  handle;
-       __u8    rand[8];
+       __le64  rand;
        __le16  ediv;
        __u8    ltk[16];
 } __packed;
@@ -1583,7 +1651,7 @@ struct hci_ev_le_conn_complete {
 #define HCI_EV_LE_LTK_REQ              0x05
 struct hci_ev_le_ltk_req {
        __le16  handle;
-       __u8    random[8];
+       __le64  rand;
        __le16  ediv;
 } __packed;
 
index f2f0cf5865c40a9b92f7a98eb4c53c6834a34d29..5f8bc05694ac665159bb25f7a9eaa704c3784e91 100644 (file)
@@ -91,6 +91,13 @@ struct bt_uuid {
        u8 svc_hint;
 };
 
+struct smp_csrk {
+       bdaddr_t bdaddr;
+       u8 bdaddr_type;
+       u8 master;
+       u8 val[16];
+};
+
 struct smp_ltk {
        struct list_head list;
        bdaddr_t bdaddr;
@@ -99,9 +106,17 @@ struct smp_ltk {
        u8 type;
        u8 enc_size;
        __le16 ediv;
-       u8 rand[8];
+       __le64 rand;
        u8 val[16];
-} __packed;
+};
+
+struct smp_irk {
+       struct list_head list;
+       bdaddr_t rpa;
+       bdaddr_t bdaddr;
+       u8 addr_type;
+       u8 val[16];
+};
 
 struct link_key {
        struct list_head list;
@@ -114,12 +129,17 @@ struct link_key {
 struct oob_data {
        struct list_head list;
        bdaddr_t bdaddr;
-       u8 hash[16];
-       u8 randomizer[16];
+       u8 hash192[16];
+       u8 randomizer192[16];
+       u8 hash256[16];
+       u8 randomizer256[16];
 };
 
 #define HCI_MAX_SHORT_NAME_LENGTH      10
 
+/* Default LE RPA expiry time, 15 minutes */
+#define HCI_DEFAULT_RPA_TIMEOUT                (15 * 60)
+
 struct amp_assoc {
        __u16   len;
        __u16   offset;
@@ -141,8 +161,9 @@ struct hci_dev {
        __u8            bus;
        __u8            dev_type;
        bdaddr_t        bdaddr;
+       bdaddr_t        random_addr;
        bdaddr_t        static_addr;
-       __u8            own_addr_type;
+       __u8            adv_addr_type;
        __u8            dev_name[HCI_MAX_NAME_LENGTH];
        __u8            short_name[HCI_MAX_SHORT_NAME_LENGTH];
        __u8            eir[HCI_MAX_EIR_LENGTH];
@@ -167,6 +188,8 @@ struct hci_dev {
        __u16           page_scan_interval;
        __u16           page_scan_window;
        __u8            page_scan_type;
+       __u8            le_adv_channel_map;
+       __u8            le_scan_type;
        __u16           le_scan_interval;
        __u16           le_scan_window;
        __u16           le_conn_min_interval;
@@ -257,19 +280,21 @@ struct hci_dev {
        __u32                   req_status;
        __u32                   req_result;
 
-       struct list_head        mgmt_pending;
+       struct crypto_blkcipher *tfm_aes;
 
        struct discovery_state  discovery;
        struct hci_conn_hash    conn_hash;
-       struct list_head        blacklist;
 
+       struct list_head        mgmt_pending;
+       struct list_head        blacklist;
        struct list_head        uuids;
-
        struct list_head        link_keys;
-
        struct list_head        long_term_keys;
-
+       struct list_head        identity_resolving_keys;
        struct list_head        remote_oob_data;
+       struct list_head        le_white_list;
+       struct list_head        le_conn_params;
+       struct list_head        pend_le_conns;
 
        struct hci_dev_stats    stat;
 
@@ -291,6 +316,11 @@ struct hci_dev {
        __u8                    scan_rsp_data[HCI_MAX_AD_LENGTH];
        __u8                    scan_rsp_data_len;
 
+       __u8                    irk[16];
+       __u32                   rpa_timeout;
+       struct delayed_work     rpa_expired;
+       bdaddr_t                rpa;
+
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
@@ -310,6 +340,10 @@ struct hci_conn {
        __u8            dst_type;
        bdaddr_t        src;
        __u8            src_type;
+       bdaddr_t        init_addr;
+       __u8            init_addr_type;
+       bdaddr_t        resp_addr;
+       __u8            resp_addr_type;
        __u16           handle;
        __u16           state;
        __u8            mode;
@@ -332,6 +366,8 @@ struct hci_conn {
        __u8            passkey_entered;
        __u16           disc_timeout;
        __u16           setting;
+       __u16           le_conn_min_interval;
+       __u16           le_conn_max_interval;
        unsigned long   flags;
 
        __u8            remote_cap;
@@ -347,6 +383,7 @@ struct hci_conn {
        struct delayed_work disc_work;
        struct delayed_work auto_accept_work;
        struct delayed_work idle_work;
+       struct delayed_work le_conn_timeout;
 
        struct device   dev;
 
@@ -372,6 +409,22 @@ struct hci_chan {
        __u8            state;
 };
 
+struct hci_conn_params {
+       struct list_head list;
+
+       bdaddr_t addr;
+       u8 addr_type;
+
+       u16 conn_min_interval;
+       u16 conn_max_interval;
+
+       enum {
+               HCI_AUTO_CONN_DISABLED,
+               HCI_AUTO_CONN_ALWAYS,
+               HCI_AUTO_CONN_LINK_LOSS,
+       } auto_connect;
+};
+
 extern struct list_head hci_dev_list;
 extern struct list_head hci_cb_list;
 extern rwlock_t hci_dev_list_lock;
@@ -446,6 +499,8 @@ enum {
        HCI_CONN_LE_SMP_PEND,
        HCI_CONN_MGMT_CONNECTED,
        HCI_CONN_SSP_ENABLED,
+       HCI_CONN_SC_ENABLED,
+       HCI_CONN_AES_CCM,
        HCI_CONN_POWER_SAVE,
        HCI_CONN_REMOTE_OOB,
        HCI_CONN_6LOWPAN,
@@ -458,6 +513,13 @@ static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
               test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
 }
 
+static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       return test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+              test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
+}
+
 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
 {
        struct hci_conn_hash *h = &hdev->conn_hash;
@@ -521,6 +583,13 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
        }
 }
 
+static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+{
+       struct hci_conn_hash *c = &hdev->conn_hash;
+
+       return c->acl_num + c->amp_num + c->sco_num + c->le_num;
+}
+
 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
                                                                __u16 handle)
 {
@@ -594,8 +663,10 @@ void hci_chan_del(struct hci_chan *chan);
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
-                            __u8 dst_type, __u8 sec_level, __u8 auth_type);
+struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+                               u8 dst_type, u8 sec_level, u8 auth_type);
+struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+                                u8 sec_level, u8 auth_type);
 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
                                 __u16 setting);
 int hci_conn_check_link_mode(struct hci_conn *conn);
@@ -606,6 +677,8 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
 
+void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+
 /*
  * hci_conn_get() and hci_conn_put() are used to control the life-time of an
  * "hci_conn" object. They do not guarantee that the hci_conn object is running,
@@ -737,31 +810,64 @@ int hci_inquiry(void __user *arg);
 
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                         bdaddr_t *bdaddr, u8 type);
-int hci_blacklist_clear(struct hci_dev *hdev);
 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 
-int hci_uuids_clear(struct hci_dev *hdev);
+struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
+                                         bdaddr_t *bdaddr, u8 type);
+void hci_white_list_clear(struct hci_dev *hdev);
+int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
+
+struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+                                              bdaddr_t *addr, u8 addr_type);
+int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
+                       u8 auto_connect, u16 conn_min_interval,
+                       u16 conn_max_interval);
+void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_conn_params_clear(struct hci_dev *hdev);
+
+struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
+                                           bdaddr_t *addr, u8 addr_type);
+void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
+void hci_pend_le_conns_clear(struct hci_dev *hdev);
+
+void hci_update_background_scan(struct hci_dev *hdev);
 
-int hci_link_keys_clear(struct hci_dev *hdev);
+void hci_uuids_clear(struct hci_dev *hdev);
+
+void hci_link_keys_clear(struct hci_dev *hdev);
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len);
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]);
-int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
-               int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
-               __le16 ediv, u8 rand[8]);
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
+                            bool master);
+struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 addr_type, u8 type, u8 authenticated,
+                           u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                    u8 addr_type);
-int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr);
-int hci_smp_ltks_clear(struct hci_dev *hdev);
+                                    u8 addr_type, bool master);
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
+void hci_smp_ltks_clear(struct hci_dev *hdev);
 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
-int hci_remote_oob_data_clear(struct hci_dev *hdev);
+struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
+struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                    u8 addr_type);
+struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 addr_type, u8 val[16], bdaddr_t *rpa);
+void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
+void hci_smp_irks_clear(struct hci_dev *hdev);
+
+void hci_remote_oob_data_clear(struct hci_dev *hdev);
 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
-                                                       bdaddr_t *bdaddr);
-int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
-                                                               u8 *randomizer);
+                                         bdaddr_t *bdaddr);
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 *hash, u8 *randomizer);
+int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                               u8 *hash192, u8 *randomizer192,
+                               u8 *hash256, u8 *randomizer256);
 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -803,9 +909,12 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_csb_slave_capable(dev)  ((dev)->features[2][0] & LMP_CSB_SLAVE)
 #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
 #define lmp_sync_scan_capable(dev)  ((dev)->features[2][0] & LMP_SYNC_SCAN)
+#define lmp_sc_capable(dev)         ((dev)->features[2][1] & LMP_SC)
+#define lmp_ping_capable(dev)       ((dev)->features[2][1] & LMP_PING)
 
 /* ----- Host capabilities ----- */
 #define lmp_host_ssp_capable(dev)  ((dev)->features[1][0] & LMP_HOST_SSP)
+#define lmp_host_sc_capable(dev)   ((dev)->features[1][0] & LMP_HOST_SC)
 #define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
 
@@ -1019,6 +1128,26 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
        return false;
 }
 
+static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+{
+       if (addr_type != 0x01)
+               return false;
+
+       if ((bdaddr->b[5] & 0xc0) == 0x40)
+               return true;
+
+       return false;
+}
+
+static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
+                                         bdaddr_t *bdaddr, u8 addr_type)
+{
+       if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
+               return NULL;
+
+       return hci_find_irk_by_rpa(hdev, bdaddr);
+}
+
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
@@ -1040,6 +1169,9 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event);
 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status);
 
+void hci_req_add_le_scan_disable(struct hci_request *req);
+void hci_req_add_le_passive_scan(struct hci_request *req);
+
 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout);
 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
@@ -1085,6 +1217,7 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered);
 void mgmt_discoverable_timeout(struct hci_dev *hdev);
 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
 void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+void mgmt_advertising(struct hci_dev *hdev, u8 advertising);
 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
                       bool persistent);
@@ -1092,7 +1225,8 @@ void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                           u8 addr_type, u32 flags, u8 *name, u8 name_len,
                           u8 *dev_class);
 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                             u8 link_type, u8 addr_type, u8 reason);
+                             u8 link_type, u8 addr_type, u8 reason,
+                             bool mgmt_connected);
 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 link_type, u8 addr_type, u8 status);
 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -1103,7 +1237,7 @@ void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                      u8 status);
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                             u8 link_type, u8 addr_type, __le32 value,
+                             u8 link_type, u8 addr_type, u32 value,
                              u8 confirm_hint);
 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 link_type, u8 addr_type, u8 status);
@@ -1122,11 +1256,13 @@ void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                      u8 addr_type, u8 status);
 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
                                    u8 status);
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
-                                            u8 *randomizer, u8 status);
+void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
+                                      u8 *randomizer192, u8 *hash256,
+                                      u8 *randomizer256, u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
                       u8 ssp, u8 *eir, u16 eir_len);
@@ -1135,8 +1271,12 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
+void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+                  bool persistent);
 void mgmt_reenable_advertising(struct hci_dev *hdev);
+void mgmt_smp_complete(struct hci_conn *conn, bool complete);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -1168,9 +1308,14 @@ struct hci_sec_filter {
 
 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                                        u16 latency, u16 to_multiplier);
-void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
                                                        __u8 ltk[16]);
 
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+                             u8 *own_addr_type);
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                              u8 *bdaddr_type);
+
 #define SCO_AIRMODE_MASK       0x0003
 #define SCO_AIRMODE_CVSD       0x0000
 #define SCO_AIRMODE_TRANSP     0x0003
index dbc4a89984ca67287715a8279b2d9793e7d2eebe..4abdcb220e3ac7a0558a9a0115b873a5312f3f07 100644 (file)
@@ -91,6 +91,7 @@ struct l2cap_conninfo {
 #define L2CAP_LM_TRUSTED       0x0008
 #define L2CAP_LM_RELIABLE      0x0010
 #define L2CAP_LM_SECURE                0x0020
+#define L2CAP_LM_FIPS          0x0040
 
 /* L2CAP command codes */
 #define L2CAP_COMMAND_REJ      0x01
@@ -623,6 +624,9 @@ struct l2cap_conn {
        __u32                   rx_len;
        __u8                    tx_ident;
 
+       struct sk_buff_head     pending_rx;
+       struct work_struct      pending_rx_work;
+
        __u8                    disc_reason;
 
        struct delayed_work     security_timer;
@@ -647,7 +651,7 @@ struct l2cap_user {
 #define L2CAP_CHAN_RAW                 1
 #define L2CAP_CHAN_CONN_LESS           2
 #define L2CAP_CHAN_CONN_ORIENTED       3
-#define L2CAP_CHAN_CONN_FIX_A2MP       4
+#define L2CAP_CHAN_FIXED               4
 
 /* ----- L2CAP socket info ----- */
 #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
@@ -853,7 +857,6 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
 }
 
 extern bool disable_ertm;
-extern bool enable_lecoc;
 
 int l2cap_init_sockets(void);
 void l2cap_cleanup_sockets(void);
@@ -878,6 +881,7 @@ int l2cap_ertm_init(struct l2cap_chan *chan);
 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
 void l2cap_chan_del(struct l2cap_chan *chan, int err);
+void l2cap_conn_update_id_addr(struct hci_conn *hcon);
 void l2cap_send_conn_req(struct l2cap_chan *chan);
 void l2cap_move_start(struct l2cap_chan *chan);
 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
index 518c5c84e39a67ef4c9789eacd8d88117bf6ccbd..d4b571c2f9fd33b65ec95dbf9ca6b691f1567b61 100644 (file)
@@ -94,6 +94,9 @@ struct mgmt_rp_read_index_list {
 #define MGMT_SETTING_HS                        0x00000100
 #define MGMT_SETTING_LE                        0x00000200
 #define MGMT_SETTING_ADVERTISING       0x00000400
+#define MGMT_SETTING_SECURE_CONN       0x00000800
+#define MGMT_SETTING_DEBUG_KEYS                0x00001000
+#define MGMT_SETTING_PRIVACY           0x00002000
 
 #define MGMT_OP_READ_INFO              0x0004
 #define MGMT_READ_INFO_SIZE            0
@@ -180,11 +183,11 @@ struct mgmt_cp_load_link_keys {
 
 struct mgmt_ltk_info {
        struct mgmt_addr_info addr;
-       __u8    authenticated;
+       __u8    type;
        __u8    master;
        __u8    enc_size;
        __le16  ediv;
-       __u8    rand[8];
+       __le64  rand;
        __u8    val[16];
 } __packed;
 
@@ -294,6 +297,12 @@ struct mgmt_rp_read_local_oob_data {
        __u8    hash[16];
        __u8    randomizer[16];
 } __packed;
+struct mgmt_rp_read_local_oob_ext_data {
+       __u8    hash192[16];
+       __u8    randomizer192[16];
+       __u8    hash256[16];
+       __u8    randomizer256[16];
+} __packed;
 
 #define MGMT_OP_ADD_REMOTE_OOB_DATA    0x0021
 struct mgmt_cp_add_remote_oob_data {
@@ -302,6 +311,14 @@ struct mgmt_cp_add_remote_oob_data {
        __u8    randomizer[16];
 } __packed;
 #define MGMT_ADD_REMOTE_OOB_DATA_SIZE  (MGMT_ADDR_INFO_SIZE + 32)
+struct mgmt_cp_add_remote_oob_ext_data {
+       struct mgmt_addr_info addr;
+       __u8    hash192[16];
+       __u8    randomizer192[16];
+       __u8    hash256[16];
+       __u8    randomizer256[16];
+} __packed;
+#define MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE (MGMT_ADDR_INFO_SIZE + 64)
 
 #define MGMT_OP_REMOVE_REMOTE_OOB_DATA 0x0022
 struct mgmt_cp_remove_remote_oob_data {
@@ -369,6 +386,29 @@ struct mgmt_cp_set_scan_params {
 } __packed;
 #define MGMT_SET_SCAN_PARAMS_SIZE      4
 
+#define MGMT_OP_SET_SECURE_CONN                0x002D
+
+#define MGMT_OP_SET_DEBUG_KEYS         0x002E
+
+#define MGMT_OP_SET_PRIVACY            0x002F
+struct mgmt_cp_set_privacy {
+       __u8 privacy;
+       __u8 irk[16];
+} __packed;
+#define MGMT_SET_PRIVACY_SIZE          17
+
+struct mgmt_irk_info {
+       struct mgmt_addr_info addr;
+       __u8 val[16];
+} __packed;
+
+#define MGMT_OP_LOAD_IRKS              0x0030
+struct mgmt_cp_load_irks {
+       __le16 irk_count;
+       struct mgmt_irk_info irks[0];
+} __packed;
+#define MGMT_LOAD_IRKS_SIZE            2
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16  opcode;
@@ -504,3 +544,22 @@ struct mgmt_ev_passkey_notify {
        __le32  passkey;
        __u8    entered;
 } __packed;
+
+#define MGMT_EV_NEW_IRK                        0x0018
+struct mgmt_ev_new_irk {
+       __u8     store_hint;
+       bdaddr_t rpa;
+       struct mgmt_irk_info irk;
+} __packed;
+
+struct mgmt_csrk_info {
+       struct mgmt_addr_info addr;
+       __u8 master;
+       __u8 val[16];
+} __packed;
+
+#define MGMT_EV_NEW_CSRK               0x0019
+struct mgmt_ev_new_csrk {
+       __u8 store_hint;
+       struct mgmt_csrk_info key;
+} __packed;
index 486213a1aed8d07ad63aaba57957e7a651657696..2611cc389d7d65d03cf298061bf3e4ed32c304cd 100644 (file)
@@ -238,9 +238,11 @@ int  rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
                                                                u8 channel);
 int  rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
 int  rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb);
+void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb);
 int  rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig);
 int  rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig);
 void rfcomm_dlc_accept(struct rfcomm_dlc *d);
+struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel);
 
 #define rfcomm_dlc_lock(d)     spin_lock(&d->lock)
 #define rfcomm_dlc_unlock(d)   spin_unlock(&d->lock)
@@ -295,6 +297,7 @@ struct rfcomm_conninfo {
 #define RFCOMM_LM_TRUSTED      0x0008
 #define RFCOMM_LM_RELIABLE     0x0010
 #define RFCOMM_LM_SECURE       0x0020
+#define RFCOMM_LM_FIPS         0x0040
 
 #define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk)
 
@@ -323,11 +326,16 @@ int  rfcomm_connect_ind(struct rfcomm_session *s, u8 channel,
 #define RFCOMMGETDEVINFO       _IOR('R', 211, int)
 #define RFCOMMSTEALDLC         _IOW('R', 220, int)
 
+/* rfcomm_dev.flags bit definitions */
 #define RFCOMM_REUSE_DLC      0
 #define RFCOMM_RELEASE_ONHUP  1
 #define RFCOMM_HANGUP_NOW     2
 #define RFCOMM_TTY_ATTACHED   3
-#define RFCOMM_TTY_RELEASED   4
+#define RFCOMM_DEFUNCT_BIT4   4          /* don't reuse this bit - userspace visible */
+
+/* rfcomm_dev.status bit definitions */
+#define RFCOMM_DEV_RELEASED   0
+#define RFCOMM_TTY_OWNED      1
 
 struct rfcomm_dev_req {
        s16      dev_id;
index b1f84b05c67e99371eef66c6c3adad1a45380f50..f3539a15c41103b743c0571913fbb93dc402f40a 100644 (file)
@@ -151,6 +151,7 @@ enum ieee80211_channel_flags {
  * @dfs_state: current state of this channel. Only relevant if radar is required
  *     on this channel.
  * @dfs_state_entered: timestamp (jiffies) when the dfs state was entered.
+ * @dfs_cac_ms: DFS CAC time in milliseconds; only valid for DFS channels.
  */
 struct ieee80211_channel {
        enum ieee80211_band band;
@@ -165,6 +166,7 @@ struct ieee80211_channel {
        int orig_mag, orig_mpwr;
        enum nl80211_dfs_state dfs_state;
        unsigned long dfs_state_entered;
+       unsigned int dfs_cac_ms;
 };
 
 /**
@@ -1394,10 +1396,12 @@ struct cfg80211_scan_request {
 /**
  * struct cfg80211_match_set - sets of attributes to match
  *
- * @ssid: SSID to be matched
+ * @ssid: SSID to be matched; may be zero-length for no match (RSSI only)
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
  */
 struct cfg80211_match_set {
        struct cfg80211_ssid ssid;
+       s32 rssi_thold;
 };
 
 /**
@@ -1420,7 +1424,8 @@ struct cfg80211_match_set {
  * @dev: the interface
  * @scan_start: start time of the scheduled scan
  * @channels: channels to scan
- * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ * @min_rssi_thold: for drivers only supporting a single threshold, this
+ *     contains the minimum over all matchsets
  */
 struct cfg80211_sched_scan_request {
        struct cfg80211_ssid *ssids;
@@ -1433,7 +1438,7 @@ struct cfg80211_sched_scan_request {
        u32 flags;
        struct cfg80211_match_set *match_sets;
        int n_match_sets;
-       s32 rssi_thold;
+       s32 min_rssi_thold;
 
        /* internal */
        struct wiphy *wiphy;
@@ -1701,8 +1706,14 @@ struct cfg80211_ibss_params {
  *
  * @channel: The channel to use or %NULL if not specified (auto-select based
  *     on scan results)
+ * @channel_hint: The channel of the recommended BSS for initial connection or
+ *     %NULL if not specified
  * @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
  *     results)
+ * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
+ *     %NULL if not specified. Unlike the @bssid parameter, the driver is
+ *     allowed to ignore this @bssid_hint if it has knowledge of a better BSS
+ *     to use.
  * @ssid: SSID
  * @ssid_len: Length of ssid in octets
  * @auth_type: Authentication type (algorithm)
@@ -1725,11 +1736,13 @@ struct cfg80211_ibss_params {
  */
 struct cfg80211_connect_params {
        struct ieee80211_channel *channel;
-       u8 *bssid;
-       u8 *ssid;
+       struct ieee80211_channel *channel_hint;
+       const u8 *bssid;
+       const u8 *bssid_hint;
+       const u8 *ssid;
        size_t ssid_len;
        enum nl80211_auth_type auth_type;
-       u8 *ie;
+       const u8 *ie;
        size_t ie_len;
        bool privacy;
        enum nl80211_mfp mfp;
@@ -1768,6 +1781,7 @@ struct cfg80211_bitrate_mask {
                u32 legacy;
                u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
                u16 vht_mcs[NL80211_VHT_NSS_MAX];
+               enum nl80211_txrate_gi gi;
        } control[IEEE80211_NUM_BANDS];
 };
 /**
@@ -2194,7 +2208,12 @@ struct cfg80211_qos_map {
  * @set_cqm_txe_config: Configure connection quality monitor TX error
  *     thresholds.
  * @sched_scan_start: Tell the driver to start a scheduled scan.
- * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan.
+ * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan. This
+ *     call must stop the scheduled scan and be ready for starting a new one
+ *     before it returns, i.e. @sched_scan_start may be called immediately
+ *     after that again and should not fail in that case. The driver should
+ *     not call cfg80211_sched_scan_stopped() for a requested stop (when this
+ *     method returns 0).
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
  *     registered. Note that this callback may not sleep, and cannot run
@@ -2453,7 +2472,8 @@ struct cfg80211_ops {
 
        int     (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev,
                             u8 *peer, u8 action_code,  u8 dialog_token,
-                            u16 status_code, const u8 *buf, size_t len);
+                            u16 status_code, u32 peer_capability,
+                            const u8 *buf, size_t len);
        int     (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev,
                             u8 *peer, enum nl80211_tdls_operation oper);
 
@@ -2485,7 +2505,8 @@ struct cfg80211_ops {
 
        int     (*start_radar_detection)(struct wiphy *wiphy,
                                         struct net_device *dev,
-                                        struct cfg80211_chan_def *chandef);
+                                        struct cfg80211_chan_def *chandef,
+                                        u32 cac_time_ms);
        int     (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
                                 struct cfg80211_update_ft_ies_params *ftie);
        int     (*crit_proto_start)(struct wiphy *wiphy,
@@ -2598,9 +2619,12 @@ struct ieee80211_iface_limit {
  *     only in special cases.
  * @radar_detect_widths: bitmap of channel widths supported for radar detection
  *
- * These examples can be expressed as follows:
+ * With this structure the driver can describe which interface
+ * combinations it supports concurrently.
+ *
+ * Examples:
  *
- * Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
+ * 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
  *
  *  struct ieee80211_iface_limit limits1[] = {
  *     { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
@@ -2614,7 +2638,7 @@ struct ieee80211_iface_limit {
  *  };
  *
  *
- * Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
+ * 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
  *
  *  struct ieee80211_iface_limit limits2[] = {
  *     { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
@@ -2628,7 +2652,8 @@ struct ieee80211_iface_limit {
  *  };
  *
  *
- * Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
+ * 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
+ *
  * This allows for an infrastructure connection and three P2P connections.
  *
  *  struct ieee80211_iface_limit limits3[] = {
@@ -2778,7 +2803,7 @@ struct wiphy_vendor_command {
  * @perm_addr: permanent MAC address of this device
  * @addr_mask: If the device supports multiple MAC addresses by masking,
  *     set this to a mask with variable bits set to 1, e.g. if the last
- *     four bits are variable then set it to 00:...:00:0f. The actual
+ *     four bits are variable then set it to 00-00-00-00-00-0f. The actual
  *     variable bits shall be determined by the interfaces added, with
  *     interfaces not matching the mask being rejected to be brought up.
  * @n_addresses: number of addresses in @addresses.
@@ -2875,6 +2900,11 @@ struct wiphy_vendor_command {
  * @n_vendor_commands: number of vendor commands
  * @vendor_events: array of vendor events supported by the hardware
  * @n_vendor_events: number of vendor events
+ *
+ * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
+ *     (including P2P GO) or 0 to indicate no such limit is advertised. The
+ *     driver is allowed to advertise a theoretical limit that it can reach in
+ *     some cases, but may not always reach.
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -2990,6 +3020,8 @@ struct wiphy {
        const struct nl80211_vendor_cmd_info *vendor_events;
        int n_vendor_commands, n_vendor_events;
 
+       u16 max_ap_assoc_sta;
+
        char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3127,8 +3159,8 @@ struct cfg80211_cached_keys;
  * @identifier: (private) Identifier used in nl80211 to identify this
  *     wireless device if it has no netdev
  * @current_bss: (private) Used by the internal configuration code
- * @channel: (private) Used by the internal configuration code to track
- *     the user-set AP, monitor and WDS channel
+ * @chandef: (private) Used by the internal configuration code to track
+ *     the user-set channel definition.
  * @preset_chandef: (private) Used by the internal configuration code to
  *     track the channel to be used for AP later
  * @bssid: (private) Used by the internal configuration code
@@ -3151,6 +3183,7 @@ struct cfg80211_cached_keys;
  * @p2p_started: true if this is a P2P Device that has been started
  * @cac_started: true if DFS channel availability check has been started
  * @cac_start_time: timestamp (jiffies) when the dfs state was entered.
+ * @cac_time_ms: CAC time in ms
  * @ps: powersave mode is enabled
  * @ps_timeout: dynamic powersave timeout
  * @ap_unexpected_nlportid: (private) netlink port ID of application
@@ -3192,9 +3225,7 @@ struct wireless_dev {
 
        struct cfg80211_internal_bss *current_bss; /* associated / joined */
        struct cfg80211_chan_def preset_chandef;
-
-       /* for AP and mesh channel tracking */
-       struct ieee80211_channel *channel;
+       struct cfg80211_chan_def chandef;
 
        bool ibss_fixed;
        bool ibss_dfs_possible;
@@ -3208,6 +3239,7 @@ struct wireless_dev {
 
        bool cac_started;
        unsigned long cac_start_time;
+       unsigned int cac_time_ms;
 
 #ifdef CONFIG_CFG80211_WEXT
        /* wext data */
@@ -3640,7 +3672,7 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
  * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
  *
  * @wiphy: the wiphy reporting the BSS
- * @channel: The channel the frame was received on
+ * @rx_channel: The channel the frame was received on
  * @scan_width: width of the control channel
  * @mgmt: the management frame (probe response or beacon)
  * @len: length of the management frame
@@ -3655,18 +3687,18 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
  */
 struct cfg80211_bss * __must_check
 cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
-                               struct ieee80211_channel *channel,
+                               struct ieee80211_channel *rx_channel,
                                enum nl80211_bss_scan_width scan_width,
                                struct ieee80211_mgmt *mgmt, size_t len,
                                s32 signal, gfp_t gfp);
 
 static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss_frame(struct wiphy *wiphy,
-                         struct ieee80211_channel *channel,
+                         struct ieee80211_channel *rx_channel,
                          struct ieee80211_mgmt *mgmt, size_t len,
                          s32 signal, gfp_t gfp)
 {
-       return cfg80211_inform_bss_width_frame(wiphy, channel,
+       return cfg80211_inform_bss_width_frame(wiphy, rx_channel,
                                               NL80211_BSS_CHAN_WIDTH_20,
                                               mgmt, len, signal, gfp);
 }
@@ -3675,7 +3707,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
  * cfg80211_inform_bss - inform cfg80211 of a new BSS
  *
  * @wiphy: the wiphy reporting the BSS
- * @channel: The channel the frame was received on
+ * @rx_channel: The channel the frame was received on
  * @scan_width: width of the control channel
  * @bssid: the BSSID of the BSS
  * @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
@@ -3694,7 +3726,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
  */
 struct cfg80211_bss * __must_check
 cfg80211_inform_bss_width(struct wiphy *wiphy,
-                         struct ieee80211_channel *channel,
+                         struct ieee80211_channel *rx_channel,
                          enum nl80211_bss_scan_width scan_width,
                          const u8 *bssid, u64 tsf, u16 capability,
                          u16 beacon_interval, const u8 *ie, size_t ielen,
@@ -3702,12 +3734,12 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
 
 static inline struct cfg80211_bss * __must_check
 cfg80211_inform_bss(struct wiphy *wiphy,
-                   struct ieee80211_channel *channel,
+                   struct ieee80211_channel *rx_channel,
                    const u8 *bssid, u64 tsf, u16 capability,
                    u16 beacon_interval, const u8 *ie, size_t ielen,
                    s32 signal, gfp_t gfp)
 {
-       return cfg80211_inform_bss_width(wiphy, channel,
+       return cfg80211_inform_bss_width(wiphy, rx_channel,
                                         NL80211_BSS_CHAN_WIDTH_20,
                                         bssid, tsf, capability,
                                         beacon_interval, ie, ielen, signal,
@@ -3876,6 +3908,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
  *
  * @dev: network device
  * @bssid: the BSSID of the IBSS joined
+ * @channel: the channel of the IBSS joined
  * @gfp: allocation flags
  *
  * This function notifies cfg80211 that the device joined an IBSS or
@@ -3885,7 +3918,8 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
  * with the locally generated beacon -- this guarantees that there is
  * always a scan result for this IBSS. cfg80211 will handle the rest.
  */
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+                         struct ieee80211_channel *channel, gfp_t gfp);
 
 /**
  * cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
index 37a0e24adbe72a995e341c1c0113fbed0c5fd69b..a28f4e0f625193b0682932207a46578f094f52a9 100644 (file)
@@ -69,6 +69,19 @@ static inline __wsum csum_sub(__wsum csum, __wsum addend)
        return csum_add(csum, ~addend);
 }
 
+static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+{
+       u16 res = (__force u16)csum;
+
+       res += (__force u16)addend;
+       return (__force __sum16)(res + (res < (__force u16)addend));
+}
+
+static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+{
+       return csum16_add(csum, ~addend);
+}
+
 static inline __wsum
 csum_block_add(__wsum csum, __wsum csum2, int offset)
 {
@@ -112,9 +125,15 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
        *sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
 }
 
-static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
+/* Implements RFC 1624 (Incremental Internet Checksum).
+ * Section 3 (Discussion) states:
+ *     HC' = ~(~HC + ~m + m')
+ *  m  : old value of a 16-bit field
+ *  m' : new value of a 16-bit field
+ */
+static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
 {
-       csum_replace4(sum, (__force __be32)from, (__force __be32)to);
+       *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
 }
 
 struct sk_buff;
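
As an aside (not part of the patch): a small userspace sketch of the RFC 1624 update that the
reworked csum_replace2() above performs, written with plain uint16_t values instead of the
kernel's __sum16/__be16 types; the helper names are invented for illustration.

    #include <stdint.h>

    /* One's-complement 16-bit add with end-around carry, like csum16_add(). */
    static uint16_t ones_add16(uint16_t a, uint16_t b)
    {
            uint32_t sum = (uint32_t)a + b;

            return (uint16_t)(sum + (sum >> 16));
    }

    /* HC' = ~(~HC + ~m + m'): remove the old field value m, add the new one m'. */
    static uint16_t ones_replace16(uint16_t hc, uint16_t m_old, uint16_t m_new)
    {
            return (uint16_t)~ones_add16(ones_add16((uint16_t)~hc, (uint16_t)~m_old), m_new);
    }
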
index 77eb53fabfb00d6e446d89066937621b22c68e44..46ed958e0c6ef2ffc7142a3739c985382f9a97cf 100644 (file)
@@ -54,10 +54,9 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
-#define DST_NOPEER             0x0040
-#define DST_FAKE_RTABLE                0x0080
-#define DST_XFRM_TUNNEL                0x0100
-#define DST_XFRM_QUEUE         0x0200
+#define DST_FAKE_RTABLE                0x0040
+#define DST_XFRM_TUNNEL                0x0080
+#define DST_XFRM_QUEUE         0x0100
 
        unsigned short          pending_confirm;
 
@@ -109,9 +108,11 @@ struct dst_entry {
 u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
 extern const u32 dst_default_metrics[];
 
-#define DST_METRICS_READ_ONLY  0x1UL
+#define DST_METRICS_READ_ONLY          0x1UL
+#define DST_METRICS_FORCE_OVERWRITE    0x2UL
+#define DST_METRICS_FLAGS              0x3UL
 #define __DST_METRICS_PTR(Y)   \
-       ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+       ((u32 *)((Y) & ~DST_METRICS_FLAGS))
 #define DST_METRICS_PTR(X)     __DST_METRICS_PTR((X)->_metrics)
 
 static inline bool dst_metrics_read_only(const struct dst_entry *dst)
@@ -119,6 +120,11 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
        return dst->_metrics & DST_METRICS_READ_ONLY;
 }
 
+static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
+{
+       dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
+}
+
 void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
 
 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
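
A brief aside, not part of the patch: the DST_METRICS_* changes above rely on the metrics array
pointer being at least 4-byte aligned, so its two low bits are free to carry the READ_ONLY and
FORCE_OVERWRITE flags. A minimal sketch of that pointer-tagging idea, with made-up names:

    #include <stdint.h>

    #define METRICS_READ_ONLY       0x1UL   /* mirrors DST_METRICS_READ_ONLY */
    #define METRICS_FORCE_OVERWRITE 0x2UL   /* mirrors DST_METRICS_FORCE_OVERWRITE */
    #define METRICS_FLAGS           0x3UL   /* mirrors DST_METRICS_FLAGS */

    /* Recover the real pointer by stripping the flag bits. */
    static inline uint32_t *metrics_ptr(unsigned long tagged)
    {
            return (uint32_t *)(tagged & ~METRICS_FLAGS);
    }

    /* Tag the stored value so a later update may overwrite the metrics. */
    static inline unsigned long metrics_set_force_overwrite(unsigned long tagged)
    {
            return tagged | METRICS_FORCE_OVERWRITE;
    }
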
index d23e7fa2042e0dd79b9d23867d5237f164ff3782..64fd24836650b7ea14a5c6ba8278066bbb21bf0d 100644 (file)
@@ -218,9 +218,11 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
                                            const struct flowi *key, u16 family,
                                            u8 dir, flow_resolve_t resolver,
                                            void *ctx);
+int flow_cache_init(struct net *net);
+void flow_cache_fini(struct net *net);
 
-void flow_cache_flush(void);
-void flow_cache_flush_deferred(void);
+void flow_cache_flush(struct net *net);
+void flow_cache_flush_deferred(struct net *net);
 extern atomic_t flow_cache_genid;
 
 #endif
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
new file mode 100644 (file)
index 0000000..c8f665e
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _NET_FLOWCACHE_H
+#define _NET_FLOWCACHE_H
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+
+struct flow_cache_percpu {
+       struct hlist_head               *hash_table;
+       int                             hash_count;
+       u32                             hash_rnd;
+       int                             hash_rnd_recalc;
+       struct tasklet_struct           flush_tasklet;
+};
+
+struct flow_cache {
+       u32                             hash_shift;
+       struct flow_cache_percpu __percpu *percpu;
+       struct notifier_block           hotcpu_notifier;
+       int                             low_watermark;
+       int                             high_watermark;
+       struct timer_list               rnd_timer;
+};
+#endif /* _NET_FLOWCACHE_H */
index 8b5b714332971c547faa032574f56010f7ae3e15..b0fd9476c538eb78a62a6f72a555c1ddb9194426 100644 (file)
@@ -316,6 +316,10 @@ enum ieee80211_radiotap_type {
 #define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM                0x10
 #define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED                 0x20
 
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER0                   0x01
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER1                   0x02
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER2                   0x04
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER3                   0x08
 
 /* helpers */
 static inline int ieee80211_get_radiotap_len(unsigned char *data)
index ee59f8b188ddfb081c948b27f54ac73839a9d2ef..c7ae0ac528dc1e5e1d3c2d5456c2d0e5221b6933 100644 (file)
            (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \
        } while (0)
 
-#define IEEE802154_FC_SECEN            (1 << 3)
-#define IEEE802154_FC_FRPEND           (1 << 4)
-#define IEEE802154_FC_ACK_REQ          (1 << 5)
-#define IEEE802154_FC_INTRA_PAN                (1 << 6)
+#define IEEE802154_FC_SECEN_SHIFT      3
+#define IEEE802154_FC_SECEN            (1 << IEEE802154_FC_SECEN_SHIFT)
+#define IEEE802154_FC_FRPEND_SHIFT     4
+#define IEEE802154_FC_FRPEND           (1 << IEEE802154_FC_FRPEND_SHIFT)
+#define IEEE802154_FC_ACK_REQ_SHIFT    5
+#define IEEE802154_FC_ACK_REQ          (1 << IEEE802154_FC_ACK_REQ_SHIFT)
+#define IEEE802154_FC_INTRA_PAN_SHIFT  6
+#define IEEE802154_FC_INTRA_PAN                (1 << IEEE802154_FC_INTRA_PAN_SHIFT)
 
 #define IEEE802154_FC_SAMODE_SHIFT     14
 #define IEEE802154_FC_SAMODE_MASK      (3 << IEEE802154_FC_SAMODE_SHIFT)
 #define IEEE802154_FC_DAMODE_SHIFT     10
 #define IEEE802154_FC_DAMODE_MASK      (3 << IEEE802154_FC_DAMODE_SHIFT)
 
+#define IEEE802154_FC_VERSION_SHIFT    12
+#define IEEE802154_FC_VERSION_MASK     (3 << IEEE802154_FC_VERSION_SHIFT)
+#define IEEE802154_FC_VERSION(x)       ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT)
+
 #define IEEE802154_FC_SAMODE(x)                \
        (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT)
 
 #define IEEE802154_FC_DAMODE(x)                \
        (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
 
+#define IEEE802154_SCF_SECLEVEL_MASK           7
+#define IEEE802154_SCF_SECLEVEL_SHIFT          0
+#define IEEE802154_SCF_SECLEVEL(x)             (x & IEEE802154_SCF_SECLEVEL_MASK)
+#define IEEE802154_SCF_KEY_ID_MODE_SHIFT       3
+#define IEEE802154_SCF_KEY_ID_MODE_MASK                (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT)
+#define IEEE802154_SCF_KEY_ID_MODE(x)          \
+       ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT)
+
+#define IEEE802154_SCF_KEY_IMPLICIT            0
+#define IEEE802154_SCF_KEY_INDEX               1
+#define IEEE802154_SCF_KEY_SHORT_INDEX         2
+#define IEEE802154_SCF_KEY_HW_INDEX            3
 
 /* MAC footer size */
 #define IEEE802154_MFR_SIZE    2 /* 2 octets */
index 8196d5d4035970849c4caba4ee0c2c0928ec24a0..e1717cbf609b67d6761dc68ed9d8a40a202bbfb3 100644 (file)
 #define IEEE802154_NETDEVICE_H
 
 #include <net/af_ieee802154.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+struct ieee802154_sechdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 level:3,
+          key_id_mode:2,
+          reserved:3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       u8 reserved:3,
+          key_id_mode:2,
+          level:3;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+       u8 key_id;
+       __le32 frame_counter;
+       union {
+               __le32 short_src;
+               __le64 extended_src;
+       };
+};
+
+struct ieee802154_addr {
+       u8 mode;
+       __le16 pan_id;
+       union {
+               __le16 short_addr;
+               __le64 extended_addr;
+       };
+};
+
+struct ieee802154_hdr_fc {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       u16 type:3,
+           security_enabled:1,
+           frame_pending:1,
+           ack_request:1,
+           intra_pan:1,
+           reserved:3,
+           dest_addr_mode:2,
+           version:2,
+           source_addr_mode:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       u16 reserved:1,
+           intra_pan:1,
+           ack_request:1,
+           frame_pending:1,
+           security_enabled:1,
+           type:3,
+           source_addr_mode:2,
+           version:2,
+           dest_addr_mode:2,
+           reserved2:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+};
+
+struct ieee802154_hdr {
+       struct ieee802154_hdr_fc fc;
+       u8 seq;
+       struct ieee802154_addr source;
+       struct ieee802154_addr dest;
+       struct ieee802154_sechdr sec;
+};
+
+/* Pushes hdr onto the skb. Fields of hdr->fc that can be derived from the
+ * rest of hdr are recalculated, and the caller-supplied values of those
+ * bits in hdr->fc are ignored; this includes the INTRA_PAN bit and, if
+ * SECEN is set, the frame version.
+ */
+int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr);
+
+/* pulls the entire 802.15.4 header off of the skb, including the security
+ * header, and performs pan id decompression
+ */
+int ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr);
+
+/* parses the frame control, sequence number and address fields in a given skb
+ * and stores them into hdr, performing pan id decompression and the length
+ * checks needed to make it suitable for use in header_ops.parse
+ */
+int ieee802154_hdr_peek_addrs(const struct sk_buff *skb,
+                             struct ieee802154_hdr *hdr);
+
+static inline int ieee802154_hdr_length(struct sk_buff *skb)
+{
+       struct ieee802154_hdr hdr;
+       int len = ieee802154_hdr_pull(skb, &hdr);
+
+       if (len > 0)
+               skb_push(skb, len);
+
+       return len;
+}
+
+static inline bool ieee802154_addr_equal(const struct ieee802154_addr *a1,
+                                        const struct ieee802154_addr *a2)
+{
+       if (a1->pan_id != a2->pan_id || a1->mode != a2->mode)
+               return false;
+
+       if ((a1->mode == IEEE802154_ADDR_LONG &&
+            a1->extended_addr != a2->extended_addr) ||
+           (a1->mode == IEEE802154_ADDR_SHORT &&
+            a1->short_addr != a2->short_addr))
+               return false;
+
+       return true;
+}
+
+static inline __le64 ieee802154_devaddr_from_raw(const void *raw)
+{
+       u64 temp;
+
+       memcpy(&temp, raw, IEEE802154_ADDR_LEN);
+       return (__force __le64)swab64(temp);
+}
+
+static inline void ieee802154_devaddr_to_raw(void *raw, __le64 addr)
+{
+       u64 temp = swab64((__force u64)addr);
+
+       memcpy(raw, &temp, IEEE802154_ADDR_LEN);
+}
+
+static inline void ieee802154_addr_from_sa(struct ieee802154_addr *a,
+                                          const struct ieee802154_addr_sa *sa)
+{
+       a->mode = sa->addr_type;
+       a->pan_id = cpu_to_le16(sa->pan_id);
+
+       switch (a->mode) {
+       case IEEE802154_ADDR_SHORT:
+               a->short_addr = cpu_to_le16(sa->short_addr);
+               break;
+       case IEEE802154_ADDR_LONG:
+               a->extended_addr = ieee802154_devaddr_from_raw(sa->hwaddr);
+               break;
+       }
+}
+
+static inline void ieee802154_addr_to_sa(struct ieee802154_addr_sa *sa,
+                                        const struct ieee802154_addr *a)
+{
+       sa->addr_type = a->mode;
+       sa->pan_id = le16_to_cpu(a->pan_id);
+
+       switch (a->mode) {
+       case IEEE802154_ADDR_SHORT:
+               sa->short_addr = le16_to_cpu(a->short_addr);
+               break;
+       case IEEE802154_ADDR_LONG:
+               ieee802154_devaddr_to_raw(sa->hwaddr, a->extended_addr);
+               break;
+       }
+}
 
 /*
  * A control block of skb passed between the ARPHRD_IEEE802154 device
  */
 struct ieee802154_mac_cb {
        u8 lqi;
-       struct ieee802154_addr sa;
-       struct ieee802154_addr da;
        u8 flags;
        u8 seq;
+       struct ieee802154_addr source;
+       struct ieee802154_addr dest;
 };
 
 static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
@@ -50,23 +208,17 @@ static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
 
 #define MAC_CB_FLAG_ACKREQ             (1 << 3)
 #define MAC_CB_FLAG_SECEN              (1 << 4)
-#define MAC_CB_FLAG_INTRAPAN           (1 << 5)
 
-static inline int mac_cb_is_ackreq(struct sk_buff *skb)
+static inline bool mac_cb_is_ackreq(struct sk_buff *skb)
 {
        return mac_cb(skb)->flags & MAC_CB_FLAG_ACKREQ;
 }
 
-static inline int mac_cb_is_secen(struct sk_buff *skb)
+static inline bool mac_cb_is_secen(struct sk_buff *skb)
 {
        return mac_cb(skb)->flags & MAC_CB_FLAG_SECEN;
 }
 
-static inline int mac_cb_is_intrapan(struct sk_buff *skb)
-{
-       return mac_cb(skb)->flags & MAC_CB_FLAG_INTRAPAN;
-}
-
 static inline int mac_cb_type(struct sk_buff *skb)
 {
        return mac_cb(skb)->flags & MAC_CB_FLAG_TYPEMASK;
@@ -92,7 +244,7 @@ struct ieee802154_mlme_ops {
                        u8 channel, u8 page, u8 cap);
        int (*assoc_resp)(struct net_device *dev,
                        struct ieee802154_addr *addr,
-                       u16 short_addr, u8 status);
+                       __le16 short_addr, u8 status);
        int (*disassoc_req)(struct net_device *dev,
                        struct ieee802154_addr *addr,
                        u8 reason);
@@ -111,8 +263,8 @@ struct ieee802154_mlme_ops {
         * FIXME: these should become the part of PIB/MIB interface.
         * However we still don't have IB interface of any kind
         */
-       u16 (*get_pan_id)(const struct net_device *dev);
-       u16 (*get_short_addr)(const struct net_device *dev);
+       __le16 (*get_pan_id)(const struct net_device *dev);
+       __le16 (*get_short_addr)(const struct net_device *dev);
        u8 (*get_dsn)(const struct net_device *dev);
 };
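
The new header helpers declared above (ieee802154_hdr_push/pull/peek_addrs) return the number of header octets written or parsed, or a negative error, as the ieee802154_hdr_length() wrapper already relies on. A hypothetical receive-path sketch in kernel-style C (example_rx and its use of pr_debug are assumptions, not part of this API):

#include <net/ieee802154_netdev.h>

/* strip the MAC header, then look at the parsed fields */
static int example_rx(struct sk_buff *skb)
{
        struct ieee802154_hdr hdr;
        int hlen = ieee802154_hdr_pull(skb, &hdr);  /* includes sechdr */

        if (hlen < 0)
                return hlen;                        /* malformed frame */

        if (hdr.fc.security_enabled &&
            hdr.sec.key_id_mode == IEEE802154_SCF_KEY_INDEX)
                pr_debug("sec level %u, key index %u\n",
                         hdr.sec.level, hdr.sec.key_id);

        /* skb->data now points at the MAC payload */
        return 0;
}
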
 
index 23be0fd37937ebabfbab8a38864381d0f2d6ff31..25064c28e059e2e42d46dcaf3209a2ff517a7a82 100644 (file)
@@ -187,6 +187,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
 #define NET_INC_STATS(net, field)      SNMP_INC_STATS((net)->mib.net_statistics, field)
 #define NET_INC_STATS_BH(net, field)   SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
 #define NET_INC_STATS_USER(net, field)         SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
+#define NET_ADD_STATS(net, field, adnd)        SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
@@ -266,7 +267,8 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
 
 static inline bool ip_sk_accept_pmtu(const struct sock *sk)
 {
-       return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE;
+       return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
+              inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
 }
 
 static inline bool ip_sk_use_pmtu(const struct sock *sk)
@@ -274,6 +276,12 @@ static inline bool ip_sk_use_pmtu(const struct sock *sk)
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
 }
 
+static inline bool ip_sk_local_df(const struct sock *sk)
+{
+       return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
+              inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+}
+
 static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
                                                    bool forwarding)
 {
@@ -489,7 +497,8 @@ int ip_options_rcv_srr(struct sk_buff *skb);
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
 void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_cmsg_send(struct net *net, struct msghdr *msg,
+                struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
                  unsigned int optlen);
 int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
index aca0c2709fd6b9771a743810e5078ead2659bd4f..9bcb220bd4ad13538ed0ee2d51546b1463cd116b 100644 (file)
@@ -284,7 +284,8 @@ struct fib6_node *fib6_locate(struct fib6_node *root,
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    void *arg);
 
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info);
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
+            struct nlattr *mx, int mx_len);
 
 int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
index 017badb1aec7e8648a6f71d473862b730a78b4e8..3c3bb184eb8f154b61c9f93fd5ea37ffefadeba8 100644 (file)
@@ -51,6 +51,11 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
        return (flags >> 3) & 7;
 }
 
+static inline bool rt6_need_strict(const struct in6_addr *daddr)
+{
+       return ipv6_addr_type(daddr) &
+               (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
 
 void ip6_route_input(struct sk_buff *skb);
 
@@ -171,7 +176,14 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 
 static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
 {
-       return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE;
+       return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+              inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+}
+
+static inline bool ip6_sk_local_df(const struct sock *sk)
+{
+       return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
+              inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
 }
 
 static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
index f4ab2fb4d50c445b980e1f6c507ae5a893c1ddfc..2de7ff42ff3aae4f0e7e8f5eb140d4269853228a 100644 (file)
  *
  * Secondly, when the hardware handles fragmentation, the frame handed to
  * the driver from mac80211 is the MSDU, not the MPDU.
- *
- * Finally, for received frames, the driver is able to indicate that it has
- * filled a radiotap header and put that in front of the frame; if it does
- * not do so then mac80211 may add this under certain circumstances.
  */
 
 /**
@@ -701,11 +697,11 @@ struct ieee80211_tx_info {
                } control;
                struct {
                        struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
-                       int ack_signal;
+                       s32 ack_signal;
                        u8 ampdu_ack_len;
                        u8 ampdu_len;
                        u8 antenna;
-                       /* 21 bytes free */
+                       void *status_driver_data[21 / sizeof(void *)];
                } status;
                struct {
                        struct ieee80211_tx_rate driver_rates[
@@ -808,9 +804,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
  * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
  * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
- * @RX_FLAG_80MHZ: 80 MHz was used
- * @RX_FLAG_80P80MHZ: 80+80 MHz was used
- * @RX_FLAG_160MHZ: 160 MHz was used
  * @RX_FLAG_SHORT_GI: Short guard interval was used
  * @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
  *     Valid only for data frames (mainly A-MPDU)
@@ -830,6 +823,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  *     on this subframe
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *     is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_LDPC: LDPC was used
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
  * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -866,9 +860,7 @@ enum mac80211_rx_flags {
        RX_FLAG_AMPDU_DELIM_CRC_KNOWN   = BIT(20),
        RX_FLAG_MACTIME_END             = BIT(21),
        RX_FLAG_VHT                     = BIT(22),
-       RX_FLAG_80MHZ                   = BIT(23),
-       RX_FLAG_80P80MHZ                = BIT(24),
-       RX_FLAG_160MHZ                  = BIT(25),
+       RX_FLAG_LDPC                    = BIT(23),
        RX_FLAG_STBC_MASK               = BIT(26) | BIT(27),
        RX_FLAG_10MHZ                   = BIT(28),
        RX_FLAG_5MHZ                    = BIT(29),
@@ -877,6 +869,23 @@ enum mac80211_rx_flags {
 
 #define RX_FLAG_STBC_SHIFT             26
 
+/**
+ * enum mac80211_rx_vht_flags - receive VHT flags
+ *
+ * These flags are used with the @vht_flag member of
+ *     &struct ieee80211_rx_status.
+ * @RX_VHT_FLAG_80MHZ: 80 MHz was used
+ * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used
+ * @RX_VHT_FLAG_160MHZ: 160 MHz was used
+ * @RX_VHT_FLAG_BF: packet was beamformed
+ */
+enum mac80211_rx_vht_flags {
+       RX_VHT_FLAG_80MHZ               = BIT(0),
+       RX_VHT_FLAG_80P80MHZ            = BIT(1),
+       RX_VHT_FLAG_160MHZ              = BIT(2),
+       RX_VHT_FLAG_BF                  = BIT(3),
+};
+
 /**
  * struct ieee80211_rx_status - receive status
  *
@@ -902,26 +911,19 @@ enum mac80211_rx_flags {
  *     HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
  * @vht_nss: number of streams (VHT only)
  * @flag: %RX_FLAG_*
+ * @vht_flag: %RX_VHT_FLAG_*
  * @rx_flags: internal RX flags for mac80211
  * @ampdu_reference: A-MPDU reference number, must be a different value for
  *     each A-MPDU but the same for each subframe within one A-MPDU
  * @ampdu_delimiter_crc: A-MPDU delimiter CRC
- * @vendor_radiotap_bitmap: radiotap vendor namespace presence bitmap
- * @vendor_radiotap_len: radiotap vendor namespace length
- * @vendor_radiotap_align: radiotap vendor namespace alignment. Note
- *     that the actual data must be at the start of the SKB data
- *     already.
- * @vendor_radiotap_oui: radiotap vendor namespace OUI
- * @vendor_radiotap_subns: radiotap vendor sub namespace
  */
 struct ieee80211_rx_status {
        u64 mactime;
        u32 device_timestamp;
        u32 ampdu_reference;
        u32 flag;
-       u32 vendor_radiotap_bitmap;
-       u16 vendor_radiotap_len;
        u16 freq;
+       u8 vht_flag;
        u8 rate_idx;
        u8 vht_nss;
        u8 rx_flags;
@@ -931,9 +933,6 @@ struct ieee80211_rx_status {
        u8 chains;
        s8 chain_signal[IEEE80211_MAX_CHAINS];
        u8 ampdu_delimiter_crc;
-       u8 vendor_radiotap_align;
-       u8 vendor_radiotap_oui[3];
-       u8 vendor_radiotap_subns;
 };
 
 /**
@@ -1506,8 +1505,6 @@ struct ieee80211_tx_control {
  * @IEEE80211_HW_CONNECTION_MONITOR:
  *     The hardware performs its own connection monitoring, including
  *     periodic keep-alives to the AP and probing the AP on beacon loss.
- *     When this flag is set, signaling beacon-loss will cause an immediate
- *     change to disassociated state.
  *
  * @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
  *     This device needs to get data from beacon before association (i.e.
@@ -1643,10 +1640,6 @@ enum ieee80211_hw_flags {
  *     the hw can report back.
  * @max_rate_tries: maximum number of tries for each stage
  *
- * @napi_weight: weight used for NAPI polling.  You must specify an
- *     appropriate value here if a napi_poll operation is provided
- *     by your driver.
- *
  * @max_rx_aggregation_subframes: maximum buffer size (number of
  *     sub-frames) to be used for A-MPDU block ack receiver
  *     aggregation.
@@ -1700,7 +1693,6 @@ struct ieee80211_hw {
        int vif_data_size;
        int sta_data_size;
        int chanctx_data_size;
-       int napi_weight;
        u16 queues;
        u16 max_listen_interval;
        s8 max_signal;
@@ -2470,6 +2462,7 @@ enum ieee80211_roc_type {
  *     This process will continue until sched_scan_stop is called.
  *
  * @sched_scan_stop: Tell the hardware to stop an ongoing scheduled scan.
+ *     In this case, ieee80211_sched_scan_stopped() must not be called.
  *
  * @sw_scan_start: Notifier function that is called just before a software scan
  *     is started. Can be NULL, if the driver doesn't need this notification.
@@ -2623,8 +2616,6 @@ enum ieee80211_roc_type {
  *     callback. They must then call ieee80211_chswitch_done() to indicate
  *     completion of the channel switch.
  *
- * @napi_poll: Poll Rx queue for incoming data frames.
- *
  * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
  *     Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
  *     reject TX/RX mask combinations they cannot support by returning -EINVAL
@@ -2750,11 +2741,13 @@ enum ieee80211_roc_type {
  * @channel_switch_beacon: Starts a channel switch to a new channel.
  *     Beacons are modified to include CSA or ECSA IEs before calling this
  *     function. The corresponding count fields in these IEs must be
- *     decremented, and when they reach zero the driver must call
+ *     decremented, and when they reach 1 the driver must call
  *     ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
  *     get the csa counter decremented by mac80211, but must check if it is
- *     zero using ieee80211_csa_is_complete() after the beacon has been
+ *     1 using ieee80211_csa_is_complete() after the beacon has been
  *     transmitted and then call ieee80211_csa_finish().
+ *     If the CSA count starts as zero or 1, this function will not be called,
+ *     since there won't be any time to beacon before the switch anyway.
  *
  * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
  *     information in bss_conf is set up and the beacon can be retrieved. A
@@ -2817,7 +2810,7 @@ struct ieee80211_ops {
                                struct ieee80211_vif *vif,
                                struct cfg80211_sched_scan_request *req,
                                struct ieee80211_sched_scan_ies *ies);
-       void (*sched_scan_stop)(struct ieee80211_hw *hw,
+       int (*sched_scan_stop)(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif);
        void (*sw_scan_start)(struct ieee80211_hw *hw);
        void (*sw_scan_complete)(struct ieee80211_hw *hw);
@@ -2881,7 +2874,6 @@ struct ieee80211_ops {
        void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
        void (*channel_switch)(struct ieee80211_hw *hw,
                               struct ieee80211_channel_switch *ch_switch);
-       int (*napi_poll)(struct ieee80211_hw *hw, int budget);
        int (*set_antenna)(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
        int (*get_antenna)(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
 
@@ -3163,21 +3155,21 @@ void ieee80211_free_hw(struct ieee80211_hw *hw);
  */
 void ieee80211_restart_hw(struct ieee80211_hw *hw);
 
-/** ieee80211_napi_schedule - schedule NAPI poll
- *
- * Use this function to schedule NAPI polling on a device.
- *
- * @hw: the hardware to start polling
- */
-void ieee80211_napi_schedule(struct ieee80211_hw *hw);
-
-/** ieee80211_napi_complete - complete NAPI polling
- *
- * Use this function to finish NAPI polling on a device.
+/**
+ * ieee80211_napi_add - initialize mac80211 NAPI context
+ * @hw: the hardware to initialize the NAPI context on
+ * @napi: the NAPI context to initialize
+ * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
+ *     driver doesn't use NAPI
+ * @poll: poll function
+ * @weight: default weight
  *
- * @hw: the hardware to stop polling
+ * See also netif_napi_add().
  */
-void ieee80211_napi_complete(struct ieee80211_hw *hw);
+void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
+                       struct net_device *napi_dev,
+                       int (*poll)(struct napi_struct *, int),
+                       int weight);
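
With the per-hw napi_weight field and the napi_poll op removed, a driver that wants NAPI now owns the context itself, registers it through the helper declared above, and drives it with the usual napi_enable()/napi_schedule()/napi_complete() calls. A hypothetical driver-side sketch (struct mydrv and the weight of 64 are assumptions):

struct mydrv {
        struct napi_struct napi;
        struct net_device napi_dev;     /* dummy netdev, used only for NAPI */
};

static int mydrv_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        /* ... hand up to `budget` received frames to mac80211 ... */
        if (done < budget)
                napi_complete(napi);
        return done;
}

static void mydrv_rx_setup(struct ieee80211_hw *hw, struct mydrv *drv)
{
        ieee80211_napi_add(hw, &drv->napi, &drv->napi_dev, mydrv_poll, 64);
        napi_enable(&drv->napi);
        /* the RX interrupt handler then calls napi_schedule(&drv->napi) */
}
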
 
 /**
  * ieee80211_rx - receive frame
@@ -3452,13 +3444,13 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
  *
  * After a channel switch announcement was scheduled and the counter in this
- * announcement hit zero, this function must be called by the driver to
+ * announcement hits 1, this function must be called by the driver to
  * notify mac80211 that the channel can be changed.
  */
 void ieee80211_csa_finish(struct ieee80211_vif *vif);
 
 /**
- * ieee80211_csa_is_complete - find out if counters reached zero
+ * ieee80211_csa_is_complete - find out if counters reached 1
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
  *
  * This function returns whether the channel switch counters reached zero.
@@ -4451,7 +4443,6 @@ struct ieee80211_tx_rate_control {
 };
 
 struct rate_control_ops {
-       struct module *module;
        const char *name;
        void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
        void (*free)(void *priv);
@@ -4553,8 +4544,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
                           struct ieee80211_sta *pubsta,
                           struct ieee80211_sta_rates *rates);
 
-int ieee80211_rate_control_register(struct rate_control_ops *ops);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
+int ieee80211_rate_control_register(const struct rate_control_ops *ops);
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops);
 
 static inline bool
 conf_is_ht20(struct ieee80211_conf *conf)
index 807d6b7a943fecab78db1afd6b57ee33b70ded72..a591053cae6305c399eabb745c6f1764c41bc8be 100644 (file)
@@ -20,6 +20,7 @@
 #define NET_MAC802154_H
 
 #include <net/af_ieee802154.h>
+#include <linux/skbuff.h>
 
 /* General MAC frame format:
  *  2 bytes: Frame Control
@@ -50,7 +51,7 @@ struct ieee802154_hw_addr_filt {
                                 * devices across independent networks.
                                 */
        __le16  short_addr;
-       u8      ieee_addr[IEEE802154_ADDR_LEN];
+       __le64  ieee_addr;
        u8      pan_coord;
 };
 
@@ -113,6 +114,32 @@ struct ieee802154_dev {
  *       Set radio for listening on specific address.
  *       Set the device for listening on specified address.
  *       Returns either zero, or negative errno.
+ *
+ * set_txpower:
+ *       Set radio transmit power in dB. Called with pib_lock held.
+ *       Returns either zero, or negative errno.
+ *
+ * set_lbt:
+ *       Enables or disables listen before talk on the device. Called with
+ *       pib_lock held.
+ *       Returns either zero, or negative errno.
+ *
+ * set_cca_mode:
+ *       Sets the CCA mode used by the device. Called with pib_lock held.
+ *       Returns either zero, or negative errno.
+ *
+ * set_cca_ed_level:
+ *       Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ *       held.
+ *       Returns either zero, or negative errno.
+ *
+ * set_csma_params:
+ *       Sets the CSMA parameter set for the PHY. Called with pib_lock held.
+ *       Returns either zero, or negative errno.
+ *
+ * set_frame_retries:
+ *       Sets the retransmission attempt limit. Called with pib_lock held.
+ *       Returns either zero, or negative errno.
  */
 struct ieee802154_ops {
        struct module   *owner;
@@ -127,8 +154,16 @@ struct ieee802154_ops {
        int             (*set_hw_addr_filt)(struct ieee802154_dev *dev,
                                          struct ieee802154_hw_addr_filt *filt,
                                            unsigned long changed);
-       int             (*ieee_addr)(struct ieee802154_dev *dev,
-                                    u8 addr[IEEE802154_ADDR_LEN]);
+       int             (*ieee_addr)(struct ieee802154_dev *dev, __le64 addr);
+       int             (*set_txpower)(struct ieee802154_dev *dev, int db);
+       int             (*set_lbt)(struct ieee802154_dev *dev, bool on);
+       int             (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode);
+       int             (*set_cca_ed_level)(struct ieee802154_dev *dev,
+                                           s32 level);
+       int             (*set_csma_params)(struct ieee802154_dev *dev,
+                                          u8 min_be, u8 max_be, u8 retries);
+       int             (*set_frame_retries)(struct ieee802154_dev *dev,
+                                            s8 retries);
 };
 
 /* Basic interface to register ieee802154 device */
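
Drivers opt into the new PHY controls by filling the corresponding ieee802154_ops callbacks; each is called with pib_lock held and returns 0 or a negative errno. A heavily trimmed, hypothetical wiring sketch (the mydrv_* names and the retry range check are assumptions):

static int mydrv_set_txpower(struct ieee802154_dev *dev, int db)
{
        /* program the transceiver's TX power register for `db` */
        return 0;
}

static int mydrv_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
{
        /* -1 conventionally disables hardware retransmission */
        if (retries < -1 || retries > 7)
                return -EINVAL;
        return 0;
}

static struct ieee802154_ops mydrv_ops = {
        .owner             = THIS_MODULE,
        .set_txpower       = mydrv_set_txpower,
        .set_frame_retries = mydrv_set_frame_retries,
        /* .xmit, .ed, .set_channel, .start, .stop, ... omitted */
};
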
index 991dcd94cbbf33bc5617fcb3a0c280113c4a042a..79387f73f87585101ff44f548c5db299ae0b574c 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/netns/packet.h>
 #include <net/netns/ipv4.h>
 #include <net/netns/ipv6.h>
+#include <net/netns/ieee802154_6lowpan.h>
 #include <net/netns/sctp.h>
 #include <net/netns/dccp.h>
 #include <net/netns/netfilter.h>
@@ -90,6 +91,9 @@ struct net {
 #if IS_ENABLED(CONFIG_IPV6)
        struct netns_ipv6       ipv6;
 #endif
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+       struct netns_ieee802154_lowpan  ieee802154_lowpan;
+#endif
 #if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
        struct netns_sctp       sctp;
 #endif
index b2ac6246b7e0abe156b26a06bf9a4463331baa6e..37252f71a38037d0e969699dc7376e6dcae1c5ec 100644 (file)
@@ -73,10 +73,17 @@ struct nf_conn_help {
 
 struct nf_conn {
        /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
-           plus 1 for any connection(s) we are `master' for */
+        * plus 1 for any connection(s) we are `master' for
+        *
+        * Hint: SKBs reference this struct and its refcnt via skb->nfct and
+        * the helpers nf_conntrack_get() and nf_conntrack_put().
+        * nf_ct_put() is equivalent to nf_conntrack_put() (both decrement the
+        * refcnt); beware that nf_ct_get() is different and does not
+        * increment the refcnt.
+        */
        struct nf_conntrack ct_general;
 
-       spinlock_t lock;
+       spinlock_t      lock;
+       u16             cpu;
 
        /* XXX should I move this to the tail ? - Y.K */
        /* These are my tuples; original and reply */
index 15308b8eb5b5218330c3043c46a02537d00779a2..cc0c1882760270f21bd1cc7080cece06afd518d8 100644 (file)
@@ -77,6 +77,13 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
             const struct nf_conntrack_l3proto *l3proto,
             const struct nf_conntrack_l4proto *proto);
 
-extern spinlock_t nf_conntrack_lock ;
+#ifdef CONFIG_LOCKDEP
+# define CONNTRACK_LOCKS 8
+#else
+# define CONNTRACK_LOCKS 1024
+#endif
+extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+
+extern spinlock_t nf_conntrack_expect_lock;
 
 #endif /* _NF_CONNTRACK_CORE_H */
index c985695283b3a34d5d78aa99b062ab4dfb839c18..dec6336bf850f77928f54f4cd04d37b3ebe3e16a 100644 (file)
@@ -7,6 +7,8 @@
 
 #include <uapi/linux/netfilter/xt_connlabel.h>
 
+#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
+
 struct nf_conn_labels {
        u8 words;
        unsigned long bits[];
@@ -29,7 +31,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct)
        u8 words;
 
        words = ACCESS_ONCE(net->ct.label_words);
-       if (words == 0 || WARN_ON_ONCE(words > 8))
+       if (words == 0)
                return NULL;
 
        cl_ext = nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS,
index e7e14ffe0f6a0e0f545af45864aa248980250da2..e6bc14d8fa9a9a4b324fac9df5a47e64277f2aa8 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/list.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netlink.h>
@@ -288,7 +289,8 @@ struct nft_expr_ops {
        int                             (*init)(const struct nft_ctx *ctx,
                                                const struct nft_expr *expr,
                                                const struct nlattr * const tb[]);
-       void                            (*destroy)(const struct nft_expr *expr);
+       void                            (*destroy)(const struct nft_ctx *ctx,
+                                                  const struct nft_expr *expr);
        int                             (*dump)(struct sk_buff *skb,
                                                const struct nft_expr *expr);
        int                             (*validate)(const struct nft_ctx *ctx,
@@ -325,13 +327,15 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
  *     @handle: rule handle
  *     @genmask: generation mask
  *     @dlen: length of expression data
+ *     @ulen: length of user data (used for comments)
  *     @data: expression data
  */
 struct nft_rule {
        struct list_head                list;
-       u64                             handle:46,
+       u64                             handle:42,
                                        genmask:2,
-                                       dlen:16;
+                                       dlen:12,
+                                       ulen:8;
        unsigned char                   data[]
                __attribute__((aligned(__alignof__(struct nft_expr))));
 };
@@ -340,19 +344,13 @@ struct nft_rule {
  *     struct nft_rule_trans - nf_tables rule update in transaction
  *
  *     @list: used internally
+ *     @ctx: rule context
  *     @rule: rule that needs to be updated
- *     @chain: chain that this rule belongs to
- *     @table: table for which this chain applies
- *     @nlh: netlink header of the message that contain this update
- *     @family: family expressesed as AF_*
  */
 struct nft_rule_trans {
        struct list_head                list;
+       struct nft_ctx                  ctx;
        struct nft_rule                 *rule;
-       const struct nft_chain          *chain;
-       const struct nft_table          *table;
-       const struct nlmsghdr           *nlh;
-       u8                              family;
 };
 
 static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
@@ -370,6 +368,11 @@ static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
        return (struct nft_expr *)&rule->data[rule->dlen];
 }
 
+static inline void *nft_userdata(const struct nft_rule *rule)
+{
+       return (void *)&rule->data[rule->dlen];
+}
+
 /*
  * The last pointer isn't really necessary, but the compiler isn't able to
  * determine that the result of nft_expr_last() is always the same since it
@@ -521,6 +524,9 @@ void nft_unregister_chain_type(const struct nf_chain_type *);
 int nft_register_expr(struct nft_expr_type *);
 void nft_unregister_expr(struct nft_expr_type *);
 
+#define nft_dereference(p)                                     \
+       nfnl_dereference(p, NFNL_SUBSYS_NFTABLES)
+
 #define MODULE_ALIAS_NFT_FAMILY(family)        \
        MODULE_ALIAS("nft-afinfo-" __stringify(family))
 
index fbcc7fa536dc4ab49440d4566e63fedf8b860d2a..773cce308bc61ce312c4a5ecab0464ba84e1ce41 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/list_nulls.h>
 #include <linux/atomic.h>
 #include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/seqlock.h>
 
 struct ctl_table_header;
 struct nf_conntrack_ecache;
@@ -62,6 +63,13 @@ struct nf_ip_net {
 #endif
 };
 
+struct ct_pcpu {
+       spinlock_t              lock;
+       struct hlist_nulls_head unconfirmed;
+       struct hlist_nulls_head dying;
+       struct hlist_nulls_head tmpl;
+};
+
 struct netns_ct {
        atomic_t                count;
        unsigned int            expect_count;
@@ -83,12 +91,11 @@ struct netns_ct {
        int                     sysctl_checksum;
 
        unsigned int            htable_size;
+       seqcount_t              generation;
        struct kmem_cache       *nf_conntrack_cachep;
        struct hlist_nulls_head *hash;
        struct hlist_head       *expect_hash;
-       struct hlist_nulls_head unconfirmed;
-       struct hlist_nulls_head dying;
-       struct hlist_nulls_head tmpl;
+       struct ct_pcpu __percpu *pcpu_lists;
        struct ip_conntrack_stat __percpu *stat;
        struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
        struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
new file mode 100644 (file)
index 0000000..079030c
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * ieee802154 6lowpan in net namespaces
+ */
+
+#include <net/inet_frag.h>
+
+#ifndef __NETNS_IEEE802154_6LOWPAN_H__
+#define __NETNS_IEEE802154_6LOWPAN_H__
+
+struct netns_sysctl_lowpan {
+#ifdef CONFIG_SYSCTL
+       struct ctl_table_header *frags_hdr;
+#endif
+};
+
+struct netns_ieee802154_lowpan {
+       struct netns_sysctl_lowpan sysctl;
+       struct netns_frags      frags;
+       u16                     max_dsize;
+};
+
+#endif
index 1006a265beb393a0ab917eb8315f0fb258c39e0a..3492434baf88e9dffb3da763de75db36480d9eb9 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/workqueue.h>
 #include <linux/xfrm.h>
 #include <net/dst_ops.h>
+#include <net/flowcache.h>
 
 struct ctl_table_header;
 
@@ -58,9 +59,17 @@ struct netns_xfrm {
        struct dst_ops          xfrm6_dst_ops;
 #endif
        spinlock_t xfrm_state_lock;
-       spinlock_t xfrm_policy_sk_bundle_lock;
        rwlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
+
+       /* flow cache part */
+       struct flow_cache       flow_cache_global;
+       atomic_t                flow_cache_genid;
+       struct list_head        flow_cache_gc_list;
+       spinlock_t              flow_cache_gc_lock;
+       struct work_struct      flow_cache_gc_work;
+       struct work_struct      flow_cache_flush_work;
+       struct mutex            flow_flush_sem;
 };
 
 #endif
index 81af21e9bcd49b141f799fca231bbf7b1deb08ec..7655cfe27c3465f726dc0d1eed26040e61b6b366 100644 (file)
@@ -35,6 +35,7 @@ enum {
        NFC_DIGITAL_RF_TECH_106A = 0,
        NFC_DIGITAL_RF_TECH_212F,
        NFC_DIGITAL_RF_TECH_424F,
+       NFC_DIGITAL_RF_TECH_ISO15693,
 
        NFC_DIGITAL_RF_TECH_LAST,
 };
@@ -50,6 +51,7 @@ enum {
 
        NFC_DIGITAL_FRAMING_NFCA_T1T,
        NFC_DIGITAL_FRAMING_NFCA_T2T,
+       NFC_DIGITAL_FRAMING_NFCA_T4T,
        NFC_DIGITAL_FRAMING_NFCA_NFC_DEP,
 
        NFC_DIGITAL_FRAMING_NFCF,
@@ -57,6 +59,9 @@ enum {
        NFC_DIGITAL_FRAMING_NFCF_NFC_DEP,
        NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED,
 
+       NFC_DIGITAL_FRAMING_ISO15693_INVENTORY,
+       NFC_DIGITAL_FRAMING_ISO15693_T5T,
+
        NFC_DIGITAL_FRAMING_LAST,
 };
 
@@ -204,6 +209,8 @@ struct nfc_digital_dev {
        u8 curr_rf_tech;
        u8 curr_nfc_dep_pni;
 
+       u16 target_fsc;
+
        int (*skb_check_crc)(struct sk_buff *skb);
        void (*skb_add_crc)(struct sk_buff *skb);
 };
index e80894bca1d042201c01c854df897ccc2607b72e..2e8b40c16274f73d1ef98d6ee17fa17da172354e 100644 (file)
@@ -111,6 +111,9 @@ struct nfc_target {
        u8 sensf_res[NFC_SENSF_RES_MAXSIZE];
        u8 hci_reader_gate;
        u8 logical_idx;
+       u8 is_iso15693;
+       u8 iso15693_dsfid;
+       u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE];
 };
 
 /**
index 99d2ba1c7e03f76d7a92e95691fb002535de52eb..b23548e0409848530b1c7f94fb805601c46e583c 100644 (file)
@@ -52,7 +52,7 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
  * Note: This is in section 7.3.2 of the IEEE 802.15.4 document.
  */
 int ieee802154_nl_assoc_confirm(struct net_device *dev,
-               u16 short_addr, u8 status);
+               __le16 short_addr, u8 status);
 
 /**
  * ieee802154_nl_disassoc_indic - Notify userland of disassociation.
@@ -111,8 +111,8 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
  * Note: This API cannot indicate a beacon frame for a coordinator
  *       operating in long addressing mode.
  */
-int ieee802154_nl_beacon_indic(struct net_device *dev, u16 panid,
-               u16 coord_addr);
+int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid,
+               __le16 coord_addr);
 
 /**
  * ieee802154_nl_start_confirm - Notify userland of completion of start.
index b07cdc9fa454a57fe43d1eea0235522c26d0c1a6..75fc1f5a948d685fcfff12e04cc6b85e194cd541 100644 (file)
@@ -155,6 +155,7 @@ struct ieee80211_reg_rule {
        struct ieee80211_freq_range freq_range;
        struct ieee80211_power_rule power_rule;
        u32 flags;
+       u32 dfs_cac_ms;
 };
 
 struct ieee80211_regdomain {
@@ -172,14 +173,18 @@ struct ieee80211_regdomain {
 #define DBM_TO_MBM(gain) ((gain) * 100)
 #define MBM_TO_DBM(gain) ((gain) / 100)
 
-#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \
-{                                                      \
-       .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \
-       .freq_range.end_freq_khz = MHZ_TO_KHZ(end),     \
-       .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \
-       .power_rule.max_antenna_gain = DBI_TO_MBI(gain),\
-       .power_rule.max_eirp = DBM_TO_MBM(eirp),        \
-       .flags = reg_flags,                             \
+#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags)   \
+{                                                                      \
+       .freq_range.start_freq_khz = MHZ_TO_KHZ(start),                 \
+       .freq_range.end_freq_khz = MHZ_TO_KHZ(end),                     \
+       .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw),                 \
+       .power_rule.max_antenna_gain = DBI_TO_MBI(gain),                \
+       .power_rule.max_eirp = DBM_TO_MBM(eirp),                        \
+       .flags = reg_flags,                                             \
+       .dfs_cac_ms = dfs_cac,                                          \
 }
 
+#define REG_RULE(start, end, bw, gain, eirp, reg_flags) \
+       REG_RULE_EXT(start, end, bw, gain, eirp, 0, reg_flags)
+
 #endif
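
REG_RULE_EXT() extends REG_RULE() with a per-rule DFS CAC time in milliseconds, while REG_RULE() keeps dfs_cac_ms at 0. A hypothetical regdomain entry using both (the "99" alpha2, the band edges and the 60 s CAC are illustrative values only):

static const struct ieee80211_regdomain example_regdom = {
        .n_reg_rules = 2,
        .alpha2 = "99",
        .reg_rules = {
                /* 2.4 GHz, no DFS: dfs_cac_ms stays 0 */
                REG_RULE(2402, 2482, 40, 0, 20, 0),
                /* 5 GHz radar band with an illustrative 60 s CAC */
                REG_RULE_EXT(5250, 5330, 80, 0, 20, 60000, NL80211_RRF_DFS),
        },
};
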
index 9d1f423d5944bc3908bf68e64aae7f259ff7fdb0..b17cf28f996e6ab3cee2b73b6747eafde6f02380 100644 (file)
@@ -191,7 +191,6 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
-int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);
 
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
index 661e45d3805173b119c3fdbfe477922528172562..72240e5ac2c4b2223e9e1706c81974f11d7eafc6 100644 (file)
@@ -140,7 +140,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
                                    struct nlattr *tb[]);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
-extern const struct nla_policy ifla_policy[IFLA_MAX+1];
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
 
 #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
 
index b9586a137cadd38e8a36abcd34018cfe9b680047..06a5668f05c984526c4ce2455aff825e2aaa9836 100644 (file)
@@ -862,9 +862,9 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
                                        const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-       if (unlikely(sk->sk_rxhash != skb->rxhash)) {
+       if (unlikely(sk->sk_rxhash != skb->hash)) {
                sock_rps_reset_flow(sk);
-               sk->sk_rxhash = skb->rxhash;
+               sk->sk_rxhash = skb->hash;
        }
 #endif
 }
@@ -1621,33 +1621,6 @@ void sk_common_release(struct sock *sk);
 /* Initialise core socket variables */
 void sock_init_data(struct socket *sock, struct sock *sk);
 
-void sk_filter_release_rcu(struct rcu_head *rcu);
-
-/**
- *     sk_filter_release - release a socket filter
- *     @fp: filter to remove
- *
- *     Remove a filter from a socket and release its resources.
- */
-
-static inline void sk_filter_release(struct sk_filter *fp)
-{
-       if (atomic_dec_and_test(&fp->refcnt))
-               call_rcu(&fp->rcu, sk_filter_release_rcu);
-}
-
-static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
-       sk_filter_release(fp);
-}
-
-static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-       atomic_inc(&fp->refcnt);
-       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
-}
-
 /*
  * Socket reference counting postulates.
  *
@@ -2256,8 +2229,12 @@ void sock_net_set(struct sock *sk, struct net *net)
  */
 static inline void sk_change_net(struct sock *sk, struct net *net)
 {
-       put_net(sock_net(sk));
-       sock_net_set(sk, hold_net(net));
+       struct net *current_net = sock_net(sk);
+
+       if (!net_eq(current_net, net)) {
+               put_net(current_net);
+               sock_net_set(sk, hold_net(net));
+       }
 }
 
 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
index 9e8710be7a04f0d43cd8358bd07208e5a6adb14a..fa8f5fac65e959bff4458cd3e7cce6005bd08533 100644 (file)
@@ -9,7 +9,7 @@ struct tcf_csum {
 
        u32 update_flags;
 };
-#define to_tcf_csum(pc) \
-       container_of(pc,struct tcf_csum,common)
+#define to_tcf_csum(a) \
+       container_of(a->priv,struct tcf_csum,common)
 
 #endif /* __NET_TC_CSUM_H */
index 65f024b809589bc638de15053257d31da5b206cc..9763dcbb9bc3e594c61b62504917e3a3185d1caa 100644 (file)
@@ -8,7 +8,7 @@ struct tcf_defact {
        u32                     tcfd_datalen;
        void                    *tcfd_defdata;
 };
-#define to_defact(pc) \
-       container_of(pc, struct tcf_defact, common)
+#define to_defact(a) \
+       container_of(a->priv, struct tcf_defact, common)
 
 #endif /* __NET_TC_DEF_H */
index 9e3f6767b80e5de5213a8a9b74ae6f4446021046..9fc9b578908ab868dcc0ef5986358ccb603bf5f9 100644 (file)
@@ -11,7 +11,7 @@ struct tcf_gact {
         int                    tcfg_paction;
 #endif
 };
-#define to_gact(pc) \
-       container_of(pc, struct tcf_gact, common)
+#define to_gact(a) \
+       container_of(a->priv, struct tcf_gact, common)
 
 #endif /* __NET_TC_GACT_H */
index f7d25dfcc4b78c011acbb6044b38e4af8f8438d9..c0f4193f432c20d67234c6198ce7c6cb0a4933ac 100644 (file)
@@ -11,7 +11,7 @@ struct tcf_ipt {
        char                    *tcfi_tname;
        struct xt_entry_target  *tcfi_t;
 };
-#define to_ipt(pc) \
-       container_of(pc, struct tcf_ipt, common)
+#define to_ipt(a) \
+       container_of(a->priv, struct tcf_ipt, common)
 
 #endif /* __NET_TC_IPT_H */
index cfe2943690ff298cd3e938ecef2ddb8d49dfc229..4dd77a1c106b246b0abc9d8af3d6dc67fa748b5c 100644 (file)
@@ -11,7 +11,7 @@ struct tcf_mirred {
        struct net_device       *tcfm_dev;
        struct list_head        tcfm_list;
 };
-#define to_mirred(pc) \
-       container_of(pc, struct tcf_mirred, common)
+#define to_mirred(a) \
+       container_of(a->priv, struct tcf_mirred, common)
 
 #endif /* __NET_TC_MIR_H */
index 4a691f34d7035c2eea28a941cf701c82832d7050..63d8e9ca9d99e0bef223973a48d24229689de27e 100644 (file)
@@ -13,9 +13,9 @@ struct tcf_nat {
        u32 flags;
 };
 
-static inline struct tcf_nat *to_tcf_nat(struct tcf_common *pc)
+static inline struct tcf_nat *to_tcf_nat(struct tc_action *a)
 {
-       return container_of(pc, struct tcf_nat, common);
+       return container_of(a->priv, struct tcf_nat, common);
 }
 
 #endif /* __NET_TC_NAT_H */
index e6f6e15956f5fb9dfb5fcbd446fc3d60ed7f4ec1..5b80998879c7cf6c0bf7df280556de05b5831260 100644 (file)
@@ -9,7 +9,7 @@ struct tcf_pedit {
        unsigned char           tcfp_flags;
        struct tc_pedit_key     *tcfp_keys;
 };
-#define to_pedit(pc) \
-       container_of(pc, struct tcf_pedit, common)
+#define to_pedit(a) \
+       container_of(a->priv, struct tcf_pedit, common)
 
 #endif /* __NET_TC_PED_H */
index dd5d86fab030c8fb8d16d6acad7eefebcab375b9..0df9a0db4a8e4ac300fcbafdd505f05ffd320782 100644 (file)
@@ -29,7 +29,7 @@ struct tcf_skbedit {
        u16                     queue_mapping;
        /* XXX: 16-bit pad here? */
 };
-#define to_skbedit(pc) \
-       container_of(pc, struct tcf_skbedit, common)
+#define to_skbedit(a) \
+       container_of(a->priv, struct tcf_skbedit, common)
 
 #endif /* __NET_TC_SKBEDIT_H */
index 743accec6c76e056547102a0b429418b8450af65..87d87740818867b8b50074c8a5634658be46199c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/crypto.h>
 #include <linux/cryptohash.h>
 #include <linux/kref.h>
+#include <linux/ktime.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -478,7 +479,6 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
                             struct ip_options *opt);
 #ifdef CONFIG_SYN_COOKIES
-#include <linux/ktime.h>
 
 /* Syncookies use a monotonic timer which increments every 60 seconds.
  * This counter is used both as a hash input and partially encoded into
@@ -620,7 +620,7 @@ static inline void tcp_bound_rto(const struct sock *sk)
 
 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
 {
-       return (tp->srtt >> 3) + tp->rttvar;
+       return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
 }
 
 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -657,6 +657,11 @@ static inline u32 tcp_rto_min(struct sock *sk)
        return rto_min;
 }
 
+static inline u32 tcp_rto_min_us(struct sock *sk)
+{
+       return jiffies_to_usecs(tcp_rto_min(sk));
+}
+
 /* Compute the actual receive window we are currently advertising.
  * Rcv_nxt can be after the window if our peer push more data
  * than the offered window.
@@ -779,7 +784,6 @@ enum tcp_ca_event {
 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
 
 #define TCP_CONG_NON_RESTRICTED 0x1
-#define TCP_CONG_RTT_STAMP     0x2
 
 struct tcp_congestion_ops {
        struct list_head        list;
@@ -792,8 +796,6 @@ struct tcp_congestion_ops {
 
        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
-       /* lower bound for congestion window (optional) */
-       u32 (*min_cwnd)(const struct sock *sk);
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
        /* call before changing ca_state (optional) */
@@ -828,7 +830,6 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
-u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
index b52bda8d13b183a953b88cb90700a1ee8cf08e23..10ab0fc6d4f79a8a608d0ce642aae294636dcecc 100644 (file)
@@ -37,15 +37,22 @@ struct wpan_phy {
        struct mutex pib_lock;
 
        /*
-        * This is a PIB according to 802.15.4-2006.
+        * This is a PIB according to 802.15.4-2011.
         * We do not provide timing-related variables, as they
         * aren't used outside of driver
         */
        u8 current_channel;
        u8 current_page;
        u32 channels_supported[32];
-       u8 transmit_power;
+       s8 transmit_power;
        u8 cca_mode;
+       u8 min_be;
+       u8 max_be;
+       u8 csma_retries;
+       s8 frame_retries;
+
+       bool lbt;
+       s32 cca_ed_level;
 
        struct device dev;
        int idx;
@@ -54,6 +61,14 @@ struct wpan_phy {
                                        const char *name, int type);
        void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
 
+       int (*set_txpower)(struct wpan_phy *phy, int db);
+       int (*set_lbt)(struct wpan_phy *phy, bool on);
+       int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode);
+       int (*set_cca_ed_level)(struct wpan_phy *phy, int level);
+       int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be,
+                              u8 retries);
+       int (*set_frame_retries)(struct wpan_phy *phy, s8 retries);
+
        char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
 };
 
index fb5654a8ca3cee1c65923c105f3760d607eaa1de..32682ae47b3fe6d88e7750120089efc791b38894 100644 (file)
 struct xfrm_state_walk {
        struct list_head        all;
        u8                      state;
-       union {
-               u8              dying;
-               u8              proto;
-       };
+       u8                      dying;
+       u8                      proto;
        u32                     seq;
+       struct xfrm_address_filter *filter;
 };
 
 /* Full description of state of transformer. */
@@ -350,6 +349,16 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
 void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
+struct xfrm_input_afinfo {
+       unsigned int            family;
+       struct module           *owner;
+       int                     (*callback)(struct sk_buff *skb, u8 protocol,
+                                           int err);
+};
+
+int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo);
+int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo);
+
 void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
 struct xfrm_type {
@@ -594,21 +603,33 @@ struct xfrm_mgr {
                                           const struct xfrm_migrate *m,
                                           int num_bundles,
                                           const struct xfrm_kmaddress *k);
+       bool                    (*is_alive)(const struct km_event *c);
 };
 
 int xfrm_register_km(struct xfrm_mgr *km);
 int xfrm_unregister_km(struct xfrm_mgr *km);
 
+struct xfrm_tunnel_skb_cb {
+       union {
+               struct inet_skb_parm h4;
+               struct inet6_skb_parm h6;
+       } header;
+
+       union {
+               struct ip_tunnel *ip4;
+               struct ip6_tnl *ip6;
+       } tunnel;
+};
+
+#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
+
 /*
  * This structure is used for the duration where packets are being
  * transformed by IPsec.  As soon as the packet leaves IPsec the
  * area beyond the generic IP part may be overwritten.
  */
 struct xfrm_skb_cb {
-       union {
-               struct inet_skb_parm h4;
-               struct inet6_skb_parm h6;
-        } header;
+       struct xfrm_tunnel_skb_cb header;
 
         /* Sequence number for replay protection. */
        union {
@@ -630,10 +651,7 @@ struct xfrm_skb_cb {
  * to transmit header information to the mode input/output functions.
  */
 struct xfrm_mode_skb_cb {
-       union {
-               struct inet_skb_parm h4;
-               struct inet6_skb_parm h6;
-       } header;
+       struct xfrm_tunnel_skb_cb header;
 
        /* Copied from header for IPv4, always set to zero and DF for IPv6. */
        __be16 id;
@@ -665,10 +683,7 @@ struct xfrm_mode_skb_cb {
  * related information.
  */
 struct xfrm_spi_skb_cb {
-       union {
-               struct inet_skb_parm h4;
-               struct inet6_skb_parm h6;
-       } header;
+       struct xfrm_tunnel_skb_cb header;
 
        unsigned int daddroff;
        unsigned int family;
@@ -1347,18 +1362,34 @@ struct xfrm_algo_desc {
        struct sadb_alg desc;
 };
 
-/* XFRM tunnel handlers.  */
-struct xfrm_tunnel {
+/* XFRM protocol handlers.  */
+struct xfrm4_protocol {
        int (*handler)(struct sk_buff *skb);
+       int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
+                            int encap_type);
+       int (*cb_handler)(struct sk_buff *skb, int err);
        int (*err_handler)(struct sk_buff *skb, u32 info);
 
-       struct xfrm_tunnel __rcu *next;
+       struct xfrm4_protocol __rcu *next;
+       int priority;
+};
+
+struct xfrm6_protocol {
+       int (*handler)(struct sk_buff *skb);
+       int (*cb_handler)(struct sk_buff *skb, int err);
+       int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                          u8 type, u8 code, int offset, __be32 info);
+
+       struct xfrm6_protocol __rcu *next;
        int priority;
 };
 
-struct xfrm_tunnel_notifier {
+/* XFRM tunnel handlers.  */
+struct xfrm_tunnel {
        int (*handler)(struct sk_buff *skb);
-       struct xfrm_tunnel_notifier __rcu *next;
+       int (*err_handler)(struct sk_buff *skb, u32 info);
+
+       struct xfrm_tunnel __rcu *next;
        int priority;
 };
 
@@ -1375,11 +1406,14 @@ void xfrm4_init(void);
 int xfrm_state_init(struct net *net);
 void xfrm_state_fini(struct net *net);
 void xfrm4_state_init(void);
+void xfrm4_protocol_init(void);
 #ifdef CONFIG_XFRM
 int xfrm6_init(void);
 void xfrm6_fini(void);
 int xfrm6_state_init(void);
 void xfrm6_state_fini(void);
+int xfrm6_protocol_init(void);
+void xfrm6_protocol_fini(void);
 #else
 static inline int xfrm6_init(void)
 {
@@ -1405,7 +1439,8 @@ static inline void xfrm_sysctl_fini(struct net *net)
 }
 #endif
 
-void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
+                         struct xfrm_address_filter *filter);
 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                    int (*func)(struct xfrm_state *, int, void*), void *);
 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
@@ -1497,20 +1532,22 @@ int xfrm4_rcv(struct sk_buff *skb);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
-       return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+       return xfrm_input(skb, nexthdr, spi, 0);
 }
 
 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm4_output(struct sk_buff *skb);
 int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
+int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
+int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
-int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
-int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
 int xfrm6_extract_header(struct sk_buff *skb);
 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1519,6 +1556,9 @@ int xfrm6_rcv(struct sk_buff *skb);
 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
                     xfrm_address_t *saddr, u8 proto);
 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
+int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
+int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
@@ -1646,6 +1686,20 @@ static inline int xfrm_aevent_is_on(struct net *net)
        rcu_read_unlock();
        return ret;
 }
+
+static inline int xfrm_acquire_is_on(struct net *net)
+{
+       struct sock *nlsk;
+       int ret = 0;
+
+       rcu_read_lock();
+       nlsk = rcu_dereference(net->xfrm.nlsk);
+       if (nlsk)
+               ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
+       rcu_read_unlock();
+
+       return ret;
+}
 #endif
 
 static inline int aead_len(struct xfrm_algo_aead *alg)
@@ -1748,4 +1802,24 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
        return ret;
 }
 
+static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
+                                   unsigned int family)
+{
+       bool tunnel = false;
+
+       switch(family) {
+       case AF_INET:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
+                       tunnel = true;
+               break;
+       case AF_INET6:
+               if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
+                       tunnel = true;
+               break;
+       }
+       if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL))
+               return -EINVAL;
+
+       return 0;
+}
 #endif /* _NET_XFRM_H */
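
For orientation, a minimal sketch (not taken from any in-tree module) of how a transform protocol could hook into the xfrm4_protocol API declared above; the my_* names and trivial callbacks are hypothetical, while xfrm4_rcv(), xfrm_input() and xfrm4_protocol_register() are the declarations from this header.

    /* Hypothetical sketch, kernel-module context assumed. */
    static int my_esp_rcv_cb(struct sk_buff *skb, int err)
    {
            return 0;               /* cb_handler: called back after input processing */
    }

    static int my_esp_err(struct sk_buff *skb, u32 info)
    {
            return 0;               /* err_handler: ICMP error feedback */
    }

    static struct xfrm4_protocol my_esp4_protocol = {
            .handler        = xfrm4_rcv,    /* declared in this header */
            .input_handler  = xfrm_input,   /* matches the input_handler signature */
            .cb_handler     = my_esp_rcv_cb,
            .err_handler    = my_esp_err,
            .priority       = 0,
    };

    static int __init my_esp_init(void)
    {
            /* IPPROTO_ESP from <linux/in.h>; undo with
             * xfrm4_protocol_deregister() on module exit. */
            return xfrm4_protocol_register(&my_esp4_protocol, IPPROTO_ESP);
    }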
index a34f27b2e394ef1ebdb7148b69ac573df85b6502..1de256b358074b6b3486e4e1b4686f9ecace6320 100644 (file)
@@ -153,8 +153,8 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
                __field(        u16,                    vlan_tci        )
                __field(        u16,                    protocol        )
                __field(        u8,                     ip_summed       )
-               __field(        u32,                    rxhash          )
-               __field(        bool,                   l4_rxhash       )
+               __field(        u32,                    hash            )
+               __field(        bool,                   l4_hash         )
                __field(        unsigned int,           len             )
                __field(        unsigned int,           data_len        )
                __field(        unsigned int,           truesize        )
@@ -179,8 +179,8 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
                __entry->vlan_tci = vlan_tx_tag_get(skb);
                __entry->protocol = ntohs(skb->protocol);
                __entry->ip_summed = skb->ip_summed;
-               __entry->rxhash = skb->rxhash;
-               __entry->l4_rxhash = skb->l4_rxhash;
+               __entry->hash = skb->hash;
+               __entry->l4_hash = skb->l4_hash;
                __entry->len = skb->len;
                __entry->data_len = skb->data_len;
                __entry->truesize = skb->truesize;
@@ -191,11 +191,11 @@ DECLARE_EVENT_CLASS(net_dev_rx_verbose_template,
                __entry->gso_type = skb_shinfo(skb)->gso_type;
        ),
 
-       TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d rxhash=0x%08x l4_rxhash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
+       TP_printk("dev=%s napi_id=%#x queue_mapping=%u skbaddr=%p vlan_tagged=%d vlan_proto=0x%04x vlan_tci=0x%04x protocol=0x%04x ip_summed=%d hash=0x%08x l4_hash=%d len=%u data_len=%u truesize=%u mac_header_valid=%d mac_header=%d nr_frags=%d gso_size=%d gso_type=%#x",
                  __get_str(name), __entry->napi_id, __entry->queue_mapping,
                  __entry->skbaddr, __entry->vlan_tagged, __entry->vlan_proto,
                  __entry->vlan_tci, __entry->protocol, __entry->ip_summed,
-                 __entry->rxhash, __entry->l4_rxhash, __entry->len,
+                 __entry->hash, __entry->l4_hash, __entry->len,
                  __entry->data_len, __entry->truesize,
                  __entry->mac_header_valid, __entry->mac_header,
                  __entry->nr_frags, __entry->gso_size, __entry->gso_type)
index e52958d7c2d119cb416587466f4a4d2a5bb1e8d0..5d9d1d1407180a9291c0f986945e3a34f2ccf51e 100644 (file)
@@ -8,6 +8,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_H
index df944ed206a8e4bd23d9f9a3ec9e08c9e0ad6e3b..7e2e1863db16e02fa15e1edc109adefda8236ba7 100644 (file)
@@ -96,6 +96,7 @@ struct can_ctrlmode {
 #define CAN_CTRLMODE_3_SAMPLES         0x04    /* Triple sampling mode */
 #define CAN_CTRLMODE_ONE_SHOT          0x08    /* One-Shot mode */
 #define CAN_CTRLMODE_BERR_REPORTING    0x10    /* Bus-error reporting */
+#define CAN_CTRLMODE_FD                        0x20    /* CAN FD mode */
 
 /*
  * CAN device statistics
@@ -122,6 +123,8 @@ enum {
        IFLA_CAN_RESTART_MS,
        IFLA_CAN_RESTART,
        IFLA_CAN_BERR_COUNTER,
+       IFLA_CAN_DATA_BITTIMING,
+       IFLA_CAN_DATA_BITTIMING_CONST,
        __IFLA_CAN_MAX
 };
 
index 38dbafaa5341154167a685a245096fbece6a375d..fd161e91b6d7e711270da030ed84b38f9b718f0c 100644 (file)
 #include <linux/types.h>
 #include <linux/if_ether.h>
 
-/* This should work for both 32 and 64 bit userland. */
+/* All structures exposed to userland should be defined such that they
+ * have the same layout for 32-bit and 64-bit userland.
+ */
+
+/**
+ * struct ethtool_cmd - link control and status
+ * @cmd: Command number = %ETHTOOL_GSET or %ETHTOOL_SSET
+ * @supported: Bitmask of %SUPPORTED_* flags for the link modes,
+ *     physical connectors and other link features for which the
+ *     interface supports autonegotiation or auto-detection.
+ *     Read-only.
+ * @advertising: Bitmask of %ADVERTISED_* flags for the link modes,
+ *     physical connectors and other link features that are
+ *     advertised through autonegotiation or enabled for
+ *     auto-detection.
+ * @speed: Low bits of the speed
+ * @duplex: Duplex mode; one of %DUPLEX_*
+ * @port: Physical connector type; one of %PORT_*
+ * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not
+ *     applicable.  For clause 45 PHYs this is the PRTAD.
+ * @transceiver: Historically used to distinguish different possible
+ *     PHY types, but not in a consistent way.  Deprecated.
+ * @autoneg: Enable/disable autonegotiation and auto-detection;
+ *     either %AUTONEG_DISABLE or %AUTONEG_ENABLE
+ * @mdio_support: Bitmask of %ETH_MDIO_SUPPORTS_* flags for the MDIO
+ *     protocols supported by the interface; 0 if unknown.
+ *     Read-only.
+ * @maxtxpkt: Historically used to report TX IRQ coalescing; now
+ *     obsoleted by &struct ethtool_coalesce.  Read-only; deprecated.
+ * @maxrxpkt: Historically used to report RX IRQ coalescing; now
+ *     obsoleted by &struct ethtool_coalesce.  Read-only; deprecated.
+ * @speed_hi: High bits of the speed
+ * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of
+ *     %ETH_TP_MDI_*.  If the status is unknown or not applicable, the
+ *     value will be %ETH_TP_MDI_INVALID.  Read-only.
+ * @eth_tp_mdix_ctrl: Ethernet twisted pair MDI(-X) control; one of
+ *     %ETH_TP_MDI_*.  If MDI(-X) control is not implemented, reads
+ *     yield %ETH_TP_MDI_INVALID and writes may be ignored or rejected.
+ *     When written successfully, the link should be renegotiated if
+ *     necessary.
+ * @lp_advertising: Bitmask of %ADVERTISED_* flags for the link modes
+ *     and other link features that the link partner advertised
+ *     through autonegotiation; 0 if unknown or not applicable.
+ *     Read-only.
+ *
+ * The link speed in Mbps is split between @speed and @speed_hi.  Use
+ * the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
+ * access it.
+ *
+ * If autonegotiation is disabled, the speed and @duplex represent the
+ * fixed link mode and are writable if the driver supports multiple
+ * link modes.  If it is enabled then they are read-only; if the link
+ * is up they represent the negotiated link mode; if the link is down,
+ * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and
+ * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode.
+ *
+ * Some hardware interfaces may have multiple PHYs and/or physical
+ * connectors fitted or do not allow the driver to detect which are
+ * fitted.  For these interfaces @port and/or @phy_address may be
+ * writable, possibly dependent on @autoneg being %AUTONEG_DISABLE.
+ * Otherwise, attempts to write different values may be ignored or
+ * rejected.
+ *
+ * Users should assume that all fields not marked read-only are
+ * writable and subject to validation by the driver.  They should use
+ * %ETHTOOL_GSET to get the current values before making specific
+ * changes and then applying them with %ETHTOOL_SSET.
+ *
+ * Drivers that implement set_settings() should validate all fields
+ * other than @cmd that are not described as read-only or deprecated,
+ * and must ignore all fields described as read-only.
+ *
+ * Deprecated fields should be ignored by both users and drivers.
+ */
 struct ethtool_cmd {
        __u32   cmd;
-       __u32   supported;      /* Features this interface supports */
-       __u32   advertising;    /* Features this interface advertises */
-       __u16   speed;          /* The forced speed (lower bits) in
-                                * Mbps. Please use
-                                * ethtool_cmd_speed()/_set() to
-                                * access it */
-       __u8    duplex;         /* Duplex, half or full */
-       __u8    port;           /* Which connector port */
-       __u8    phy_address;    /* MDIO PHY address (PRTAD for clause 45).
-                                * May be read-only or read-write
-                                * depending on the driver.
-                                */
-       __u8    transceiver;    /* Which transceiver to use */
-       __u8    autoneg;        /* Enable or disable autonegotiation */
-       __u8    mdio_support;   /* MDIO protocols supported.  Read-only.
-                                * Not set by all drivers.
-                                */
-       __u32   maxtxpkt;       /* Tx pkts before generating tx int */
-       __u32   maxrxpkt;       /* Rx pkts before generating rx int */
-       __u16   speed_hi;       /* The forced speed (upper
-                                * bits) in Mbps. Please use
-                                * ethtool_cmd_speed()/_set() to
-                                * access it */
-       __u8    eth_tp_mdix;    /* twisted pair MDI-X status */
-       __u8    eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
-                                  * link should be renegotiated if necessary
-                                  */
-       __u32   lp_advertising; /* Features the link partner advertises */
+       __u32   supported;
+       __u32   advertising;
+       __u16   speed;
+       __u8    duplex;
+       __u8    port;
+       __u8    phy_address;
+       __u8    transceiver;
+       __u8    autoneg;
+       __u8    mdio_support;
+       __u32   maxtxpkt;
+       __u32   maxrxpkt;
+       __u16   speed_hi;
+       __u8    eth_tp_mdix;
+       __u8    eth_tp_mdix_ctrl;
+       __u32   lp_advertising;
        __u32   reserved[2];
 };
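
The kernel-doc above points at ethtool_cmd_speed() and ethtool_cmd_speed_set() for the split speed field; the arithmetic those helpers perform amounts to the sketch below, my_-prefixed so it is not mistaken for the real inlines defined a few lines further down in this header.

    #include <linux/ethtool.h>

    /* Sketch of the low/high 16-bit split described in the kernel-doc above. */
    static inline __u32 my_cmd_speed(const struct ethtool_cmd *ep)
    {
            return (ep->speed_hi << 16) | ep->speed;
    }

    static inline void my_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
    {
            ep->speed    = (__u16)(speed & 0xffff);
            ep->speed_hi = (__u16)(speed >> 16);
    }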
 
@@ -79,37 +139,68 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
 
 #define ETHTOOL_FWVERS_LEN     32
 #define ETHTOOL_BUSINFO_LEN    32
-/* these strings are set to whatever the driver author decides... */
+
+/**
+ * struct ethtool_drvinfo - general driver and device information
+ * @cmd: Command number = %ETHTOOL_GDRVINFO
+ * @driver: Driver short name.  This should normally match the name
+ *     in its bus driver structure (e.g. pci_driver::name).  Must
+ *     not be an empty string.
+ * @version: Driver version string; may be an empty string
+ * @fw_version: Firmware version string; may be an empty string
+ * @bus_info: Device bus address.  This should match the dev_name()
+ *     string for the underlying bus device, if there is one.  May be
+ *     an empty string.
+ * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
+ *     %ETHTOOL_SPFLAGS commands; also the number of strings in the
+ *     %ETH_SS_PRIV_FLAGS set
+ * @n_stats: Number of u64 statistics returned by the %ETHTOOL_GSTATS
+ *     command; also the number of strings in the %ETH_SS_STATS set
+ * @testinfo_len: Number of results returned by the %ETHTOOL_TEST
+ *     command; also the number of strings in the %ETH_SS_TEST set
+ * @eedump_len: Size of EEPROM accessible through the %ETHTOOL_GEEPROM
+ *     and %ETHTOOL_SEEPROM commands, in bytes
+ * @regdump_len: Size of register dump returned by the %ETHTOOL_GREGS
+ *     command, in bytes
+ *
+ * Users can use the %ETHTOOL_GSSET_INFO command to get the number of
+ * strings in any string set (from Linux 2.6.34).
+ *
+ * Drivers should set at most @driver, @version, @fw_version and
+ * @bus_info in their get_drvinfo() implementation.  The ethtool
+ * core fills in the other fields using other driver operations.
+ */
 struct ethtool_drvinfo {
        __u32   cmd;
-       char    driver[32];     /* driver short name, "tulip", "eepro100" */
-       char    version[32];    /* driver version string */
-       char    fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */
-       char    bus_info[ETHTOOL_BUSINFO_LEN];  /* Bus info for this IF. */
-                               /* For PCI devices, use pci_name(pci_dev). */
+       char    driver[32];
+       char    version[32];
+       char    fw_version[ETHTOOL_FWVERS_LEN];
+       char    bus_info[ETHTOOL_BUSINFO_LEN];
        char    reserved1[32];
        char    reserved2[12];
-                               /*
-                                * Some struct members below are filled in
-                                * using ops->get_sset_count().  Obtaining
-                                * this info from ethtool_drvinfo is now
-                                * deprecated; Use ETHTOOL_GSSET_INFO
-                                * instead.
-                                */
-       __u32   n_priv_flags;   /* number of flags valid in ETHTOOL_GPFLAGS */
-       __u32   n_stats;        /* number of u64's from ETHTOOL_GSTATS */
+       __u32   n_priv_flags;
+       __u32   n_stats;
        __u32   testinfo_len;
-       __u32   eedump_len;     /* Size of data from ETHTOOL_GEEPROM (bytes) */
-       __u32   regdump_len;    /* Size of data from ETHTOOL_GREGS (bytes) */
+       __u32   eedump_len;
+       __u32   regdump_len;
 };
 
 #define SOPASS_MAX     6
-/* wake-on-lan settings */
+
+/**
+ * struct ethtool_wolinfo - Wake-On-Lan configuration
+ * @cmd: Command number = %ETHTOOL_GWOL or %ETHTOOL_SWOL
+ * @supported: Bitmask of %WAKE_* flags for supported Wake-On-Lan modes.
+ *     Read-only.
+ * @wolopts: Bitmask of %WAKE_* flags for enabled Wake-On-Lan modes.
+ * @sopass: SecureOn(tm) password; meaningful only if %WAKE_MAGICSECURE
+ *     is set in @wolopts.
+ */
 struct ethtool_wolinfo {
        __u32   cmd;
        __u32   supported;
        __u32   wolopts;
-       __u8    sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+       __u8    sopass[SOPASS_MAX];
 };
 
 /* for passing single values */
@@ -118,20 +209,51 @@ struct ethtool_value {
        __u32   data;
 };
 
-/* for passing big chunks of data */
+/**
+ * struct ethtool_regs - hardware register dump
+ * @cmd: Command number = %ETHTOOL_GREGS
+ * @version: Dump format version.  This is driver-specific and may
+ *     distinguish different chips/revisions.  Drivers must use new
+ *     version numbers whenever the dump format changes in an
+ *     incompatible way.
+ * @len: On entry, the real length of @data.  On return, the number of
+ *     bytes used.
+ * @data: Buffer for the register dump
+ *
+ * Users should use %ETHTOOL_GDRVINFO to find the maximum length of
+ * a register dump for the interface.  They must allocate the buffer
+ * immediately following this structure.
+ */
 struct ethtool_regs {
        __u32   cmd;
-       __u32   version; /* driver-specific, indicates different chips/revs */
-       __u32   len; /* bytes */
+       __u32   version;
+       __u32   len;
        __u8    data[0];
 };
 
-/* for passing EEPROM chunks */
+/**
+ * struct ethtool_eeprom - EEPROM dump
+ * @cmd: Command number = %ETHTOOL_GEEPROM, %ETHTOOL_GMODULEEEPROM or
+ *     %ETHTOOL_SEEPROM
+ * @magic: A 'magic cookie' value to guard against accidental changes.
+ *     The value passed in to %ETHTOOL_SEEPROM must match the value
+ *     returned by %ETHTOOL_GEEPROM for the same device.  This is
+ *     unused when @cmd is %ETHTOOL_GMODULEEEPROM.
+ * @offset: Offset within the EEPROM to begin reading/writing, in bytes
+ * @len: On entry, number of bytes to read/write.  On successful
+ *     return, number of bytes actually read/written.  In case of
+ *     error, this may indicate at what point the error occurred.
+ * @data: Buffer to read/write from
+ *
+ * Users may use %ETHTOOL_GDRVINFO or %ETHTOOL_GMODULEINFO to find
+ * the length of an on-board or module EEPROM, respectively.  They
+ * must allocate the buffer immediately following this structure.
+ */
 struct ethtool_eeprom {
        __u32   cmd;
        __u32   magic;
-       __u32   offset; /* in bytes */
-       __u32   len; /* in bytes */
+       __u32   offset;
+       __u32   len;
        __u8    data[0];
 };
 
@@ -229,17 +351,18 @@ struct ethtool_modinfo {
  * @rate_sample_interval: How often to do adaptive coalescing packet rate
  *     sampling, measured in seconds.  Must not be zero.
  *
- * Each pair of (usecs, max_frames) fields specifies this exit
- * condition for interrupt coalescing:
+ * Each pair of (usecs, max_frames) fields specifies that interrupts
+ * should be coalesced until
  *     (usecs > 0 && time_since_first_completion >= usecs) ||
  *     (max_frames > 0 && completed_frames >= max_frames)
+ *
  * It is illegal to set both usecs and max_frames to zero as this
  * would cause interrupts to never be generated.  To disable
  * coalescing, set usecs = 0 and max_frames = 1.
  *
  * Some implementations ignore the value of max_frames and use the
- * condition:
- *     time_since_first_completion >= usecs
+ * condition time_since_first_completion >= usecs
+ *
  * This is deprecated.  Drivers for hardware that does not support
  * counting completions should validate that max_frames == !rx_usecs.
  *
@@ -279,22 +402,37 @@ struct ethtool_coalesce {
        __u32   rate_sample_interval;
 };
 
-/* for configuring RX/TX ring parameters */
+/**
+ * struct ethtool_ringparam - RX/TX ring parameters
+ * @cmd: Command number = %ETHTOOL_GRINGPARAM or %ETHTOOL_SRINGPARAM
+ * @rx_max_pending: Maximum supported number of pending entries per
+ *     RX ring.  Read-only.
+ * @rx_mini_max_pending: Maximum supported number of pending entries
+ *     per RX mini ring.  Read-only.
+ * @rx_jumbo_max_pending: Maximum supported number of pending entries
+ *     per RX jumbo ring.  Read-only.
+ * @tx_max_pending: Maximum supported number of pending entries per
+ *     TX ring.  Read-only.
+ * @rx_pending: Current maximum number of pending entries per RX ring
+ * @rx_mini_pending: Current maximum number of pending entries per RX
+ *     mini ring
+ * @rx_jumbo_pending: Current maximum number of pending entries per RX
+ *     jumbo ring
+ * @tx_pending: Current maximum supported number of pending entries
+ *     per TX ring
+ *
+ * If the interface does not have separate RX mini and/or jumbo rings,
+ * @rx_mini_max_pending and/or @rx_jumbo_max_pending will be 0.
+ *
+ * There may also be driver-dependent minimum values for the number
+ * of entries per ring.
+ */
 struct ethtool_ringparam {
-       __u32   cmd;    /* ETHTOOL_{G,S}RINGPARAM */
-
-       /* Read only attributes.  These indicate the maximum number
-        * of pending RX/TX ring entries the driver will allow the
-        * user to set.
-        */
+       __u32   cmd;
        __u32   rx_max_pending;
        __u32   rx_mini_max_pending;
        __u32   rx_jumbo_max_pending;
        __u32   tx_max_pending;
-
-       /* Values changeable by the user.  The valid values are
-        * in the range 1 to the "*_max_pending" counterpart above.
-        */
        __u32   rx_pending;
        __u32   rx_mini_pending;
        __u32   rx_jumbo_pending;
@@ -329,51 +467,96 @@ struct ethtool_channels {
        __u32   combined_count;
 };
 
-/* for configuring link flow control parameters */
+/**
+ * struct ethtool_pauseparam - Ethernet pause (flow control) parameters
+ * @cmd: Command number = %ETHTOOL_GPAUSEPARAM or %ETHTOOL_SPAUSEPARAM
+ * @autoneg: Flag to enable autonegotiation of pause frame use
+ * @rx_pause: Flag to enable reception of pause frames
+ * @tx_pause: Flag to enable transmission of pause frames
+ *
+ * Drivers should reject a non-zero setting of @autoneg when
+ * autonegotiation is disabled (or not supported) for the link.
+ *
+ * If the link is autonegotiated, drivers should use
+ * mii_advertise_flowctrl() or similar code to set the advertised
+ * pause frame capabilities based on the @rx_pause and @tx_pause flags,
+ * even if @autoneg is zero.  They should also allow the advertised
+ * pause frame capabilities to be controlled directly through the
+ * advertising field of &struct ethtool_cmd.
+ *
+ * If @autoneg is non-zero, the MAC is configured to send and/or
+ * receive pause frames according to the result of autonegotiation.
+ * Otherwise, it is configured directly based on the @rx_pause and
+ * @tx_pause flags.
+ */
 struct ethtool_pauseparam {
-       __u32   cmd;    /* ETHTOOL_{G,S}PAUSEPARAM */
-
-       /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
-        * being true) the user may set 'autoneg' here non-zero to have the
-        * pause parameters be auto-negotiated too.  In such a case, the
-        * {rx,tx}_pause values below determine what capabilities are
-        * advertised.
-        *
-        * If 'autoneg' is zero or the link is not being auto-negotiated,
-        * then {rx,tx}_pause force the driver to use/not-use pause
-        * flow control.
-        */
+       __u32   cmd;
        __u32   autoneg;
        __u32   rx_pause;
        __u32   tx_pause;
 };
 
 #define ETH_GSTRING_LEN                32
+
+/**
+ * enum ethtool_stringset - string set ID
+ * @ETH_SS_TEST: Self-test result names, for use with %ETHTOOL_TEST
+ * @ETH_SS_STATS: Statistic names, for use with %ETHTOOL_GSTATS
+ * @ETH_SS_PRIV_FLAGS: Driver private flag names, for use with
+ *     %ETHTOOL_GPFLAGS and %ETHTOOL_SPFLAGS
+ * @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE;
+ *     now deprecated
+ * @ETH_SS_FEATURES: Device feature names
+ */
 enum ethtool_stringset {
        ETH_SS_TEST             = 0,
        ETH_SS_STATS,
        ETH_SS_PRIV_FLAGS,
-       ETH_SS_NTUPLE_FILTERS,  /* Do not use, GRXNTUPLE is now deprecated */
+       ETH_SS_NTUPLE_FILTERS,
        ETH_SS_FEATURES,
 };
 
-/* for passing string sets for data tagging */
+/**
+ * struct ethtool_gstrings - string set for data tagging
+ * @cmd: Command number = %ETHTOOL_GSTRINGS
+ * @string_set: String set ID; one of &enum ethtool_stringset
+ * @len: On return, the number of strings in the string set
+ * @data: Buffer for strings.  Each string is null-padded to a size of
+ *     %ETH_GSTRING_LEN.
+ *
+ * Users must use %ETHTOOL_GSSET_INFO to find the number of strings in
+ * the string set.  They must allocate a buffer of the appropriate
+ * size immediately following this structure.
+ */
 struct ethtool_gstrings {
-       __u32   cmd;            /* ETHTOOL_GSTRINGS */
-       __u32   string_set;     /* string set id e.c. ETH_SS_TEST, etc*/
-       __u32   len;            /* number of strings in the string set */
+       __u32   cmd;
+       __u32   string_set;
+       __u32   len;
        __u8    data[0];
 };
 
+/**
+ * struct ethtool_sset_info - string set information
+ * @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @sset_mask: On entry, a bitmask of string sets to query, with bits
+ *     numbered according to &enum ethtool_stringset.  On return, a
+ *     bitmask of those string sets queried that are supported.
+ * @data: Buffer for string set sizes.  On return, this contains the
+ *     size of each string set that was queried and supported, in
+ *     order of ID.
+ *
+ * Example: The user passes in @sset_mask = 0x7 (sets 0, 1, 2) and on
+ * return @sset_mask == 0x6 (sets 1, 2).  Then @data[0] contains the
+ * size of set 1 and @data[1] contains the size of set 2.
+ *
+ * Users must allocate a buffer of the appropriate size (4 * number of
+ * sets queried) immediately following this structure.
+ */
 struct ethtool_sset_info {
-       __u32   cmd;            /* ETHTOOL_GSSET_INFO */
+       __u32   cmd;
        __u32   reserved;
-       __u64   sset_mask;      /* input: each bit selects an sset to query */
-                               /* output: each bit a returned sset */
-       __u32   data[0];        /* ETH_SS_xxx count, in order, based on bits
-                                  in sset_mask.  One bit implies one
-                                  __u32, two bits implies two
-                                  __u32's, etc. */
+       __u64   sset_mask;
+       __u32   data[0];
 };
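
A hedged userspace sketch of the %ETHTOOL_GSSET_INFO flow described above; the interface name "eth0", the single-set query and the bare-bones error handling are assumptions made for brevity.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static void print_stats_count(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct ethtool_sset_info *info =
                    calloc(1, sizeof(*info) + sizeof(__u32));
            struct ifreq ifr;

            info->cmd = ETHTOOL_GSSET_INFO;
            info->sset_mask = 1ULL << ETH_SS_STATS;  /* query one set */

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)info;

            /* on return, sset_mask keeps only the supported sets and
             * data[] holds their sizes in set-ID order */
            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 && info->sset_mask)
                    printf("ETH_SS_STATS has %u strings\n", info->data[0]);
            free(info);
            close(fd);
    }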
 
 /**
@@ -393,24 +576,58 @@ enum ethtool_test_flags {
        ETH_TEST_FL_EXTERNAL_LB_DONE    = (1 << 3),
 };
 
-/* for requesting NIC test and getting results*/
+/**
+ * struct ethtool_test - device self-test invocation
+ * @cmd: Command number = %ETHTOOL_TEST
+ * @flags: A bitmask of flags from &enum ethtool_test_flags.  Some
+ *     flags may be set by the user on entry; others may be set by
+ *     the driver on return.
+ * @len: On return, the number of test results
+ * @data: Array of test results
+ *
+ * Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
+ * number of test results that will be returned.  They must allocate a
+ * buffer of the appropriate size (8 * number of results) immediately
+ * following this structure.
+ */
 struct ethtool_test {
-       __u32   cmd;            /* ETHTOOL_TEST */
-       __u32   flags;          /* ETH_TEST_FL_xxx */
+       __u32   cmd;
+       __u32   flags;
        __u32   reserved;
-       __u32   len;            /* result length, in number of u64 elements */
+       __u32   len;
        __u64   data[0];
 };
 
-/* for dumping NIC-specific statistics */
+/**
+ * struct ethtool_stats - device-specific statistics
+ * @cmd: Command number = %ETHTOOL_GSTATS
+ * @n_stats: On return, the number of statistics
+ * @data: Array of statistics
+ *
+ * Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
+ * number of statistics that will be returned.  They must allocate a
+ * buffer of the appropriate size (8 * number of statistics)
+ * immediately following this structure.
+ */
 struct ethtool_stats {
-       __u32   cmd;            /* ETHTOOL_GSTATS */
-       __u32   n_stats;        /* number of u64's being returned */
+       __u32   cmd;
+       __u32   n_stats;
        __u64   data[0];
 };
 
+/**
+ * struct ethtool_perm_addr - permanent hardware address
+ * @cmd: Command number = %ETHTOOL_GPERMADDR
+ * @size: On entry, the size of the buffer.  On return, the size of the
+ *     address.  The command fails if the buffer is too small.
+ * @data: Buffer for the address
+ *
+ * Users must allocate the buffer immediately following this structure.
+ * A buffer size of %MAX_ADDR_LEN should be sufficient for any address
+ * type.
+ */
 struct ethtool_perm_addr {
-       __u32   cmd;            /* ETHTOOL_GPERMADDR */
+       __u32   cmd;
        __u32   size;
        __u8    data[0];
 };
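
Reading the permanent address follows the same SIOCETHTOOL pattern; in this fragment, fd and ifr are assumed to be set up as in the previous sketch, and MAX_ADDR_LEN comes from <linux/netdevice.h>.

    struct ethtool_perm_addr *pa = calloc(1, sizeof(*pa) + MAX_ADDR_LEN);

    pa->cmd  = ETHTOOL_GPERMADDR;
    pa->size = MAX_ADDR_LEN;        /* in: buffer size; out: address size */
    ifr.ifr_data = (void *)pa;

    if (ioctl(fd, SIOCETHTOOL, &ifr) == 0 && pa->size == 6)
            printf("permanent MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
                   pa->data[0], pa->data[1], pa->data[2],
                   pa->data[3], pa->data[4], pa->data[5]);
    free(pa);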
@@ -593,7 +810,7 @@ struct ethtool_rx_flow_spec {
  * %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
  * location, and may remove a rule at a later location (lower
  * priority) that matches exactly the same set of flows.  The special
- * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * values are %RX_CLS_LOC_ANY, selecting any location;
  * %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
  * priority); and %RX_CLS_LOC_LAST, selecting the last suitable
  * location (minimum priority).  Additional special values may be
@@ -704,9 +921,6 @@ struct ethtool_flash {
  *      for %ETHTOOL_GET_DUMP_FLAG command
  * @data: data collected for get dump data operation
  */
-
-#define ETH_FW_DUMP_DISABLE 0
-
 struct ethtool_dump {
        __u32   cmd;
        __u32   version;
@@ -715,6 +929,8 @@ struct ethtool_dump {
        __u8    data[0];
 };
 
+#define ETH_FW_DUMP_DISABLE 0
+
 /* for returning and changing feature sets */
 
 /**
@@ -734,8 +950,9 @@ struct ethtool_get_features_block {
 /**
  * struct ethtool_gfeatures - command to get state of device's features
  * @cmd: command number = %ETHTOOL_GFEATURES
- * @size: in: number of elements in the features[] array;
- *       out: number of elements in features[] needed to hold all features
+ * @size: On entry, the number of elements in the features[] array;
+ *     on return, the number of elements in features[] needed to hold
+ *     all features
  * @features: state of features
  */
 struct ethtool_gfeatures {
@@ -905,7 +1122,6 @@ enum ethtool_sfeatures_retval_bits {
 #define SPARC_ETH_GSET         ETHTOOL_GSET
 #define SPARC_ETH_SSET         ETHTOOL_SSET
 
-/* Indicates what features are supported by the interface. */
 #define SUPPORTED_10baseT_Half         (1 << 0)
 #define SUPPORTED_10baseT_Full         (1 << 1)
 #define SUPPORTED_100baseT_Half                (1 << 2)
@@ -934,7 +1150,6 @@ enum ethtool_sfeatures_retval_bits {
 #define SUPPORTED_40000baseSR4_Full    (1 << 25)
 #define SUPPORTED_40000baseLR4_Full    (1 << 26)
 
-/* Indicates what features are advertised by the interface. */
 #define ADVERTISED_10baseT_Half                (1 << 0)
 #define ADVERTISED_10baseT_Full                (1 << 1)
 #define ADVERTISED_100baseT_Half       (1 << 2)
@@ -999,9 +1214,7 @@ enum ethtool_sfeatures_retval_bits {
 #define XCVR_DUMMY2            0x03
 #define XCVR_DUMMY3            0x04
 
-/* Enable or disable autonegotiation.  If this is set to enable,
- * the forced link modes above are completely ignored.
- */
+/* Enable or disable autonegotiation. */
 #define AUTONEG_DISABLE                0x00
 #define AUTONEG_ENABLE         0x01
 
index d758163b0e432f6c461d2b32335dcf138d217df7..9cf2394f0bcff00b6629933e3a864d69abd1da55 100644 (file)
 #define        IFALIASZ        256
 #include <linux/hdlc/ioctl.h>
 
-/* Standard interface flags (netdevice->flags). */
-#define        IFF_UP          0x1             /* interface is up              */
-#define        IFF_BROADCAST   0x2             /* broadcast address valid      */
-#define        IFF_DEBUG       0x4             /* turn on debugging            */
-#define        IFF_LOOPBACK    0x8             /* is a loopback net            */
-#define        IFF_POINTOPOINT 0x10            /* interface is has p-p link    */
-#define        IFF_NOTRAILERS  0x20            /* avoid use of trailers        */
-#define        IFF_RUNNING     0x40            /* interface RFC2863 OPER_UP    */
-#define        IFF_NOARP       0x80            /* no ARP protocol              */
-#define        IFF_PROMISC     0x100           /* receive all packets          */
-#define        IFF_ALLMULTI    0x200           /* receive all multicast packets*/
-
-#define IFF_MASTER     0x400           /* master of a load balancer    */
-#define IFF_SLAVE      0x800           /* slave of a load balancer     */
-
-#define IFF_MULTICAST  0x1000          /* Supports multicast           */
-
-#define IFF_PORTSEL    0x2000          /* can set media type           */
-#define IFF_AUTOMEDIA  0x4000          /* auto media select active     */
-#define IFF_DYNAMIC    0x8000          /* dialup device with changing addresses*/
-
-#define IFF_LOWER_UP   0x10000         /* driver signals L1 up         */
-#define IFF_DORMANT    0x20000         /* driver signals dormant       */
+/**
+ * enum net_device_flags - &struct net_device flags
+ *
+ * These are the &struct net_device flags, they can be set by drivers, the
+ * kernel and some can be triggered by userspace. Userspace can query and
+ * set these flags using userspace utilities but there is also a sysfs
+ * entry available for all dev flags which can be queried and set. These flags
+ * are shared for all types of net_devices. The sysfs entries are available
+ * via /sys/class/net/<dev>/flags. Flags which can be toggled through sysfs
+ * are annotated below, note that only a few flags can be toggled and some
+ * other flags are always always preserved from the original net_device flags
+ * even if you try to set them via sysfs. Flags which are always preserved
+ * are kept under the flag grouping @IFF_VOLATILE. Flags which are volatile
+ * are annotated below as such.
+ *
+ * You should have a pretty good reason to be extending these flags.
+ *
+ * @IFF_UP: interface is up. Can be toggled through sysfs.
+ * @IFF_BROADCAST: broadcast address valid. Volatile.
+ * @IFF_DEBUG: turn on debugging. Can be toggled through sysfs.
+ * @IFF_LOOPBACK: is a loopback net. Volatile.
+ * @IFF_POINTOPOINT: interface has a p-p link. Volatile.
+ * @IFF_NOTRAILERS: avoid use of trailers. Can be toggled through sysfs.
+ *     Volatile.
+ * @IFF_RUNNING: interface RFC2863 OPER_UP. Volatile.
+ * @IFF_NOARP: no ARP protocol. Can be toggled through sysfs. Volatile.
+ * @IFF_PROMISC: receive all packets. Can be toggled through sysfs.
+ * @IFF_ALLMULTI: receive all multicast packets. Can be toggled through
+ *     sysfs.
+ * @IFF_MASTER: master of a load balancer. Volatile.
+ * @IFF_SLAVE: slave of a load balancer. Volatile.
+ * @IFF_MULTICAST: Supports multicast. Can be toggled through sysfs.
+ * @IFF_PORTSEL: can set media type. Can be toggled through sysfs.
+ * @IFF_AUTOMEDIA: auto media select active. Can be toggled through sysfs.
+ * @IFF_DYNAMIC: dialup device with changing addresses. Can be toggled
+ *     through sysfs.
+ * @IFF_LOWER_UP: driver signals L1 up. Volatile.
+ * @IFF_DORMANT: driver signals dormant. Volatile.
+ * @IFF_ECHO: echo sent packets. Volatile.
+ */
+enum net_device_flags {
+       IFF_UP                          = 1<<0,  /* sysfs */
+       IFF_BROADCAST                   = 1<<1,  /* volatile */
+       IFF_DEBUG                       = 1<<2,  /* sysfs */
+       IFF_LOOPBACK                    = 1<<3,  /* volatile */
+       IFF_POINTOPOINT                 = 1<<4,  /* volatile */
+       IFF_NOTRAILERS                  = 1<<5,  /* sysfs */
+       IFF_RUNNING                     = 1<<6,  /* volatile */
+       IFF_NOARP                       = 1<<7,  /* sysfs */
+       IFF_PROMISC                     = 1<<8,  /* sysfs */
+       IFF_ALLMULTI                    = 1<<9,  /* sysfs */
+       IFF_MASTER                      = 1<<10, /* volatile */
+       IFF_SLAVE                       = 1<<11, /* volatile */
+       IFF_MULTICAST                   = 1<<12, /* sysfs */
+       IFF_PORTSEL                     = 1<<13, /* sysfs */
+       IFF_AUTOMEDIA                   = 1<<14, /* sysfs */
+       IFF_DYNAMIC                     = 1<<15, /* sysfs */
+       IFF_LOWER_UP                    = 1<<16, /* volatile */
+       IFF_DORMANT                     = 1<<17, /* volatile */
+       IFF_ECHO                        = 1<<18, /* volatile */
+};
 
-#define IFF_ECHO       0x40000         /* echo sent packets            */
+#define IFF_UP                         IFF_UP
+#define IFF_BROADCAST                  IFF_BROADCAST
+#define IFF_DEBUG                      IFF_DEBUG
+#define IFF_LOOPBACK                   IFF_LOOPBACK
+#define IFF_POINTOPOINT                        IFF_POINTOPOINT
+#define IFF_NOTRAILERS                 IFF_NOTRAILERS
+#define IFF_RUNNING                    IFF_RUNNING
+#define IFF_NOARP                      IFF_NOARP
+#define IFF_PROMISC                    IFF_PROMISC
+#define IFF_ALLMULTI                   IFF_ALLMULTI
+#define IFF_MASTER                     IFF_MASTER
+#define IFF_SLAVE                      IFF_SLAVE
+#define IFF_MULTICAST                  IFF_MULTICAST
+#define IFF_PORTSEL                    IFF_PORTSEL
+#define IFF_AUTOMEDIA                  IFF_AUTOMEDIA
+#define IFF_DYNAMIC                    IFF_DYNAMIC
+#define IFF_LOWER_UP                   IFF_LOWER_UP
+#define IFF_DORMANT                    IFF_DORMANT
+#define IFF_ECHO                       IFF_ECHO
 
 #define IFF_VOLATILE   (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
                IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
 
-/* Private (from user) interface flags (netdevice->priv_flags). */
-#define IFF_802_1Q_VLAN 0x1             /* 802.1Q VLAN device.          */
-#define IFF_EBRIDGE    0x2             /* Ethernet bridging device.    */
-#define IFF_SLAVE_INACTIVE     0x4     /* bonding slave not the curr. active */
-#define IFF_MASTER_8023AD      0x8     /* bonding master, 802.3ad.     */
-#define IFF_MASTER_ALB 0x10            /* bonding master, balance-alb. */
-#define IFF_BONDING    0x20            /* bonding master or slave      */
-#define IFF_SLAVE_NEEDARP 0x40         /* need ARPs for validation     */
-#define IFF_ISATAP     0x80            /* ISATAP interface (RFC4214)   */
-#define IFF_MASTER_ARPMON 0x100                /* bonding master, ARP mon in use */
-#define IFF_WAN_HDLC   0x200           /* WAN HDLC device              */
-#define IFF_XMIT_DST_RELEASE 0x400     /* dev_hard_start_xmit() is allowed to
-                                        * release skb->dst
-                                        */
-#define IFF_DONT_BRIDGE 0x800          /* disallow bridging this ether dev */
-#define IFF_DISABLE_NETPOLL    0x1000  /* disable netpoll at run-time */
-#define IFF_MACVLAN_PORT       0x2000  /* device used as macvlan port */
-#define IFF_BRIDGE_PORT        0x4000          /* device used as bridge port */
-#define IFF_OVS_DATAPATH       0x8000  /* device used as Open vSwitch
-                                        * datapath port */
-#define IFF_TX_SKB_SHARING     0x10000 /* The interface supports sharing
-                                        * skbs on transmit */
-#define IFF_UNICAST_FLT        0x20000         /* Supports unicast filtering   */
-#define IFF_TEAM_PORT  0x40000         /* device used as team port */
-#define IFF_SUPP_NOFCS 0x80000         /* device supports sending custom FCS */
-#define IFF_LIVE_ADDR_CHANGE 0x100000  /* device supports hardware address
-                                        * change when it's running */
-#define IFF_MACVLAN 0x200000           /* Macvlan device */
-
-
 #define IF_GET_IFACE   0x0001          /* for querying only */
 #define IF_GET_PROTO   0x0002
 
index 2ce0f6a78fa5b47ba6308b371cf6b0c8f21e579b..0f8210b8e0bc47ac0b7faab45a12ca6dcfbdf72d 100644 (file)
 #define ETH_P_SLOW     0x8809          /* Slow Protocol. See 802.3ad 43B */
 #define ETH_P_WCCP     0x883E          /* Web-cache coordination protocol
                                         * defined in draft-wilson-wrec-wccp-v2-00.txt */
-#define ETH_P_PPP_DISC 0x8863          /* PPPoE discovery messages     */
-#define ETH_P_PPP_SES  0x8864          /* PPPoE session messages       */
 #define ETH_P_MPLS_UC  0x8847          /* MPLS Unicast traffic         */
 #define ETH_P_MPLS_MC  0x8848          /* MPLS Multicast traffic       */
 #define ETH_P_ATMMPOA  0x884c          /* MultiProtocol Over ATM       */
+#define ETH_P_PPP_DISC 0x8863          /* PPPoE discovery messages     */
+#define ETH_P_PPP_SES  0x8864          /* PPPoE session messages       */
 #define ETH_P_LINK_CTL 0x886c          /* HPNA, wlan link local tunnel */
 #define ETH_P_ATMFATE  0x8884          /* Frame-based ATM Transport
                                         * over Ethernet
@@ -89,6 +89,8 @@
 #define ETH_P_FCOE     0x8906          /* Fibre Channel over Ethernet  */
 #define ETH_P_TDLS     0x890D          /* TDLS */
 #define ETH_P_FIP      0x8914          /* FCoE Initialization Protocol */
+#define ETH_P_80221    0x8917          /* IEEE 802.21 Media Independent Handover Protocol */
+#define ETH_P_LOOPBACK 0x9000          /* Ethernet loopback packet, per IEEE 802.3 */
 #define ETH_P_QINQ1    0x9100          /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_QINQ2    0x9200          /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_QINQ3    0x9300          /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
index 16410b6e7819fa93f242665b036812f44fd5fa41..9a7f7ace66494e144c55c08d5bbba085de3a2b7c 100644 (file)
@@ -144,6 +144,7 @@ enum {
        IFLA_NUM_RX_QUEUES,
        IFLA_CARRIER,
        IFLA_PHYS_PORT_ID,
+       IFLA_CARRIER_CHANGES,
        __IFLA_MAX
 };
 
index 393c5de09d42c69393cb228e0d401189f072deb4..c33a65e3d62c85d104d38ab082d997df13cd4c0b 100644 (file)
@@ -120,6 +120,10 @@ struct in_addr {
  * this socket to prevent accepting spoofed ones.
  */
 #define IP_PMTUDISC_INTERFACE          4
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
+ * fragmented if they exceed the interface mtu
+ */
+#define IP_PMTUDISC_OMIT               5
 
 #define IP_MULTICAST_IF                        32
 #define IP_MULTICAST_TTL               33
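
A brief userspace sketch of selecting the new IP_PMTUDISC_OMIT mode via the existing IP_MTU_DISCOVER socket option; it assumes headers new enough to define IP_PMTUDISC_OMIT and omits error handling.

    #include <sys/socket.h>
    #include <netinet/in.h>

    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int pmtu = IP_PMTUDISC_OMIT;    /* like IP_PMTUDISC_INTERFACE, but fragmentation is allowed */

    /* IP_MTU_DISCOVER is the existing selector for all IP_PMTUDISC_* modes */
    setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));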
index e9a1d2d973b6aef256808d248d007804a689a1df..0d8e0f0342dc183acd5393e11bfe203f019ca165 100644 (file)
@@ -185,6 +185,10 @@ struct in6_flowlabel_req {
  * also see comments on IP_PMTUDISC_INTERFACE
  */
 #define IPV6_PMTUDISC_INTERFACE                4
+/* weaker version of IPV6_PMTUDISC_INTERFACE, which allows packets to
+ * get fragmented if they exceed the interface mtu
+ */
+#define IPV6_PMTUDISC_OMIT             5
 
 /* Flowlabel */
 #define IPV6_FLOWLABEL_MGR     32
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
new file mode 100644 (file)
index 0000000..bc9abfe
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef _UAPI_MPLS_H
+#define _UAPI_MPLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* Reference: RFC 5462, RFC 3032
+ *
+ *  0                   1                   2                   3
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                Label                  | TC  |S|       TTL     |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *     Label:  Label Value, 20 bits
+ *     TC:     Traffic Class field, 3 bits
+ *     S:      Bottom of Stack, 1 bit
+ *     TTL:    Time to Live, 8 bits
+ */
+
+struct mpls_label {
+       __be32 entry;
+};
+
+#define MPLS_LS_LABEL_MASK      0xFFFFF000
+#define MPLS_LS_LABEL_SHIFT     12
+#define MPLS_LS_TC_MASK         0x00000E00
+#define MPLS_LS_TC_SHIFT        9
+#define MPLS_LS_S_MASK          0x00000100
+#define MPLS_LS_S_SHIFT         8
+#define MPLS_LS_TTL_MASK        0x000000FF
+#define MPLS_LS_TTL_SHIFT       0
+
+#endif /* _UAPI_MPLS_H */
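
A minimal decoding sketch for the label stack entry diagrammed above, using only the masks and shifts this new header defines; the function name and the userspace includes are additions for illustration.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <linux/mpls.h>

    static void mpls_print(const struct mpls_label *lse)
    {
            uint32_t e = ntohl(lse->entry);     /* entry is big-endian on the wire */

            printf("label=%u tc=%u s=%u ttl=%u\n",
                   (e & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT,
                   (e & MPLS_LS_TC_MASK)    >> MPLS_LS_TC_SHIFT,
                   (e & MPLS_LS_S_MASK)     >> MPLS_LS_S_SHIFT,
                   (e & MPLS_LS_TTL_MASK)   >> MPLS_LS_TTL_SHIFT);
    }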
index 6b9500bc2d56a0338b39dbf1b46540edc6ffe141..fdfbd1c17065e33fb45a3e70ebcc6d9cf6891dac 100644 (file)
@@ -49,5 +49,11 @@ enum {
         IF_PORT_100BASEFX
 };
 
+/* hardware address assignment types */
+#define NET_ADDR_PERM          0       /* address is permanent (default) */
+#define NET_ADDR_RANDOM                1       /* address is generated randomly */
+#define NET_ADDR_STOLEN                2       /* address is stolen from other device */
+#define NET_ADDR_SET           3       /* address is set using
+                                        * dev_set_mac_address() */
 
 #endif /* _UAPI_LINUX_NETDEVICE_H */
index 25d3b2f79c022e92cfd8f851e0f9ae8a2f7b731a..78c2f2e799208a467b75c8035b4d3362365362bb 100644 (file)
@@ -82,6 +82,8 @@ enum {
        IPSET_ATTR_PROTO,       /* 7 */
        IPSET_ATTR_CADT_FLAGS,  /* 8 */
        IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO,     /* 9 */
+       IPSET_ATTR_MARK,        /* 10 */
+       IPSET_ATTR_MARKMASK,    /* 11 */
        /* Reserve empty slots */
        IPSET_ATTR_CADT_MAX = 16,
        /* Create-only specific attributes */
@@ -144,6 +146,7 @@ enum ipset_errno {
        IPSET_ERR_IPADDR_IPV6,
        IPSET_ERR_COUNTER,
        IPSET_ERR_COMMENT,
+       IPSET_ERR_INVALID_MARKMASK,
 
        /* Type specific error codes */
        IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -182,9 +185,18 @@ enum ipset_cadt_flags {
        IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS),
        IPSET_FLAG_BIT_WITH_COMMENT = 4,
        IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
+       IPSET_FLAG_BIT_WITH_FORCEADD = 5,
+       IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
        IPSET_FLAG_CADT_MAX     = 15,
 };
 
+/* The flag bits which correspond to the non-extension create flags */
+enum ipset_create_flags {
+       IPSET_CREATE_FLAG_BIT_FORCEADD = 0,
+       IPSET_CREATE_FLAG_FORCEADD = (1 << IPSET_CREATE_FLAG_BIT_FORCEADD),
+       IPSET_CREATE_FLAG_BIT_MAX = 7,
+};
+
 /* Commands with settype-specific attributes */
 enum ipset_adt {
        IPSET_ADD,
index 83c985a6170bd52938764732f45a70d09ae2a28e..c88ccbfda5f1b111a5fa43e1d1803bcccf95b521 100644 (file)
@@ -1,7 +1,8 @@
 #ifndef _LINUX_NF_TABLES_H
 #define _LINUX_NF_TABLES_H
 
-#define NFT_CHAIN_MAXNAMELEN 32
+#define NFT_CHAIN_MAXNAMELEN   32
+#define NFT_USERDATA_MAXLEN    256
 
 enum nft_registers {
        NFT_REG_VERDICT,
@@ -156,6 +157,7 @@ enum nft_chain_attributes {
  * @NFTA_RULE_EXPRESSIONS: list of expressions (NLA_NESTED: nft_expr_attributes)
  * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes)
  * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
+ * @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
  */
 enum nft_rule_attributes {
        NFTA_RULE_UNSPEC,
@@ -165,6 +167,7 @@ enum nft_rule_attributes {
        NFTA_RULE_EXPRESSIONS,
        NFTA_RULE_COMPAT,
        NFTA_RULE_POSITION,
+       NFTA_RULE_USERDATA,
        __NFTA_RULE_MAX
 };
 #define NFTA_RULE_MAX          (__NFTA_RULE_MAX - 1)
@@ -601,6 +604,7 @@ enum nft_ct_keys {
        NFT_CT_PROTOCOL,
        NFT_CT_PROTO_SRC,
        NFT_CT_PROTO_DST,
+       NFT_CT_LABELS,
 };
 
 /**
index 6ad6cc03ccd3aeec4074f187836efac62ed31a0f..9789dc95b6a8fb775a16e4fff1612101cae879c7 100644 (file)
@@ -150,6 +150,8 @@ enum nfc_commands {
  * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
  * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
  * @NFC_ATTR_APDU: Secure element APDU
+ * @NFC_ATTR_TARGET_ISO15693_DSFID: ISO 15693 Data Storage Format Identifier
+ * @NFC_ATTR_TARGET_ISO15693_UID: ISO 15693 Unique Identifier
  */
 enum nfc_attrs {
        NFC_ATTR_UNSPEC,
@@ -178,6 +180,8 @@ enum nfc_attrs {
        NFC_ATTR_SE_AID,
        NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
        NFC_ATTR_SE_APDU,
+       NFC_ATTR_TARGET_ISO15693_DSFID,
+       NFC_ATTR_TARGET_ISO15693_UID,
 /* private: internal use only */
        __NFC_ATTR_AFTER_LAST
 };
@@ -200,6 +204,7 @@ enum nfc_sdp_attr {
 #define NFC_SENSF_RES_MAXSIZE 18
 #define NFC_GB_MAXSIZE        48
 #define NFC_FIRMWARE_NAME_MAXSIZE 32
+#define NFC_ISO15693_UID_MAXSIZE 8
 
 /* NFC protocols */
 #define NFC_PROTO_JEWEL                1
@@ -208,8 +213,9 @@ enum nfc_sdp_attr {
 #define NFC_PROTO_ISO14443     4
 #define NFC_PROTO_NFC_DEP      5
 #define NFC_PROTO_ISO14443_B   6
+#define NFC_PROTO_ISO15693     7
 
-#define NFC_PROTO_MAX          7
+#define NFC_PROTO_MAX          8
 
 /* NFC communication modes */
 #define NFC_COMM_ACTIVE  0
@@ -227,6 +233,7 @@ enum nfc_sdp_attr {
 #define NFC_PROTO_ISO14443_MASK          (1 << NFC_PROTO_ISO14443)
 #define NFC_PROTO_NFC_DEP_MASK   (1 << NFC_PROTO_NFC_DEP)
 #define NFC_PROTO_ISO14443_B_MASK (1 << NFC_PROTO_ISO14443_B)
+#define NFC_PROTO_ISO15693_MASK          (1 << NFC_PROTO_ISO15693)
 
 /* NFC Secure Elements */
 #define NFC_SE_UICC     0x1
index 91054fd660e083156f02b826f0a5b812d7faac26..1ba9d626aa833db91c462560f27054b30e91939d 100644 (file)
  *     passed, all channels allowed for the current regulatory domain
  *     are used.  Extra IEs can also be passed from the userspace by
  *     using the %NL80211_ATTR_IE attribute.
- * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan.  Returns -ENOENT
- *     if scheduled scan is not running.
+ * @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
+ *     scheduled scan is not running. The caller may assume that as soon
+ *     as the call returns, it is safe to start a new scheduled scan again.
  * @NL80211_CMD_SCHED_SCAN_RESULTS: indicates that there are scheduled scan
  *     results available.
  * @NL80211_CMD_SCHED_SCAN_STOPPED: indicates that the scheduled scan has
  *     %NL80211_ATTR_SSID attribute, and can optionally specify the association
  *     IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
  *     %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
- *     %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
- *     %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
+ *     %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
+ *     %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, %NL80211_ATTR_MAC_HINT, and
+ *     %NL80211_ATTR_WIPHY_FREQ_HINT.
+ *     If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are
+ *     restrictions on BSS selection, i.e., they effectively prevent roaming
+ *     within the ESS. %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT
+ *     can be included to provide a recommendation of the initial BSS while
+ *     allowing the driver to roam to other BSSes within the ESS and also to
+ *     ignore this recommendation if the indicated BSS is not ideal. Only one
+ *     set of BSSID,frequency parameters is used (i.e., either the enforcing
+ *     %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
+ *     %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
  *     Background scan period can optionally be
  *     specified in %NL80211_ATTR_BG_SCAN_PERIOD,
  *     if not specified default background scan configuration
@@ -1555,6 +1566,19 @@ enum nl80211_commands {
  *     data is in the format defined for the payload of the QoS Map Set element
  *     in IEEE Std 802.11-2012, 8.4.2.97.
  *
+ * @NL80211_ATTR_MAC_HINT: MAC address recommendation as initial BSS
+ * @NL80211_ATTR_WIPHY_FREQ_HINT: frequency of the recommended initial BSS
+ *
+ * @NL80211_ATTR_MAX_AP_ASSOC_STA: Device attribute that indicates how many
+ *     associated stations are supported in AP mode (including P2P GO); u32.
+ *     Since drivers may not have a fixed limit on the maximum number (e.g.,
+ *     other concurrent operations may affect this), drivers are allowed to
+ *     advertise values that cannot always be met. In such cases, an attempt
+ *     to add a new station entry with @NL80211_CMD_NEW_STATION may fail.
+ *
+ * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
+ *     As specified in the &enum nl80211_tdls_peer_capability.
+ *
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
  */
@@ -1883,6 +1907,13 @@ enum nl80211_attrs {
 
        NL80211_ATTR_QOS_MAP,
 
+       NL80211_ATTR_MAC_HINT,
+       NL80211_ATTR_WIPHY_FREQ_HINT,
+
+       NL80211_ATTR_MAX_AP_ASSOC_STA,
+
+       NL80211_ATTR_TDLS_PEER_CAPABILITY,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -2304,6 +2335,7 @@ enum nl80211_band_attr {
  * @NL80211_FREQUENCY_ATTR_NO_160MHZ: any 160 MHz (but not 80+80) channel
  *     using this channel as the primary or any of the secondary channels
  *     isn't possible
+ * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
  * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
  *     currently defined
  * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -2322,6 +2354,7 @@ enum nl80211_frequency_attr {
        NL80211_FREQUENCY_ATTR_NO_HT40_PLUS,
        NL80211_FREQUENCY_ATTR_NO_80MHZ,
        NL80211_FREQUENCY_ATTR_NO_160MHZ,
+       NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
 
        /* keep last */
        __NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -2412,12 +2445,14 @@ enum nl80211_reg_type {
  *     in KHz. This is not a center a frequency but an actual regulatory
  *     band edge.
  * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
- *     frequency range, in KHz.
+ *     frequency range, in KHz.
  * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
  *     for a given frequency range. The value is in mBi (100 * dBi).
  *     If you don't have one then don't send this.
  * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
  *     a given frequency range. The value is in mBm (100 * dBm).
+ * @NL80211_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
+ *     If not present or 0 default CAC time will be used.
  * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number
  *     currently defined
  * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use
@@ -2433,6 +2468,8 @@ enum nl80211_reg_rule_attr {
        NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
        NL80211_ATTR_POWER_RULE_MAX_EIRP,
 
+       NL80211_ATTR_DFS_CAC_TIME,
+
        /* keep last */
        __NL80211_REG_RULE_ATTR_AFTER_LAST,
        NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
@@ -2442,9 +2479,15 @@ enum nl80211_reg_rule_attr {
  * enum nl80211_sched_scan_match_attr - scheduled scan match attributes
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
  * @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
- * only report BSS with matching SSID.
+ *     only report BSS with matching SSID.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
- *     BSS in scan results. Filtering is turned off if not specified.
+ *     BSS in scan results. Filtering is turned off if not specified. Note that
+ *     if this attribute is in a match set of its own, then it is treated as
+ *     the default value for all matchsets with an SSID, rather than being a
+ *     matchset of its own without an RSSI filter. This is due to problems with
+ *     how this API was implemented in the past. Also, due to the same problem,
+ *     the only way to create a matchset with only an RSSI filter (with this
+ *     attribute) is if there's only a single matchset with the RSSI attribute.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
  *     attribute number currently defined
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -2477,6 +2520,9 @@ enum nl80211_sched_scan_match_attr {
  * @NL80211_RRF_NO_IR: no mechanisms that initiate radiation are allowed,
  *     this includes probe requests or modes of operation that require
  *     beaconing.
+ * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
+ *     base on contiguous rules and wider channels will be allowed to cross
+ *     multiple contiguous/overlapping frequency ranges.
  */
 enum nl80211_reg_rule_flags {
        NL80211_RRF_NO_OFDM             = 1<<0,
@@ -2488,6 +2534,7 @@ enum nl80211_reg_rule_flags {
        NL80211_RRF_PTMP_ONLY           = 1<<6,
        NL80211_RRF_NO_IR               = 1<<7,
        __NL80211_RRF_NO_IBSS           = 1<<8,
+       NL80211_RRF_AUTO_BW             = 1<<11,
 };
 
 #define NL80211_RRF_PASSIVE_SCAN       NL80211_RRF_NO_IR
@@ -3131,6 +3178,7 @@ enum nl80211_key_attributes {
  *     in an array of MCS numbers.
  * @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection,
  *     see &struct nl80211_txrate_vht
+ * @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi
  * @__NL80211_TXRATE_AFTER_LAST: internal
  * @NL80211_TXRATE_MAX: highest TX rate attribute
  */
@@ -3139,6 +3187,7 @@ enum nl80211_tx_rate_attributes {
        NL80211_TXRATE_LEGACY,
        NL80211_TXRATE_HT,
        NL80211_TXRATE_VHT,
+       NL80211_TXRATE_GI,
 
        /* keep last */
        __NL80211_TXRATE_AFTER_LAST,
@@ -3156,6 +3205,12 @@ struct nl80211_txrate_vht {
        __u16 mcs[NL80211_VHT_NSS_MAX];
 };
 
+enum nl80211_txrate_gi {
+       NL80211_TXRATE_DEFAULT_GI,
+       NL80211_TXRATE_FORCE_SGI,
+       NL80211_TXRATE_FORCE_LGI,
+};
+
 /**
  * enum nl80211_band - Frequency band
  * @NL80211_BAND_2GHZ: 2.4 GHz ISM band
@@ -3801,11 +3856,6 @@ enum nl80211_ap_sme_features {
  * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *     to work properly to suppport receiving regulatory hints from
  *     cellular base stations.
- * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: If this is set, an active
- *     P2P Device (%NL80211_IFTYPE_P2P_DEVICE) requires its own channel
- *     in the interface combinations, even when it's only used for scan
- *     and remain-on-channel. This could be due to, for example, the
- *     remain-on-channel implementation requiring a channel context.
  * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
  *     equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
  *     mode
@@ -3847,7 +3897,7 @@ enum nl80211_feature_flags {
        NL80211_FEATURE_HT_IBSS                         = 1 << 1,
        NL80211_FEATURE_INACTIVITY_TIMER                = 1 << 2,
        NL80211_FEATURE_CELL_BASE_REG_HINTS             = 1 << 3,
-       NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL        = 1 << 4,
+       /* bit 4 is reserved - don't use */
        NL80211_FEATURE_SAE                             = 1 << 5,
        NL80211_FEATURE_LOW_PRIORITY_SCAN               = 1 << 6,
        NL80211_FEATURE_SCAN_FLUSH                      = 1 << 7,
@@ -4037,4 +4087,20 @@ struct nl80211_vendor_cmd_info {
        __u32 subcmd;
 };
 
+/**
+ * enum nl80211_tdls_peer_capability - TDLS peer flags.
+ *
+ * Used by tdls_mgmt() to determine which conditional elements need
+ * to be added to TDLS Setup frames.
+ *
+ * @NL80211_TDLS_PEER_HT: TDLS peer is HT capable.
+ * @NL80211_TDLS_PEER_VHT: TDLS peer is VHT capable.
+ * @NL80211_TDLS_PEER_WMM: TDLS peer is WMM capable.
+ */
+enum nl80211_tdls_peer_capability {
+       NL80211_TDLS_PEER_HT = 1<<0,
+       NL80211_TDLS_PEER_VHT = 1<<1,
+       NL80211_TDLS_PEER_WMM = 1<<2,
+};
+
 #endif /* __LINUX_NL80211_H */
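
To illustrate the new hint attributes documented above, a userspace SME using libnl-3 could populate an NL80211_CMD_CONNECT request along the following lines. This is only a sketch: the SSID, BSSID and frequency values are made up, message allocation and error handling are omitted, and none of it is taken from the patch itself.

#include <string.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <linux/if_ether.h>
#include <linux/nl80211.h>

/* Sketch: request a connection to a hypothetical ESS, recommending (but not
 * enforcing) an initial BSS via the new *_HINT attributes so the driver may
 * still roam within the ESS.
 */
static void fill_connect_request(struct nl_msg *msg, int nl80211_id,
                                 unsigned int ifindex)
{
        static const unsigned char bssid[ETH_ALEN] = {
                0x02, 0x00, 0x00, 0x00, 0x00, 0x01
        };
        const char *ssid = "example-ess";       /* hypothetical SSID */

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
                    NL80211_CMD_CONNECT, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
        nla_put(msg, NL80211_ATTR_SSID, strlen(ssid), ssid);

        /* Recommendation only; NL80211_ATTR_MAC/NL80211_ATTR_WIPHY_FREQ
         * would instead pin the connection to this one BSS.
         */
        nla_put(msg, NL80211_ATTR_MAC_HINT, ETH_ALEN, bssid);
        nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ_HINT, 2412);
}
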
index 0b80c806631fc12d26a11d9f6e5d1f3b65e9066a..ada7f0171cccd3174810d1c9c0d6ea566c558c0a 100644 (file)
@@ -235,6 +235,18 @@ struct sadb_x_kmaddress {
 } __attribute__((packed));
 /* sizeof(struct sadb_x_kmaddress) == 8 */
 
+/* To specify the SA dump filter */
+struct sadb_x_filter {
+       __u16   sadb_x_filter_len;
+       __u16   sadb_x_filter_exttype;
+       __u32   sadb_x_filter_saddr[4];
+       __u32   sadb_x_filter_daddr[4];
+       __u16   sadb_x_filter_family;
+       __u8    sadb_x_filter_splen;
+       __u8    sadb_x_filter_dplen;
+} __attribute__((packed));
+/* sizeof(struct sadb_x_filter) == 40 */
+
 /* Message types */
 #define SADB_RESERVED          0
 #define SADB_GETSPI            1
@@ -358,7 +370,8 @@ struct sadb_x_kmaddress {
 #define SADB_X_EXT_SEC_CTX             24
 /* Used with MIGRATE to pass @ to IKE for negotiation */
 #define SADB_X_EXT_KMADDRESS           25
-#define SADB_EXT_MAX                   25
+#define SADB_X_EXT_FILTER              26
+#define SADB_EXT_MAX                   26
 
 /* Identity Extension values */
 #define SADB_IDENTTYPE_RESERVED        0
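
The new SADB_X_EXT_FILTER extension lets a PF_KEY SADB_DUMP be narrowed to SAs matching given address prefixes. A minimal sketch of filling the structure follows; the addresses and prefix lengths are invented, and as usual for PF_KEY the extension length is expressed in 64-bit words.

#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/pfkeyv2.h>

/* Sketch: filter an SA dump down to IPv4 states with source 192.0.2.0/24
 * and any destination.  The filled extension would be appended to the
 * SADB_DUMP request sent on the PF_KEY socket.
 */
static void fill_sadb_filter(struct sadb_x_filter *f)
{
        memset(f, 0, sizeof(*f));
        f->sadb_x_filter_len = sizeof(*f) / 8;  /* 40 bytes = 5 * 64 bit */
        f->sadb_x_filter_exttype = SADB_X_EXT_FILTER;
        inet_pton(AF_INET, "192.0.2.0", &f->sadb_x_filter_saddr[0]);
        f->sadb_x_filter_family = AF_INET;
        f->sadb_x_filter_splen = 24;            /* source prefix length */
        f->sadb_x_filter_dplen = 0;             /* match any destination */
}
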
index b65c834f83e903c6dd00c062362a93bfd9a51aea..f0b7bfe5da920069a8acdca9dc018860495e0e59 100644 (file)
@@ -50,7 +50,8 @@ struct ptp_clock_caps {
        int n_ext_ts;  /* Number of external time stamp channels. */
        int n_per_out; /* Number of programmable periodic signals. */
        int pps;       /* Whether the clock supports a PPS callback. */
-       int rsv[15];   /* Reserved for future use. */
+       int n_pins;    /* Number of input/output pins. */
+       int rsv[14];   /* Reserved for future use. */
 };
 
 struct ptp_extts_request {
@@ -80,6 +81,40 @@ struct ptp_sys_offset {
        struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
 };
 
+enum ptp_pin_function {
+       PTP_PF_NONE,
+       PTP_PF_EXTTS,
+       PTP_PF_PEROUT,
+       PTP_PF_PHYSYNC,
+};
+
+struct ptp_pin_desc {
+       /*
+        * Hardware specific human readable pin name. This field is
+        * set by the kernel during the PTP_PIN_GETFUNC ioctl and is
+        * ignored for the PTP_PIN_SETFUNC ioctl.
+        */
+       char name[64];
+       /*
+        * Pin index in the range of zero to ptp_clock_caps.n_pins - 1.
+        */
+       unsigned int index;
+       /*
+        * Which of the PTP_PF_xxx functions to use on this pin.
+        */
+       unsigned int func;
+       /*
+        * The specific channel to use for this function.
+        * This corresponds to the 'index' field of the
+        * PTP_EXTTS_REQUEST and PTP_PEROUT_REQUEST ioctls.
+        */
+       unsigned int chan;
+       /*
+        * Reserved for future use.
+        */
+       unsigned int rsv[5];
+};
+
 #define PTP_CLK_MAGIC '='
 
 #define PTP_CLOCK_GETCAPS  _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
@@ -87,6 +122,8 @@ struct ptp_sys_offset {
 #define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
 #define PTP_ENABLE_PPS     _IOW(PTP_CLK_MAGIC, 4, int)
 #define PTP_SYS_OFFSET     _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
+#define PTP_PIN_GETFUNC    _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
+#define PTP_PIN_SETFUNC    _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
 
 struct ptp_extts_event {
        struct ptp_clock_time t; /* Time event occured. */
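
The new n_pins capability and the PTP_PIN_GETFUNC/PTP_PIN_SETFUNC ioctls can be exercised from userspace roughly as follows. This is a sketch only, assuming the clock is exposed as /dev/ptp0 (the path is illustrative) and ignoring most error handling.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

/* Sketch: enumerate the programmable pins of a PTP clock. */
int main(void)
{
        struct ptp_clock_caps caps;
        struct ptp_pin_desc desc;
        int fd, i;

        fd = open("/dev/ptp0", O_RDWR);         /* hypothetical device node */
        if (fd < 0 || ioctl(fd, PTP_CLOCK_GETCAPS, &caps) < 0)
                return 1;

        for (i = 0; i < caps.n_pins; i++) {
                memset(&desc, 0, sizeof(desc));
                desc.index = i;                 /* pin to query */
                if (ioctl(fd, PTP_PIN_GETFUNC, &desc) < 0)
                        continue;
                printf("pin %u (%s): func %u, chan %u\n",
                       desc.index, desc.name, desc.func, desc.chan);
        }
        close(fd);
        return 0;
}
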
index bbaba22f2d1bcfa25517acb1bf19fc8eb648c480..df40137f33dd48c0b23dad1f5cd13c357c91c031 100644 (file)
@@ -252,6 +252,7 @@ enum
        LINUX_MIB_TCPCHALLENGEACK,              /* TCPChallengeACK */
        LINUX_MIB_TCPSYNCHALLENGE,              /* TCPSYNChallenge */
        LINUX_MIB_TCPFASTOPENACTIVE,            /* TCPFastOpenActive */
+       LINUX_MIB_TCPFASTOPENACTIVEFAIL,        /* TCPFastOpenActiveFail */
        LINUX_MIB_TCPFASTOPENPASSIVE,           /* TCPFastOpenPassive*/
        LINUX_MIB_TCPFASTOPENPASSIVEFAIL,       /* TCPFastOpenPassiveFail */
        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,    /* TCPFastOpenListenOverflow */
@@ -259,6 +260,11 @@ enum
        LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
        LINUX_MIB_BUSYPOLLRXPACKETS,            /* BusyPollRxPackets */
        LINUX_MIB_TCPAUTOCORKING,               /* TCPAutoCorking */
+       LINUX_MIB_TCPFROMZEROWINDOWADV,         /* TCPFromZeroWindowAdv */
+       LINUX_MIB_TCPTOZEROWINDOWADV,           /* TCPToZeroWindowAdv */
+       LINUX_MIB_TCPWANTZEROWINDOWADV,         /* TCPWantZeroWindowAdv */
+       LINUX_MIB_TCPSYNRETRANS,                /* TCPSynRetrans */
+       LINUX_MIB_TCPORIGDATASENT,              /* TCPOrigDataSent */
        __LINUX_MIB_MAX
 };
 
index 377f1e59411d1572eb645b7b80be42347a7513f5..3b9718328d8bf7732a73a13a4811b98ff667f000 100644 (file)
@@ -186,6 +186,9 @@ struct tcp_info {
        __u32   tcpi_rcv_space;
 
        __u32   tcpi_total_retrans;
+
+       __u64   tcpi_pacing_rate;
+       __u64   tcpi_max_pacing_rate;
 };
 
 /* for TCP_MD5SIG socket option */
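
The two new tcp_info fields are visible to applications through the TCP_INFO socket option. A small sketch: the file descriptor is assumed to be a connected TCP socket, and the build needs UAPI headers that already carry the pacing-rate fields.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/tcp.h>

/* Sketch: print the pacing rate the kernel currently applies to a
 * connected TCP socket, plus the configured cap.
 */
static void print_pacing_info(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        memset(&info, 0, sizeof(info));
        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                printf("pacing rate: %llu bytes/s (cap: %llu bytes/s)\n",
                       (unsigned long long)info.tcpi_pacing_rate,
                       (unsigned long long)info.tcpi_max_pacing_rate);
}
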
index 54a37b13f2c4d77202d35d42e852689ddc9fcdd0..93533926035ca8d18e08f04eac3adacfcb266d8d 100644 (file)
 #define TCP_METRICS_GENL_VERSION       0x1
 
 enum tcp_metric_index {
-       TCP_METRIC_RTT,
-       TCP_METRIC_RTTVAR,
+       TCP_METRIC_RTT,         /* in ms units */
+       TCP_METRIC_RTTVAR,      /* in ms units */
        TCP_METRIC_SSTHRESH,
        TCP_METRIC_CWND,
        TCP_METRIC_REORDERING,
 
+       TCP_METRIC_RTT_US,      /* in usec units */
+       TCP_METRIC_RTTVAR_US,   /* in usec units */
+
        /* Always last.  */
        __TCP_METRIC_MAX,
 };
index f35aa0a338c7610d89aa7f9e92e47d7e3a0de0a2..b6a9cdd6e096a26831bcb5c1cc69e6d41c8afe05 100644 (file)
@@ -56,6 +56,7 @@
 #define USB_CDC_OBEX_TYPE              0x15
 #define USB_CDC_NCM_TYPE               0x1a
 #define USB_CDC_MBIM_TYPE              0x1b
+#define USB_CDC_MBIM_EXTENDED_TYPE     0x1c
 
 /* "Header Functional Descriptor" from CDC spec  5.2.3.1 */
 struct usb_cdc_header_desc {
@@ -205,6 +206,17 @@ struct usb_cdc_mbim_desc {
        __u8    bmNetworkCapabilities;
 } __attribute__ ((packed));
 
+/* "MBIM Extended Functional Descriptor" from CDC MBIM spec 1.0 errata-1 */
+struct usb_cdc_mbim_extended_desc {
+       __u8    bLength;
+       __u8    bDescriptorType;
+       __u8    bDescriptorSubType;
+
+       __le16  bcdMBIMExtendedVersion;
+       __u8    bMaxOutstandingCommandMessages;
+       __le16  wMTU;
+} __attribute__ ((packed));
+
 /*-------------------------------------------------------------------------*/
 
 /*
index a8cd6a4a297070052a0e3deddafd9a05bca62c91..25e5dd916ba491feba3f1d570e8712283a95645e 100644 (file)
@@ -298,6 +298,8 @@ enum xfrm_attr_type_t {
        XFRMA_TFCPAD,           /* __u32 */
        XFRMA_REPLAY_ESN_VAL,   /* struct xfrm_replay_esn */
        XFRMA_SA_EXTRA_FLAGS,   /* __u32 */
+       XFRMA_PROTO,            /* __u8 */
+       XFRMA_ADDRESS_FILTER,   /* struct xfrm_address_filter */
        __XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -474,6 +476,14 @@ struct xfrm_user_mapping {
        __be16                          new_sport;
 };
 
+struct xfrm_address_filter {
+       xfrm_address_t                  saddr;
+       xfrm_address_t                  daddr;
+       __u16                           family;
+       __u8                            splen;
+       __u8                            dplen;
+};
+
 #ifndef __KERNEL__
 /* backwards compatibility for userspace */
 #define XFRMGRP_ACQUIRE                1
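
struct xfrm_address_filter serves the same purpose on the netlink side, carried in the new XFRMA_ADDRESS_FILTER attribute of a state dump request, as the PF_KEY filter earlier in this series. A hedged sketch of filling it for an IPv6 source prefix; the prefix itself is made up.

#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/xfrm.h>

/* Sketch: limit an XFRM state dump to SAs whose source address falls
 * within 2001:db8::/32.  The structure would be attached to the dump
 * request as an XFRMA_ADDRESS_FILTER attribute.
 */
static void fill_xfrm_filter(struct xfrm_address_filter *filter)
{
        memset(filter, 0, sizeof(*filter));
        inet_pton(AF_INET6, "2001:db8::", filter->saddr.a6);
        filter->family = AF_INET6;
        filter->splen = 32;     /* source prefix length */
        filter->dplen = 0;      /* any destination */
}
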
index b7a10048a32c11fb473515d9075ef2b0782a563a..4f18e754c23ea51dfc732dba85aa5db186c79789 100644 (file)
@@ -55,60 +55,33 @@ struct seccomp_filter {
        atomic_t usage;
        struct seccomp_filter *prev;
        unsigned short len;  /* Instruction count */
-       struct sock_filter insns[];
+       struct sock_filter_int insnsi[];
 };
 
 /* Limit any path through the tree to 256KB worth of instructions. */
 #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter))
 
-/**
- * get_u32 - returns a u32 offset into data
- * @data: a unsigned 64 bit value
- * @index: 0 or 1 to return the first or second 32-bits
- *
- * This inline exists to hide the length of unsigned long.  If a 32-bit
- * unsigned long is passed in, it will be extended and the top 32-bits will be
- * 0. If it is a 64-bit unsigned long, then whatever data is resident will be
- * properly returned.
- *
+/*
  * Endianness is explicitly ignored and left for BPF program authors to manage
  * as per the specific architecture.
  */
-static inline u32 get_u32(u64 data, int index)
+static void populate_seccomp_data(struct seccomp_data *sd)
 {
-       return ((u32 *)&data)[index];
-}
+       struct task_struct *task = current;
+       struct pt_regs *regs = task_pt_regs(task);
 
-/* Helper for bpf_load below. */
-#define BPF_DATA(_name) offsetof(struct seccomp_data, _name)
-/**
- * bpf_load: checks and returns a pointer to the requested offset
- * @off: offset into struct seccomp_data to load from
- *
- * Returns the requested 32-bits of data.
- * seccomp_check_filter() should assure that @off is 32-bit aligned
- * and not out of bounds.  Failure to do so is a BUG.
- */
-u32 seccomp_bpf_load(int off)
-{
-       struct pt_regs *regs = task_pt_regs(current);
-       if (off == BPF_DATA(nr))
-               return syscall_get_nr(current, regs);
-       if (off == BPF_DATA(arch))
-               return syscall_get_arch(current, regs);
-       if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) {
-               unsigned long value;
-               int arg = (off - BPF_DATA(args[0])) / sizeof(u64);
-               int index = !!(off % sizeof(u64));
-               syscall_get_arguments(current, regs, arg, 1, &value);
-               return get_u32(value, index);
-       }
-       if (off == BPF_DATA(instruction_pointer))
-               return get_u32(KSTK_EIP(current), 0);
-       if (off == BPF_DATA(instruction_pointer) + sizeof(u32))
-               return get_u32(KSTK_EIP(current), 1);
-       /* seccomp_check_filter should make this impossible. */
-       BUG();
+       sd->nr = syscall_get_nr(task, regs);
+       sd->arch = syscall_get_arch(task, regs);
+
+       /* Unroll syscall_get_args to help gcc on arm. */
+       syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
+       syscall_get_arguments(task, regs, 1, 1, (unsigned long *) &sd->args[1]);
+       syscall_get_arguments(task, regs, 2, 1, (unsigned long *) &sd->args[2]);
+       syscall_get_arguments(task, regs, 3, 1, (unsigned long *) &sd->args[3]);
+       syscall_get_arguments(task, regs, 4, 1, (unsigned long *) &sd->args[4]);
+       syscall_get_arguments(task, regs, 5, 1, (unsigned long *) &sd->args[5]);
+
+       sd->instruction_pointer = KSTK_EIP(task);
 }
 
 /**
@@ -133,17 +106,17 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 
                switch (code) {
                case BPF_S_LD_W_ABS:
-                       ftest->code = BPF_S_ANC_SECCOMP_LD_W;
+                       ftest->code = BPF_LDX | BPF_W | BPF_ABS;
                        /* 32-bit aligned and not out of bounds. */
                        if (k >= sizeof(struct seccomp_data) || k & 3)
                                return -EINVAL;
                        continue;
                case BPF_S_LD_W_LEN:
-                       ftest->code = BPF_S_LD_IMM;
+                       ftest->code = BPF_LD | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                case BPF_S_LDX_W_LEN:
-                       ftest->code = BPF_S_LDX_IMM;
+                       ftest->code = BPF_LDX | BPF_IMM;
                        ftest->k = sizeof(struct seccomp_data);
                        continue;
                /* Explicitly include allowed calls. */
@@ -185,6 +158,7 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
                case BPF_S_JMP_JGT_X:
                case BPF_S_JMP_JSET_K:
                case BPF_S_JMP_JSET_X:
+                       sk_decode_filter(ftest, ftest);
                        continue;
                default:
                        return -EINVAL;
@@ -202,18 +176,21 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen)
 static u32 seccomp_run_filters(int syscall)
 {
        struct seccomp_filter *f;
+       struct seccomp_data sd;
        u32 ret = SECCOMP_RET_ALLOW;
 
        /* Ensure unexpected behavior doesn't result in failing open. */
        if (WARN_ON(current->seccomp.filter == NULL))
                return SECCOMP_RET_KILL;
 
+       populate_seccomp_data(&sd);
+
        /*
         * All filters in the list are evaluated and the lowest BPF return
         * value always takes priority (ignoring the DATA).
         */
        for (f = current->seccomp.filter; f; f = f->prev) {
-               u32 cur_ret = sk_run_filter(NULL, f->insns);
+               u32 cur_ret = sk_run_filter_int_seccomp(&sd, f->insnsi);
                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
        }
@@ -231,6 +208,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        struct seccomp_filter *filter;
        unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
        unsigned long total_insns = fprog->len;
+       struct sock_filter *fp;
+       int new_len;
        long ret;
 
        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
@@ -252,28 +231,43 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
                                     CAP_SYS_ADMIN) != 0)
                return -EACCES;
 
-       /* Allocate a new seccomp_filter */
-       filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
-                        GFP_KERNEL|__GFP_NOWARN);
-       if (!filter)
+       fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
+       if (!fp)
                return -ENOMEM;
-       atomic_set(&filter->usage, 1);
-       filter->len = fprog->len;
 
        /* Copy the instructions from fprog. */
        ret = -EFAULT;
-       if (copy_from_user(filter->insns, fprog->filter, fp_size))
-               goto fail;
+       if (copy_from_user(fp, fprog->filter, fp_size))
+               goto free_prog;
 
        /* Check and rewrite the fprog via the skb checker */
-       ret = sk_chk_filter(filter->insns, filter->len);
+       ret = sk_chk_filter(fp, fprog->len);
        if (ret)
-               goto fail;
+               goto free_prog;
 
        /* Check and rewrite the fprog for seccomp use */
-       ret = seccomp_check_filter(filter->insns, filter->len);
+       ret = seccomp_check_filter(fp, fprog->len);
+       if (ret)
+               goto free_prog;
+
+       /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
+       ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
+       if (ret)
+               goto free_prog;
+
+       /* Allocate a new seccomp_filter */
+       filter = kzalloc(sizeof(struct seccomp_filter) +
+                        sizeof(struct sock_filter_int) * new_len,
+                        GFP_KERNEL|__GFP_NOWARN);
+       if (!filter)
+               goto free_prog;
+
+       ret = sk_convert_filter(fp, fprog->len, filter->insnsi, &new_len);
        if (ret)
-               goto fail;
+               goto free_filter;
+
+       atomic_set(&filter->usage, 1);
+       filter->len = new_len;
 
        /*
         * If there is an existing filter, make it the prev and don't drop its
@@ -282,8 +276,11 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;
        return 0;
-fail:
+
+free_filter:
        kfree(filter);
+free_prog:
+       kfree(fp);
        return ret;
 }
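
For reference, the struct sock_fprog that seccomp_attach_filter() receives is the classic BPF program installed from userspace via prctl(); after this change the kernel converts it with sk_convert_filter() before it is ever run. A minimal, allow-everything filter as a sketch (a real filter would of course whitelist specific syscalls):

#include <stddef.h>
#include <sys/prctl.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

/* Sketch: install a trivial seccomp filter that allows every syscall.
 * The classic BPF instructions below are what the kernel now converts
 * into the internal sock_filter_int representation on attach.
 */
static int install_allow_all_filter(void)
{
        struct sock_filter insns[] = {
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(insns) / sizeof(insns[0]),
                .filter = insns,
        };

        /* Needed so unprivileged tasks may attach a filter. */
        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                return -1;
        return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}
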
 
index 5704ed9c3a23bb13d1d5b41f36d6575fd3ec3ed9..9d010a09ab9880a6eb15cee0524b46be53d76587 100644 (file)
@@ -38,9 +38,9 @@ struct vlan_info {
 static inline unsigned int vlan_proto_idx(__be16 proto)
 {
        switch (proto) {
-       case __constant_htons(ETH_P_8021Q):
+       case htons(ETH_P_8021Q):
                return VLAN_PROTO_8021Q;
-       case __constant_htons(ETH_P_8021AD):
+       case htons(ETH_P_8021AD):
                return VLAN_PROTO_8021AD;
        default:
                BUG();
index 6ee48aac776fbe84db68a503d9aca10ca45b0081..3c32bd257b73975a33ba104c1c3b3797d9f29843 100644 (file)
@@ -22,11 +22,11 @@ bool vlan_do_receive(struct sk_buff **skbp)
                return false;
 
        skb->dev = vlan_dev;
-       if (skb->pkt_type == PACKET_OTHERHOST) {
+       if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
-               if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
+               if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
        }
 
@@ -106,6 +106,12 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
+__be16 vlan_dev_vlan_proto(const struct net_device *dev)
+{
+       return vlan_dev_priv(dev)->vlan_proto;
+}
+EXPORT_SYMBOL(vlan_dev_vlan_proto);
+
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
        if (skb_cow(skb, skb_headroom(skb)) < 0)
index 27bfe2f8e2de71bee3f873106d88498765d36e93..6f142f03716d04effac77f0e2210879eff3daddb 100644 (file)
@@ -559,7 +559,7 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0, i;
+       int subclass = 0;
 
        netif_carrier_off(dev);
 
@@ -613,17 +613,10 @@ static int vlan_dev_init(struct net_device *dev)
 
        vlan_dev_set_lockdep_class(dev, subclass);
 
-       vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct vlan_pcpu_stats *vlan_stat;
-               vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
-               u64_stats_init(&vlan_stat->syncp);
-       }
-
-
        return 0;
 }
 
@@ -689,13 +682,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
 
                        p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
                        do {
-                               start = u64_stats_fetch_begin_bh(&p->syncp);
+                               start = u64_stats_fetch_begin_irq(&p->syncp);
                                rxpackets       = p->rx_packets;
                                rxbytes         = p->rx_bytes;
                                rxmulticast     = p->rx_multicast;
                                txpackets       = p->tx_packets;
                                txbytes         = p->tx_bytes;
-                       } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+                       } while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
                        stats->rx_packets       += rxpackets;
                        stats->rx_bytes         += rxbytes;
@@ -718,20 +711,19 @@ static void vlan_dev_poll_controller(struct net_device *dev)
        return;
 }
 
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
-                                 gfp_t gfp)
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
 {
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        struct netpoll *netpoll;
        int err = 0;
 
-       netpoll = kzalloc(sizeof(*netpoll), gfp);
+       netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
        err = -ENOMEM;
        if (!netpoll)
                goto out;
 
-       err = __netpoll_setup(netpoll, real_dev, gfp);
+       err = __netpoll_setup(netpoll, real_dev);
        if (err) {
                kfree(netpoll);
                goto out;
index c7e634af85165613822074b28ceeca4af7153ae7..8ac8a5cc214331253e591fe26f6c3b39a00d5023 100644 (file)
@@ -56,8 +56,8 @@ static int vlan_validate(struct nlattr *tb[], struct nlattr *data[])
 
        if (data[IFLA_VLAN_PROTOCOL]) {
                switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
-               case __constant_htons(ETH_P_8021Q):
-               case __constant_htons(ETH_P_8021AD):
+               case htons(ETH_P_8021Q):
+               case htons(ETH_P_8021AD):
                        break;
                default:
                        return -EPROTONOSUPPORT;
index d27b86dfb0e92bf467b95f7e488f736cff8db657..d1c55d8dd0a2538eaabe403ceeb26896c00adf66 100644 (file)
@@ -926,7 +926,7 @@ static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos)
        struct aarp_entry *entry;
 
  rescan:
-       while(ct < AARP_HASH_SIZE) {
+       while (ct < AARP_HASH_SIZE) {
                for (entry = table[ct]; entry; entry = entry->next) {
                        if (!pos || ++off == *pos) {
                                iter->table = table;
@@ -995,7 +995,7 @@ static const char *dt2str(unsigned long ticks)
 {
        static char buf[32];
 
-       sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100 ) / HZ);
+       sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100) / HZ);
 
        return buf;
 }
index 02806c6b2ff36c86bc15d5da4230fbadb8d0a538..786ee2f83d5fea1dbfd6bb2660544d7f88e1eff2 100644 (file)
@@ -293,7 +293,7 @@ static int atif_probe_device(struct atalk_iface *atif)
 
 /* Perform AARP probing for a proxy address */
 static int atif_proxy_probe_device(struct atalk_iface *atif,
-                                  struct atalk_addr* proxy_addr)
+                                  struct atalk_addr *proxy_addr)
 {
        int netrange = ntohs(atif->nets.nr_lastnet) -
                        ntohs(atif->nets.nr_firstnet) + 1;
@@ -581,7 +581,7 @@ out:
 }
 
 /* Delete a route. Find it and discard it */
-static int atrtr_delete(struct atalk_addr * addr)
+static int atrtr_delete(struct atalk_addr *addr)
 {
        struct atalk_route **r = &atalk_routes;
        int retval = 0;
@@ -936,11 +936,11 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
        int i, copy;
 
        /* checksum stuff in header space */
-       if ( (copy = start - offset) > 0) {
+       if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                sum = atalk_sum_partial(skb->data + offset, copy, sum);
-               if ( (len -= copy) == 0)
+               if ((len -= copy) == 0)
                        return sum;
 
                offset += copy;
@@ -1151,7 +1151,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out;
 
                at->src_net  = addr->sat_addr.s_net = ap->s_net;
-               at->src_node = addr->sat_addr.s_node= ap->s_node;
+               at->src_node = addr->sat_addr.s_node = ap->s_node;
        } else {
                err = -EADDRNOTAVAIL;
                if (!atalk_find_interface(addr->sat_addr.s_net,
@@ -1790,53 +1790,53 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        void __user *argp = (void __user *)arg;
 
        switch (cmd) {
-               /* Protocol layer */
-               case TIOCOUTQ: {
-                       long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+       /* Protocol layer */
+       case TIOCOUTQ: {
+               long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 
-                       if (amount < 0)
-                               amount = 0;
-                       rc = put_user(amount, (int __user *)argp);
-                       break;
-               }
-               case TIOCINQ: {
-                       /*
-                        * These two are safe on a single CPU system as only
-                        * user tasks fiddle here
-                        */
-                       struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
-                       long amount = 0;
+               if (amount < 0)
+                       amount = 0;
+               rc = put_user(amount, (int __user *)argp);
+               break;
+       }
+       case TIOCINQ: {
+               /*
+                * These two are safe on a single CPU system as only
+                * user tasks fiddle here
+                */
+               struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+               long amount = 0;
 
-                       if (skb)
-                               amount = skb->len - sizeof(struct ddpehdr);
-                       rc = put_user(amount, (int __user *)argp);
-                       break;
-               }
-               case SIOCGSTAMP:
-                       rc = sock_get_timestamp(sk, argp);
-                       break;
-               case SIOCGSTAMPNS:
-                       rc = sock_get_timestampns(sk, argp);
-                       break;
-               /* Routing */
-               case SIOCADDRT:
-               case SIOCDELRT:
-                       rc = -EPERM;
-                       if (capable(CAP_NET_ADMIN))
-                               rc = atrtr_ioctl(cmd, argp);
-                       break;
-               /* Interface */
-               case SIOCGIFADDR:
-               case SIOCSIFADDR:
-               case SIOCGIFBRDADDR:
-               case SIOCATALKDIFADDR:
-               case SIOCDIFADDR:
-               case SIOCSARP:          /* proxy AARP */
-               case SIOCDARP:          /* proxy AARP */
-                       rtnl_lock();
-                       rc = atif_ioctl(cmd, argp);
-                       rtnl_unlock();
-                       break;
+               if (skb)
+                       amount = skb->len - sizeof(struct ddpehdr);
+               rc = put_user(amount, (int __user *)argp);
+               break;
+       }
+       case SIOCGSTAMP:
+               rc = sock_get_timestamp(sk, argp);
+               break;
+       case SIOCGSTAMPNS:
+               rc = sock_get_timestampns(sk, argp);
+               break;
+       /* Routing */
+       case SIOCADDRT:
+       case SIOCDELRT:
+               rc = -EPERM;
+               if (capable(CAP_NET_ADMIN))
+                       rc = atrtr_ioctl(cmd, argp);
+               break;
+       /* Interface */
+       case SIOCGIFADDR:
+       case SIOCSIFADDR:
+       case SIOCGIFBRDADDR:
+       case SIOCATALKDIFADDR:
+       case SIOCDIFADDR:
+       case SIOCSARP:          /* proxy AARP */
+       case SIOCDARP:          /* proxy AARP */
+               rtnl_lock();
+               rc = atif_ioctl(cmd, argp);
+               rtnl_unlock();
+               break;
        }
 
        return rc;
index b71ff6b234f2bb1c6d97915eb5a4fb6ab4d1851d..91dc58f1124dd976e4cca35af2407eb2c0e25ecc 100644 (file)
@@ -1492,7 +1492,7 @@ static void __exit atm_mpoa_cleanup(void)
 
        mpc_proc_clean();
 
-       del_timer(&mpc_timer);
+       del_timer_sync(&mpc_timer);
        unregister_netdevice_notifier(&mpoa_notifier);
        deregister_atm_ioctl(&atm_ioctl_ops);
 
index fa780b76630e4def219fc5f464554a4361721229..11660a3aab5ae0b7e0f3f2a111c6e6ab2081de59 100644 (file)
@@ -50,6 +50,15 @@ config BATMAN_ADV_NC
          If you think that your network does not need this feature you
          can safely disable it and save some space.
 
+config BATMAN_ADV_MCAST
+       bool "Multicast optimisation"
+       depends on BATMAN_ADV
+       default n
+       help
+         This option enables the multicast optimisation which aims to
+         reduce the air overhead while improving the reliability of
+         multicast messages.
+
 config BATMAN_ADV_DEBUG
        bool "B.A.T.M.A.N. debugging"
        depends on BATMAN_ADV
index 42df18f877e9304a515a30f2122ae0c324984356..eb7d8c0388e4a32a52ba076460463a722d0db612 100644 (file)
@@ -36,3 +36,4 @@ batman-adv-y += send.o
 batman-adv-y += soft-interface.o
 batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
+batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
index 8323bced8e5bc9e8e1f6d552f7e94222c3f218ab..b3bd4ec3fd9452f0d1f9a99dd4782260ab65c818 100644 (file)
@@ -347,10 +347,10 @@ static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
        unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
-       memcpy(batadv_ogm_packet->orig,
-              hard_iface->net_dev->dev_addr, ETH_ALEN);
-       memcpy(batadv_ogm_packet->prev_sender,
-              hard_iface->net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(batadv_ogm_packet->orig,
+                       hard_iface->net_dev->dev_addr);
+       ether_addr_copy(batadv_ogm_packet->prev_sender,
+                       hard_iface->net_dev->dev_addr);
 }
 
 static void
@@ -830,7 +830,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
        tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
        batadv_ogm_packet->ttl--;
-       memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
+       ether_addr_copy(batadv_ogm_packet->prev_sender, ethhdr->h_source);
 
        /* apply hop penalty */
        batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
index 05f0712be5e7fe3b1fe357d454e6eb9e6c779fc5..6f0d9ec3795059fdc5319574b65b24c08aaf2790 100644 (file)
@@ -191,7 +191,7 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv,
        if (!hash)
                return NULL;
 
-       memcpy(search_entry.orig, addr, ETH_ALEN);
+       ether_addr_copy(search_entry.orig, addr);
        search_entry.vid = vid;
 
        index = batadv_choose_backbone_gw(&search_entry, hash->size);
@@ -305,7 +305,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                /* normal claim frame
                 * set Ethernet SRC to the clients mac
                 */
-               memcpy(ethhdr->h_source, mac, ETH_ALEN);
+               ether_addr_copy(ethhdr->h_source, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
                           BATADV_PRINT_VID(vid));
@@ -314,7 +314,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                /* unclaim frame
                 * set HW SRC to the clients mac
                 */
-               memcpy(hw_src, mac, ETH_ALEN);
+               ether_addr_copy(hw_src, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
                           BATADV_PRINT_VID(vid));
@@ -323,7 +323,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                /* announcement frame
                 * set HW SRC to the special mac containg the crc
                 */
-               memcpy(hw_src, mac, ETH_ALEN);
+               ether_addr_copy(hw_src, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
                           ethhdr->h_source, BATADV_PRINT_VID(vid));
@@ -333,8 +333,8 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
                 * set HW SRC and header destination to the receiving backbone
                 * gws mac
                 */
-               memcpy(hw_src, mac, ETH_ALEN);
-               memcpy(ethhdr->h_dest, mac, ETH_ALEN);
+               ether_addr_copy(hw_src, mac);
+               ether_addr_copy(ethhdr->h_dest, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
                           ethhdr->h_source, ethhdr->h_dest,
@@ -395,7 +395,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
        entry->bat_priv = bat_priv;
        atomic_set(&entry->request_sent, 0);
        atomic_set(&entry->wait_periods, 0);
-       memcpy(entry->orig, orig, ETH_ALEN);
+       ether_addr_copy(entry->orig, orig);
 
        /* one for the hash, one for returning */
        atomic_set(&entry->refcount, 2);
@@ -563,7 +563,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
        struct batadv_bla_claim search_claim;
        int hash_added;
 
-       memcpy(search_claim.addr, mac, ETH_ALEN);
+       ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
        claim = batadv_claim_hash_find(bat_priv, &search_claim);
 
@@ -573,7 +573,7 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                if (!claim)
                        return;
 
-               memcpy(claim->addr, mac, ETH_ALEN);
+               ether_addr_copy(claim->addr, mac);
                claim->vid = vid;
                claim->lasttime = jiffies;
                claim->backbone_gw = backbone_gw;
@@ -624,7 +624,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
 {
        struct batadv_bla_claim search_claim, *claim;
 
-       memcpy(search_claim.addr, mac, ETH_ALEN);
+       ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
        claim = batadv_claim_hash_find(bat_priv, &search_claim);
        if (!claim)
@@ -882,7 +882,7 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        proto = ethhdr->h_proto;
        headlen = ETH_HLEN;
        if (vid & BATADV_VLAN_HAS_TAG) {
-               vhdr = (struct vlan_ethhdr *)ethhdr;
+               vhdr = vlan_eth_hdr(skb);
                proto = vhdr->h_vlan_encapsulated_proto;
                headlen += VLAN_HLEN;
        }
@@ -1103,8 +1103,8 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                                                oldif->net_dev->dev_addr))
                                continue;
 
-                       memcpy(backbone_gw->orig,
-                              primary_if->net_dev->dev_addr, ETH_ALEN);
+                       ether_addr_copy(backbone_gw->orig,
+                                       primary_if->net_dev->dev_addr);
                        /* send an announce frame so others will ask for our
                         * claims and update their tables.
                         */
@@ -1310,7 +1310,7 @@ int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
        entry = &bat_priv->bla.bcast_duplist[curr];
        entry->crc = crc;
        entry->entrytime = jiffies;
-       memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
+       ether_addr_copy(entry->orig, bcast_packet->orig);
        bat_priv->bla.bcast_duplist_curr = curr;
 
 out:
@@ -1458,7 +1458,7 @@ int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
                        goto handled;
 
-       memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+       ether_addr_copy(search_claim.addr, ethhdr->h_source);
        search_claim.vid = vid;
        claim = batadv_claim_hash_find(bat_priv, &search_claim);
 
@@ -1547,9 +1547,6 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;
 
-       /* in VLAN case, the mac header might not be set. */
-       skb_reset_mac_header(skb);
-
        if (batadv_bla_process_claim(bat_priv, primary_if, skb))
                goto handled;
 
@@ -1560,7 +1557,7 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;
 
-       memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
+       ether_addr_copy(search_claim.addr, ethhdr->h_source);
        search_claim.vid = vid;
 
        claim = batadv_claim_hash_find(bat_priv, &search_claim);
index edee50411892f9ddaafd44a63163e24e79a3ae75..b25fd64d727b0d6e8227671f860b133095df5100 100644 (file)
@@ -277,7 +277,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
        /* if this entry is already known, just update it */
        if (dat_entry) {
                if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
-                       memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
+                       ether_addr_copy(dat_entry->mac_addr, mac_addr);
                dat_entry->last_update = jiffies;
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
                           "Entry updated: %pI4 %pM (vid: %d)\n",
@@ -292,7 +292,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
 
        dat_entry->ip = ip;
        dat_entry->vid = vid;
-       memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
+       ether_addr_copy(dat_entry->mac_addr, mac_addr);
        dat_entry->last_update = jiffies;
        atomic_set(&dat_entry->refcount, 2);
 
@@ -1027,6 +1027,11 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        if (!skb_new)
                goto out;
 
+       /* the rest of the TX path assumes that the mac_header offset pointing
+        * to the inner Ethernet header has been set, therefore reset it now.
+        */
+       skb_reset_mac_header(skb_new);
+
        if (vid & BATADV_VLAN_HAS_TAG)
                skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                          vid & VLAN_VID_MASK);
index ac9be9b67a252a0a8b23088e08bdbbd49b9981b0..d76e1d06c5b53019230aee93ba39730ab60987e1 100644 (file)
@@ -25,6 +25,9 @@
 
 #include <linux/if_arp.h>
 
+/**
+ * BATADV_DAT_ADDR_MAX - maximum address value in the DHT space
+ */
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
 
 void batadv_dat_status_update(struct net_device *net_dev);
index 88df9b1d552de529b20cb699c303ec76034dee12..bcc4bea632fa69ead6567e016f2f265840f7968b 100644 (file)
@@ -449,8 +449,8 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
        frag_header.reserved = 0;
        frag_header.no = 0;
        frag_header.total_size = htons(skb->len);
-       memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);
+       ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
+       ether_addr_copy(frag_header.dest, orig_node->orig);
 
        /* Eat and send fragments from the tail of skb */
        while (skb->len > max_fragment_size) {
index 55cf2260d295b6f4002d438ed34b52382ed7eeea..c835e137423bb9ec70b98b5130d5a33c12f9b139 100644 (file)
@@ -389,8 +389,6 @@ out:
                batadv_neigh_ifinfo_free_ref(router_gw_tq);
        if (router_orig_tq)
                batadv_neigh_ifinfo_free_ref(router_orig_tq);
-
-       return;
 }
 
 /**
@@ -680,7 +678,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
        if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
                return BATADV_DHCP_NO;
 
-       ethhdr = (struct ethhdr *)skb->data;
+       ethhdr = eth_hdr(skb);
        proto = ethhdr->h_proto;
        *header_len += ETH_HLEN;
 
@@ -689,7 +687,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
                if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
                        return BATADV_DHCP_NO;
 
-               vhdr = (struct vlan_ethhdr *)skb->data;
+               vhdr = vlan_eth_hdr(skb);
                proto = vhdr->h_vlan_encapsulated_proto;
                *header_len += VLAN_HLEN;
        }
@@ -728,7 +726,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
                return BATADV_DHCP_NO;
 
        /* skb->data might have been reallocated by pskb_may_pull() */
-       ethhdr = (struct ethhdr *)skb->data;
+       ethhdr = eth_hdr(skb);
        if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
                ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
 
@@ -765,7 +763,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
                if (*p != ETH_ALEN)
                        return BATADV_DHCP_NO;
 
-               memcpy(chaddr, skb->data + chaddr_offset, ETH_ALEN);
+               ether_addr_copy(chaddr, skb->data + chaddr_offset);
        }
 
        return ret;
index abb9d6e0388be65a20e0c19d377f7bdda47ea1da..161ef8f17d2ef273615bfaad9e9c728fbe754acc 100644 (file)
@@ -158,6 +158,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_neigh_node *neigh_node = NULL;
        size_t packet_len = sizeof(struct batadv_icmp_packet);
+       uint8_t *addr;
 
        if (len < sizeof(struct batadv_icmp_header)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -227,10 +228,10 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
                        goto dst_unreach;
 
                icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmp_header;
-               if (packet_len == sizeof(*icmp_packet_rr))
-                       memcpy(icmp_packet_rr->rr,
-                              neigh_node->if_incoming->net_dev->dev_addr,
-                              ETH_ALEN);
+               if (packet_len == sizeof(*icmp_packet_rr)) {
+                       addr = neigh_node->if_incoming->net_dev->dev_addr;
+                       ether_addr_copy(icmp_packet_rr->rr[0], addr);
+               }
 
                break;
        default:
@@ -250,7 +251,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
                goto free_skb;
        }
 
-       memcpy(icmp_header->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(icmp_header->orig, primary_if->net_dev->dev_addr);
 
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
        goto out;
index 66ae135b9f273abdea581f2fc8eaa5ecb74254f0..d1183e882167c3cd75aa842c94c612818e250d30 100644 (file)
@@ -34,6 +34,7 @@
 #include "gateway_client.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
+#include "multicast.h"
 #include "gateway_common.h"
 #include "hash.h"
 #include "bat_algo.h"
@@ -110,6 +111,9 @@ int batadv_mesh_init(struct net_device *soft_iface)
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
        spin_lock_init(&bat_priv->tt.commit_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       spin_lock_init(&bat_priv->mcast.want_lists_lock);
+#endif
        spin_lock_init(&bat_priv->tvlv.container_list_lock);
        spin_lock_init(&bat_priv->tvlv.handler_list_lock);
        spin_lock_init(&bat_priv->softif_vlan_list_lock);
@@ -117,9 +121,17 @@ int batadv_mesh_init(struct net_device *soft_iface)
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw.list);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
+       INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
+       INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
+#endif
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_LIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
+#endif
        INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
        INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
        INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
@@ -145,6 +157,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
                goto err;
 
        batadv_gw_init(bat_priv);
+       batadv_mcast_init(bat_priv);
 
        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
@@ -169,6 +182,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);
 
+       batadv_mcast_free(bat_priv);
+
        /* Free the TT and the originator tables only after having terminated
         * all the other depending components which may use these structures for
         * their purposes.
@@ -1133,8 +1148,8 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
        unicast_tvlv_packet->reserved = 0;
        unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
        unicast_tvlv_packet->align = 0;
-       memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
-       memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);
+       ether_addr_copy(unicast_tvlv_packet->src, src);
+       ether_addr_copy(unicast_tvlv_packet->dst, dst);
 
        tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
        tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
index 9374f1a51348514d7b5af1eb6767128bac4d744d..770dc890ceefdb712f254b378c825cbeab255742 100644 (file)
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2014.1.0"
+#define BATADV_SOURCE_VERSION "2014.2.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -176,6 +176,8 @@ enum batadv_uev_type {
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <net/sock.h>          /* struct sock */
+#include <net/addrconf.h>      /* ipv6 address stuff */
+#include <linux/ip.h>
 #include <net/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/seq_file.h>
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
new file mode 100644 (file)
index 0000000..8c7ca81
--- /dev/null
@@ -0,0 +1,748 @@
+/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "main.h"
+#include "multicast.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "translation-table.h"
+#include "multicast.h"
+
+/**
+ * batadv_mcast_mla_softif_get - get softif multicast listeners
+ * @dev: the device to collect multicast addresses from
+ * @mcast_list: a list to put found addresses into
+ *
+ * Collect multicast addresses of the local multicast listeners
+ * on the given soft interface, dev, in the given mcast_list.
+ *
+ * Returns -ENOMEM on memory allocation error or the number of
+ * items added to the mcast_list otherwise.
+ */
+static int batadv_mcast_mla_softif_get(struct net_device *dev,
+                                      struct hlist_head *mcast_list)
+{
+       struct netdev_hw_addr *mc_list_entry;
+       struct batadv_hw_addr *new;
+       int ret = 0;
+
+       netif_addr_lock_bh(dev);
+       netdev_for_each_mc_addr(mc_list_entry, dev) {
+               new = kmalloc(sizeof(*new), GFP_ATOMIC);
+               if (!new) {
+                       ret = -ENOMEM;
+                       break;
+               }
+
+               ether_addr_copy(new->addr, mc_list_entry->addr);
+               hlist_add_head(&new->list, mcast_list);
+               ret++;
+       }
+       netif_addr_unlock_bh(dev);
+
+       return ret;
+}
+
+/**
+ * batadv_mcast_mla_is_duplicate - check whether an address is in a list
+ * @mcast_addr: the multicast address to check
+ * @mcast_list: the list with multicast addresses to search in
+ *
+ * Returns true if the given address is already in the given list.
+ * Otherwise returns false.
+ */
+static bool batadv_mcast_mla_is_duplicate(uint8_t *mcast_addr,
+                                         struct hlist_head *mcast_list)
+{
+       struct batadv_hw_addr *mcast_entry;
+
+       hlist_for_each_entry(mcast_entry, mcast_list, list)
+               if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
+                       return true;
+
+       return false;
+}
+
+/**
+ * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * @mcast_list: the list to free
+ *
+ * Removes and frees all items in the given mcast_list.
+ */
+static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
+{
+       struct batadv_hw_addr *mcast_entry;
+       struct hlist_node *tmp;
+
+       hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
+               hlist_del(&mcast_entry->list);
+               kfree(mcast_entry);
+       }
+}
+
+/**
+ * batadv_mcast_mla_tt_retract - clean up multicast listener announcements
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mcast_list: a list of addresses which should _not_ be removed
+ *
+ * Retracts the announcement of any multicast listener from the
+ * translation table except the ones listed in the given mcast_list.
+ *
+ * If mcast_list is NULL then all are retracted.
+ */
+static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
+                                       struct hlist_head *mcast_list)
+{
+       struct batadv_hw_addr *mcast_entry;
+       struct hlist_node *tmp;
+
+       hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
+                                 list) {
+               if (mcast_list &&
+                   batadv_mcast_mla_is_duplicate(mcast_entry->addr,
+                                                 mcast_list))
+                       continue;
+
+               batadv_tt_local_remove(bat_priv, mcast_entry->addr,
+                                      BATADV_NO_FLAGS,
+                                      "mcast TT outdated", false);
+
+               hlist_del(&mcast_entry->list);
+               kfree(mcast_entry);
+       }
+}
+
+/**
+ * batadv_mcast_mla_tt_add - add multicast listener announcements
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mcast_list: a list of addresses which are going to get added
+ *
+ * Adds multicast listener announcements from the given mcast_list to the
+ * translation table if they have not been added yet.
+ */
+static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
+                                   struct hlist_head *mcast_list)
+{
+       struct batadv_hw_addr *mcast_entry;
+       struct hlist_node *tmp;
+
+       if (!mcast_list)
+               return;
+
+       hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
+               if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
+                                                 &bat_priv->mcast.mla_list))
+                       continue;
+
+               if (!batadv_tt_local_add(bat_priv->soft_iface,
+                                        mcast_entry->addr, BATADV_NO_FLAGS,
+                                        BATADV_NULL_IFINDEX, BATADV_NO_MARK))
+                       continue;
+
+               hlist_del(&mcast_entry->list);
+               hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
+       }
+}
+
+/**
+ * batadv_mcast_has_bridge - check whether the soft-iface is bridged
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Checks whether there is a bridge on top of our soft interface. Returns
+ * true if so, false otherwise.
+ */
+static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
+{
+       struct net_device *upper = bat_priv->soft_iface;
+
+       rcu_read_lock();
+       do {
+               upper = netdev_master_upper_dev_get_rcu(upper);
+       } while (upper && !(upper->priv_flags & IFF_EBRIDGE));
+       rcu_read_unlock();
+
+       return upper;
+}
+
+/**
+ * batadv_mcast_mla_tvlv_update - update multicast tvlv
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Updates the own multicast tvlv with our current multicast related settings,
+ * capabilities and inabilities.
+ *
+ * Returns true if the tvlv container is registered afterwards. Otherwise
+ * returns false.
+ */
+static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
+{
+       struct batadv_tvlv_mcast_data mcast_data;
+
+       mcast_data.flags = BATADV_NO_FLAGS;
+       memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
+
+       /* Avoid attaching MLAs if there is a bridge on top of our soft
+        * interface; we don't support that yet (TODO)
+        */
+       if (batadv_mcast_has_bridge(bat_priv)) {
+               if (bat_priv->mcast.enabled) {
+                       batadv_tvlv_container_unregister(bat_priv,
+                                                        BATADV_TVLV_MCAST, 1);
+                       bat_priv->mcast.enabled = false;
+               }
+
+               return false;
+       }
+
+       if (!bat_priv->mcast.enabled ||
+           mcast_data.flags != bat_priv->mcast.flags) {
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 1,
+                                              &mcast_data, sizeof(mcast_data));
+               bat_priv->mcast.flags = mcast_data.flags;
+               bat_priv->mcast.enabled = true;
+       }
+
+       return true;
+}
+
+/**
+ * batadv_mcast_mla_update - update the own MLAs
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Updates the own multicast listener announcements in the translation
+ * table as well as the own, announced multicast tvlv container.
+ */
+void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
+{
+       struct net_device *soft_iface = bat_priv->soft_iface;
+       struct hlist_head mcast_list = HLIST_HEAD_INIT;
+       int ret;
+
+       if (!batadv_mcast_mla_tvlv_update(bat_priv))
+               goto update;
+
+       ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list);
+       if (ret < 0)
+               goto out;
+
+update:
+       batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
+       batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
+
+out:
+       batadv_mcast_mla_list_free(&mcast_list);
+}
+
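
batadv_mcast_mla_update() above is essentially a set reconciliation between the MLAs already announced in the translation table and the listeners currently present on the soft interface: entries that disappeared are retracted, new ones are added. The following stand-alone sketch illustrates only that reconciliation idea, with plain arrays in place of hlists, locking and TT calls; all names and data structures here are simplified assumptions, not batman-adv API.

/* mla_sketch.c - toy model of the retract/add reconciliation above */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define MAX_MLA  16

struct mla_set {
	unsigned char addr[MAX_MLA][ETH_ALEN];
	int count;
};

static bool mla_contains(const struct mla_set *s, const unsigned char *a)
{
	for (int i = 0; i < s->count; i++)
		if (!memcmp(s->addr[i], a, ETH_ALEN))
			return true;
	return false;
}

/* "retract" everything not in @current, then "add" what is missing */
static void mla_reconcile(struct mla_set *announced,
			  const struct mla_set *current)
{
	struct mla_set next = { .count = 0 };

	for (int i = 0; i < announced->count; i++) {
		if (!mla_contains(current, announced->addr[i]))
			continue;	/* kernel: batadv_tt_local_remove() */
		memcpy(next.addr[next.count++], announced->addr[i], ETH_ALEN);
	}

	for (int i = 0; i < current->count; i++) {
		if (mla_contains(&next, current->addr[i]))
			continue;	/* already announced, nothing to do */
		memcpy(next.addr[next.count++], current->addr[i], ETH_ALEN);
		/* kernel: batadv_tt_local_add() */
	}

	*announced = next;
}

int main(void)
{
	struct mla_set announced = { .count = 0 };
	struct mla_set current = {
		.count = 1,
		.addr = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
	};

	mla_reconcile(&announced, &current);
	printf("announced MLAs: %d\n", announced.count);	/* prints 1 */
	return 0;
}

In the kernel path the "retract" and "add" steps are the batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() calls above, driven by the list collected in batadv_mcast_mla_softif_get().
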
+/**
+ * batadv_mcast_forw_mode_check_ipv4 - check for optimized forwarding potential
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the IPv4 packet to check
+ * @is_unsnoopable: stores whether the destination is unsnoopable
+ *
+ * Checks whether the given IPv4 packet has the potential to be forwarded with a
+ * mode more optimal than classic flooding.
+ *
+ * Returns 0 if so, -EINVAL otherwise, or -ENOMEM in case of a memory
+ * allocation failure.
+ */
+static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
+                                            struct sk_buff *skb,
+                                            bool *is_unsnoopable)
+{
+       struct iphdr *iphdr;
+
+       /* We might fail due to out-of-memory -> drop it */
+       if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
+               return -ENOMEM;
+
+       iphdr = ip_hdr(skb);
+
+       /* TODO: Implement Multicast Router Discovery (RFC4286),
+        * then allow scope > link local, too
+        */
+       if (!ipv4_is_local_multicast(iphdr->daddr))
+               return -EINVAL;
+
+       /* link-local multicast listeners behind a bridge are
+        * not snoopable (see RFC4541, section 2.1.2.2)
+        */
+       *is_unsnoopable = true;
+
+       return 0;
+}
+
+/**
+ * batadv_mcast_forw_mode_check_ipv6 - check for optimized forwarding potential
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the IPv6 packet to check
+ * @is_unsnoopable: stores whether the destination is unsnoopable
+ *
+ * Checks whether the given IPv6 packet has the potential to be forwarded with a
+ * mode more optimal than classic flooding.
+ *
+ * Returns 0 if so, -EINVAL otherwise, or -ENOMEM if we run out
+ * of memory.
+ */
+static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
+                                            struct sk_buff *skb,
+                                            bool *is_unsnoopable)
+{
+       struct ipv6hdr *ip6hdr;
+
+       /* We might fail due to out-of-memory -> drop it */
+       if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
+               return -ENOMEM;
+
+       ip6hdr = ipv6_hdr(skb);
+
+       /* TODO: Implement Multicast Router Discovery (RFC4286),
+        * then allow scope > link local, too
+        */
+       if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) != IPV6_ADDR_SCOPE_LINKLOCAL)
+               return -EINVAL;
+
+       /* link-local-all-nodes multicast listeners behind a bridge are
+        * not snoopable (see RFC4541, section 3, paragraph 3)
+        */
+       if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
+               *is_unsnoopable = true;
+
+       return 0;
+}
+
+/**
+ * batadv_mcast_forw_mode_check - check for optimized forwarding potential
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast frame to check
+ * @is_unsnoopable: stores whether the destination is unsnoopable
+ *
+ * Checks whether the given multicast ethernet frame has the potential to be
+ * forwarded with a mode more optimal than classic flooding.
+ *
+ * Returns 0 if so, -EINVAL otherwise, or -ENOMEM if we run out
+ * of memory.
+ */
+static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
+                                       struct sk_buff *skb,
+                                       bool *is_unsnoopable)
+{
+       struct ethhdr *ethhdr = eth_hdr(skb);
+
+       if (!atomic_read(&bat_priv->multicast_mode))
+               return -EINVAL;
+
+       if (atomic_read(&bat_priv->mcast.num_disabled))
+               return -EINVAL;
+
+       switch (ntohs(ethhdr->h_proto)) {
+       case ETH_P_IP:
+               return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
+                                                        is_unsnoopable);
+       case ETH_P_IPV6:
+               return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
+                                                        is_unsnoopable);
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * batadv_mcast_forw_want_all_ip_count - count nodes with unspecific mcast interest
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: ethernet header of a packet
+ *
+ * Returns the number of nodes which want all IPv4 multicast traffic if the
+ * given ethhdr is from an IPv4 packet, or the number of nodes which want all
+ * IPv6 multicast traffic if it is an IPv6 packet.
+ */
+static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
+                                              struct ethhdr *ethhdr)
+{
+       switch (ntohs(ethhdr->h_proto)) {
+       case ETH_P_IP:
+               return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
+       case ETH_P_IPV6:
+               return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
+       default:
+               /* we shouldn't be here... */
+               return 0;
+       }
+}
+
+/**
+ * batadv_mcast_forw_tt_node_get - get a multicast tt node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: the ether header containing the multicast destination
+ *
+ * Returns an orig_node matching the multicast address provided by ethhdr
+ * via a translation table lookup. This increases the returned node's refcount.
+ */
+static struct batadv_orig_node *
+batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
+                             struct ethhdr *ethhdr)
+{
+       return batadv_transtable_search(bat_priv, ethhdr->h_source,
+                                       ethhdr->h_dest, BATADV_NO_FLAGS);
+}
+
+/**
+ * batadv_mcast_forw_ipv4_node_get - get a node with an ipv4 flag
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
+ * increases its refcount.
+ */
+static struct batadv_orig_node *
+batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
+{
+       struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_orig_node,
+                                &bat_priv->mcast.want_all_ipv4_list,
+                                mcast_want_all_ipv4_node) {
+               if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
+                       continue;
+
+               orig_node = tmp_orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node;
+}
+
+/**
+ * batadv_mcast_forw_ipv6_node_get - get a node with an ipv6 flag
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
+ * and increases its refcount.
+ */
+static struct batadv_orig_node *
+batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
+{
+       struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_orig_node,
+                                &bat_priv->mcast.want_all_ipv6_list,
+                                mcast_want_all_ipv6_node) {
+               if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
+                       continue;
+
+               orig_node = tmp_orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node;
+}
+
+/**
+ * batadv_mcast_forw_ip_node_get - get a node with an ipv4/ipv6 flag
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ethhdr: an ethernet header to determine the protocol family from
+ *
+ * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
+ * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
+ * increases its refcount.
+ */
+static struct batadv_orig_node *
+batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
+                             struct ethhdr *ethhdr)
+{
+       switch (ntohs(ethhdr->h_proto)) {
+       case ETH_P_IP:
+               return batadv_mcast_forw_ipv4_node_get(bat_priv);
+       case ETH_P_IPV6:
+               return batadv_mcast_forw_ipv6_node_get(bat_priv);
+       default:
+               /* we shouldn't be here... */
+               return NULL;
+       }
+}
+
+/**
+ * batadv_mcast_forw_unsnoop_node_get - get a node with an unsnoopable flag
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
+ * set and increases its refcount.
+ */
+static struct batadv_orig_node *
+batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
+{
+       struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tmp_orig_node,
+                                &bat_priv->mcast.want_all_unsnoopables_list,
+                                mcast_want_all_unsnoopables_node) {
+               if (!atomic_inc_not_zero(&tmp_orig_node->refcount))
+                       continue;
+
+               orig_node = tmp_orig_node;
+               break;
+       }
+       rcu_read_unlock();
+
+       return orig_node;
+}
+
+/**
+ * batadv_mcast_forw_mode - check on how to forward a multicast packet
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to check
+ * @orig: an originator to be set to forward the skb to
+ *
+ * Returns the forwarding mode as enum batadv_forw_mode and, in case of
+ * BATADV_FORW_SINGLE, sets orig to the single originator the skb
+ * should be forwarded to.
+ */
+enum batadv_forw_mode
+batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                      struct batadv_orig_node **orig)
+{
+       int ret, tt_count, ip_count, unsnoop_count, total_count;
+       bool is_unsnoopable = false;
+       struct ethhdr *ethhdr;
+
+       ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable);
+       if (ret == -ENOMEM)
+               return BATADV_FORW_NONE;
+       else if (ret < 0)
+               return BATADV_FORW_ALL;
+
+       ethhdr = eth_hdr(skb);
+
+       tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
+                                              BATADV_NO_FLAGS);
+       ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
+       unsnoop_count = !is_unsnoopable ? 0 :
+                       atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
+
+       total_count = tt_count + ip_count + unsnoop_count;
+
+       switch (total_count) {
+       case 1:
+               if (tt_count)
+                       *orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
+               else if (ip_count)
+                       *orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
+               else if (unsnoop_count)
+                       *orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
+
+               if (*orig)
+                       return BATADV_FORW_SINGLE;
+
+               /* fall through */
+       case 0:
+               return BATADV_FORW_NONE;
+       default:
+               return BATADV_FORW_ALL;
+       }
+}
+
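
The decision in batadv_mcast_forw_mode() reduces to counting potential receivers from three sources: TT entries for the multicast MAC, nodes flagged as wanting all IPv4/IPv6 multicast, and nodes wanting all unsnoopable traffic. A minimal stand-alone sketch of that count-based decision, with the counts passed in directly (no TT or originator lookups; names are illustrative only):

#include <stdio.h>

enum forw_mode { FORW_ALL, FORW_SINGLE, FORW_NONE };

/* decide how to forward based on how many receivers were counted */
static enum forw_mode forw_mode(int tt_count, int ip_count, int unsnoop_count)
{
	int total = tt_count + ip_count + unsnoop_count;

	switch (total) {
	case 0:
		return FORW_NONE;	/* nobody interested: drop */
	case 1:
		return FORW_SINGLE;	/* one receiver: unicast to it */
	default:
		return FORW_ALL;	/* several receivers: classic flood */
	}
}

int main(void)
{
	printf("%d\n", forw_mode(0, 0, 0));	/* FORW_NONE */
	printf("%d\n", forw_mode(1, 0, 0));	/* FORW_SINGLE */
	printf("%d\n", forw_mode(1, 2, 0));	/* FORW_ALL */
	return 0;
}

The kernel version additionally drops the frame (BATADV_FORW_NONE) when the header checks fail with -ENOMEM or when the single receiver's originator cannot be looked up, and falls back to BATADV_FORW_ALL for any other failed suitability check.
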
+/**
+ * batadv_mcast_want_unsnoop_update - update unsnoop counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
+ * orig, has toggled then this method updates counter and list accordingly.
+ */
+static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
+                                            struct batadv_orig_node *orig,
+                                            uint8_t mcast_flags)
+{
+       /* switched from flag unset to set */
+       if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+           !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
+               atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
+
+               spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+               hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
+                                  &bat_priv->mcast.want_all_unsnoopables_list);
+               spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+       /* switched from flag set to unset */
+       } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
+                  orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
+               atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
+
+               spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+               hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
+               spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+       }
+}
+
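
batadv_mcast_want_unsnoop_update() above and the IPv4/IPv6 variants that follow share one toggle pattern: when the flag flips from unset to set, a counter is incremented and the originator is linked into the matching RCU list; on the opposite transition the counter is decremented and the node is unlinked. A condensed stand-alone sketch of that pattern, using plain ints and a boolean instead of atomics, spinlocks and RCU lists (the kernel updates orig->mcast_flags once, after all three helpers have run, rather than inside the helper as done here):

#include <stdbool.h>
#include <stdio.h>

#define WANT_ALL_FLAG 0x01

struct node {
	unsigned char flags;	/* last flags seen from this node */
	bool listed;		/* stand-in for hlist membership   */
};

static int want_all_count;	/* stand-in for the atomic counter */

static void want_all_update(struct node *n, unsigned char new_flags)
{
	bool was_set = n->flags & WANT_ALL_FLAG;
	bool now_set = new_flags & WANT_ALL_FLAG;

	if (now_set && !was_set) {		/* unset -> set */
		want_all_count++;
		n->listed = true;		/* hlist_add_head_rcu() */
	} else if (!now_set && was_set) {	/* set -> unset */
		want_all_count--;
		n->listed = false;		/* hlist_del_rcu() */
	}

	n->flags = new_flags;
}

int main(void)
{
	struct node n = { 0, false };

	want_all_update(&n, WANT_ALL_FLAG);	/* count goes to 1 */
	want_all_update(&n, WANT_ALL_FLAG);	/* no change        */
	want_all_update(&n, 0);			/* count back to 0  */
	printf("count=%d listed=%d\n", want_all_count, n.listed);
	return 0;
}
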
+/**
+ * batadv_mcast_want_ipv4_update - update want-all-ipv4 counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
+ * toggled then this method updates counter and list accordingly.
+ */
+static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t mcast_flags)
+{
+       /* switched from flag unset to set */
+       if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+           !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
+               atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
+
+               spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+               hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
+                                  &bat_priv->mcast.want_all_ipv4_list);
+               spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+       /* switched from flag set to unset */
+       } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
+                  orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
+               atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
+
+               spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+               hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
+               spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+       }
+}
+
+/**
+ * batadv_mcast_want_ipv6_update - update want-all-ipv6 counter and list
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node whose multicast state might have changed
+ * @mcast_flags: flags indicating the new multicast state
+ *
+ * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
+ * toggled then this method updates counter and list accordingly.
+ */
+static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t mcast_flags)
+{
+       /* switched from flag unset to set */
+       if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
+           !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
+               atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
+
+               spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+               hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
+                                  &bat_priv->mcast.want_all_ipv6_list);
+               spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+       /* switched from flag set to unset */
+       } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
+                  orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
+               atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
+
+               spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+               hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
+               spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+       }
+}
+
+/**
+ * batadv_mcast_tvlv_ogm_handler_v1 - process incoming multicast tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the multicast data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                            struct batadv_orig_node *orig,
+                                            uint8_t flags,
+                                            void *tvlv_value,
+                                            uint16_t tvlv_value_len)
+{
+       bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+       uint8_t mcast_flags = BATADV_NO_FLAGS;
+       bool orig_initialized;
+
+       orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
+
+       /* If mcast support is turned on, decrease the disabled mcast node
+        * counter only if we had increased it for this node before. If this
+        * is a completely new orig_node, no need to decrease the counter.
+        */
+       if (orig_mcast_enabled &&
+           !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
+               if (orig_initialized)
+                       atomic_dec(&bat_priv->mcast.num_disabled);
+               orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
+       /* If mcast support is being switched off increase the disabled
+        * mcast node counter.
+        */
+       } else if (!orig_mcast_enabled &&
+                  orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+               atomic_inc(&bat_priv->mcast.num_disabled);
+               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
+       }
+
+       orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
+
+       if (orig_mcast_enabled && tvlv_value &&
+           (tvlv_value_len >= sizeof(mcast_flags)))
+               mcast_flags = *(uint8_t *)tvlv_value;
+
+       batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
+       batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
+       batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
+
+       orig->mcast_flags = mcast_flags;
+}
+
+/**
+ * batadv_mcast_init - initialize the multicast optimizations structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_mcast_init(struct batadv_priv *bat_priv)
+{
+       batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_MCAST, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+}
+
+/**
+ * batadv_mcast_free - free the multicast optimizations structures
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_mcast_free(struct batadv_priv *bat_priv)
+{
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 1);
+
+       batadv_mcast_mla_tt_retract(bat_priv, NULL);
+}
+
+/**
+ * batadv_mcast_purge_orig - reset originator global mcast state modifications
+ * @orig: the originator which is going to get purged
+ */
+void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
+{
+       struct batadv_priv *bat_priv = orig->bat_priv;
+
+       if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+               atomic_dec(&bat_priv->mcast.num_disabled);
+
+       batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
+       batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
+       batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+}
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
new file mode 100644 (file)
index 0000000..73b5d45
--- /dev/null
@@ -0,0 +1,80 @@
+/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+ *
+ * Linus Lüssing
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _NET_BATMAN_ADV_MULTICAST_H_
+#define _NET_BATMAN_ADV_MULTICAST_H_
+
+/**
+ * enum batadv_forw_mode - how a packet should be forwarded
+ * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
+ *  flooding)
+ * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
+ *  BATMAN unicast routing protocol)
+ * @BATADV_FORW_NONE: don't forward, drop it
+ */
+enum batadv_forw_mode {
+       BATADV_FORW_ALL,
+       BATADV_FORW_SINGLE,
+       BATADV_FORW_NONE,
+};
+
+#ifdef CONFIG_BATMAN_ADV_MCAST
+
+void batadv_mcast_mla_update(struct batadv_priv *bat_priv);
+
+enum batadv_forw_mode
+batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                      struct batadv_orig_node **mcast_single_orig);
+
+void batadv_mcast_init(struct batadv_priv *bat_priv);
+
+void batadv_mcast_free(struct batadv_priv *bat_priv);
+
+void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
+
+#else
+
+static inline void batadv_mcast_mla_update(struct batadv_priv *bat_priv)
+{
+       return;
+}
+
+static inline enum batadv_forw_mode
+batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                      struct batadv_orig_node **mcast_single_orig)
+{
+       return BATADV_FORW_ALL;
+}
+
+static inline void batadv_mcast_init(struct batadv_priv *bat_priv)
+{
+       return;
+}
+
+static inline void batadv_mcast_free(struct batadv_priv *bat_priv)
+{
+       return;
+}
+
+static inline void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node)
+{
+       return;
+}
+
+#endif /* CONFIG_BATMAN_ADV_MCAST */
+
+#endif /* _NET_BATMAN_ADV_MULTICAST_H_ */
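
The #else stubs above keep every caller free of preprocessor conditionals: with CONFIG_BATMAN_ADV_MCAST disabled, batadv_mcast_forw_mode() simply reports BATADV_FORW_ALL and the transmit path keeps flooding as before. A small stand-alone sketch of that stub pattern (the feature macro and function names are invented for illustration):

#include <stdio.h>

/* same idea as multicast.h: a stub keeps the caller conditional-free */
enum forw_mode { FORW_ALL, FORW_SINGLE, FORW_NONE };

#ifdef FEATURE_MCAST
static enum forw_mode mcast_forw_mode(void)
{
	/* the real decision would live here */
	return FORW_SINGLE;
}
#else
static inline enum forw_mode mcast_forw_mode(void)
{
	return FORW_ALL;	/* feature off: fall back to flooding */
}
#endif

int main(void)
{
	/* caller compiles identically whether the feature is built or not */
	printf("mode=%d\n", mcast_forw_mode());
	return 0;
}
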
index f1b604d88dc3c2124c86083f36e5bafaad50b9bc..a9546fe541ebb0ff8905fd3fe82d48e48302ce9e 100644 (file)
@@ -819,7 +819,7 @@ static struct batadv_nc_node
 
        /* Initialize nc_node */
        INIT_LIST_HEAD(&nc_node->list);
-       memcpy(nc_node->addr, orig_node->orig, ETH_ALEN);
+       ether_addr_copy(nc_node->addr, orig_node->orig);
        nc_node->orig_node = orig_neigh_node;
        atomic_set(&nc_node->refcount, 2);
 
@@ -941,8 +941,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
        spin_lock_init(&nc_path->packet_list_lock);
        atomic_set(&nc_path->refcount, 2);
        nc_path->last_valid = jiffies;
-       memcpy(nc_path->next_hop, dst, ETH_ALEN);
-       memcpy(nc_path->prev_hop, src, ETH_ALEN);
+       ether_addr_copy(nc_path->next_hop, dst);
+       ether_addr_copy(nc_path->prev_hop, src);
 
        batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n",
                   nc_path->prev_hop,
@@ -1114,15 +1114,15 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        coded_packet->ttl = packet1->ttl;
 
        /* Info about first unicast packet */
-       memcpy(coded_packet->first_source, first_source, ETH_ALEN);
-       memcpy(coded_packet->first_orig_dest, packet1->dest, ETH_ALEN);
+       ether_addr_copy(coded_packet->first_source, first_source);
+       ether_addr_copy(coded_packet->first_orig_dest, packet1->dest);
        coded_packet->first_crc = packet_id1;
        coded_packet->first_ttvn = packet1->ttvn;
 
        /* Info about second unicast packet */
-       memcpy(coded_packet->second_dest, second_dest, ETH_ALEN);
-       memcpy(coded_packet->second_source, second_source, ETH_ALEN);
-       memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
+       ether_addr_copy(coded_packet->second_dest, second_dest);
+       ether_addr_copy(coded_packet->second_source, second_source);
+       ether_addr_copy(coded_packet->second_orig_dest, packet2->dest);
        coded_packet->second_crc = packet_id2;
        coded_packet->second_ttl = packet2->ttl;
        coded_packet->second_ttvn = packet2->ttvn;
@@ -1349,8 +1349,8 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 
        /* Set the mac header as if we actually sent the packet uncoded */
        ethhdr = eth_hdr(skb);
-       memcpy(ethhdr->h_source, ethhdr->h_dest, ETH_ALEN);
-       memcpy(ethhdr->h_dest, eth_dst_new, ETH_ALEN);
+       ether_addr_copy(ethhdr->h_source, ethhdr->h_dest);
+       ether_addr_copy(ethhdr->h_dest, eth_dst_new);
 
        /* Set data pointer to MAC header to mimic packets from our tx path */
        skb_push(skb, ETH_HLEN);
@@ -1636,7 +1636,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
 
        /* Reconstruct original mac header */
        ethhdr = eth_hdr(skb);
-       memcpy(ethhdr, &ethhdr_tmp, sizeof(*ethhdr));
+       *ethhdr = ethhdr_tmp;
 
        /* Select the correct unicast header information based on the location
         * of our mac address in the coded_packet header
@@ -1646,7 +1646,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
                 * so the Ethernet address must be copied to h_dest and
                 * pkt_type changed from PACKET_OTHERHOST to PACKET_HOST
                 */
-               memcpy(ethhdr->h_dest, coded_packet_tmp.second_dest, ETH_ALEN);
+               ether_addr_copy(ethhdr->h_dest, coded_packet_tmp.second_dest);
                skb->pkt_type = PACKET_HOST;
 
                orig_dest = coded_packet_tmp.second_orig_dest;
@@ -1682,7 +1682,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
        unicast_packet->packet_type = BATADV_UNICAST;
        unicast_packet->version = BATADV_COMPAT_VERSION;
        unicast_packet->ttl = ttl;
-       memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
+       ether_addr_copy(unicast_packet->dest, orig_dest);
        unicast_packet->ttvn = ttvn;
 
        batadv_nc_packet_free(nc_packet);
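
The memcpy() to ether_addr_copy() conversions in the hunks above and in the files that follow lean on the helper being free to copy in 16-bit or wider chunks, which is why both source and destination are expected to be at least 2-byte aligned; the packet and entry structs touched here satisfy that. A stand-alone sketch of the underlying idea, not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* copy an Ethernet address in three 16-bit chunks; both pointers are
 * assumed to be 2-byte aligned, as required for this optimization
 */
static void eth_addr_copy16(uint8_t *dst, const uint8_t *src)
{
	uint16_t *d = (uint16_t *)dst;
	const uint16_t *s = (const uint16_t *)src;

	d[0] = s[0];
	d[1] = s[1];
	d[2] = s[2];
}

int main(void)
{
	/* uint16_t members guarantee the required alignment */
	uint16_t src_buf[3] = { 0x0001, 0x5e00, 0x0100 };
	uint16_t dst_buf[3];

	eth_addr_copy16((uint8_t *)dst_buf, (const uint8_t *)src_buf);
	printf("%04x %04x %04x\n", dst_buf[0], dst_buf[1], dst_buf[2]);
	return 0;
}
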
index 853941629dc15a3c60b569d315815aee4987fe08..ffd9dfbd9b0e856e35e2ac6ea594739e8feb614d 100644 (file)
@@ -27,6 +27,7 @@
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 #include "fragmentation.h"
+#include "multicast.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -446,7 +447,7 @@ batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
        INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
        spin_lock_init(&neigh_node->ifinfo_lock);
 
-       memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
+       ether_addr_copy(neigh_node->addr, neigh_addr);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;
 
@@ -557,6 +558,8 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
        }
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
+       batadv_mcast_purge_orig(orig_node);
+
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
@@ -664,15 +667,17 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
        /* extra reference for return */
        atomic_set(&orig_node->refcount, 2);
 
-       orig_node->tt_initialised = false;
        orig_node->bat_priv = bat_priv;
-       memcpy(orig_node->orig, addr, ETH_ALEN);
+       ether_addr_copy(orig_node->orig, addr);
        batadv_dat_init_orig_node_addr(orig_node);
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       orig_node->mcast_flags = BATADV_NO_FLAGS;
+#endif
 
        /* create a vlan object for the "untagged" LAN */
        vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
index 0a381d1174c1fbb36a7c09173a004800d5d7d420..34e096d2dce1592dcb330daaf5b6c030de32cb93 100644 (file)
@@ -89,6 +89,19 @@ enum batadv_icmp_packettype {
        BATADV_PARAMETER_PROBLEM       = 12,
 };
 
+/**
+ * enum batadv_mcast_flags - flags for multicast capabilities and settings
+ * @BATADV_MCAST_WANT_ALL_UNSNOOPABLES: we want all packets destined for
+ *  224.0.0.0/24 or ff02::1
+ * @BATADV_MCAST_WANT_ALL_IPV4: we want all IPv4 multicast packets
+ * @BATADV_MCAST_WANT_ALL_IPV6: we want all IPv6 multicast packets
+ */
+enum batadv_mcast_flags {
+       BATADV_MCAST_WANT_ALL_UNSNOOPABLES      = BIT(0),
+       BATADV_MCAST_WANT_ALL_IPV4              = BIT(1),
+       BATADV_MCAST_WANT_ALL_IPV6              = BIT(2),
+};
+
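
These three bits travel in the single flags octet of the multicast tvlv (see struct batadv_tvlv_mcast_data further down in this file) and are split by the receiver into the per-originator "want all" states. A tiny stand-alone sketch of that interpretation, with BIT() expanded by hand:

#include <stdbool.h>
#include <stdio.h>

#define MCAST_WANT_ALL_UNSNOOPABLES	(1U << 0)
#define MCAST_WANT_ALL_IPV4		(1U << 1)
#define MCAST_WANT_ALL_IPV6		(1U << 2)

int main(void)
{
	unsigned char flags = MCAST_WANT_ALL_UNSNOOPABLES | MCAST_WANT_ALL_IPV4;

	bool want_unsnoop = flags & MCAST_WANT_ALL_UNSNOOPABLES;
	bool want_ipv4 = flags & MCAST_WANT_ALL_IPV4;
	bool want_ipv6 = flags & MCAST_WANT_ALL_IPV6;

	printf("unsnoopables=%d ipv4=%d ipv6=%d\n",
	       want_unsnoop, want_ipv4, want_ipv6);
	return 0;
}
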
 /* tt data subtypes */
 #define BATADV_TT_DATA_TYPE_MASK 0x0F
 
@@ -106,10 +119,30 @@ enum batadv_tt_data_flags {
        BATADV_TT_FULL_TABLE = BIT(4),
 };
 
-/* BATADV_TT_CLIENT flags.
- * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
- * BIT(15) are used for local computation only.
- * Flags from BIT(4) to BIT(7) are kept in sync with the rest of the network.
+/**
+ * enum batadv_tt_client_flags - TT client specific flags
+ * @BATADV_TT_CLIENT_DEL: the client has to be deleted from the table
+ * @BATADV_TT_CLIENT_ROAM: the client roamed to/from another node and the new
+ *  update telling its new real location has not been received/sent yet
+ * @BATADV_TT_CLIENT_WIFI: this client is connected through a wifi interface.
+ *  This information is used by the "AP Isolation" feature
+ * @BATADV_TT_CLIENT_ISOLA: this client is considered "isolated". This
+ *  information is used by the Extended Isolation feature
+ * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from the table
+ * @BATADV_TT_CLIENT_NEW: this client has been added to the local table but has
+ *  not been announced yet
+ * @BATADV_TT_CLIENT_PENDING: this client is marked for removal but it is kept
+ *  in the table for one more originator interval for consistency purposes
+ * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be part of
+ *  the network but no node has announced it yet
+ *
+ * Bits from 0 to 7 are called _remote flags_ because they are sent on the wire.
+ * Bits from 8 to 15 are called _local flags_ because they are used for local
+ * computations only.
+ *
+ * Bits from 4 to 7 - a subset of remote flags - are ensured to be in sync with
+ * the other nodes in the network. To achieve this goal these flags are included
+ * in the TT CRC computation.
  */
 enum batadv_tt_client_flags {
        BATADV_TT_CLIENT_DEL     = BIT(0),
@@ -145,6 +178,7 @@ enum batadv_bla_claimframe {
  * @BATADV_TVLV_NC: network coding tvlv
  * @BATADV_TVLV_TT: translation table tvlv
  * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ * @BATADV_TVLV_MCAST: multicast capability tvlv
  */
 enum batadv_tvlv_type {
        BATADV_TVLV_GW          = 0x01,
@@ -152,6 +186,7 @@ enum batadv_tvlv_type {
        BATADV_TVLV_NC          = 0x03,
        BATADV_TVLV_TT          = 0x04,
        BATADV_TVLV_ROAM        = 0x05,
+       BATADV_TVLV_MCAST       = 0x06,
 };
 
 #pragma pack(2)
@@ -504,4 +539,14 @@ struct batadv_tvlv_roam_adv {
        __be16 vid;
 };
 
+/**
+ * struct batadv_tvlv_mcast_data - payload of a multicast tvlv
+ * @flags: multicast flags announced by the orig node
+ * @reserved: reserved field
+ */
+struct batadv_tvlv_mcast_data {
+       uint8_t flags;
+       uint8_t reserved[3];
+};
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
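
struct batadv_tvlv_mcast_data is the payload placed into the multicast tvlv container by batadv_mcast_mla_tvlv_update(): one flags octet plus three reserved octets, i.e. four bytes on the wire. A stand-alone compile-time check of that size assumption (user-space sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

#pragma pack(2)
struct tvlv_mcast_data {		/* mirrors batadv_tvlv_mcast_data */
	uint8_t flags;
	uint8_t reserved[3];
};
#pragma pack()

/* one flags octet plus three reserved octets: 4 bytes on the wire */
_Static_assert(sizeof(struct tvlv_mcast_data) == 4, "unexpected tvlv size");

int main(void)
{
	printf("tvlv mcast payload: %zu bytes\n",
	       sizeof(struct tvlv_mcast_data));
	return 0;
}
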
index a953d5b196a3825020ba306c8df42130a0d26eda..35141534938e76b545de943a7397c60ad23ebff7 100644 (file)
@@ -222,8 +222,8 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
 
                icmph = (struct batadv_icmp_header *)skb->data;
 
-               memcpy(icmph->dst, icmph->orig, ETH_ALEN);
-               memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(icmph->dst, icmph->orig);
+               ether_addr_copy(icmph->orig, primary_if->net_dev->dev_addr);
                icmph->msg_type = BATADV_ECHO_REPLY;
                icmph->ttl = BATADV_TTL;
 
@@ -276,9 +276,8 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
-              ETH_ALEN);
+       ether_addr_copy(icmp_packet->dst, icmp_packet->orig);
+       ether_addr_copy(icmp_packet->orig, primary_if->net_dev->dev_addr);
        icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
        icmp_packet->ttl = BATADV_TTL;
 
@@ -341,8 +340,8 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
                        goto out;
 
-               memcpy(&(icmp_packet_rr->rr[icmp_packet_rr->rr_cur]),
-                      ethhdr->h_dest, ETH_ALEN);
+               ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
+                               ethhdr->h_dest);
                icmp_packet_rr->rr_cur++;
        }
 
@@ -664,7 +663,7 @@ batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
        }
 
        /* update the packet header */
-       memcpy(unicast_packet->dest, orig_addr, ETH_ALEN);
+       ether_addr_copy(unicast_packet->dest, orig_addr);
        unicast_packet->ttvn = orig_ttvn;
 
        ret = true;
@@ -774,7 +773,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
        if (!primary_if)
                return 0;
 
-       memcpy(unicast_packet->dest, primary_if->net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
 
        batadv_hardif_free_ref(primary_if);
 
index 843febd1e5198914a398215037cd2d926f167738..3d64ed20c393528793ca4aedd30e21284c51b7ed 100644 (file)
@@ -27,6 +27,7 @@
 #include "originator.h"
 #include "network-coding.h"
 #include "fragmentation.h"
+#include "multicast.h"
 
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -59,8 +60,8 @@ int batadv_send_skb_packet(struct sk_buff *skb,
        skb_reset_mac_header(skb);
 
        ethhdr = eth_hdr(skb);
-       memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
-       memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
+       ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
+       ether_addr_copy(ethhdr->h_dest, dst_addr);
        ethhdr->h_proto = htons(ETH_P_BATMAN);
 
        skb_set_network_header(skb, ETH_HLEN);
@@ -165,7 +166,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
        /* set unicast ttl */
        unicast_packet->ttl = BATADV_TTL;
        /* copy the destination for faster routing */
-       memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+       ether_addr_copy(unicast_packet->dest, orig_node->orig);
        /* set the destination tt version number */
        unicast_packet->ttvn = ttvn;
 
@@ -220,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
 
        uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
        uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
-       memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
        uc_4addr_packet->subtype = packet_subtype;
        uc_4addr_packet->reserved = 0;
 
@@ -248,15 +249,15 @@ out:
  *
  * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
-static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
-                                  struct sk_buff *skb, int packet_type,
-                                  int packet_subtype,
-                                  struct batadv_orig_node *orig_node,
-                                  unsigned short vid)
+int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+                           struct sk_buff *skb, int packet_type,
+                           int packet_subtype,
+                           struct batadv_orig_node *orig_node,
+                           unsigned short vid)
 {
        struct ethhdr *ethhdr;
        struct batadv_unicast_packet *unicast_packet;
-       int ret = NET_XMIT_DROP, hdr_size;
+       int ret = NET_XMIT_DROP;
 
        if (!orig_node)
                goto out;
@@ -265,16 +266,12 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
        case BATADV_UNICAST:
                if (!batadv_send_skb_prepare_unicast(skb, orig_node))
                        goto out;
-
-               hdr_size = sizeof(*unicast_packet);
                break;
        case BATADV_UNICAST_4ADDR:
                if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
                                                           orig_node,
                                                           packet_subtype))
                        goto out;
-
-               hdr_size = sizeof(struct batadv_unicast_4addr_packet);
                break;
        default:
                /* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -283,7 +280,10 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                goto out;
        }
 
-       ethhdr = (struct ethhdr *)(skb->data + hdr_size);
+       /* skb->data might have been reallocated by
+        * batadv_send_skb_prepare_unicast{,_4addr}()
+        */
+       ethhdr = eth_hdr(skb);
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
        /* inform the destination node that we are still missing a correct route
@@ -312,6 +312,7 @@ out:
  * @packet_type: the batman unicast packet type to use
  * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
  *  4addr packets)
+ * @dst_hint: can be used to override the destination contained in the skb
  * @vid: the vid to be used to search the translation table
  *
  * Look up the recipient node for the destination address in the ethernet
index aaddaa9661ce49752f11af09700e6f0ab7d9b387..38d0ec1833aed32363a5f1d3a1e471ec972dc3a8 100644 (file)
@@ -36,6 +36,11 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb,
                                           struct batadv_orig_node *orig_node,
                                           int packet_subtype);
+int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+                           struct sk_buff *skb, int packet_type,
+                           int packet_subtype,
+                           struct batadv_orig_node *orig_node,
+                           unsigned short vid);
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, int packet_type,
                                   int packet_subtype, uint8_t *dst_hint,
@@ -47,6 +52,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * batadv_send_skb_via_tt - send an skb via TT lookup
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
+ * @dst_hint: can be used to override the destination contained in the skb
  * @vid: the vid to be used to search the translation table
  *
  * Look up the recipient node for the destination address in the ethernet
@@ -68,6 +74,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the payload to send
  * @packet_subtype: the unicast 4addr packet subtype to use
+ * @dst_hint: can be used to override the destination contained in the skb
  * @vid: the vid to be used to search the translation table
  *
  * Look up the recipient node for the destination address in the ethernet
index f82c267e1886ee04cb50339d91e20fa559161737..744a59b85e15ded75f61da8a9fa5a8a87cdb7b8d 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
+#include "multicast.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 
@@ -111,8 +112,8 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       memcpy(old_addr, dev->dev_addr, ETH_ALEN);
-       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+       ether_addr_copy(old_addr, dev->dev_addr);
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        /* only modify transtable if it has been initialized before */
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
@@ -170,17 +171,19 @@ static int batadv_interface_tx(struct sk_buff *skb,
        unsigned short vid;
        uint32_t seqno;
        int gw_mode;
+       enum batadv_forw_mode forw_mode;
+       struct batadv_orig_node *mcast_single_orig = NULL;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
 
        soft_iface->trans_start = jiffies;
        vid = batadv_get_vid(skb, 0);
-       ethhdr = (struct ethhdr *)skb->data;
+       ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
-               vhdr = (struct vlan_ethhdr *)skb->data;
+               vhdr = vlan_eth_hdr(skb);
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
                        break;
@@ -194,7 +197,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
                goto dropped;
 
        /* skb->data might have been reallocated by batadv_bla_tx() */
-       ethhdr = (struct ethhdr *)skb->data;
+       ethhdr = eth_hdr(skb);
 
        /* Register the client MAC in the transtable */
        if (!is_multicast_ether_addr(ethhdr->h_source)) {
@@ -230,7 +233,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
                /* skb->data may have been modified by
                 * batadv_gw_dhcp_recipient_get()
                 */
-               ethhdr = (struct ethhdr *)skb->data;
+               ethhdr = eth_hdr(skb);
                /* if gw_mode is on, broadcast any non-DHCP message.
                 * All the DHCP packets are going to be sent as unicast
                 */
@@ -247,9 +250,19 @@ static int batadv_interface_tx(struct sk_buff *skb,
                         * directed to a DHCP server
                         */
                        goto dropped;
-       }
 
 send:
+               if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
+                       forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
+                                                          &mcast_single_orig);
+                       if (forw_mode == BATADV_FORW_NONE)
+                               goto dropped;
+
+                       if (forw_mode == BATADV_FORW_SINGLE)
+                               do_bcast = false;
+               }
+       }
+
        batadv_skb_set_priority(skb, 0);
 
        /* ethernet packet should be broadcasted */
@@ -279,8 +292,8 @@ send:
                /* hw address of first interface is the orig mac because only
                 * this mac is known throughout the mesh
                 */
-               memcpy(bcast_packet->orig,
-                      primary_if->net_dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(bcast_packet->orig,
+                               primary_if->net_dev->dev_addr);
 
                /* set broadcast sequence number */
                seqno = atomic_inc_return(&bat_priv->bcast_seqno);
@@ -301,6 +314,10 @@ send:
                        if (ret)
                                goto dropped;
                        ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+               } else if (mcast_single_orig) {
+                       ret = batadv_send_skb_unicast(bat_priv, skb,
+                                                     BATADV_UNICAST, 0,
+                                                     mcast_single_orig, vid);
                } else {
                        if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
                                                                  skb))
@@ -652,10 +669,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
        }
 
        batadv_sysfs_del_meshif(soft_iface);
-
-       rtnl_lock();
-       unregister_netdevice(soft_iface);
-       rtnl_unlock();
+       unregister_netdev(soft_iface);
 }
 
 /**
@@ -691,6 +705,14 @@ static int batadv_softif_init_late(struct net_device *dev)
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_set(&bat_priv->distributed_arp_table, 1);
+#endif
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       bat_priv->mcast.flags = BATADV_NO_FLAGS;
+       atomic_set(&bat_priv->multicast_mode, 1);
+       atomic_set(&bat_priv->mcast.num_disabled, 0);
+       atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
+       atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
+       atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
 #endif
        atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
        atomic_set(&bat_priv->gw_sel_class, 20);
index e456bf6bb2844e61560fa961a6984d5ef83a8817..1ebb0d9e2ea547d1c263a6b09d30d81214e4ba33 100644 (file)
@@ -539,6 +539,9 @@ BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
                     batadv_post_gw_reselect);
 static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
                   batadv_store_gw_bwidth);
+#ifdef CONFIG_BATMAN_ADV_MCAST
+BATADV_ATTR_SIF_BOOL(multicast_mode, S_IRUGO | S_IWUSR, NULL);
+#endif
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
 #endif
@@ -557,6 +560,9 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
        &batadv_attr_distributed_arp_table,
+#endif
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       &batadv_attr_multicast_mode,
 #endif
        &batadv_attr_fragmentation,
        &batadv_attr_routing_algo,
index 959dde721c46d057e23c494ba0b55175338466dc..d636bde72c9ace9cfbcead01353c955f17923155 100644 (file)
@@ -24,6 +24,7 @@
 #include "originator.h"
 #include "routing.h"
 #include "bridge_loop_avoidance.h"
+#include "multicast.h"
 
 #include <linux/crc32c.h>
 
@@ -96,7 +97,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
        if (!hash)
                return NULL;
 
-       memcpy(to_search.addr, addr, ETH_ALEN);
+       ether_addr_copy(to_search.addr, addr);
        to_search.vid = vid;
 
        index = batadv_choose_tt(&to_search, hash->size);
@@ -192,6 +193,31 @@ batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
        }
 }
 
+/**
+ * batadv_tt_global_hash_count - count the number of orig entries
+ * @hash: hash table containing the tt entries
+ * @addr: the mac address of the client to count entries for
+ * @vid: VLAN identifier
+ *
+ * Returns the number of originators advertising the given address and VLAN
+ * identifier (excluding ourselves).
+ */
+int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
+                               const uint8_t *addr, unsigned short vid)
+{
+       struct batadv_tt_global_entry *tt_global_entry;
+       int count;
+
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
+       if (!tt_global_entry)
+               return 0;
+
+       count = atomic_read(&tt_global_entry->orig_list_count);
+       batadv_tt_global_entry_free_ref(tt_global_entry);
+
+       return count;
+}
+
 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
 {
        struct batadv_tt_orig_list_entry *orig_entry;
@@ -333,7 +359,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        tt_change_node->change.flags = flags;
        memset(tt_change_node->change.reserved, 0,
               sizeof(tt_change_node->change.reserved));
-       memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
+       ether_addr_copy(tt_change_node->change.addr, common->addr);
        tt_change_node->change.vid = htons(common->vid);
 
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
@@ -484,7 +510,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_tt_global_entry *tt_global;
+       struct batadv_tt_global_entry *tt_global = NULL;
        struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
@@ -497,7 +523,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                in_dev = dev_get_by_index(&init_net, ifindex);
 
        tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
-       tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
+
+       if (!is_multicast_ether_addr(addr))
+               tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
 
        if (tt_local) {
                tt_local->last_seen = jiffies;
@@ -549,7 +577,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                   addr, BATADV_PRINT_VID(vid),
                   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
-       memcpy(tt_local->common.addr, addr, ETH_ALEN);
+       ether_addr_copy(tt_local->common.addr, addr);
        /* The local entry has to be marked as NEW to avoid to send it in
         * a full table response going out before the next ttvn increment
         * (consistency check)
@@ -562,8 +590,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        tt_local->last_seen = jiffies;
        tt_local->common.added_at = tt_local->last_seen;
 
-       /* the batman interface mac address should never be purged */
-       if (batadv_compare_eth(addr, soft_iface->dev_addr))
+       /* the batman interface mac and multicast addresses should never be
+        * purged
+        */
+       if (batadv_compare_eth(addr, soft_iface->dev_addr) ||
+           is_multicast_ether_addr(addr))
                tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
        hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
@@ -1219,6 +1250,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        hlist_add_head_rcu(&orig_entry->list,
                           &tt_global->orig_list);
        spin_unlock_bh(&tt_global->list_lock);
+       atomic_inc(&tt_global->orig_list_count);
+
 out:
        if (orig_entry)
                batadv_tt_orig_list_entry_free_ref(orig_entry);
@@ -1277,7 +1310,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                        goto out;
 
                common = &tt_global_entry->common;
-               memcpy(common->addr, tt_addr, ETH_ALEN);
+               ether_addr_copy(common->addr, tt_addr);
                common->vid = vid;
 
                common->flags = flags;
@@ -1292,6 +1325,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                common->added_at = jiffies;
 
                INIT_HLIST_HEAD(&tt_global_entry->orig_list);
+               atomic_set(&tt_global_entry->orig_list_count, 0);
                spin_lock_init(&tt_global_entry->list_lock);
 
                hash_added = batadv_hash_add(bat_priv->tt.global_hash,
@@ -1361,6 +1395,11 @@ add_orig_entry:
        ret = true;
 
 out_remove:
+       /* Do not remove multicast addresses from the local hash on
+        * global additions
+        */
+       if (is_multicast_ether_addr(tt_addr))
+               goto out;
 
        /* remove address from local hash if present */
        local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
@@ -1552,6 +1591,25 @@ out:
        return 0;
 }
 
+/**
+ * batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * @tt_global_entry: the global entry to remove the orig_entry from
+ * @orig_entry: the orig entry to remove and free
+ *
+ * Remove an orig_entry from its list in the given tt_global_entry and
+ * free this orig_entry afterwards.
+ */
+static void
+batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+                               struct batadv_tt_orig_list_entry *orig_entry)
+{
+       batadv_tt_global_size_dec(orig_entry->orig_node,
+                                 tt_global_entry->common.vid);
+       atomic_dec(&tt_global_entry->orig_list_count);
+       hlist_del_rcu(&orig_entry->list);
+       batadv_tt_orig_list_entry_free_ref(orig_entry);
+}
+
 /* deletes the orig list of a tt_global_entry */
 static void
 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
@@ -1562,20 +1620,26 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
 
        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
-       hlist_for_each_entry_safe(orig_entry, safe, head, list) {
-               hlist_del_rcu(&orig_entry->list);
-               batadv_tt_global_size_dec(orig_entry->orig_node,
-                                         tt_global_entry->common.vid);
-               batadv_tt_orig_list_entry_free_ref(orig_entry);
-       }
+       hlist_for_each_entry_safe(orig_entry, safe, head, list)
+               batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
        spin_unlock_bh(&tt_global_entry->list_lock);
 }
 
+/**
+ * batadv_tt_global_del_orig_node - remove orig_node from a global tt entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_global_entry: the global entry to remove the orig_node from
+ * @orig_node: the originator announcing the client
+ * @message: message to append to the log on deletion
+ *
+ * Remove the given orig_node and its corresponding orig_entry from the given
+ * global tt entry.
+ */
 static void
-batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
-                               struct batadv_tt_global_entry *tt_global_entry,
-                               struct batadv_orig_node *orig_node,
-                               const char *message)
+batadv_tt_global_del_orig_node(struct batadv_priv *bat_priv,
+                              struct batadv_tt_global_entry *tt_global_entry,
+                              struct batadv_orig_node *orig_node,
+                              const char *message)
 {
        struct hlist_head *head;
        struct hlist_node *safe;
@@ -1592,10 +1656,8 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
                                   orig_node->orig,
                                   tt_global_entry->common.addr,
                                   BATADV_PRINT_VID(vid), message);
-                       hlist_del_rcu(&orig_entry->list);
-                       batadv_tt_global_size_dec(orig_node,
-                                                 tt_global_entry->common.vid);
-                       batadv_tt_orig_list_entry_free_ref(orig_entry);
+                       batadv_tt_global_del_orig_entry(tt_global_entry,
+                                                       orig_entry);
                }
        }
        spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1637,8 +1699,8 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
                /* there is another entry, we can simply delete this
                 * one and can still use the other one.
                 */
-               batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
-                                               orig_node, message);
+               batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
+                                              orig_node, message);
 }
 
 /**
@@ -1664,8 +1726,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                goto out;
 
        if (!roaming) {
-               batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
-                                               orig_node, message);
+               batadv_tt_global_del_orig_node(bat_priv, tt_global_entry,
+                                              orig_node, message);
 
                if (hlist_empty(&tt_global_entry->orig_list))
                        batadv_tt_global_free(bat_priv, tt_global_entry,
@@ -1748,8 +1810,8 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                                                 struct batadv_tt_global_entry,
                                                 common);
 
-                       batadv_tt_global_del_orig_entry(bat_priv, tt_global,
-                                                       orig_node, message);
+                       batadv_tt_global_del_orig_node(bat_priv, tt_global,
+                                                      orig_node, message);
 
                        if (hlist_empty(&tt_global->orig_list)) {
                                vid = tt_global->common.vid;
@@ -1763,7 +1825,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                }
                spin_unlock_bh(list_lock);
        }
-       orig_node->tt_initialised = false;
+       orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
 }
 
 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
@@ -2160,7 +2222,7 @@ batadv_new_tt_req_node(struct batadv_priv *bat_priv,
        if (!tt_req_node)
                goto unlock;
 
-       memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
+       ether_addr_copy(tt_req_node->addr, orig_node->orig);
        tt_req_node->issued_at = jiffies;
 
        list_add(&tt_req_node->list, &bat_priv->tt.req_list);
@@ -2240,8 +2302,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
                        if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
                                continue;
 
-                       memcpy(tt_change->addr, tt_common_entry->addr,
-                              ETH_ALEN);
+                       ether_addr_copy(tt_change->addr, tt_common_entry->addr);
                        tt_change->flags = tt_common_entry->flags;
                        tt_change->vid = htons(tt_common_entry->vid);
                        memset(tt_change->reserved, 0,
@@ -2724,7 +2785,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                return;
                }
        }
-       orig_node->tt_initialised = true;
+       orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
@@ -2932,7 +2993,7 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
                tt_roam_node->first_time = jiffies;
                atomic_set(&tt_roam_node->counter,
                           BATADV_ROAMING_MAX_COUNT - 1);
-               memcpy(tt_roam_node->addr, client, ETH_ALEN);
+               ether_addr_copy(tt_roam_node->addr, client);
 
                list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
                ret = true;
@@ -3121,6 +3182,9 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
  */
 static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
+       /* Update multicast addresses in local translation table */
+       batadv_mcast_mla_update(bat_priv);
+
        if (atomic_read(&bat_priv->tt.local_changes) < 1) {
                if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
                        batadv_tt_tvlv_container_update(bat_priv);
@@ -3211,13 +3275,15 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
        uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
        struct batadv_tvlv_tt_vlan_data *tt_vlan;
        bool full_table = true;
+       bool has_tt_init;
 
        tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
+       has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
+
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes
         */
-       if ((!orig_node->tt_initialised && ttvn == 1) ||
-           ttvn - orig_ttvn == 1) {
+       if ((!has_tt_init && ttvn == 1) || ttvn - orig_ttvn == 1) {
                /* the OGM could not contain the changes due to their size or
                 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
                 * times.
@@ -3257,7 +3323,7 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                /* if we missed more than one change or our tables are not
                 * in sync anymore -> request fresh tt data
                 */
-               if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
+               if (!has_tt_init || ttvn != orig_ttvn ||
                    !batadv_tt_global_check_crc(orig_node, tt_vlan,
                                                tt_num_vlan)) {
 request_table:
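
batadv_tt_tvlv_generate() earlier in this file fills an on-wire change record: the MAC address is copied byte-for-byte while the VID is converted to network byte order with htons(). The standalone sketch below shows the same serialization pattern; struct tt_change_record and all field names are invented for illustration and do not match the batman-adv wire format.

    #include <arpa/inet.h>   /* htons */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Hypothetical on-wire record; fields are stored in network byte order
     * and the struct is packed so there is no padding on the wire. */
    struct tt_change_record {
        uint8_t  addr[ETH_ALEN];
        uint8_t  flags;
        uint8_t  reserved;
        uint16_t vid_be;        /* big-endian on the wire */
    } __attribute__((packed));

    static void fill_change(struct tt_change_record *rec,
                            const uint8_t addr[ETH_ALEN],
                            uint8_t flags, uint16_t vid)
    {
        memcpy(rec->addr, addr, ETH_ALEN);
        rec->flags = flags;
        rec->reserved = 0;
        rec->vid_be = htons(vid);  /* convert host order once, at the boundary */
    }

    int main(void)
    {
        const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x11, 0x22, 0x33 };
        struct tt_change_record rec;
        uint8_t wire[sizeof(rec)];

        fill_change(&rec, mac, 0x01, 42);
        memcpy(wire, &rec, sizeof(rec));
        printf("vid on wire: %02x %02x\n", wire[8], wire[9]);  /* 00 2a */
        return 0;
    }
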
index 20a1d7861ded99ecb18cace18af3b85f06fbe018..ad84d7b89e399930132ac2537d66222eb4712119 100644 (file)
@@ -29,6 +29,8 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
                               int32_t match_vid, const char *message);
+int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
+                               const uint8_t *addr, unsigned short vid);
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                                                  const uint8_t *src,
                                                  const uint8_t *addr,
index 78370ab31f9c2f7db1793ecedb0735eb4d8fa087..34891a56773f09ebcccab01fe3191b1a56651aed 100644 (file)
@@ -24,8 +24,9 @@
 
 #ifdef CONFIG_BATMAN_ADV_DAT
 
-/* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
- * BATADV_DAT_ADDR_MAX is changed as well.
+/**
+ * batadv_dat_addr_t - it is the type used for all DHT addresses. If it is
+ *  changed, BATADV_DAT_ADDR_MAX is changed as well.
  *
  * *Please be careful: batadv_dat_addr_t must be UNSIGNED*
  */
@@ -163,7 +164,7 @@ struct batadv_vlan_tt {
 };
 
 /**
- * batadv_orig_node_vlan - VLAN specific data per orig_node
+ * struct batadv_orig_node_vlan - VLAN specific data per orig_node
  * @vid: the VLAN identifier
  * @tt: VLAN specific TT attributes
  * @list: list node for orig_node::vlan_list
@@ -204,14 +205,18 @@ struct batadv_orig_bat_iv {
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @mcast_flags: multicast flags announced by the orig node
+ * @mcast_want_all_unsnoopables_node: a list node for the
+ *  mcast.want_all_unsnoopables list
+ * @mcast_want_all_ipv4_node: a list node for the mcast.want_all_ipv4 list
+ * @mcast_want_all_ipv6_node: a list node for the mcast.want_all_ipv6 list
  * @capabilities: announced capabilities of this originator
+ * @capa_initialized: bitfield to remember whether a capability was initialized
  * @last_ttvn: last seen translation table version number
  * @tt_buff: last tt changeset this node received from the orig node
  * @tt_buff_len: length of the last tt changeset this node received from the
  *  orig node
  * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_initialised: bool keeping track of whether or not this node have received
- *  any translation table information from the orig node yet
  * @tt_lock: prevents from updating the table while reading it. Table update is
 *  made up by two operations (data structure update and metadata -CRC/TTVN-
  *  recalculation) and they have to be executed atomically in order to avoid
@@ -247,12 +252,18 @@ struct batadv_orig_node {
 #endif
        unsigned long last_seen;
        unsigned long bcast_seqno_reset;
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       uint8_t mcast_flags;
+       struct hlist_node mcast_want_all_unsnoopables_node;
+       struct hlist_node mcast_want_all_ipv4_node;
+       struct hlist_node mcast_want_all_ipv6_node;
+#endif
        uint8_t capabilities;
+       uint8_t capa_initialized;
        atomic_t last_ttvn;
        unsigned char *tt_buff;
        int16_t tt_buff_len;
        spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-       bool tt_initialised;
        /* prevents from changing the table while reading it */
        spinlock_t tt_lock;
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
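
The new capa_initialized byte above takes over from the removed tt_initialised bool: several per-originator "has this capability's data been received yet" facts now share one field and are tested with bit masks such as BATADV_ORIG_CAPA_HAS_TT from the enum in the following hunk. A standalone sketch of the pattern, with invented flag names:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)  (1U << (n))

    /* Illustrative capability flags, one bit each. */
    enum capa_flags {
        CAPA_HAS_TT    = BIT(2),
        CAPA_HAS_MCAST = BIT(3),
    };

    struct orig_state {
        uint8_t capa_initialized;   /* which capabilities were announced */
    };

    int main(void)
    {
        struct orig_state orig = { 0 };

        /* Record that a TT changeset has been processed for this originator. */
        orig.capa_initialized |= CAPA_HAS_TT;

        /* The old 'bool tt_initialised' test becomes a mask check. */
        int has_tt_init = !!(orig.capa_initialized & CAPA_HAS_TT);
        printf("tt initialised: %d, mcast initialised: %d\n",
               has_tt_init, !!(orig.capa_initialized & CAPA_HAS_MCAST));

        /* Clearing on purge mirrors 'capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT'. */
        orig.capa_initialized &= ~CAPA_HAS_TT;
        return 0;
    }
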
@@ -282,10 +293,15 @@ struct batadv_orig_node {
  * enum batadv_orig_capabilities - orig node capabilities
  * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
  * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
+ * @BATADV_ORIG_CAPA_HAS_TT: orig node has tt capability
+ * @BATADV_ORIG_CAPA_HAS_MCAST: orig node has some multicast capability
+ *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
        BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
        BATADV_ORIG_CAPA_HAS_NC = BIT(1),
+       BATADV_ORIG_CAPA_HAS_TT = BIT(2),
+       BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
 };
 
 /**
@@ -334,7 +350,7 @@ struct batadv_neigh_node {
 };
 
 /**
- * struct batadv_neigh_node_bat_iv - neighbor information per outgoing
+ * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
  *  interface for BATMAN IV
  * @tq_recv: ring buffer of received TQ values from this neigh node
  * @tq_index: ring buffer index
@@ -544,7 +560,7 @@ struct batadv_priv_bla {
 #endif
 
 /**
- * struct batadv_debug_log - debug logging data
+ * struct batadv_priv_debug_log - debug logging data
 * @log_buff: buffer holding the logs (ring buffer)
  * @log_start: index of next character to read
  * @log_end: index of next character to write
@@ -607,6 +623,39 @@ struct batadv_priv_dat {
 };
 #endif
 
+#ifdef CONFIG_BATMAN_ADV_MCAST
+/**
+ * struct batadv_priv_mcast - per mesh interface mcast data
+ * @mla_list: list of multicast addresses we are currently announcing via TT
+ * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable
+ *  multicast traffic
+ * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic
+ * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic
+ * @flags: the flags we have last sent in our mcast tvlv
+ * @enabled: whether the multicast tvlv is currently enabled
+ * @num_disabled: number of nodes that have no mcast tvlv
+ * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic
+ * @num_want_all_ipv4: counter for items in want_all_ipv4_list
+ * @num_want_all_ipv6: counter for items in want_all_ipv6_list
+ * @want_lists_lock: lock for protecting modifications to mcast want lists
+ *  (traversals are rcu-locked)
+ */
+struct batadv_priv_mcast {
+       struct hlist_head mla_list;
+       struct hlist_head want_all_unsnoopables_list;
+       struct hlist_head want_all_ipv4_list;
+       struct hlist_head want_all_ipv6_list;
+       uint8_t flags;
+       bool enabled;
+       atomic_t num_disabled;
+       atomic_t num_want_all_unsnoopables;
+       atomic_t num_want_all_ipv4;
+       atomic_t num_want_all_ipv6;
+       /* protects want_all_{unsnoopables,ipv4,ipv6}_list */
+       spinlock_t want_lists_lock;
+};
+#endif
+
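
struct batadv_priv_mcast above pairs each want_all_* list with an atomic num_want_all_* counter, and its kernel-doc notes that list modifications take want_lists_lock while traversals are RCU-protected. The sketch below illustrates only the general idea of keeping a cheap atomic summary next to a locked list, using C11 atomics and a pthread mutex; the names and the single-list layout are invented and not batman-adv code.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative: one traffic class, e.g. "all IPv6 multicast". */
    struct want_list {
        pthread_mutex_t lock;       /* protects the (elided) list itself */
        atomic_int num_entries;     /* cheap summary for the fast path */
    };

    static struct want_list wl = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .num_entries = 0,
    };

    static void want_list_add(struct want_list *w)
    {
        pthread_mutex_lock(&w->lock);
        /* ... link the node into the list here ... */
        atomic_fetch_add(&w->num_entries, 1);
        pthread_mutex_unlock(&w->lock);
    }

    static bool anyone_wants(struct want_list *w)
    {
        /* Fast path: a single atomic load, no lock, no list walk. */
        return atomic_load(&w->num_entries) > 0;
    }

    int main(void)
    {
        printf("before: %d\n", anyone_wants(&wl));
        want_list_add(&wl);
        printf("after:  %d\n", anyone_wants(&wl));
        return 0;
    }
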
 /**
  * struct batadv_priv_nc - per mesh interface network coding private data
  * @work: work queue callback item for cleanup
@@ -672,6 +721,8 @@ struct batadv_softif_vlan {
  *  enabled
  * @distributed_arp_table: bool indicating whether distributed ARP table is
  *  enabled
+ * @multicast_mode: Enable or disable multicast optimizations on this node's
+ *  sender/originating side
  * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
  * @gw_sel_class: gateway selection class (applies if gw_mode client)
  * @orig_interval: OGM broadcast interval in milliseconds
@@ -702,6 +753,7 @@ struct batadv_softif_vlan {
  * @tt: translation table data
  * @tvlv: type-version-length-value data
  * @dat: distributed arp table data
+ * @mcast: multicast data
  * @network_coding: bool indicating whether network coding is enabled
  * @batadv_priv_nc: network coding data
  */
@@ -720,6 +772,9 @@ struct batadv_priv {
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_t distributed_arp_table;
+#endif
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       atomic_t multicast_mode;
 #endif
        atomic_t gw_mode;
        atomic_t gw_sel_class;
@@ -759,6 +814,9 @@ struct batadv_priv {
 #ifdef CONFIG_BATMAN_ADV_DAT
        struct batadv_priv_dat dat;
 #endif
+#ifdef CONFIG_BATMAN_ADV_MCAST
+       struct batadv_priv_mcast mcast;
+#endif
 #ifdef CONFIG_BATMAN_ADV_NC
        atomic_t network_coding;
        struct batadv_priv_nc nc;
@@ -881,12 +939,14 @@ struct batadv_tt_local_entry {
  * struct batadv_tt_global_entry - translation table global entry data
  * @common: general translation table data
  * @orig_list: list of orig nodes announcing this non-mesh client
+ * @orig_list_count: number of items in the orig_list
  * @list_lock: lock protecting orig_list
  * @roam_at: time at which TT_GLOBAL_ROAM was set
  */
 struct batadv_tt_global_entry {
        struct batadv_tt_common_entry common;
        struct hlist_head orig_list;
+       atomic_t orig_list_count;
        spinlock_t list_lock;   /* protects orig_list */
        unsigned long roam_at;
 };
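
orig_list_count above is kept in step with orig_list by the add and delete helpers shown earlier in this diff (batadv_tt_global_orig_entry_add() and batadv_tt_global_del_orig_entry()), presumably so that queries such as the newly declared batadv_tt_global_hash_count() need no list walk. A simplified, single-threaded userspace sketch of that bookkeeping, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative: a global TT entry with a list of announcing originators. */
    struct orig_entry {
        struct orig_entry *next;
        int orig_id;
    };

    struct global_entry {
        struct orig_entry *orig_list;
        int orig_list_count;    /* kept in sync by the two helpers below */
    };

    static void orig_entry_add(struct global_entry *g, int orig_id)
    {
        struct orig_entry *e = malloc(sizeof(*e));

        if (!e)
            return;
        e->orig_id = orig_id;
        e->next = g->orig_list;
        g->orig_list = e;
        g->orig_list_count++;       /* every add bumps the counter */
    }

    static void orig_entry_del(struct global_entry *g, int orig_id)
    {
        struct orig_entry **pp = &g->orig_list;

        while (*pp) {
            if ((*pp)->orig_id == orig_id) {
                struct orig_entry *victim = *pp;

                *pp = victim->next;
                free(victim);
                g->orig_list_count--;   /* one place pairs unlink and count */
                return;
            }
            pp = &(*pp)->next;
        }
    }

    int main(void)
    {
        struct global_entry g = { NULL, 0 };

        orig_entry_add(&g, 1);
        orig_entry_add(&g, 2);
        orig_entry_del(&g, 1);
        printf("announcing originators: %d\n", g.orig_list_count);  /* 1 */
        return 0;
    }
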
@@ -1004,8 +1064,8 @@ struct batadv_nc_packet {
 };
 
 /**
- * batadv_skb_cb - control buffer structure used to store private data relevant
- *  to batman-adv in the skb->cb buffer in skbs.
+ * struct batadv_skb_cb - control buffer structure used to store private data
+ *  relevant to batman-adv in the skb->cb buffer in skbs.
  * @decoded: Marks a skb as decoded, which is checked when searching for coding
  *  opportunities in network-coding.c
  */
@@ -1115,6 +1175,16 @@ struct batadv_dat_entry {
        struct rcu_head rcu;
 };
 
+/**
+ * struct batadv_hw_addr - a list entry for a MAC address
+ * @list: list node for the linking of entries
+ * @addr: the MAC address of this list entry
+ */
+struct batadv_hw_addr {
+       struct hlist_node list;
+       unsigned char addr[ETH_ALEN];
+};
+
 /**
  * struct batadv_dat_candidate - candidate destination for DAT operations
  * @type: the type of the selected candidate. It can one of the following:
index adb3ea04adaaa73d3faf4db783f096e91e93321e..73492b91105ac0aba10aec738415128d0582bfc5 100644 (file)
@@ -27,7 +27,7 @@
 
 #include "6lowpan.h"
 
-#include "../ieee802154/6lowpan.h" /* for the compression support */
+#include <net/6lowpan.h> /* for the compression support */
 
 #define IFACE_NAME_TEMPLATE "bt%d"
 #define EUI64_ADDR_LEN 8
index 680eac808d74935b1d6c7c89c8294e8cf42a58e7..5d281f1eaf55d82117ab908928f2277df25f9714 100644 (file)
 #ifndef __6LOWPAN_H
 #define __6LOWPAN_H
 
+#include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <net/bluetooth/l2cap.h>
 
+#if IS_ENABLED(CONFIG_BT_6LOWPAN)
 int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb);
 int bt_6lowpan_add_conn(struct l2cap_conn *conn);
 int bt_6lowpan_del_conn(struct l2cap_conn *conn);
 int bt_6lowpan_init(void);
 void bt_6lowpan_cleanup(void);
+#else
+static inline int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+       return -EOPNOTSUPP;
+}
+static inline int bt_6lowpan_add_conn(struct l2cap_conn *conn)
+{
+       return -EOPNOTSUPP;
+}
+static inline int bt_6lowpan_del_conn(struct l2cap_conn *conn)
+{
+       return -EOPNOTSUPP;
+}
+static inline int bt_6lowpan_init(void)
+{
+       return -EOPNOTSUPP;
+}
+static inline void bt_6lowpan_cleanup(void) { }
+#endif
 
 #endif /* __6LOWPAN_H */
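
The header above compiles the real prototypes when CONFIG_BT_6LOWPAN is enabled and falls back to inline stubs returning -EOPNOTSUPP otherwise, so callers need no #ifdef of their own. A self-contained userspace sketch of the same pattern; FEATURE_FOO and foo_start() are made-up names:

    #include <errno.h>
    #include <stdio.h>

    /* Toggle at build time, e.g. cc -DFEATURE_FOO=1 stub_demo.c */
    #ifndef FEATURE_FOO
    #define FEATURE_FOO 0
    #endif

    #if FEATURE_FOO
    /* The real implementation would live in its own .c file; it is inlined
     * here only to keep the sketch in one translation unit. */
    static int foo_start(void)
    {
        puts("foo started");
        return 0;
    }
    #else
    /* Compiled-out stub: callers keep building, they just get an error code. */
    static inline int foo_start(void)
    {
        return -EOPNOTSUPP;
    }
    #endif

    int main(void)
    {
        int err = foo_start();

        if (err)
            printf("foo unavailable: %d\n", err);
        return 0;
    }

Building with cc -DFEATURE_FOO=1 exercises the real path; building without the define exercises the stub, with no change to the caller.
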
index 985b56070d2608c0e24965aa0c77d7b24498509a..06ec14499ca129d2e5b8833a01b7530bc829802d 100644 (file)
@@ -6,13 +6,13 @@ menuconfig BT
        tristate "Bluetooth subsystem support"
        depends on NET && !S390
        depends on RFKILL || !RFKILL
+       select 6LOWPAN_IPHC if BT_6LOWPAN
        select CRC16
        select CRYPTO
        select CRYPTO_BLKCIPHER
        select CRYPTO_AES
        select CRYPTO_ECB
        select CRYPTO_SHA256
-       select 6LOWPAN_IPHC
        help
          Bluetooth is low-cost, low-power, short-range wireless technology.
          It was designed as a replacement for cables and other short-range
@@ -40,6 +40,12 @@ menuconfig BT
          to Bluetooth kernel modules are provided in the BlueZ packages.  For
          more information, see <http://www.bluez.org/>.
 
+config BT_6LOWPAN
+       bool "Bluetooth 6LoWPAN support"
+       depends on BT && IPV6
+       help
+         IPv6 compression over Bluetooth.
+
 source "net/bluetooth/rfcomm/Kconfig"
 
 source "net/bluetooth/bnep/Kconfig"
index 80cb215826e878b7f9bc306bd02f38e243bf1e4a..ca51246b1016f2496a53220256385f8072c5b360 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_BT_HIDP) += hidp/
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
-       a2mp.o amp.o 6lowpan.o
+       a2mp.o amp.o
+bluetooth-$(CONFIG_BT_6LOWPAN) += 6lowpan.o
 
 subdir-ccflags-y += -D__CHECK_ENDIAN__
index efcd108822c43134e3d64755e24c16410f0598d5..9514cc9e850ca9f4662cc0bedd898f07cc7f96f5 100644 (file)
@@ -162,7 +162,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
                return -ENOMEM;
        }
 
-       rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+       rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
        rsp->ext_feat = 0;
 
        __a2mp_add_cl(mgr, rsp->cl);
@@ -235,7 +235,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
                        BT_DBG("chan %p state %s", chan,
                               state_to_string(chan->state));
 
-                       if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP)
+                       if (chan->scid == L2CAP_CID_A2MP)
                                continue;
 
                        l2cap_chan_lock(chan);
@@ -649,7 +649,7 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
        if (err) {
                struct a2mp_cmd_rej rej;
 
-               rej.reason = __constant_cpu_to_le16(0);
+               rej.reason = cpu_to_le16(0);
                hdr = (void *) skb->data;
 
                BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
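
Two hunks above drop the __constant_cpu_to_le16() spelling in favour of plain cpu_to_le16(), which already folds constant arguments at compile time. The standalone sketch below shows what such a host-to-little-endian 16-bit conversion amounts to; to_le16() is an invented helper, not the kernel macro.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Produce a 16-bit value whose in-memory byte layout is little-endian,
     * independent of the host's own endianness. */
    static uint16_t to_le16(uint16_t host)
    {
        uint8_t bytes[2] = { host & 0xff, host >> 8 };
        uint16_t le;

        memcpy(&le, bytes, sizeof(le));
        return le;
    }

    int main(void)
    {
        uint16_t wire = to_le16(0x1234);
        const uint8_t *p = (const uint8_t *)&wire;

        printf("on the wire: %02x %02x\n", p[0], p[1]);  /* 34 12 */
        return 0;
    }

On a little-endian host to_le16() is effectively a no-op, which is why the kernel's cpu_to_le16() compiles away there.
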
@@ -695,7 +695,13 @@ static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
 static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
                                              unsigned long len, int nb)
 {
-       return bt_skb_alloc(len, GFP_KERNEL);
+       struct sk_buff *skb;
+
+       skb = bt_skb_alloc(len, GFP_KERNEL);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       return skb;
 }
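
a2mp_chan_alloc_skb_cb() above now reports allocation failure as ERR_PTR(-ENOMEM) instead of NULL, so the caller receives a concrete error code through the pointer itself. Below is a userspace sketch of that encoding; err_ptr()/is_err()/ptr_err() are simplified reimplementations for illustration, not the kernel helpers.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Encode small negative error codes in the top of the pointer range,
     * in the style of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    #define MAX_ERRNO   4095

    static inline void *err_ptr(long error)
    {
        return (void *)error;
    }

    static inline int is_err(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static inline long ptr_err(const void *ptr)
    {
        return (long)ptr;
    }

    static void *alloc_buffer(size_t len)
    {
        void *buf = malloc(len);

        if (!buf)
            return err_ptr(-ENOMEM);    /* the error travels in the pointer */
        return buf;
    }

    int main(void)
    {
        void *buf = alloc_buffer(64);

        if (is_err(buf)) {
            printf("allocation failed: %ld\n", ptr_err(buf));
            return 1;
        }
        free(buf);
        puts("allocation ok");
        return 0;
    }

The trick assumes the top 4095 addresses never belong to a valid allocation, which holds in the kernel; in portable userspace code that is a platform assumption.
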
 
 static struct l2cap_ops a2mp_chan_ops = {
@@ -726,7 +732,11 @@ static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
 
        BT_DBG("chan %p", chan);
 
-       chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
+       chan->chan_type = L2CAP_CHAN_FIXED;
+       chan->scid = L2CAP_CID_A2MP;
+       chan->dcid = L2CAP_CID_A2MP;
+       chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+       chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
        chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
 
        chan->ops = &a2mp_chan_ops;
index 0c5866bb49b6fc8933b01d0e5ed0c9d3f8564c63..2021c481cdb657a8011938f5e8486c4c93f0c116 100644 (file)
@@ -31,7 +31,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
-#define VERSION "2.18"
+#define VERSION "2.19"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO   8
index ba5366c320dacc7d4db4659aa144051baf1b035a..d958e2dca52fa5bb4d166e0073fa90d18729a73f 100644 (file)
@@ -82,7 +82,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode     = ie->data.pscan_mode;
                        cp.clock_offset   = ie->data.clock_offset |
-                                           __constant_cpu_to_le16(0x8000);
+                                           cpu_to_le16(0x8000);
                }
 
                memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -182,8 +182,8 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
 
        cp.handle   = cpu_to_le16(handle);
 
-       cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
-       cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
+       cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
+       cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
        cp.voice_setting  = cpu_to_le16(conn->setting);
 
        switch (conn->setting & SCO_AIRMODE_MASK) {
@@ -225,13 +225,13 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
        cp.conn_interval_max    = cpu_to_le16(max);
        cp.conn_latency         = cpu_to_le16(latency);
        cp.supervision_timeout  = cpu_to_le16(to_multiplier);
-       cp.min_ce_len           = __constant_cpu_to_le16(0x0001);
-       cp.max_ce_len           = __constant_cpu_to_le16(0x0001);
+       cp.min_ce_len           = cpu_to_le16(0x0000);
+       cp.max_ce_len           = cpu_to_le16(0x0000);
 
        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
 }
 
-void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
+void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
                      __u8 ltk[16])
 {
        struct hci_dev *hdev = conn->hdev;
@@ -242,9 +242,9 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
        memset(&cp, 0, sizeof(cp));
 
        cp.handle = cpu_to_le16(conn->handle);
-       memcpy(cp.ltk, ltk, sizeof(cp.ltk));
+       cp.rand = rand;
        cp.ediv = ediv;
-       memcpy(cp.rand, rand, sizeof(cp.rand));
+       memcpy(cp.ltk, ltk, sizeof(cp.ltk));
 
        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
 }
@@ -337,9 +337,9 @@ static void hci_conn_idle(struct work_struct *work)
        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle             = cpu_to_le16(conn->handle);
-               cp.max_latency        = __constant_cpu_to_le16(0);
-               cp.min_remote_timeout = __constant_cpu_to_le16(0);
-               cp.min_local_timeout  = __constant_cpu_to_le16(0);
+               cp.max_latency        = cpu_to_le16(0);
+               cp.min_remote_timeout = cpu_to_le16(0);
+               cp.min_local_timeout  = cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }
 
@@ -348,8 +348,8 @@ static void hci_conn_idle(struct work_struct *work)
                cp.handle       = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
-               cp.attempt      = __constant_cpu_to_le16(4);
-               cp.timeout      = __constant_cpu_to_le16(1);
+               cp.attempt      = cpu_to_le16(4);
+               cp.timeout      = cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
 }
@@ -363,6 +363,16 @@ static void hci_conn_auto_accept(struct work_struct *work)
                     &conn->dst);
 }
 
+static void le_conn_timeout(struct work_struct *work)
+{
+       struct hci_conn *conn = container_of(work, struct hci_conn,
+                                            le_conn_timeout.work);
+
+       BT_DBG("");
+
+       hci_le_create_connection_cancel(conn);
+}
+
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 {
        struct hci_conn *conn;
@@ -410,6 +420,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
        INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
+       INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
 
        atomic_set(&conn->refcnt, 0);
 
@@ -442,6 +453,8 @@ int hci_conn_del(struct hci_conn *conn)
                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
+               cancel_delayed_work_sync(&conn->le_conn_timeout);
+
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
@@ -514,6 +527,26 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 }
 EXPORT_SYMBOL(hci_get_route);
 
+/* This function requires the caller holds hdev->lock */
+void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+{
+       struct hci_dev *hdev = conn->hdev;
+
+       conn->state = BT_CLOSED;
+
+       mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+                           status);
+
+       hci_proto_connect_cfm(conn, status);
+
+       hci_conn_del(conn);
+
+       /* Since we may have temporarily stopped the background scanning in
+        * favor of connection establishment, we should restart it.
+        */
+       hci_update_background_scan(hdev);
+}
+
 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
 {
        struct hci_conn *conn;
@@ -530,55 +563,55 @@ static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
        if (!conn)
                goto done;
 
-       conn->state = BT_CLOSED;
-
-       mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
-                           status);
-
-       hci_proto_connect_cfm(conn, status);
-
-       hci_conn_del(conn);
+       hci_le_conn_failed(conn, status);
 
 done:
        hci_dev_unlock(hdev);
 }
 
-static int hci_create_le_conn(struct hci_conn *conn)
+static void hci_req_add_le_create_conn(struct hci_request *req,
+                                      struct hci_conn *conn)
 {
-       struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;
-       struct hci_request req;
-       int err;
-
-       hci_req_init(&req, hdev);
+       struct hci_dev *hdev = conn->hdev;
+       u8 own_addr_type;
 
        memset(&cp, 0, sizeof(cp));
+
+       /* Update random address, but set require_privacy to false so
+        * that we never connect with an unresolvable address.
+        */
+       if (hci_update_random_address(req, false, &own_addr_type))
+               return;
+
+       /* Save the address type used for this connection attempt so we are
+        * able to retrieve this information if we need it.
+        */
+       conn->src_type = own_addr_type;
+
        cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
        cp.scan_window = cpu_to_le16(hdev->le_scan_window);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
-       cp.own_address_type = conn->src_type;
-       cp.conn_interval_min = cpu_to_le16(hdev->le_conn_min_interval);
-       cp.conn_interval_max = cpu_to_le16(hdev->le_conn_max_interval);
-       cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
-       cp.min_ce_len = __constant_cpu_to_le16(0x0000);
-       cp.max_ce_len = __constant_cpu_to_le16(0x0000);
+       cp.own_address_type = own_addr_type;
+       cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+       cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+       cp.supervision_timeout = cpu_to_le16(0x002a);
+       cp.min_ce_len = cpu_to_le16(0x0000);
+       cp.max_ce_len = cpu_to_le16(0x0000);
 
-       hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+       hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 
-       err = hci_req_run(&req, create_le_conn_complete);
-       if (err) {
-               hci_conn_del(conn);
-               return err;
-       }
-
-       return 0;
+       conn->state = BT_CONNECT;
 }
 
-static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
-                                   u8 dst_type, u8 sec_level, u8 auth_type)
+struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
+                               u8 dst_type, u8 sec_level, u8 auth_type)
 {
+       struct hci_conn_params *params;
        struct hci_conn *conn;
+       struct smp_irk *irk;
+       struct hci_request req;
        int err;
 
        if (test_bit(HCI_ADVERTISING, &hdev->flags))
@@ -607,35 +640,74 @@ static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        if (conn)
                return ERR_PTR(-EBUSY);
 
+       /* When given an identity address with existing identity
+        * resolving key, the connection needs to be established
+        * to a resolvable random address.
+        *
+        * This uses the cached random resolvable address from
+        * a previous scan. When no cached address is available,
+        * try connecting to the identity address instead.
+        *
+        * Storing the resolvable random address is required here
+        * to handle connection failures. The address will later
+        * be resolved back into the original identity address
+        * from the connect request.
+        */
+       irk = hci_find_irk_by_addr(hdev, dst, dst_type);
+       if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
+               dst = &irk->rpa;
+               dst_type = ADDR_LE_DEV_RANDOM;
+       }
+
        conn = hci_conn_add(hdev, LE_LINK, dst);
        if (!conn)
                return ERR_PTR(-ENOMEM);
 
-       if (dst_type == BDADDR_LE_PUBLIC)
-               conn->dst_type = ADDR_LE_DEV_PUBLIC;
-       else
-               conn->dst_type = ADDR_LE_DEV_RANDOM;
-
-       conn->src_type = hdev->own_addr_type;
+       conn->dst_type = dst_type;
 
-       conn->state = BT_CONNECT;
        conn->out = true;
        conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;
        conn->pending_sec_level = sec_level;
        conn->auth_type = auth_type;
 
-       err = hci_create_le_conn(conn);
-       if (err)
+       params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+       if (params) {
+               conn->le_conn_min_interval = params->conn_min_interval;
+               conn->le_conn_max_interval = params->conn_max_interval;
+       } else {
+               conn->le_conn_min_interval = hdev->le_conn_min_interval;
+               conn->le_conn_max_interval = hdev->le_conn_max_interval;
+       }
+
+       hci_req_init(&req, hdev);
+
+       /* If controller is scanning, we stop it since some controllers are
+        * not able to scan and connect at the same time. Also set the
+        * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
+        * handler for scan disabling knows to set the correct discovery
+        * state.
+        */
+       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+               hci_req_add_le_scan_disable(&req);
+               set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
+       }
+
+       hci_req_add_le_create_conn(&req, conn);
+
+       err = hci_req_run(&req, create_le_conn_complete);
+       if (err) {
+               hci_conn_del(conn);
                return ERR_PTR(err);
+       }
 
 done:
        hci_conn_hold(conn);
        return conn;
 }
 
-static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
-                                               u8 sec_level, u8 auth_type)
+struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
+                                u8 sec_level, u8 auth_type)
 {
        struct hci_conn *acl;
 
@@ -704,27 +776,22 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
        return sco;
 }
 
-/* Create SCO, ACL or LE connection. */
-struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
-                            __u8 dst_type, __u8 sec_level, __u8 auth_type)
-{
-       BT_DBG("%s dst %pMR type 0x%x", hdev->name, dst, type);
-
-       switch (type) {
-       case LE_LINK:
-               return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
-       case ACL_LINK:
-               return hci_connect_acl(hdev, dst, sec_level, auth_type);
-       }
-
-       return ERR_PTR(-EINVAL);
-}
-
 /* Check link security requirement */
 int hci_conn_check_link_mode(struct hci_conn *conn)
 {
        BT_DBG("hcon %p", conn);
 
+       /* In Secure Connections Only mode, it is required that Secure
+        * Connections is used and the link is encrypted with AES-CCM
+        * using a P-256 authenticated combination key.
+        */
+       if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
+               if (!hci_conn_sc_enabled(conn) ||
+                   !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
+                   conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
+                       return 0;
+       }
+
        if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;
 
@@ -800,14 +867,23 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
        if (!(conn->link_mode & HCI_LM_AUTH))
                goto auth;
 
-       /* An authenticated combination key has sufficient security for any
-          security level. */
-       if (conn->key_type == HCI_LK_AUTH_COMBINATION)
+       /* An authenticated FIPS approved combination key has sufficient
+        * security for security level 4. */
+       if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
+           sec_level == BT_SECURITY_FIPS)
+               goto encrypt;
+
+       /* An authenticated combination key has sufficient security for
+          security level 3. */
+       if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
+            conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
+           sec_level == BT_SECURITY_HIGH)
                goto encrypt;
 
        /* An unauthenticated combination key has sufficient security for
           security level 1 and 2. */
-       if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
+       if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
+            conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
            (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
                goto encrypt;
 
@@ -816,7 +892,8 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
           is generated using maximum PIN code length (16).
           For pre 2.1 units. */
        if (conn->key_type == HCI_LK_COMBINATION &&
-           (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
+           (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
+            conn->pin_length == 16))
                goto encrypt;
 
 auth:
@@ -840,13 +917,17 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
 {
        BT_DBG("hcon %p", conn);
 
-       if (sec_level != BT_SECURITY_HIGH)
-               return 1; /* Accept if non-secure is required */
+       /* Accept if non-secure or higher security level is required */
+       if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
+               return 1;
 
-       if (conn->sec_level == BT_SECURITY_HIGH)
+       /* Accept if secure or higher security level is already present */
+       if (conn->sec_level == BT_SECURITY_HIGH ||
+           conn->sec_level == BT_SECURITY_FIPS)
                return 1;
 
-       return 0; /* Reject not secure link */
+       /* Reject not secure link */
+       return 0;
 }
 EXPORT_SYMBOL(hci_conn_check_secure);
 
index 5e8663c194c18e3c29ab221efe667dd311944294..1c6ffaa8902f5e9fe32a86f2a19cf9073d6f0dae 100644 (file)
 #include <linux/idr.h>
 #include <linux/rfkill.h>
 #include <linux/debugfs.h>
+#include <linux/crypto.h>
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
+#include "smp.h"
+
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
 static void hci_tx_work(struct work_struct *work);
@@ -285,24 +288,6 @@ static const struct file_operations link_keys_fops = {
        .release        = single_release,
 };
 
-static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
-                                  size_t count, loff_t *ppos)
-{
-       struct hci_dev *hdev = file->private_data;
-       char buf[3];
-
-       buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
-       buf[1] = '\n';
-       buf[2] = '\0';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static const struct file_operations use_debug_keys_fops = {
-       .open           = simple_open,
-       .read           = use_debug_keys_read,
-       .llseek         = default_llseek,
-};
-
 static int dev_class_show(struct seq_file *f, void *ptr)
 {
        struct hci_dev *hdev = f->private;
@@ -415,6 +400,70 @@ static int ssp_debug_mode_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");
 
+static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_sc_support_write(struct file *file,
+                                     const char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+
+       if (test_bit(HCI_UP, &hdev->flags))
+               return -EBUSY;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
+               return -EINVAL;
+
+       if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+               return -EALREADY;
+
+       change_bit(HCI_FORCE_SC, &hdev->dev_flags);
+
+       return count;
+}
+
+static const struct file_operations force_sc_support_fops = {
+       .open           = simple_open,
+       .read           = force_sc_support_read,
+       .write          = force_sc_support_write,
+       .llseek         = default_llseek,
+};
+
+static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations sc_only_mode_fops = {
+       .open           = simple_open,
+       .read           = sc_only_mode_read,
+       .llseek         = default_llseek,
+};
+
 static int idle_timeout_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
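
The force_sc_support and force_static_address debugfs writers above copy a short string from userspace and hand it to strtobool() before flipping a flag. The sketch below reimplements such a first-character boolean parser for illustration; parse_bool() is an invented name, not the kernel's strtobool().

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Parse a user-supplied boolean: the first character decides, anything
     * else is rejected so typos do not silently flip the flag. */
    static int parse_bool(const char *s, bool *res)
    {
        switch (s[0]) {
        case 'y': case 'Y': case '1':
            *res = true;
            return 0;
        case 'n': case 'N': case '0':
            *res = false;
            return 0;
        default:
            return -EINVAL;
        }
    }

    int main(void)
    {
        bool enable;

        if (parse_bool("Y\n", &enable) == 0)
            printf("enable = %d\n", enable);
        if (parse_bool("maybe", &enable) != 0)
            puts("rejected invalid input");
        return 0;
    }
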
@@ -443,6 +492,37 @@ static int idle_timeout_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");
 
+static int rpa_timeout_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       /* Require the RPA timeout to be at least 30 seconds and at most
+        * 24 hours.
+        */
+       if (val < 30 || val > (60 * 60 * 24))
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->rpa_timeout = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int rpa_timeout_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->rpa_timeout;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
+                       rpa_timeout_set, "%llu\n");
+
 static int sniff_min_interval_set(void *data, u64 val)
 {
        struct hci_dev *hdev = data;
@@ -499,6 +579,59 @@ static int sniff_max_interval_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
 
+static int identity_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       bdaddr_t addr;
+       u8 addr_type;
+
+       hci_dev_lock(hdev);
+
+       hci_copy_identity_address(hdev, &addr, &addr_type);
+
+       seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
+                  16, hdev->irk, &hdev->rpa);
+
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int identity_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, identity_show, inode->i_private);
+}
+
+static const struct file_operations identity_fops = {
+       .open           = identity_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int random_address_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "%pMR\n", &hdev->random_addr);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int random_address_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, random_address_show, inode->i_private);
+}
+
+static const struct file_operations random_address_fops = {
+       .open           = random_address_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int static_address_show(struct seq_file *f, void *p)
 {
        struct hci_dev *hdev = f->private;
@@ -522,33 +655,107 @@ static const struct file_operations static_address_fops = {
        .release        = single_release,
 };
 
-static int own_address_type_set(void *data, u64 val)
+static ssize_t force_static_address_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
 {
-       struct hci_dev *hdev = data;
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
 
-       if (val != 0 && val != 1)
+       buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_static_address_write(struct file *file,
+                                         const char __user *user_buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+
+       if (test_bit(HCI_UP, &hdev->flags))
+               return -EBUSY;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
                return -EINVAL;
 
+       if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
+               return -EALREADY;
+
+       change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
+
+       return count;
+}
+
+static const struct file_operations force_static_address_fops = {
+       .open           = simple_open,
+       .read           = force_static_address_read,
+       .write          = force_static_address_write,
+       .llseek         = default_llseek,
+};
+
+static int white_list_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct bdaddr_list *b;
+
        hci_dev_lock(hdev);
-       hdev->own_addr_type = val;
+       list_for_each_entry(b, &hdev->le_white_list, list)
+               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);
 
        return 0;
 }
 
-static int own_address_type_get(void *data, u64 *val)
+static int white_list_open(struct inode *inode, struct file *file)
 {
-       struct hci_dev *hdev = data;
+       return single_open(file, white_list_show, inode->i_private);
+}
+
+static const struct file_operations white_list_fops = {
+       .open           = white_list_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct list_head *p, *n;
 
        hci_dev_lock(hdev);
-       *val = hdev->own_addr_type;
+       list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
+               struct smp_irk *irk = list_entry(p, struct smp_irk, list);
+               seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
+                          &irk->bdaddr, irk->addr_type,
+                          16, irk->val, &irk->rpa);
+       }
        hci_dev_unlock(hdev);
 
        return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
-                       own_address_type_set, "%llu\n");
+static int identity_resolving_keys_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, identity_resolving_keys_show,
+                          inode->i_private);
+}
+
+static const struct file_operations identity_resolving_keys_fops = {
+       .open           = identity_resolving_keys_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
 static int long_term_keys_show(struct seq_file *f, void *ptr)
 {
@@ -556,12 +763,12 @@ static int long_term_keys_show(struct seq_file *f, void *ptr)
        struct list_head *p, *n;
 
        hci_dev_lock(hdev);
-       list_for_each_safe(p, n, &hdev->link_keys) {
+       list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
-       seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
+               seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
-                          8, ltk->rand, 16, ltk->val);
+                          __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);
 
@@ -636,6 +843,34 @@ static int conn_max_interval_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
 
+static int adv_channel_map_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x01 || val > 0x07)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_adv_channel_map = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int adv_channel_map_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_adv_channel_map;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
+                       adv_channel_map_set, "%llu\n");
+
 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
 {
@@ -679,65 +914,174 @@ static const struct file_operations lowpan_debugfs_fops = {
        .llseek         = default_llseek,
 };
 
-/* ---- HCI requests ---- */
-
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
+static int le_auto_conn_show(struct seq_file *sf, void *ptr)
 {
-       BT_DBG("%s result 0x%2.2x", hdev->name, result);
+       struct hci_dev *hdev = sf->private;
+       struct hci_conn_params *p;
 
-       if (hdev->req_status == HCI_REQ_PEND) {
-               hdev->req_result = result;
-               hdev->req_status = HCI_REQ_DONE;
-               wake_up_interruptible(&hdev->req_wait_q);
+       hci_dev_lock(hdev);
+
+       list_for_each_entry(p, &hdev->le_conn_params, list) {
+               seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
+                          p->auto_connect);
        }
-}
 
-static void hci_req_cancel(struct hci_dev *hdev, int err)
-{
-       BT_DBG("%s err 0x%2.2x", hdev->name, err);
+       hci_dev_unlock(hdev);
 
-       if (hdev->req_status == HCI_REQ_PEND) {
-               hdev->req_result = err;
-               hdev->req_status = HCI_REQ_CANCELED;
-               wake_up_interruptible(&hdev->req_wait_q);
-       }
+       return 0;
 }
 
-static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
-                                           u8 event)
+static int le_auto_conn_open(struct inode *inode, struct file *file)
 {
-       struct hci_ev_cmd_complete *ev;
-       struct hci_event_hdr *hdr;
-       struct sk_buff *skb;
+       return single_open(file, le_auto_conn_show, inode->i_private);
+}
 
-       hci_dev_lock(hdev);
+static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
+                                 size_t count, loff_t *offset)
+{
+       struct seq_file *sf = file->private_data;
+       struct hci_dev *hdev = sf->private;
+       u8 auto_connect = 0;
+       bdaddr_t addr;
+       u8 addr_type;
+       char *buf;
+       int err = 0;
+       int n;
 
-       skb = hdev->recv_evt;
-       hdev->recv_evt = NULL;
+       /* Don't allow partial write */
+       if (*offset != 0)
+               return -EINVAL;
 
-       hci_dev_unlock(hdev);
+       if (count < 3)
+               return -EINVAL;
 
-       if (!skb)
-               return ERR_PTR(-ENODATA);
+       buf = kzalloc(count, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
 
-       if (skb->len < sizeof(*hdr)) {
-               BT_ERR("Too short HCI event");
-               goto failed;
+       if (copy_from_user(buf, data, count)) {
+               err = -EFAULT;
+               goto done;
        }
 
-       hdr = (void *) skb->data;
-       skb_pull(skb, HCI_EVENT_HDR_SIZE);
+       if (memcmp(buf, "add", 3) == 0) {
+               n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
+                          &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
+                          &addr.b[1], &addr.b[0], &addr_type,
+                          &auto_connect);
 
-       if (event) {
-               if (hdr->evt != event)
-                       goto failed;
-               return skb;
-       }
+               if (n < 7) {
+                       err = -EINVAL;
+                       goto done;
+               }
 
-       if (hdr->evt != HCI_EV_CMD_COMPLETE) {
-               BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
-               goto failed;
-       }
+               hci_dev_lock(hdev);
+               err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
+                                         hdev->le_conn_min_interval,
+                                         hdev->le_conn_max_interval);
+               hci_dev_unlock(hdev);
+
+               if (err)
+                       goto done;
+       } else if (memcmp(buf, "del", 3) == 0) {
+               n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
+                          &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
+                          &addr.b[1], &addr.b[0], &addr_type);
+
+               if (n < 7) {
+                       err = -EINVAL;
+                       goto done;
+               }
+
+               hci_dev_lock(hdev);
+               hci_conn_params_del(hdev, &addr, addr_type);
+               hci_dev_unlock(hdev);
+       } else if (memcmp(buf, "clr", 3) == 0) {
+               hci_dev_lock(hdev);
+               hci_conn_params_clear(hdev);
+               hci_pend_le_conns_clear(hdev);
+               hci_update_background_scan(hdev);
+               hci_dev_unlock(hdev);
+       } else {
+               err = -EINVAL;
+       }
+
+done:
+       kfree(buf);
+
+       if (err)
+               return err;
+       else
+               return count;
+}
+
+static const struct file_operations le_auto_conn_fops = {
+       .open           = le_auto_conn_open,
+       .read           = seq_read,
+       .write          = le_auto_conn_write,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
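The le_auto_conn debugfs entry accepts "add", "del" and "clr" commands in exactly the format parsed by the sscanf calls above. A minimal userspace sketch of driving it (the debugfs mount point, the hci0 name and the addr_type/auto_connect values 0 and 2 are illustrative assumptions, not defined by this patch):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                /* "add <bdaddr> <addr_type> <auto_connect>", matching the
                 * sscanf format in le_auto_conn_write(); the values are
                 * only illustrative. sizeof() includes the trailing NUL so
                 * the kernel-side buffer is terminated for sscanf().
                 */
                const char cmd[] = "add 00:11:22:33:44:55 0 2";
                int fd = open("/sys/kernel/debug/bluetooth/hci0/le_auto_conn",
                              O_WRONLY);

                if (fd < 0 || write(fd, cmd, sizeof(cmd)) < 0)
                        perror("le_auto_conn");
                if (fd >= 0)
                        close(fd);
                return 0;
        }

On success the write handler returns the full count; reading the same file goes through le_auto_conn_show() via the seq_file interface.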
+
+/* ---- HCI requests ---- */
+
+static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
+{
+       BT_DBG("%s result 0x%2.2x", hdev->name, result);
+
+       if (hdev->req_status == HCI_REQ_PEND) {
+               hdev->req_result = result;
+               hdev->req_status = HCI_REQ_DONE;
+               wake_up_interruptible(&hdev->req_wait_q);
+       }
+}
+
+static void hci_req_cancel(struct hci_dev *hdev, int err)
+{
+       BT_DBG("%s err 0x%2.2x", hdev->name, err);
+
+       if (hdev->req_status == HCI_REQ_PEND) {
+               hdev->req_result = err;
+               hdev->req_status = HCI_REQ_CANCELED;
+               wake_up_interruptible(&hdev->req_wait_q);
+       }
+}
+
+static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+                                           u8 event)
+{
+       struct hci_ev_cmd_complete *ev;
+       struct hci_event_hdr *hdr;
+       struct sk_buff *skb;
+
+       hci_dev_lock(hdev);
+
+       skb = hdev->recv_evt;
+       hdev->recv_evt = NULL;
+
+       hci_dev_unlock(hdev);
+
+       if (!skb)
+               return ERR_PTR(-ENODATA);
+
+       if (skb->len < sizeof(*hdr)) {
+               BT_ERR("Too short HCI event");
+               goto failed;
+       }
+
+       hdr = (void *) skb->data;
+       skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
+       if (event) {
+               if (hdr->evt != event)
+                       goto failed;
+               return skb;
+       }
+
+       if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+               BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
+               goto failed;
+       }
 
        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
@@ -1005,7 +1349,7 @@ static void bredr_setup(struct hci_request *req)
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
 
        /* Connection accept timeout ~20 secs */
-       param = __constant_cpu_to_le16(0x7d00);
+       param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
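The replaced constant keeps the value itself: the connection accept timeout is expressed in 0.625 ms baseband slots, and 0x7d00 = 32000 slots, 32000 * 0.625 ms = 20000 ms, i.e. the ~20 seconds noted in the comment; only the unnecessary __constant_ prefix is dropped.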
 
        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
@@ -1027,14 +1371,17 @@ static void le_setup(struct hci_request *req)
        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
 
+       /* Read LE Supported States */
+       hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
+
        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
 
        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
 
-       /* Read LE Supported States */
-       hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
+       /* Clear LE White List */
+       hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
 
        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
@@ -1288,6 +1635,10 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
                events[2] |= 0x08;      /* Truncated Page Complete */
        }
 
+       /* Enable Authenticated Payload Timeout Expired event if supported */
+       if (lmp_ping_capable(hdev))
+               events[2] |= 0x80;
+
        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
 }
 
@@ -1322,21 +1673,8 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);
 
-       if (lmp_le_capable(hdev)) {
-               if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
-                       /* If the controller has a public BD_ADDR, then
-                        * by default use that one. If this is a LE only
-                        * controller without a public address, default
-                        * to the random address.
-                        */
-                       if (bacmp(&hdev->bdaddr, BDADDR_ANY))
-                               hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
-                       else
-                               hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
-               }
-
+       if (lmp_le_capable(hdev))
                hci_set_le_support(req);
-       }
 
        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
@@ -1359,6 +1697,15 @@ static void hci_init4_req(struct hci_request *req, unsigned long opt)
        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
+
+       /* Enable Secure Connections if supported and configured */
+       if ((lmp_sc_capable(hdev) ||
+            test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
+           test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
+               u8 support = 0x01;
+               hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
+                           sizeof(support), &support);
+       }
 }
 
 static int __hci_init(struct hci_dev *hdev)
@@ -1417,8 +1764,6 @@ static int __hci_init(struct hci_dev *hdev)
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
-               debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
-                                   hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
@@ -1430,6 +1775,10 @@ static int __hci_init(struct hci_dev *hdev)
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
+               debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
+                                   hdev, &force_sc_support_fops);
+               debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
+                                   hdev, &sc_only_mode_fops);
        }
 
        if (lmp_sniff_capable(hdev)) {
@@ -1442,20 +1791,43 @@ static int __hci_init(struct hci_dev *hdev)
        }
 
        if (lmp_le_capable(hdev)) {
+               debugfs_create_file("identity", 0400, hdev->debugfs,
+                                   hdev, &identity_fops);
+               debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
+                                   hdev, &rpa_timeout_fops);
+               debugfs_create_file("random_address", 0444, hdev->debugfs,
+                                   hdev, &random_address_fops);
+               debugfs_create_file("static_address", 0444, hdev->debugfs,
+                                   hdev, &static_address_fops);
+
+               /* For controllers with a public address, provide a debug
+                * option to force the usage of the configured static
+                * address. By default the public address is used.
+                */
+               if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+                       debugfs_create_file("force_static_address", 0644,
+                                           hdev->debugfs, hdev,
+                                           &force_static_address_fops);
+
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
-               debugfs_create_file("static_address", 0444, hdev->debugfs,
-                                  hdev, &static_address_fops);
-               debugfs_create_file("own_address_type", 0644, hdev->debugfs,
-                                   hdev, &own_address_type_fops);
+               debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
+                                   &white_list_fops);
+               debugfs_create_file("identity_resolving_keys", 0400,
+                                   hdev->debugfs, hdev,
+                                   &identity_resolving_keys_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
+               debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
+                                   hdev, &adv_channel_map_fops);
                debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
                                    &lowpan_debugfs_fops);
+               debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
+                                   &le_auto_conn_fops);
        }
 
        return 0;
@@ -1548,6 +1920,8 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
 
        switch (state) {
        case DISCOVERY_STOPPED:
+               hci_update_background_scan(hdev);
+
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
@@ -1876,10 +2250,15 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                 * be able to determine if there is a public address
                 * or not.
                 *
+                * In case of user channel usage, it is not important
+                * if a public address or static random address is
+                * available.
+                *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
-               if (hdev->dev_type == HCI_BREDR &&
+               if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+                   hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
@@ -1916,6 +2295,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
 
        if (!ret) {
                hci_dev_hold(hdev);
+               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
@@ -2014,9 +2394,13 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        cancel_delayed_work_sync(&hdev->le_scan_disable);
 
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               cancel_delayed_work_sync(&hdev->rpa_expired);
+
        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
+       hci_pend_le_conns_clear(hdev);
        hci_dev_unlock(hdev);
 
        hci_notify(hdev, HCI_DEV_DOWN);
@@ -2074,6 +2458,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+       bacpy(&hdev->random_addr, BDADDR_ANY);
 
        hci_req_unlock(hdev);
 
@@ -2437,7 +2822,7 @@ static void hci_discov_off(struct work_struct *work)
        mgmt_discoverable_timeout(hdev);
 }
 
-int hci_uuids_clear(struct hci_dev *hdev)
+void hci_uuids_clear(struct hci_dev *hdev)
 {
        struct bt_uuid *uuid, *tmp;
 
@@ -2445,11 +2830,9 @@ int hci_uuids_clear(struct hci_dev *hdev)
                list_del(&uuid->list);
                kfree(uuid);
        }
-
-       return 0;
 }
 
-int hci_link_keys_clear(struct hci_dev *hdev)
+void hci_link_keys_clear(struct hci_dev *hdev)
 {
        struct list_head *p, *n;
 
@@ -2461,11 +2844,9 @@ int hci_link_keys_clear(struct hci_dev *hdev)
                list_del(p);
                kfree(key);
        }
-
-       return 0;
 }
 
-int hci_smp_ltks_clear(struct hci_dev *hdev)
+void hci_smp_ltks_clear(struct hci_dev *hdev)
 {
        struct smp_ltk *k, *tmp;
 
@@ -2473,8 +2854,16 @@ int hci_smp_ltks_clear(struct hci_dev *hdev)
                list_del(&k->list);
                kfree(k);
        }
+}
 
-       return 0;
+void hci_smp_irks_clear(struct hci_dev *hdev)
+{
+       struct smp_irk *k, *tmp;
+
+       list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+               list_del(&k->list);
+               kfree(k);
+       }
 }
 
 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
@@ -2524,13 +2913,24 @@ static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
        return false;
 }
 
-struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
+static bool ltk_type_master(u8 type)
+{
+       if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
+               return true;
+
+       return false;
+}
+
+struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
+                            bool master)
 {
        struct smp_ltk *k;
 
        list_for_each_entry(k, &hdev->long_term_keys, list) {
-               if (k->ediv != ediv ||
-                   memcmp(rand, k->rand, sizeof(k->rand)))
+               if (k->ediv != ediv || k->rand != rand)
+                       continue;
+
+               if (ltk_type_master(k->type) != master)
                        continue;
 
                return k;
@@ -2540,18 +2940,56 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
 }
 
 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                    u8 addr_type)
+                                    u8 addr_type, bool master)
 {
        struct smp_ltk *k;
 
        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
-                   bacmp(bdaddr, &k->bdaddr) == 0)
+                   bacmp(bdaddr, &k->bdaddr) == 0 &&
+                   ltk_type_master(k->type) == master)
                        return k;
 
        return NULL;
 }
 
+struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
+{
+       struct smp_irk *irk;
+
+       list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+               if (!bacmp(&irk->rpa, rpa))
+                       return irk;
+       }
+
+       list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+               if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
+                       bacpy(&irk->rpa, rpa);
+                       return irk;
+               }
+       }
+
+       return NULL;
+}
+
+struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                    u8 addr_type)
+{
+       struct smp_irk *irk;
+
+       /* Identity Address must be public or static random */
+       if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
+               return NULL;
+
+       list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
+               if (addr_type == irk->addr_type &&
+                   bacmp(bdaddr, &irk->bdaddr) == 0)
+                       return irk;
+       }
+
+       return NULL;
+}
+
 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
@@ -2565,7 +3003,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
-               key = kzalloc(sizeof(*key), GFP_ATOMIC);
+               key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
@@ -2605,222 +3043,537 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
        return 0;
 }
 
-int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
-               int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
-               ediv, u8 rand[8])
+struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 addr_type, u8 type, u8 authenticated,
+                           u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
 {
        struct smp_ltk *key, *old_key;
+       bool master = ltk_type_master(type);
 
-       if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
-               return 0;
-
-       old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
+       old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
        if (old_key)
                key = old_key;
        else {
-               key = kzalloc(sizeof(*key), GFP_ATOMIC);
+               key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
-                       return -ENOMEM;
+                       return NULL;
                list_add(&key->list, &hdev->long_term_keys);
        }
 
-       bacpy(&key->bdaddr, bdaddr);
-       key->bdaddr_type = addr_type;
-       memcpy(key->val, tk, sizeof(key->val));
-       key->authenticated = authenticated;
-       key->ediv = ediv;
-       key->enc_size = enc_size;
-       key->type = type;
-       memcpy(key->rand, rand, sizeof(key->rand));
+       bacpy(&key->bdaddr, bdaddr);
+       key->bdaddr_type = addr_type;
+       memcpy(key->val, tk, sizeof(key->val));
+       key->authenticated = authenticated;
+       key->ediv = ediv;
+       key->rand = rand;
+       key->enc_size = enc_size;
+       key->type = type;
+
+       return key;
+}
+
+struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 addr_type, u8 val[16], bdaddr_t *rpa)
+{
+       struct smp_irk *irk;
+
+       irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
+       if (!irk) {
+               irk = kzalloc(sizeof(*irk), GFP_KERNEL);
+               if (!irk)
+                       return NULL;
+
+               bacpy(&irk->bdaddr, bdaddr);
+               irk->addr_type = addr_type;
+
+               list_add(&irk->list, &hdev->identity_resolving_keys);
+       }
+
+       memcpy(irk->val, val, 16);
+       bacpy(&irk->rpa, rpa);
+
+       return irk;
+}
+
+int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct link_key *key;
+
+       key = hci_find_link_key(hdev, bdaddr);
+       if (!key)
+               return -ENOENT;
+
+       BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+       list_del(&key->list);
+       kfree(key);
+
+       return 0;
+}
+
+int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+       struct smp_ltk *k, *tmp;
+       int removed = 0;
+
+       list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
+               if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
+                       continue;
+
+               BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+               list_del(&k->list);
+               kfree(k);
+               removed++;
+       }
+
+       return removed ? 0 : -ENOENT;
+}
+
+void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
+{
+       struct smp_irk *k, *tmp;
+
+       list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
+               if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
+                       continue;
+
+               BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+               list_del(&k->list);
+               kfree(k);
+       }
+}
+
+/* HCI command timer function */
+static void hci_cmd_timeout(unsigned long arg)
+{
+       struct hci_dev *hdev = (void *) arg;
+
+       if (hdev->sent_cmd) {
+               struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+               u16 opcode = __le16_to_cpu(sent->opcode);
+
+               BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
+       } else {
+               BT_ERR("%s command tx timeout", hdev->name);
+       }
+
+       atomic_set(&hdev->cmd_cnt, 1);
+       queue_work(hdev->workqueue, &hdev->cmd_work);
+}
+
+struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
+                                         bdaddr_t *bdaddr)
+{
+       struct oob_data *data;
+
+       list_for_each_entry(data, &hdev->remote_oob_data, list)
+               if (bacmp(bdaddr, &data->bdaddr) == 0)
+                       return data;
+
+       return NULL;
+}
+
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+       struct oob_data *data;
+
+       data = hci_find_remote_oob_data(hdev, bdaddr);
+       if (!data)
+               return -ENOENT;
+
+       BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+
+       list_del(&data->list);
+       kfree(data);
+
+       return 0;
+}
+
+void hci_remote_oob_data_clear(struct hci_dev *hdev)
+{
+       struct oob_data *data, *n;
+
+       list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
+               list_del(&data->list);
+               kfree(data);
+       }
+}
+
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 *hash, u8 *randomizer)
+{
+       struct oob_data *data;
+
+       data = hci_find_remote_oob_data(hdev, bdaddr);
+       if (!data) {
+               data = kmalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       return -ENOMEM;
+
+               bacpy(&data->bdaddr, bdaddr);
+               list_add(&data->list, &hdev->remote_oob_data);
+       }
+
+       memcpy(data->hash192, hash, sizeof(data->hash192));
+       memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
+
+       memset(data->hash256, 0, sizeof(data->hash256));
+       memset(data->randomizer256, 0, sizeof(data->randomizer256));
+
+       BT_DBG("%s for %pMR", hdev->name, bdaddr);
+
+       return 0;
+}
+
+int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                               u8 *hash192, u8 *randomizer192,
+                               u8 *hash256, u8 *randomizer256)
+{
+       struct oob_data *data;
+
+       data = hci_find_remote_oob_data(hdev, bdaddr);
+       if (!data) {
+               data = kmalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       return -ENOMEM;
+
+               bacpy(&data->bdaddr, bdaddr);
+               list_add(&data->list, &hdev->remote_oob_data);
+       }
+
+       memcpy(data->hash192, hash192, sizeof(data->hash192));
+       memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
+
+       memcpy(data->hash256, hash256, sizeof(data->hash256));
+       memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
+
+       BT_DBG("%s for %pMR", hdev->name, bdaddr);
+
+       return 0;
+}
+
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+                                        bdaddr_t *bdaddr, u8 type)
+{
+       struct bdaddr_list *b;
+
+       list_for_each_entry(b, &hdev->blacklist, list) {
+               if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
+                       return b;
+       }
+
+       return NULL;
+}
+
+static void hci_blacklist_clear(struct hci_dev *hdev)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &hdev->blacklist) {
+               struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
+
+               list_del(p);
+               kfree(b);
+       }
+}
+
+int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+       struct bdaddr_list *entry;
+
+       if (!bacmp(bdaddr, BDADDR_ANY))
+               return -EBADF;
+
+       if (hci_blacklist_lookup(hdev, bdaddr, type))
+               return -EEXIST;
+
+       entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       bacpy(&entry->bdaddr, bdaddr);
+       entry->bdaddr_type = type;
+
+       list_add(&entry->list, &hdev->blacklist);
+
+       return mgmt_device_blocked(hdev, bdaddr, type);
+}
+
+int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+       struct bdaddr_list *entry;
+
+       if (!bacmp(bdaddr, BDADDR_ANY)) {
+               hci_blacklist_clear(hdev);
+               return 0;
+       }
+
+       entry = hci_blacklist_lookup(hdev, bdaddr, type);
+       if (!entry)
+               return -ENOENT;
+
+       list_del(&entry->list);
+       kfree(entry);
+
+       return mgmt_device_unblocked(hdev, bdaddr, type);
+}
+
+struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
+                                         bdaddr_t *bdaddr, u8 type)
+{
+       struct bdaddr_list *b;
+
+       list_for_each_entry(b, &hdev->le_white_list, list) {
+               if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
+                       return b;
+       }
+
+       return NULL;
+}
+
+void hci_white_list_clear(struct hci_dev *hdev)
+{
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &hdev->le_white_list) {
+               struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
+
+               list_del(p);
+               kfree(b);
+       }
+}
+
+int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+{
+       struct bdaddr_list *entry;
+
+       if (!bacmp(bdaddr, BDADDR_ANY))
+               return -EBADF;
 
-       if (!new_key)
-               return 0;
+       entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       bacpy(&entry->bdaddr, bdaddr);
+       entry->bdaddr_type = type;
 
-       if (type & HCI_SMP_LTK)
-               mgmt_new_ltk(hdev, key, 1);
+       list_add(&entry->list, &hdev->le_white_list);
 
        return 0;
 }
 
-int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
+int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 {
-       struct link_key *key;
+       struct bdaddr_list *entry;
 
-       key = hci_find_link_key(hdev, bdaddr);
-       if (!key)
-               return -ENOENT;
+       if (!bacmp(bdaddr, BDADDR_ANY))
+               return -EBADF;
 
-       BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+       entry = hci_white_list_lookup(hdev, bdaddr, type);
+       if (!entry)
+               return -ENOENT;
 
-       list_del(&key->list);
-       kfree(key);
+       list_del(&entry->list);
+       kfree(entry);
 
        return 0;
 }
 
-int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
+/* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
+                                              bdaddr_t *addr, u8 addr_type)
 {
-       struct smp_ltk *k, *tmp;
-
-       list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
-               if (bacmp(bdaddr, &k->bdaddr))
-                       continue;
-
-               BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+       struct hci_conn_params *params;
 
-               list_del(&k->list);
-               kfree(k);
+       list_for_each_entry(params, &hdev->le_conn_params, list) {
+               if (bacmp(&params->addr, addr) == 0 &&
+                   params->addr_type == addr_type) {
+                       return params;
+               }
        }
 
-       return 0;
+       return NULL;
 }
 
-/* HCI command timer function */
-static void hci_cmd_timeout(unsigned long arg)
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
 {
-       struct hci_dev *hdev = (void *) arg;
+       struct hci_conn *conn;
 
-       if (hdev->sent_cmd) {
-               struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
-               u16 opcode = __le16_to_cpu(sent->opcode);
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+       if (!conn)
+               return false;
 
-               BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
-       } else {
-               BT_ERR("%s command tx timeout", hdev->name);
-       }
+       if (conn->dst_type != type)
+               return false;
 
-       atomic_set(&hdev->cmd_cnt, 1);
-       queue_work(hdev->workqueue, &hdev->cmd_work);
+       if (conn->state != BT_CONNECTED)
+               return false;
+
+       return true;
 }
 
-struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
-                                         bdaddr_t *bdaddr)
+static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
 {
-       struct oob_data *data;
+       if (addr_type == ADDR_LE_DEV_PUBLIC)
+               return true;
 
-       list_for_each_entry(data, &hdev->remote_oob_data, list)
-               if (bacmp(bdaddr, &data->bdaddr) == 0)
-                       return data;
+       /* Check for Random Static address type */
+       if ((addr->b[5] & 0xc0) == 0xc0)
+               return true;
 
-       return NULL;
+       return false;
 }
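The 0xc0 test encodes how the two most significant bits of a random address classify it: a static random address must have both bits set (hence the == 0xc0 comparison against b[5], the most significant byte of the little-endian bdaddr_t), while the unresolvable private addresses generated further down in hci_update_random_address() clear both bits with &= 0x3f. A standalone sketch of the same check, with an assumed example address:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        /* bdaddr_t is stored little-endian, so b[5] is the most significant
         * byte; the example value below is only an assumption.
         */
        static bool is_static_random(const uint8_t b[6])
        {
                return (b[5] & 0xc0) == 0xc0;   /* top two bits must be 0b11 */
        }

        int main(void)
        {
                const uint8_t addr[6] = { 0x55, 0x44, 0x33, 0x22, 0x11, 0xc1 };

                printf("static random: %s\n",
                       is_static_random(addr) ? "yes" : "no");
                return 0;
        }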
 
-int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
+/* This function requires the caller holds hdev->lock */
+int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
+                       u8 auto_connect, u16 conn_min_interval,
+                       u16 conn_max_interval)
 {
-       struct oob_data *data;
+       struct hci_conn_params *params;
 
-       data = hci_find_remote_oob_data(hdev, bdaddr);
-       if (!data)
-               return -ENOENT;
+       if (!is_identity_address(addr, addr_type))
+               return -EINVAL;
 
-       BT_DBG("%s removing %pMR", hdev->name, bdaddr);
+       params = hci_conn_params_lookup(hdev, addr, addr_type);
+       if (params)
+               goto update;
 
-       list_del(&data->list);
-       kfree(data);
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
+       if (!params) {
+               BT_ERR("Out of memory");
+               return -ENOMEM;
+       }
 
-       return 0;
-}
+       bacpy(&params->addr, addr);
+       params->addr_type = addr_type;
 
-int hci_remote_oob_data_clear(struct hci_dev *hdev)
-{
-       struct oob_data *data, *n;
+       list_add(&params->list, &hdev->le_conn_params);
 
-       list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
-               list_del(&data->list);
-               kfree(data);
+update:
+       params->conn_min_interval = conn_min_interval;
+       params->conn_max_interval = conn_max_interval;
+       params->auto_connect = auto_connect;
+
+       switch (auto_connect) {
+       case HCI_AUTO_CONN_DISABLED:
+       case HCI_AUTO_CONN_LINK_LOSS:
+               hci_pend_le_conn_del(hdev, addr, addr_type);
+               break;
+       case HCI_AUTO_CONN_ALWAYS:
+               if (!is_connected(hdev, addr, addr_type))
+                       hci_pend_le_conn_add(hdev, addr, addr_type);
+               break;
        }
 
+       BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
+              "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
+              conn_min_interval, conn_max_interval);
+
        return 0;
 }
 
-int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
-                           u8 *randomizer)
+/* This function requires the caller holds hdev->lock */
+void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
 {
-       struct oob_data *data;
-
-       data = hci_find_remote_oob_data(hdev, bdaddr);
-
-       if (!data) {
-               data = kmalloc(sizeof(*data), GFP_ATOMIC);
-               if (!data)
-                       return -ENOMEM;
+       struct hci_conn_params *params;
 
-               bacpy(&data->bdaddr, bdaddr);
-               list_add(&data->list, &hdev->remote_oob_data);
-       }
+       params = hci_conn_params_lookup(hdev, addr, addr_type);
+       if (!params)
+               return;
 
-       memcpy(data->hash, hash, sizeof(data->hash));
-       memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
+       hci_pend_le_conn_del(hdev, addr, addr_type);
 
-       BT_DBG("%s for %pMR", hdev->name, bdaddr);
+       list_del(&params->list);
+       kfree(params);
 
-       return 0;
+       BT_DBG("addr %pMR (type %u)", addr, addr_type);
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
-                                        bdaddr_t *bdaddr, u8 type)
+/* This function requires the caller holds hdev->lock */
+void hci_conn_params_clear(struct hci_dev *hdev)
 {
-       struct bdaddr_list *b;
+       struct hci_conn_params *params, *tmp;
 
-       list_for_each_entry(b, &hdev->blacklist, list) {
-               if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
-                       return b;
+       list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
+               list_del(&params->list);
+               kfree(params);
        }
 
-       return NULL;
+       BT_DBG("All LE connection parameters were removed");
 }
 
-int hci_blacklist_clear(struct hci_dev *hdev)
+/* This function requires the caller holds hdev->lock */
+struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
+                                           bdaddr_t *addr, u8 addr_type)
 {
-       struct list_head *p, *n;
-
-       list_for_each_safe(p, n, &hdev->blacklist) {
-               struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
+       struct bdaddr_list *entry;
 
-               list_del(p);
-               kfree(b);
+       list_for_each_entry(entry, &hdev->pend_le_conns, list) {
+               if (bacmp(&entry->bdaddr, addr) == 0 &&
+                   entry->bdaddr_type == addr_type)
+                       return entry;
        }
 
-       return 0;
+       return NULL;
 }
 
-int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
 {
        struct bdaddr_list *entry;
 
-       if (!bacmp(bdaddr, BDADDR_ANY))
-               return -EBADF;
+       entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
+       if (entry)
+               goto done;
 
-       if (hci_blacklist_lookup(hdev, bdaddr, type))
-               return -EEXIST;
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               BT_ERR("Out of memory");
+               return;
+       }
 
-       entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
-       if (!entry)
-               return -ENOMEM;
+       bacpy(&entry->bdaddr, addr);
+       entry->bdaddr_type = addr_type;
 
-       bacpy(&entry->bdaddr, bdaddr);
-       entry->bdaddr_type = type;
+       list_add(&entry->list, &hdev->pend_le_conns);
 
-       list_add(&entry->list, &hdev->blacklist);
+       BT_DBG("addr %pMR (type %u)", addr, addr_type);
 
-       return mgmt_device_blocked(hdev, bdaddr, type);
+done:
+       hci_update_background_scan(hdev);
 }
 
-int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
 {
        struct bdaddr_list *entry;
 
-       if (!bacmp(bdaddr, BDADDR_ANY))
-               return hci_blacklist_clear(hdev);
-
-       entry = hci_blacklist_lookup(hdev, bdaddr, type);
+       entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
        if (!entry)
-               return -ENOENT;
+               goto done;
 
        list_del(&entry->list);
        kfree(entry);
 
-       return mgmt_device_unblocked(hdev, bdaddr, type);
+       BT_DBG("addr %pMR (type %u)", addr, addr_type);
+
+done:
+       hci_update_background_scan(hdev);
+}
+
+/* This function requires the caller holds hdev->lock */
+void hci_pend_le_conns_clear(struct hci_dev *hdev)
+{
+       struct bdaddr_list *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
+               list_del(&entry->list);
+               kfree(entry);
+       }
+
+       BT_DBG("All LE pending connections cleared");
 }
 
 static void inquiry_complete(struct hci_dev *hdev, u8 status)
@@ -2882,7 +3635,6 @@ static void le_scan_disable_work(struct work_struct *work)
 {
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
-       struct hci_cp_le_set_scan_enable cp;
        struct hci_request req;
        int err;
 
@@ -2890,15 +3642,128 @@ static void le_scan_disable_work(struct work_struct *work)
 
        hci_req_init(&req, hdev);
 
-       memset(&cp, 0, sizeof(cp));
-       cp.enable = LE_SCAN_DISABLE;
-       hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+       hci_req_add_le_scan_disable(&req);
 
        err = hci_req_run(&req, le_scan_disable_work_complete);
        if (err)
                BT_ERR("Disable LE scanning request failed: err %d", err);
 }
 
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       /* If we're advertising or initiating an LE connection we can't
+        * go ahead and change the random address at this time. This is
+        * because the eventual initiator address used for the
+        * subsequently created connection will be undefined (some
+        * controllers use the new address and others the one we had
+        * when the operation started).
+        *
+        * In this kind of scenario skip the update and let the random
+        * address be updated at the next cycle.
+        */
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
+           hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+               BT_DBG("Deferring random address update");
+               return;
+       }
+
+       hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
+int hci_update_random_address(struct hci_request *req, bool require_privacy,
+                             u8 *own_addr_type)
+{
+       struct hci_dev *hdev = req->hdev;
+       int err;
+
+       /* If privacy is enabled use a resolvable private address. If the
+        * current RPA has expired or something other than the current RPA
+        * is in use, then generate a new one.
+        */
+       if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+               int to;
+
+               *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+               if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
+                   !bacmp(&hdev->random_addr, &hdev->rpa))
+                       return 0;
+
+               err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
+               if (err < 0) {
+                       BT_ERR("%s failed to generate new RPA", hdev->name);
+                       return err;
+               }
+
+               set_random_addr(req, &hdev->rpa);
+
+               to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
+               queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
+
+               return 0;
+       }
+
+       /* In case of required privacy without a resolvable private address,
+        * use an unresolvable private address. This is useful for active
+        * scanning and non-connectable advertising.
+        */
+       if (require_privacy) {
+               bdaddr_t urpa;
+
+               get_random_bytes(&urpa, 6);
+               urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
+
+               *own_addr_type = ADDR_LE_DEV_RANDOM;
+               set_random_addr(req, &urpa);
+               return 0;
+       }
+
+       /* If forcing static address is in use or there is no public
+        * address, use the static address as random address (but skip
+        * the HCI command if the current random address is already the
+        * static one).
+        */
+       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+           !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+               *own_addr_type = ADDR_LE_DEV_RANDOM;
+               if (bacmp(&hdev->static_addr, &hdev->random_addr))
+                       hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+                                   &hdev->static_addr);
+               return 0;
+       }
+
+       /* Neither privacy nor a static address is being used, so use a
+        * public address.
+        */
+       *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+       return 0;
+}
+
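For the require_privacy case the address is simply six random bytes with the two most significant bits of b[5] cleared. The same construction in a self-contained userspace sketch (getrandom() stands in for the kernel's get_random_bytes(); the helper name is an assumption):

        #include <stdio.h>
        #include <sys/random.h>

        /* Build a non-resolvable private address: random bytes with the two
         * top bits of the most significant byte cleared.
         */
        static int make_unresolvable_addr(unsigned char b[6])
        {
                if (getrandom(b, 6, 0) != 6)
                        return -1;
                b[5] &= 0x3f;
                return 0;
        }

        int main(void)
        {
                unsigned char b[6];

                if (make_unresolvable_addr(b))
                        return 1;
                printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                       b[5], b[4], b[3], b[2], b[1], b[0]);
                return 0;
        }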
+/* Copy the Identity Address of the controller.
+ *
+ * If the controller has a public BD_ADDR, then by default use that one.
+ * If this is an LE-only controller without a public address, default to
+ * the static random address.
+ *
+ * For debugging purposes it is possible to force controllers with a
+ * public address to use the static random address instead.
+ */
+void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                              u8 *bdaddr_type)
+{
+       if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
+           !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+               bacpy(bdaddr, &hdev->static_addr);
+               *bdaddr_type = ADDR_LE_DEV_RANDOM;
+       } else {
+               bacpy(bdaddr, &hdev->bdaddr);
+               *bdaddr_type = ADDR_LE_DEV_PUBLIC;
+       }
+}
+
 /* Alloc HCI device */
 struct hci_dev *hci_alloc_dev(void)
 {
@@ -2919,11 +3784,14 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;
 
+       hdev->le_adv_channel_map = 0x07;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_conn_min_interval = 0x0028;
        hdev->le_conn_max_interval = 0x0038;
 
+       hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
+
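In HCI units these defaults work out to: scan interval 0x0060 = 96 * 0.625 ms = 60 ms, scan window 0x0030 = 48 * 0.625 ms = 30 ms, connection interval 0x0028-0x0038 = 40-56 * 1.25 ms = 50-70 ms, and the new le_adv_channel_map of 0x07 enables all three advertising channels (37, 38 and 39).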
        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);
 
@@ -2932,7 +3800,11 @@ struct hci_dev *hci_alloc_dev(void)
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
+       INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
+       INIT_LIST_HEAD(&hdev->le_white_list);
+       INIT_LIST_HEAD(&hdev->le_conn_params);
+       INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->conn_hash.list);
 
        INIT_WORK(&hdev->rx_work, hci_rx_work);
@@ -3017,9 +3889,18 @@ int hci_register_dev(struct hci_dev *hdev)
 
        dev_set_name(&hdev->dev, "%s", hdev->name);
 
+       hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
+                                              CRYPTO_ALG_ASYNC);
+       if (IS_ERR(hdev->tfm_aes)) {
+               BT_ERR("Unable to create crypto context");
+               error = PTR_ERR(hdev->tfm_aes);
+               hdev->tfm_aes = NULL;
+               goto err_wqueue;
+       }
+
        error = device_add(&hdev->dev);
        if (error < 0)
-               goto err_wqueue;
+               goto err_tfm;
 
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
@@ -3055,6 +3936,8 @@ int hci_register_dev(struct hci_dev *hdev)
 
        return id;
 
+err_tfm:
+       crypto_free_blkcipher(hdev->tfm_aes);
 err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
@@ -3105,6 +3988,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
                rfkill_destroy(hdev->rfkill);
        }
 
+       if (hdev->tfm_aes)
+               crypto_free_blkcipher(hdev->tfm_aes);
+
        device_del(&hdev->dev);
 
        debugfs_remove_recursive(hdev->debugfs);
@@ -3117,7 +4003,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
+       hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
+       hci_white_list_clear(hdev);
+       hci_conn_params_clear(hdev);
+       hci_pend_le_conns_clear(hdev);
        hci_dev_unlock(hdev);
 
        hci_dev_put(hdev);
@@ -4345,3 +5235,104 @@ static void hci_cmd_work(struct work_struct *work)
                }
        }
 }
+
+void hci_req_add_le_scan_disable(struct hci_request *req)
+{
+       struct hci_cp_le_set_scan_enable cp;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.enable = LE_SCAN_DISABLE;
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+}
+
+void hci_req_add_le_passive_scan(struct hci_request *req)
+{
+       struct hci_cp_le_set_scan_param param_cp;
+       struct hci_cp_le_set_scan_enable enable_cp;
+       struct hci_dev *hdev = req->hdev;
+       u8 own_addr_type;
+
+       /* Set require_privacy to true to avoid identification by
+        * unknown peer devices. Since this is passive scanning, no
+        * SCAN_REQ using the local identity should be sent. Mandating
+        * privacy is just an extra precaution.
+        */
+       if (hci_update_random_address(req, true, &own_addr_type))
+               return;
+
+       memset(&param_cp, 0, sizeof(param_cp));
+       param_cp.type = LE_SCAN_PASSIVE;
+       param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
+       param_cp.window = cpu_to_le16(hdev->le_scan_window);
+       param_cp.own_address_type = own_addr_type;
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+                   &param_cp);
+
+       memset(&enable_cp, 0, sizeof(enable_cp));
+       enable_cp.enable = LE_SCAN_ENABLE;
+       enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+                   &enable_cp);
+}
+
+static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
+{
+       if (status)
+               BT_DBG("HCI request failed to update background scanning: "
+                      "status 0x%2.2x", status);
+}
+
+/* This function controls the background scanning based on the
+ * hdev->pend_le_conns list. If there are pending LE connections we start
+ * the background scanning, otherwise we stop it.
+ *
+ * This function requires the caller holds hdev->lock.
+ */
+void hci_update_background_scan(struct hci_dev *hdev)
+{
+       struct hci_request req;
+       struct hci_conn *conn;
+       int err;
+
+       hci_req_init(&req, hdev);
+
+       if (list_empty(&hdev->pend_le_conns)) {
+               /* If there are no pending LE connections, we should stop
+                * the background scanning.
+                */
+
+               /* If controller is not scanning we are done. */
+               if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+                       return;
+
+               hci_req_add_le_scan_disable(&req);
+
+               BT_DBG("%s stopping background scanning", hdev->name);
+       } else {
+               /* If there is at least one pending LE connection, we should
+                * keep the background scan running.
+                */
+
+               /* If controller is connecting, we should not start scanning
+                * since some controllers are not able to scan and connect at
+                * the same time.
+                */
+               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+               if (conn)
+                       return;
+
+               /* If controller is currently scanning, we stop it to ensure we
+                * don't miss any advertising (due to the duplicates filter).
+                */
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+                       hci_req_add_le_scan_disable(&req);
+
+               hci_req_add_le_passive_scan(&req);
+
+               BT_DBG("%s starting background scanning", hdev->name);
+       }
+
+       err = hci_req_run(&req, update_background_scan_complete);
+       if (err)
+               BT_ERR("Failed to run HCI request: err %d", err);
+}
index 5f812455a4504260927cb2dd5a3bdc018e0e20d4..49774912cb01f23ef6f85cb26f8613f538edec94 100644 (file)
@@ -199,6 +199,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
        memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
        hdev->scan_rsp_data_len = 0;
 
+       hdev->le_scan_type = LE_SCAN_PASSIVE;
+
        hdev->ssp_debug_mode = 0;
 }
 
@@ -461,6 +463,34 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
+static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       u8 status = *((u8 *) skb->data);
+       struct hci_cp_write_sc_support *sent;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
+       if (!sent)
+               return;
+
+       if (!status) {
+               if (sent->support)
+                       hdev->features[1][0] |= LMP_HOST_SC;
+               else
+                       hdev->features[1][0] &= ~LMP_HOST_SC;
+       }
+
+       if (test_bit(HCI_MGMT, &hdev->dev_flags))
+               mgmt_sc_enable_complete(hdev, sent->support, status);
+       else if (!status) {
+               if (sent->support)
+                       set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               else
+                       clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+       }
+}
+
 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_local_version *rp = (void *) skb->data;
@@ -904,16 +934,50 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
        hci_dev_unlock(hdev);
 }
 
-static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
-                                            struct sk_buff *skb)
+static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
+                                      struct sk_buff *skb)
 {
        struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
        hci_dev_lock(hdev);
-       mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
-                                               rp->randomizer, rp->status);
+       mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
+                                         NULL, NULL, rp->status);
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
+                                          struct sk_buff *skb)
+{
+       struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       hci_dev_lock(hdev);
+       mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
+                                         rp->hash256, rp->randomizer256,
+                                         rp->status);
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+       bdaddr_t *sent;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
+       if (!sent)
+               return;
+
+       hci_dev_lock(hdev);
+
+       if (!status)
+               bacpy(&hdev->random_addr, sent);
+
        hci_dev_unlock(hdev);
 }
 
@@ -929,12 +993,27 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_dev_lock(hdev);
 
-       if (!status) {
-               if (*sent)
-                       set_bit(HCI_ADVERTISING, &hdev->dev_flags);
-               else
-                       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
-       }
+       if (!status)
+               mgmt_advertising(hdev, *sent);
+
+       hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_cp_le_set_scan_param *cp;
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       if (!status)
+               hdev->le_scan_type = cp->type;
 
        hci_dev_unlock(hdev);
 }
@@ -960,7 +1039,19 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                break;
 
        case LE_SCAN_DISABLE:
+               /* Cancel this timer so that we don't try to disable scanning
+                * when it's already disabled.
+                */
+               cancel_delayed_work(&hdev->le_scan_disable);
+
                clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
+               /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
+                * interrupted scanning due to a connect request. Therefore
+                * mark discovery as stopped.
+                */
+               if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
+                                      &hdev->dev_flags))
+                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
 
        default:
@@ -980,6 +1071,49 @@ static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
                hdev->le_white_list_size = rp->size;
 }
 
+static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
+                                      struct sk_buff *skb)
+{
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (!status)
+               hci_white_list_clear(hdev);
+}
+
+static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
+                                       struct sk_buff *skb)
+{
+       struct hci_cp_le_add_to_white_list *sent;
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
+       if (!sent)
+               return;
+
+       if (!status)
+               hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
+}
+
+static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
+                                         struct sk_buff *skb)
+{
+       struct hci_cp_le_del_from_white_list *sent;
+       __u8 status = *((__u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
+       if (!sent)
+               return;
+
+       if (!status)
+               hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
+}
+
 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
                                            struct sk_buff *skb)
 {
@@ -1020,6 +1154,25 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
        }
 }
 
+static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_cp_le_set_adv_param *cp;
+       u8 status = *((u8 *) skb->data);
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+       hdev->adv_addr_type = cp->own_address_type;
+       hci_dev_unlock(hdev);
+}
+
 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
                                          struct sk_buff *skb)
 {
@@ -1185,9 +1338,12 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
                return 0;
 
        /* Only request authentication for SSP connections or non-SSP
-        * devices with sec_level HIGH or if MITM protection is requested */
+        * devices with sec_level MEDIUM or HIGH or if MITM protection
+        * is requested.
+        */
        if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
-           conn->pending_sec_level != BT_SECURITY_HIGH)
+           conn->pending_sec_level != BT_SECURITY_HIGH &&
+           conn->pending_sec_level != BT_SECURITY_MEDIUM)
                return 0;
 
        return 1;
@@ -1518,6 +1674,87 @@ static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
        amp_write_remote_assoc(hdev, cp->phy_handle);
 }
 
+static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
+{
+       struct hci_cp_le_create_conn *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       /* All connection failure handling is taken care of by the
+        * hci_le_conn_failed function which is triggered by the HCI
+        * request completion callbacks used for connecting.
+        */
+       if (status)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
+       if (!conn)
+               goto unlock;
+
+       /* Store the initiator and responder address information which
+        * is needed for SMP. These values will not change during the
+        * lifetime of the connection.
+        */
+       conn->init_addr_type = cp->own_address_type;
+       if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
+               bacpy(&conn->init_addr, &hdev->random_addr);
+       else
+               bacpy(&conn->init_addr, &hdev->bdaddr);
+
+       conn->resp_addr_type = cp->peer_addr_type;
+       bacpy(&conn->resp_addr, &cp->peer_addr);
+
+       /* We don't want the connection attempt to stick around
+        * indefinitely since LE doesn't have a page timeout concept
+        * like BR/EDR. Set a timer for any connection that doesn't use
+        * the white list for connecting.
+        */
+       if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
+               queue_delayed_work(conn->hdev->workqueue,
+                                  &conn->le_conn_timeout,
+                                  HCI_LE_CONN_TIMEOUT);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
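
As the comment notes, LE has no page-timeout equivalent, so a directed connect (filter policy set to a single peer address) is given an explicit deadline while white-list connects are left to run. A rough stand-alone sketch of that decision, with invented names and a wall-clock deadline standing in for the delayed work:

#include <stdio.h>
#include <time.h>

#define USE_PEER_ADDR  0x00     /* connect to one explicit peer address */
#define USE_WHITE_LIST 0x01     /* controller picks peers from its list */
#define CONN_TIMEOUT_S 20       /* illustrative deadline */

struct pending_conn {
        unsigned char filter_policy;
        time_t deadline;        /* 0 means no timeout armed */
};

static void arm_conn_timeout(struct pending_conn *c)
{
        if (c->filter_policy == USE_PEER_ADDR)
                c->deadline = time(NULL) + CONN_TIMEOUT_S;
        else
                c->deadline = 0;
}

int main(void)
{
        struct pending_conn direct = { USE_PEER_ADDR, 0 };
        struct pending_conn listed = { USE_WHITE_LIST, 0 };

        arm_conn_timeout(&direct);
        arm_conn_timeout(&listed);
        printf("direct: %s, white-list: %s\n",
               direct.deadline ? "timeout armed" : "no timeout",
               listed.deadline ? "timeout armed" : "no timeout");
        return 0;
}
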
+
+static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
+{
+       struct hci_cp_le_start_enc *cp;
+       struct hci_conn *conn;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (!status)
+               return;
+
+       hci_dev_lock(hdev);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
+       if (!cp)
+               goto unlock;
+
+       conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
+       if (!conn)
+               goto unlock;
+
+       if (conn->state != BT_CONNECTED)
+               goto unlock;
+
+       hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+       hci_conn_drop(conn);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -1659,7 +1896,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        } else {
                conn->state = BT_CLOSED;
                if (conn->type == ACL_LINK)
-                       mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
+                       mgmt_connect_failed(hdev, &conn->dst, conn->type,
                                            conn->dst_type, ev->status);
        }
 
@@ -1738,9 +1975,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        bacpy(&cp.bdaddr, &ev->bdaddr);
                        cp.pkt_type = cpu_to_le16(conn->pkt_type);
 
-                       cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
-                       cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
-                       cp.max_latency    = __constant_cpu_to_le16(0xffff);
+                       cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
+                       cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
+                       cp.max_latency    = cpu_to_le16(0xffff);
                        cp.content_format = cpu_to_le16(hdev->voice_setting);
                        cp.retrans_effort = 0xff;
 
@@ -1780,7 +2017,9 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_disconn_complete *ev = (void *) skb->data;
        u8 reason = hci_to_mgmt_reason(ev->reason);
+       struct hci_conn_params *params;
        struct hci_conn *conn;
+       bool mgmt_connected;
        u8 type;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
@@ -1799,13 +2038,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        conn->state = BT_CLOSED;
 
-       if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
-               mgmt_device_disconnected(hdev, &conn->dst, conn->type,
-                                        conn->dst_type, reason);
+       mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
+       mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
+                               reason, mgmt_connected);
 
        if (conn->type == ACL_LINK && conn->flush_key)
                hci_remove_link_key(hdev, &conn->dst);
 
+       params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+       if (params) {
+               switch (params->auto_connect) {
+               case HCI_AUTO_CONN_LINK_LOSS:
+                       if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
+                               break;
+                       /* Fall through */
+
+               case HCI_AUTO_CONN_ALWAYS:
+                       hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
+                       break;
+
+               default:
+                       break;
+               }
+       }
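
The switch above encodes the reconnect policy: a device marked for link-loss reconnection is only re-queued when the disconnect reason is a connection timeout, while an always-connect device is re-queued unconditionally. A small sketch of the same decision, using invented names (0x08 is the HCI connection-timeout reason code):

#include <stdbool.h>
#include <stdio.h>

enum auto_conn { AUTO_CONN_DISABLED, AUTO_CONN_LINK_LOSS, AUTO_CONN_ALWAYS };

#define ERR_CONNECTION_TIMEOUT 0x08

/* Should the device go back on the pending-connect list after a
 * disconnect with the given reason code?
 */
static bool should_reconnect(enum auto_conn policy, unsigned char reason)
{
        switch (policy) {
        case AUTO_CONN_LINK_LOSS:
                return reason == ERR_CONNECTION_TIMEOUT;
        case AUTO_CONN_ALWAYS:
                return true;
        default:
                return false;
        }
}

int main(void)
{
        printf("link-loss/user-term: %d\n",
               should_reconnect(AUTO_CONN_LINK_LOSS, 0x13));
        printf("link-loss/timeout:   %d\n",
               should_reconnect(AUTO_CONN_LINK_LOSS, ERR_CONNECTION_TIMEOUT));
        printf("always/user-term:    %d\n",
               should_reconnect(AUTO_CONN_ALWAYS, 0x13));
        return 0;
}
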
+
        type = conn->type;
 
        hci_proto_disconn_cfm(conn, ev->reason);
@@ -1943,34 +2199,57 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-       if (conn) {
-               if (!ev->status) {
-                       if (ev->encrypt) {
-                               /* Encryption implies authentication */
-                               conn->link_mode |= HCI_LM_AUTH;
-                               conn->link_mode |= HCI_LM_ENCRYPT;
-                               conn->sec_level = conn->pending_sec_level;
-                       } else
-                               conn->link_mode &= ~HCI_LM_ENCRYPT;
+       if (!conn)
+               goto unlock;
+
+       if (!ev->status) {
+               if (ev->encrypt) {
+                       /* Encryption implies authentication */
+                       conn->link_mode |= HCI_LM_AUTH;
+                       conn->link_mode |= HCI_LM_ENCRYPT;
+                       conn->sec_level = conn->pending_sec_level;
+
+                       /* P-256 authentication key implies FIPS */
+                       if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
+                               conn->link_mode |= HCI_LM_FIPS;
+
+                       if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
+                           conn->type == LE_LINK)
+                               set_bit(HCI_CONN_AES_CCM, &conn->flags);
+               } else {
+                       conn->link_mode &= ~HCI_LM_ENCRYPT;
+                       clear_bit(HCI_CONN_AES_CCM, &conn->flags);
                }
+       }
 
-               clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 
-               if (ev->status && conn->state == BT_CONNECTED) {
-                       hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+       if (ev->status && conn->state == BT_CONNECTED) {
+               hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+               hci_conn_drop(conn);
+               goto unlock;
+       }
+
+       if (conn->state == BT_CONFIG) {
+               if (!ev->status)
+                       conn->state = BT_CONNECTED;
+
+               /* In Secure Connections Only mode, do not allow any
+                * connections that are not encrypted with AES-CCM
+                * using a P-256 authenticated combination key.
+                */
+               if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
+                   (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
+                    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
+                       hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
                        hci_conn_drop(conn);
                        goto unlock;
                }
 
-               if (conn->state == BT_CONFIG) {
-                       if (!ev->status)
-                               conn->state = BT_CONNECTED;
-
-                       hci_proto_connect_cfm(conn, ev->status);
-                       hci_conn_drop(conn);
-               } else
-                       hci_encrypt_cfm(conn, ev->status, ev->encrypt);
-       }
+               hci_proto_connect_cfm(conn, ev->status);
+               hci_conn_drop(conn);
+       } else
+               hci_encrypt_cfm(conn, ev->status, ev->encrypt);
 
 unlock:
        hci_dev_unlock(hdev);
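
The Secure Connections Only branch above boils down to a single predicate: the link must be AES-CCM encrypted and keyed with a P-256 authenticated combination key. Sketched below outside the kernel, with illustrative types:

#include <stdbool.h>
#include <stdio.h>

#define KEY_AUTH_COMBINATION_P256 0x08  /* value of HCI_LK_AUTH_COMBINATION_P256 */

struct link {
        bool aes_ccm;           /* encryption reported as AES-CCM (0x02) */
        unsigned char key_type;
};

/* Secure Connections Only: accept the link only if both conditions hold. */
static bool sc_only_accepts(const struct link *l)
{
        return l->aes_ccm && l->key_type == KEY_AUTH_COMBINATION_P256;
}

int main(void)
{
        struct link legacy = { false, 0x05 };
        struct link sc     = { true,  KEY_AUTH_COMBINATION_P256 };

        printf("legacy accepted: %d, SC accepted: %d\n",
               sc_only_accepts(&legacy), sc_only_accepts(&sc));
        return 0;
}
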
@@ -2144,6 +2423,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_write_ssp_mode(hdev, skb);
                break;
 
+       case HCI_OP_WRITE_SC_SUPPORT:
+               hci_cc_write_sc_support(hdev, skb);
+               break;
+
        case HCI_OP_READ_LOCAL_VERSION:
                hci_cc_read_local_version(hdev, skb);
                break;
@@ -2213,7 +2496,11 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                break;
 
        case HCI_OP_READ_LOCAL_OOB_DATA:
-               hci_cc_read_local_oob_data_reply(hdev, skb);
+               hci_cc_read_local_oob_data(hdev, skb);
+               break;
+
+       case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
+               hci_cc_read_local_oob_ext_data(hdev, skb);
                break;
 
        case HCI_OP_LE_READ_BUFFER_SIZE:
@@ -2244,10 +2531,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_user_passkey_neg_reply(hdev, skb);
                break;
 
+       case HCI_OP_LE_SET_RANDOM_ADDR:
+               hci_cc_le_set_random_addr(hdev, skb);
+               break;
+
        case HCI_OP_LE_SET_ADV_ENABLE:
                hci_cc_le_set_adv_enable(hdev, skb);
                break;
 
+       case HCI_OP_LE_SET_SCAN_PARAM:
+               hci_cc_le_set_scan_param(hdev, skb);
+               break;
+
        case HCI_OP_LE_SET_SCAN_ENABLE:
                hci_cc_le_set_scan_enable(hdev, skb);
                break;
@@ -2256,6 +2551,18 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_le_read_white_list_size(hdev, skb);
                break;
 
+       case HCI_OP_LE_CLEAR_WHITE_LIST:
+               hci_cc_le_clear_white_list(hdev, skb);
+               break;
+
+       case HCI_OP_LE_ADD_TO_WHITE_LIST:
+               hci_cc_le_add_to_white_list(hdev, skb);
+               break;
+
+       case HCI_OP_LE_DEL_FROM_WHITE_LIST:
+               hci_cc_le_del_from_white_list(hdev, skb);
+               break;
+
        case HCI_OP_LE_READ_SUPPORTED_STATES:
                hci_cc_le_read_supported_states(hdev, skb);
                break;
@@ -2264,6 +2571,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_write_le_host_supported(hdev, skb);
                break;
 
+       case HCI_OP_LE_SET_ADV_PARAM:
+               hci_cc_set_adv_param(hdev, skb);
+               break;
+
        case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
                hci_cc_write_remote_amp_assoc(hdev, skb);
                break;
@@ -2351,6 +2662,14 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cs_accept_phylink(hdev, ev->status);
                break;
 
+       case HCI_OP_LE_CREATE_CONN:
+               hci_cs_le_create_conn(hdev, ev->status);
+               break;
+
+       case HCI_OP_LE_START_ENC:
+               hci_cs_le_start_enc(hdev, ev->status);
+               break;
+
        default:
                BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
                break;
@@ -2630,7 +2949,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
        if (conn) {
-               if (key->type == HCI_LK_UNAUTH_COMBINATION &&
+               if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
+                    key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
                    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
                        BT_DBG("%s ignoring unauthenticated key", hdev->name);
                        goto not_found;
@@ -2844,6 +3164,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev,
                         * features do not indicate SSP support */
                        clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
                }
+
+               if (ev->features[0] & LMP_HOST_SC)
+                       set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
        }
 
        if (conn->state != BT_CONFIG)
@@ -2905,6 +3228,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
        case 0x1c:      /* SCO interval rejected */
        case 0x1a:      /* Unsupported Remote Feature */
        case 0x1f:      /* Unspecified error */
+       case 0x20:      /* Unsupported LMP Parameter value */
                if (conn->out) {
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                        (hdev->esco_type & EDR_ESCO_MASK);
@@ -3194,8 +3518,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
        }
 
 confirm:
-       mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
-                                 confirm_hint);
+       mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
+                                 le32_to_cpu(ev->passkey), confirm_hint);
 
 unlock:
        hci_dev_unlock(hdev);
@@ -3337,20 +3661,36 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
 
        data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
        if (data) {
-               struct hci_cp_remote_oob_data_reply cp;
+               if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
+                       struct hci_cp_remote_oob_ext_data_reply cp;
 
-               bacpy(&cp.bdaddr, &ev->bdaddr);
-               memcpy(cp.hash, data->hash, sizeof(cp.hash));
-               memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
+                       bacpy(&cp.bdaddr, &ev->bdaddr);
+                       memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
+                       memcpy(cp.randomizer192, data->randomizer192,
+                              sizeof(cp.randomizer192));
+                       memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
+                       memcpy(cp.randomizer256, data->randomizer256,
+                              sizeof(cp.randomizer256));
+
+                       hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
+                                    sizeof(cp), &cp);
+               } else {
+                       struct hci_cp_remote_oob_data_reply cp;
+
+                       bacpy(&cp.bdaddr, &ev->bdaddr);
+                       memcpy(cp.hash, data->hash192, sizeof(cp.hash));
+                       memcpy(cp.randomizer, data->randomizer192,
+                              sizeof(cp.randomizer));
 
-               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
-                            &cp);
+                       hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
+                                    sizeof(cp), &cp);
+               }
        } else {
                struct hci_cp_remote_oob_data_neg_reply cp;
 
                bacpy(&cp.bdaddr, &ev->bdaddr);
-               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
-                            &cp);
+               hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
+                            sizeof(cp), &cp);
        }
 
 unlock:
@@ -3484,6 +3824,7 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_ev_le_conn_complete *ev = (void *) skb->data;
        struct hci_conn *conn;
+       struct smp_irk *irk;
 
        BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
 
@@ -3514,19 +3855,70 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        conn->out = true;
                        conn->link_mode |= HCI_LM_MASTER;
                }
+
+               /* If we didn't have a hci_conn object previously
+                * but we're in master role this must be something
+                * but we're in master role, this must be something
+                * connections are not "first class citizens" we don't
+                * have full tracking of them. Therefore, we go ahead
+                * with a "best effort" approach of determining the
+                * initiator address based on the HCI_PRIVACY flag.
+                */
+               if (conn->out) {
+                       conn->resp_addr_type = ev->bdaddr_type;
+                       bacpy(&conn->resp_addr, &ev->bdaddr);
+                       if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
+                               conn->init_addr_type = ADDR_LE_DEV_RANDOM;
+                               bacpy(&conn->init_addr, &hdev->rpa);
+                       } else {
+                               hci_copy_identity_address(hdev,
+                                                         &conn->init_addr,
+                                                         &conn->init_addr_type);
+                       }
+               } else {
+                       /* Set the responder (our side) address type based on
+                        * the advertising address type.
+                        */
+                       conn->resp_addr_type = hdev->adv_addr_type;
+                       if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
+                               bacpy(&conn->resp_addr, &hdev->random_addr);
+                       else
+                               bacpy(&conn->resp_addr, &hdev->bdaddr);
+
+                       conn->init_addr_type = ev->bdaddr_type;
+                       bacpy(&conn->init_addr, &ev->bdaddr);
+               }
+       } else {
+               cancel_delayed_work(&conn->le_conn_timeout);
+       }
+
+       /* Ensure that the hci_conn contains the identity address type
+        * regardless of which address the connection was made with.
+        */
+       hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+
+       /* Look up the identity address from the stored connection
+        * address and address type.
+        *
+        * When establishing connections to an identity address, the
+        * connection procedure will store the resolvable random
+        * address first. Now if it can be converted back into the
+        * identity address, start using the identity address from
+        * now on.
+        */
+       irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
+       if (irk) {
+               bacpy(&conn->dst, &irk->bdaddr);
+               conn->dst_type = irk->addr_type;
        }
 
        if (ev->status) {
-               mgmt_connect_failed(hdev, &conn->dst, conn->type,
-                                   conn->dst_type, ev->status);
-               hci_proto_connect_cfm(conn, ev->status);
-               conn->state = BT_CLOSED;
-               hci_conn_del(conn);
+               hci_le_conn_failed(conn, ev->status);
                goto unlock;
        }
 
        if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
-               mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
+               mgmt_device_connected(hdev, &conn->dst, conn->type,
                                      conn->dst_type, 0, NULL, 0, NULL);
 
        conn->sec_level = BT_SECURITY_LOW;
@@ -3540,25 +3932,73 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_proto_connect_cfm(conn, ev->status);
 
+       hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);
+
 unlock:
        hci_dev_unlock(hdev);
 }
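
The last step above replaces the resolvable private address the connection was made with by the peer's identity address once the IRK can resolve it. In the kernel the resolution is AES-128 over the address with the stored IRK; the sketch below substitutes a plain lookup table so only the bookkeeping is shown (all names are invented):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct addr { unsigned char b[6]; unsigned char type; };

struct irk_entry {
        struct addr rpa;        /* resolvable private address seen on air */
        struct addr identity;   /* the peer's public/static identity */
};

static struct irk_entry irk_table[] = {
        { { { 0x4e, 0x11, 0x22, 0x33, 0x44, 0x55 }, 0x01 },
          { { 0x00, 0x1b, 0xdc, 0x01, 0x02, 0x03 }, 0x00 } },
};

/* Replace *a with the identity address if it can be resolved. */
static void use_identity_address(struct addr *a)
{
        for (size_t i = 0; i < sizeof(irk_table) / sizeof(irk_table[0]); i++) {
                if (!memcmp(a->b, irk_table[i].rpa.b, 6)) {
                        *a = irk_table[i].identity;
                        return;
                }
        }
}

int main(void)
{
        struct addr peer = { { 0x4e, 0x11, 0x22, 0x33, 0x44, 0x55 }, 0x01 };

        use_identity_address(&peer);
        printf("tracking peer as %02x:..:%02x (type %d)\n",
               peer.b[0], peer.b[5], peer.type);
        return 0;
}
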
 
+/* This function requires the caller holds hdev->lock */
+static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
+                                 u8 addr_type)
+{
+       struct hci_conn *conn;
+       struct smp_irk *irk;
+
+       /* If this is a resolvable address, we should resolve it and then
+        * update address and address type variables.
+        */
+       irk = hci_get_irk(hdev, addr, addr_type);
+       if (irk) {
+               addr = &irk->bdaddr;
+               addr_type = irk->addr_type;
+       }
+
+       if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
+               return;
+
+       conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
+                             HCI_AT_NO_BONDING);
+       if (!IS_ERR(conn))
+               return;
+
+       switch (PTR_ERR(conn)) {
+       case -EBUSY:
+               /* If hci_connect_le() returns -EBUSY it means there is already
+                * an LE connection attempt going on. Since controllers don't
+                * support more than one connection attempt at the time, we
+                * support more than one connection attempt at a time, we
+                */
+               break;
+       default:
+               BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
+       }
+}
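
The error handling above treats -EBUSY as benign because a controller runs at most one LE connection attempt at a time. The same pattern in a stand-alone form, with a pretend connect helper:

#include <errno.h>
#include <stdio.h>

/* Pretend connect helper: 0 on success, negative errno on failure. */
static int try_connect(int controller_busy)
{
        return controller_busy ? -EBUSY : 0;
}

static void check_pending(int controller_busy)
{
        int err = try_connect(controller_busy);

        switch (err) {
        case 0:
        case -EBUSY:    /* one attempt at a time: not an error for us */
                break;
        default:
                fprintf(stderr, "failed to connect: err %d\n", err);
        }
}

int main(void)
{
        check_pending(1);       /* silently ignored */
        check_pending(0);       /* succeeds */
        return 0;
}
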
+
 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
        u8 num_reports = skb->data[0];
        void *ptr = &skb->data[1];
        s8 rssi;
 
+       hci_dev_lock(hdev);
+
        while (num_reports--) {
                struct hci_ev_le_advertising_info *ev = ptr;
 
+               if (ev->evt_type == LE_ADV_IND ||
+                   ev->evt_type == LE_ADV_DIRECT_IND)
+                       check_pending_le_conn(hdev, &ev->bdaddr,
+                                             ev->bdaddr_type);
+
                rssi = ev->data[ev->length];
                mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
                                  NULL, rssi, 0, 1, ev->data, ev->length);
 
                ptr += sizeof(*ev) + ev->length + 1;
        }
+
+       hci_dev_unlock(hdev);
 }
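
The report loop above walks a packed event: each report is a fixed header, 'length' bytes of advertising data and one trailing RSSI byte, so the cursor advances by header size + length + 1. A self-contained parser with the same layout (the struct mirrors the shape of hci_ev_le_advertising_info; the values in main are made up):

#include <stdint.h>
#include <stdio.h>

struct adv_info {
        uint8_t evt_type;
        uint8_t bdaddr_type;
        uint8_t bdaddr[6];
        uint8_t length;
        uint8_t data[];         /* 'length' bytes, then one RSSI byte */
} __attribute__((packed));

static void parse_reports(const uint8_t *buf)
{
        uint8_t num = buf[0];
        const uint8_t *ptr = &buf[1];

        while (num--) {
                const struct adv_info *ev = (const void *)ptr;
                int8_t rssi = (int8_t)ev->data[ev->length];

                printf("type %d len %d rssi %d\n",
                       ev->evt_type, ev->length, rssi);
                ptr += sizeof(*ev) + ev->length + 1;
        }
}

int main(void)
{
        /* one report: ADV_IND, public address, 3 data bytes, RSSI -60 */
        uint8_t ev[] = { 1,
                         0x00, 0x00, 1, 2, 3, 4, 5, 6, 3,
                         0x02, 0x01, 0x06, (uint8_t)-60 };

        parse_reports(ev);
        return 0;
}
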
 
 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -3577,7 +4017,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
        if (conn == NULL)
                goto not_found;
 
-       ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
+       ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
        if (ltk == NULL)
                goto not_found;
 
@@ -3593,7 +4033,13 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
        hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
 
-       if (ltk->type & HCI_SMP_STK) {
+       /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
+        * temporary key used to encrypt a connection following
+        * pairing. It is used during the Encrypted Session Setup to
+        * distribute the keys. Later, security can be re-established
+        * using a distributed LTK.
+        */
+       if (ltk->type == HCI_SMP_STK_SLAVE) {
                list_del(&ltk->list);
                kfree(ltk);
        }
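
The STK handling above is one-shot: a slave STK is handed to the controller for this encryption request and then removed, whereas a distributed LTK is kept for later re-encryption. A small sketch of that single-use rule with invented types:

#include <stdbool.h>
#include <stdio.h>

enum key_kind { KEY_LTK, KEY_STK_SLAVE };

struct key {
        enum key_kind kind;
        bool present;
        unsigned char val[16];
};

/* Use a key for an encryption request; single-use keys are consumed. */
static bool use_key(struct key *k)
{
        if (!k->present)
                return false;
        /* ...reply to the LTK request with k->val here... */
        if (k->kind == KEY_STK_SLAVE)
                k->present = false;     /* an STK must not be reused */
        return true;
}

int main(void)
{
        struct key stk = { KEY_STK_SLAVE, true, { 0 } };
        bool first = use_key(&stk);
        bool second = use_key(&stk);

        printf("STK usable: %d then %d\n", first, second);  /* 1 then 0 */
        return 0;
}
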
index 7552f9e3089ce790040268f1ce0a067d6ef728cb..b9a418e578e0000ec4ad68734b3e34575cd759b5 100644 (file)
@@ -211,22 +211,22 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
 
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
-               opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
+               opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
-               opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
+               opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
-                       opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
+                       opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
-                       opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
+                       opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
-                       opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
+                       opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
-                       opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
+                       opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        default:
                return;
@@ -319,7 +319,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                memcpy(ni->name, hdev->name, 8);
 
-               opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
+               opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;
 
        case HCI_DEV_UNREG:
@@ -327,7 +327,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
                if (!skb)
                        return NULL;
 
-               opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
+               opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;
 
        default:
@@ -716,6 +716,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                err = hci_dev_open(hdev->id);
                if (err) {
                        clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+                       mgmt_index_added(hdev);
                        hci_dev_put(hdev);
                        goto done;
                }
index 0b61250cfdf90c9e3a488c9ca3cce41ac79d6a84..555982a78a585513431722a7f65e1934dc081f86 100644 (file)
@@ -49,14 +49,7 @@ static struct attribute *bt_link_attrs[] = {
        NULL
 };
 
-static struct attribute_group bt_link_group = {
-       .attrs = bt_link_attrs,
-};
-
-static const struct attribute_group *bt_link_groups[] = {
-       &bt_link_group,
-       NULL
-};
+ATTRIBUTE_GROUPS(bt_link);
 
 static void bt_link_release(struct device *dev)
 {
@@ -182,14 +175,7 @@ static struct attribute *bt_host_attrs[] = {
        NULL
 };
 
-static struct attribute_group bt_host_group = {
-       .attrs = bt_host_attrs,
-};
-
-static const struct attribute_group *bt_host_groups[] = {
-       &bt_host_group,
-       NULL
-};
+ATTRIBUTE_GROUPS(bt_host);
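
ATTRIBUTE_GROUPS(name) generates the same attribute_group/<name>_groups pair that was open-coded above, which is why both definitions could be dropped. A user-space approximation of the expansion (trimmed stand-in types; the macro body follows the kernel's in spirit, not verbatim):

#include <stdio.h>

struct attribute { const char *name; };
struct attribute_group { struct attribute **attrs; };

#define ATTRIBUTE_GROUPS(_name)                                        \
static const struct attribute_group _name##_group = {                 \
        .attrs = _name##_attrs,                                        \
};                                                                     \
static const struct attribute_group *_name##_groups[] = {             \
        &_name##_group,                                                \
        NULL,                                                          \
}

static struct attribute demo_attr = { "address" };
static struct attribute *bt_host_attrs[] = { &demo_attr, NULL };

ATTRIBUTE_GROUPS(bt_host);      /* emits bt_host_group and bt_host_groups */

int main(void)
{
        printf("first attr: %s\n", bt_host_groups[0]->attrs[0]->name);
        return 0;
}
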
 
 static void bt_host_release(struct device *dev)
 {
index b0ad2c752d738039cff8ec32c028dd109a10388b..a1e5bb7d06e880b07cb6ab4e7cac153c0011fa79 100644 (file)
@@ -42,6 +42,8 @@
 #include "amp.h"
 #include "6lowpan.h"
 
+#define LE_FLOWCTL_MAX_CREDITS 65535
+
 bool disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
@@ -330,44 +332,20 @@ static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
        return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
 }
 
-static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
+static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
 {
+       u16 seq = seq_list->head;
        u16 mask = seq_list->mask;
 
-       if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
-               /* In case someone tries to pop the head of an empty list */
-               return L2CAP_SEQ_LIST_CLEAR;
-       } else if (seq_list->head == seq) {
-               /* Head can be removed in constant time */
-               seq_list->head = seq_list->list[seq & mask];
-               seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
-
-               if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
-                       seq_list->head = L2CAP_SEQ_LIST_CLEAR;
-                       seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
-               }
-       } else {
-               /* Walk the list to find the sequence number */
-               u16 prev = seq_list->head;
-               while (seq_list->list[prev & mask] != seq) {
-                       prev = seq_list->list[prev & mask];
-                       if (prev == L2CAP_SEQ_LIST_TAIL)
-                               return L2CAP_SEQ_LIST_CLEAR;
-               }
+       seq_list->head = seq_list->list[seq & mask];
+       seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
 
-               /* Unlink the number from the list and clear it */
-               seq_list->list[prev & mask] = seq_list->list[seq & mask];
-               seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
-               if (seq_list->tail == seq)
-                       seq_list->tail = prev;
+       if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
+               seq_list->head = L2CAP_SEQ_LIST_CLEAR;
+               seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
        }
-       return seq;
-}
 
-static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
-{
-       /* Remove the head in constant time */
-       return l2cap_seq_list_remove(seq_list, seq_list->head);
+       return seq;
 }
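
The rewrite above works because list[] doubles as a next-pointer array indexed by (seq & mask), so the head can always be unlinked in constant time and the general remove-by-value walk is no longer needed. A stand-alone model of the structure (sentinel values and size are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SEQ_CLEAR 0xffff
#define SEQ_TAIL  0xfffe
#define MASK      0x0f          /* 16 slots, power of two */

struct seq_list {
        uint16_t head, tail;
        uint16_t list[MASK + 1];
};

static void seq_list_append(struct seq_list *s, uint16_t seq)
{
        if (s->head == SEQ_CLEAR)       /* empty list: seq becomes the head */
                s->head = seq;
        else
                s->list[s->tail & MASK] = seq;
        s->tail = seq;
        s->list[seq & MASK] = SEQ_TAIL;
}

/* Constant-time pop of the head, as in the rewritten function above. */
static uint16_t seq_list_pop(struct seq_list *s)
{
        uint16_t seq = s->head;

        s->head = s->list[seq & MASK];
        s->list[seq & MASK] = SEQ_CLEAR;

        if (s->head == SEQ_TAIL)        /* that was the last element */
                s->head = s->tail = SEQ_CLEAR;
        return seq;
}

int main(void)
{
        struct seq_list s;
        int i;

        s.head = s.tail = SEQ_CLEAR;
        for (i = 0; i <= MASK; i++)
                s.list[i] = SEQ_CLEAR;

        seq_list_append(&s, 3);
        seq_list_append(&s, 7);
        printf("%d\n", seq_list_pop(&s));       /* 3 */
        printf("%d\n", seq_list_pop(&s));       /* 7 */
        return 0;
}
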
 
 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
@@ -506,7 +484,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
        chan->sdu_len = 0;
        chan->tx_credits = 0;
        chan->rx_credits = le_max_credits;
-       chan->mps = min_t(u16, chan->imtu, L2CAP_LE_DEFAULT_MPS);
+       chan->mps = min_t(u16, chan->imtu, le_default_mps);
 
        skb_queue_head_init(&chan->tx_q);
 }
@@ -522,18 +500,10 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 
        switch (chan->chan_type) {
        case L2CAP_CHAN_CONN_ORIENTED:
-               if (conn->hcon->type == LE_LINK) {
-                       if (chan->dcid == L2CAP_CID_ATT) {
-                               chan->omtu = L2CAP_DEFAULT_MTU;
-                               chan->scid = L2CAP_CID_ATT;
-                       } else {
-                               chan->scid = l2cap_alloc_cid(conn);
-                       }
-               } else {
-                       /* Alloc CID for connection-oriented socket */
-                       chan->scid = l2cap_alloc_cid(conn);
+               /* Alloc CID for connection-oriented socket */
+               chan->scid = l2cap_alloc_cid(conn);
+               if (conn->hcon->type == ACL_LINK)
                        chan->omtu = L2CAP_DEFAULT_MTU;
-               }
                break;
 
        case L2CAP_CHAN_CONN_LESS:
@@ -543,11 +513,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
                chan->omtu = L2CAP_DEFAULT_MTU;
                break;
 
-       case L2CAP_CHAN_CONN_FIX_A2MP:
-               chan->scid = L2CAP_CID_A2MP;
-               chan->dcid = L2CAP_CID_A2MP;
-               chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
-               chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+       case L2CAP_CHAN_FIXED:
+               /* Caller will set CID and CID specific MTU values */
                break;
 
        default:
@@ -595,7 +562,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 
                chan->conn = NULL;
 
-               if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
+               if (chan->scid != L2CAP_CID_A2MP)
                        hci_conn_drop(conn->hcon);
 
                if (mgr && mgr->bredr_chan == chan)
@@ -642,6 +609,23 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
        return;
 }
 
+void l2cap_conn_update_id_addr(struct hci_conn *hcon)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct l2cap_chan *chan;
+
+       mutex_lock(&conn->chan_lock);
+
+       list_for_each_entry(chan, &conn->chan_l, list) {
+               l2cap_chan_lock(chan);
+               bacpy(&chan->dst, &hcon->dst);
+               chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
+               l2cap_chan_unlock(chan);
+       }
+
+       mutex_unlock(&conn->chan_lock);
+}
+
 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
 {
        struct l2cap_conn *conn = chan->conn;
@@ -681,7 +665,7 @@ static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
        rsp.scid   = cpu_to_le16(chan->dcid);
        rsp.dcid   = cpu_to_le16(chan->scid);
        rsp.result = cpu_to_le16(result);
-       rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
 
        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
 }
@@ -699,10 +683,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 
        case BT_CONNECTED:
        case BT_CONFIG:
-               /* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
-                * check for chan->psm.
-                */
-               if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
+               if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
                        __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
                        l2cap_send_disconn_req(chan, reason);
                } else
@@ -737,6 +718,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
        case L2CAP_CHAN_RAW:
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
+               case BT_SECURITY_FIPS:
                        return HCI_AT_DEDICATED_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
                        return HCI_AT_DEDICATED_BONDING;
@@ -745,21 +727,23 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
                }
                break;
        case L2CAP_CHAN_CONN_LESS:
-               if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
+               if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
                        if (chan->sec_level == BT_SECURITY_LOW)
                                chan->sec_level = BT_SECURITY_SDP;
                }
-               if (chan->sec_level == BT_SECURITY_HIGH)
+               if (chan->sec_level == BT_SECURITY_HIGH ||
+                   chan->sec_level == BT_SECURITY_FIPS)
                        return HCI_AT_NO_BONDING_MITM;
                else
                        return HCI_AT_NO_BONDING;
                break;
        case L2CAP_CHAN_CONN_ORIENTED:
-               if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
+               if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
                        if (chan->sec_level == BT_SECURITY_LOW)
                                chan->sec_level = BT_SECURITY_SDP;
 
-                       if (chan->sec_level == BT_SECURITY_HIGH)
+                       if (chan->sec_level == BT_SECURITY_HIGH ||
+                           chan->sec_level == BT_SECURITY_FIPS)
                                return HCI_AT_NO_BONDING_MITM;
                        else
                                return HCI_AT_NO_BONDING;
@@ -768,6 +752,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
        default:
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
+               case BT_SECURITY_FIPS:
                        return HCI_AT_GENERAL_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
                        return HCI_AT_GENERAL_BONDING;
@@ -1288,7 +1273,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
                }
        } else {
                struct l2cap_info_req req;
-               req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
+               req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
 
                conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
                conn->info_ident = l2cap_get_ident(conn);
@@ -1330,7 +1315,7 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
                __clear_ack_timer(chan);
        }
 
-       if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+       if (chan->scid == L2CAP_CID_A2MP) {
                l2cap_state_change(chan, BT_DISCONN);
                return;
        }
@@ -1385,18 +1370,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 
                        if (l2cap_chan_check_security(chan)) {
                                if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
-                                       rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
-                                       rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+                                       rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+                                       rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
                                        chan->ops->defer(chan);
 
                                } else {
                                        l2cap_state_change(chan, BT_CONFIG);
-                                       rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
-                                       rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+                                       rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+                                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
                                }
                        } else {
-                               rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
-                               rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+                               rsp.result = cpu_to_le16(L2CAP_CR_PEND);
+                               rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
                        }
 
                        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1493,8 +1478,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
        if (!chan)
                goto clean;
 
-       chan->dcid = L2CAP_CID_ATT;
-
        bacpy(&chan->src, &hcon->src);
        bacpy(&chan->dst, &hcon->dst);
        chan->src_type = bdaddr_type(hcon, hcon->src_type);
@@ -1528,7 +1511,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
 
                l2cap_chan_lock(chan);
 
-               if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+               if (chan->scid == L2CAP_CID_A2MP) {
                        l2cap_chan_unlock(chan);
                        continue;
                }
@@ -1546,6 +1529,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
        }
 
        mutex_unlock(&conn->chan_lock);
+
+       queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
 }
 
 /* Notify sockets that we cannot guarantee reliability anymore */
@@ -1671,6 +1656,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 
        kfree_skb(conn->rx_skb);
 
+       skb_queue_purge(&conn->pending_rx);
+       flush_work(&conn->pending_rx_work);
+
        l2cap_unregister_all_users(conn);
 
        mutex_lock(&conn->chan_lock);
@@ -1718,66 +1706,6 @@ static void security_timeout(struct work_struct *work)
        }
 }
 
-static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
-{
-       struct l2cap_conn *conn = hcon->l2cap_data;
-       struct hci_chan *hchan;
-
-       if (conn)
-               return conn;
-
-       hchan = hci_chan_create(hcon);
-       if (!hchan)
-               return NULL;
-
-       conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
-       if (!conn) {
-               hci_chan_del(hchan);
-               return NULL;
-       }
-
-       kref_init(&conn->ref);
-       hcon->l2cap_data = conn;
-       conn->hcon = hcon;
-       hci_conn_get(conn->hcon);
-       conn->hchan = hchan;
-
-       BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
-
-       switch (hcon->type) {
-       case LE_LINK:
-               if (hcon->hdev->le_mtu) {
-                       conn->mtu = hcon->hdev->le_mtu;
-                       break;
-               }
-               /* fall through */
-       default:
-               conn->mtu = hcon->hdev->acl_mtu;
-               break;
-       }
-
-       conn->feat_mask = 0;
-
-       if (hcon->type == ACL_LINK)
-               conn->hs_enabled = test_bit(HCI_HS_ENABLED,
-                                           &hcon->hdev->dev_flags);
-
-       spin_lock_init(&conn->lock);
-       mutex_init(&conn->chan_lock);
-
-       INIT_LIST_HEAD(&conn->chan_l);
-       INIT_LIST_HEAD(&conn->users);
-
-       if (hcon->type == LE_LINK)
-               INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
-       else
-               INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
-
-       conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
-
-       return conn;
-}
-
 static void l2cap_conn_free(struct kref *ref)
 {
        struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
@@ -1848,154 +1776,6 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
        return c1;
 }
 
-static bool is_valid_psm(u16 psm, u8 dst_type)
-{
-       if (!psm)
-               return false;
-
-       if (bdaddr_type_is_le(dst_type))
-               return (psm <= 0x00ff);
-
-       /* PSM must be odd and lsb of upper byte must be 0 */
-       return ((psm & 0x0101) == 0x0001);
-}
-
-int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
-                      bdaddr_t *dst, u8 dst_type)
-{
-       struct l2cap_conn *conn;
-       struct hci_conn *hcon;
-       struct hci_dev *hdev;
-       __u8 auth_type;
-       int err;
-
-       BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
-              dst_type, __le16_to_cpu(psm));
-
-       hdev = hci_get_route(dst, &chan->src);
-       if (!hdev)
-               return -EHOSTUNREACH;
-
-       hci_dev_lock(hdev);
-
-       l2cap_chan_lock(chan);
-
-       if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
-           chan->chan_type != L2CAP_CHAN_RAW) {
-               err = -EINVAL;
-               goto done;
-       }
-
-       if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
-               err = -EINVAL;
-               goto done;
-       }
-
-       switch (chan->mode) {
-       case L2CAP_MODE_BASIC:
-               break;
-       case L2CAP_MODE_LE_FLOWCTL:
-               l2cap_le_flowctl_init(chan);
-               break;
-       case L2CAP_MODE_ERTM:
-       case L2CAP_MODE_STREAMING:
-               if (!disable_ertm)
-                       break;
-               /* fall through */
-       default:
-               err = -ENOTSUPP;
-               goto done;
-       }
-
-       switch (chan->state) {
-       case BT_CONNECT:
-       case BT_CONNECT2:
-       case BT_CONFIG:
-               /* Already connecting */
-               err = 0;
-               goto done;
-
-       case BT_CONNECTED:
-               /* Already connected */
-               err = -EISCONN;
-               goto done;
-
-       case BT_OPEN:
-       case BT_BOUND:
-               /* Can connect */
-               break;
-
-       default:
-               err = -EBADFD;
-               goto done;
-       }
-
-       /* Set destination address and psm */
-       bacpy(&chan->dst, dst);
-       chan->dst_type = dst_type;
-
-       chan->psm = psm;
-       chan->dcid = cid;
-
-       auth_type = l2cap_get_auth_type(chan);
-
-       if (bdaddr_type_is_le(dst_type))
-               hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
-                                  chan->sec_level, auth_type);
-       else
-               hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
-                                  chan->sec_level, auth_type);
-
-       if (IS_ERR(hcon)) {
-               err = PTR_ERR(hcon);
-               goto done;
-       }
-
-       conn = l2cap_conn_add(hcon);
-       if (!conn) {
-               hci_conn_drop(hcon);
-               err = -ENOMEM;
-               goto done;
-       }
-
-       if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
-               hci_conn_drop(hcon);
-               err = -EBUSY;
-               goto done;
-       }
-
-       /* Update source addr of the socket */
-       bacpy(&chan->src, &hcon->src);
-       chan->src_type = bdaddr_type(hcon, hcon->src_type);
-
-       l2cap_chan_unlock(chan);
-       l2cap_chan_add(conn, chan);
-       l2cap_chan_lock(chan);
-
-       /* l2cap_chan_add takes its own ref so we can drop this one */
-       hci_conn_drop(hcon);
-
-       l2cap_state_change(chan, BT_CONNECT);
-       __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
-
-       if (hcon->state == BT_CONNECTED) {
-               if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
-                       __clear_chan_timer(chan);
-                       if (l2cap_chan_check_security(chan))
-                               l2cap_state_change(chan, BT_CONNECTED);
-               } else
-                       l2cap_do_start(chan);
-       }
-
-       err = 0;
-
-done:
-       l2cap_chan_unlock(chan);
-       hci_dev_unlock(hdev);
-       hci_dev_put(hdev);
-       return err;
-}
-
 static void l2cap_monitor_timeout(struct work_struct *work)
 {
        struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -2654,6 +2434,14 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
                if (IS_ERR(skb))
                        return PTR_ERR(skb);
 
+               /* Channel lock is released before requesting new skb and then
+                * reacquired, so we need to recheck the channel state.
+                */
+               if (chan->state != BT_CONNECTED) {
+                       kfree_skb(skb);
+                       return -ENOTCONN;
+               }
+
                l2cap_do_send(chan, skb);
                return len;
        }
@@ -2703,6 +2491,14 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
                if (IS_ERR(skb))
                        return PTR_ERR(skb);
 
+               /* Channel lock is released before requesting new skb and then
+                * reacquired, so we need to recheck the channel state.
+                */
+               if (chan->state != BT_CONNECTED) {
+                       kfree_skb(skb);
+                       return -ENOTCONN;
+               }
+
                l2cap_do_send(chan, skb);
                err = len;
                break;
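
Both hunks add the same guard: the channel lock is dropped while the skb is allocated, so the state has to be revalidated once the lock is reacquired. The pattern in miniature with a pthread mutex (names and the state constant are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define ST_CONNECTED 1

struct chan {
        pthread_mutex_t lock;
        int state;
};

/* Caller holds c->lock, mirroring l2cap_chan_send() above. */
static int chan_send(struct chan *c, size_t len)
{
        void *buf;

        pthread_mutex_unlock(&c->lock); /* allocation may sleep */
        buf = malloc(len);
        pthread_mutex_lock(&c->lock);

        /* The channel may have been closed while we were unlocked. */
        if (!buf || c->state != ST_CONNECTED) {
                free(buf);
                return -1;
        }

        /* ...queue buf for transmission here... */
        free(buf);
        return (int)len;
}

int main(void)
{
        struct chan c = { PTHREAD_MUTEX_INITIALIZER, ST_CONNECTED };

        pthread_mutex_lock(&c.lock);
        printf("sent: %d\n", chan_send(&c, 42));
        pthread_mutex_unlock(&c.lock);
        return 0;
}
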
@@ -3099,9 +2895,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
        lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
 
        if (conn->hcon->type == LE_LINK)
-               lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+               lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
        else
-               lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
+               lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
 
        cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
        cmd->code  = code;
@@ -3214,8 +3010,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
                efs.stype       = chan->local_stype;
                efs.msdu        = cpu_to_le16(chan->local_msdu);
                efs.sdu_itime   = cpu_to_le32(chan->local_sdu_itime);
-               efs.acc_lat     = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
-               efs.flush_to    = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
+               efs.acc_lat     = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+               efs.flush_to    = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
                break;
 
        case L2CAP_MODE_STREAMING:
@@ -3356,8 +3152,8 @@ static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
                rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
                rfc->monitor_timeout = rfc->retrans_timeout;
        } else {
-               rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
-               rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+               rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+               rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
        }
 }
 
@@ -3489,7 +3285,7 @@ done:
        }
 
        req->dcid  = cpu_to_le16(chan->dcid);
-       req->flags = __constant_cpu_to_le16(0);
+       req->flags = cpu_to_le16(0);
 
        return ptr - data;
 }
@@ -3703,7 +3499,7 @@ done:
        }
        rsp->scid   = cpu_to_le16(chan->dcid);
        rsp->result = cpu_to_le16(result);
-       rsp->flags  = __constant_cpu_to_le16(0);
+       rsp->flags  = cpu_to_le16(0);
 
        return ptr - data;
 }
@@ -3812,7 +3608,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
        }
 
        req->dcid   = cpu_to_le16(chan->dcid);
-       req->flags  = __constant_cpu_to_le16(0);
+       req->flags  = cpu_to_le16(0);
 
        return ptr - data;
 }
@@ -3843,7 +3639,7 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
        rsp.mtu     = cpu_to_le16(chan->imtu);
        rsp.mps     = cpu_to_le16(chan->mps);
        rsp.credits = cpu_to_le16(chan->rx_credits);
-       rsp.result  = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+       rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
 
        l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
                       &rsp);
@@ -3858,8 +3654,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
 
        rsp.scid   = cpu_to_le16(chan->dcid);
        rsp.dcid   = cpu_to_le16(chan->scid);
-       rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
-       rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+       rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
 
        if (chan->hs_hcon)
                rsp_code = L2CAP_CREATE_CHAN_RSP;
@@ -3888,8 +3684,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
        u16 txwin_ext = chan->ack_win;
        struct l2cap_conf_rfc rfc = {
                .mode = chan->mode,
-               .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
-               .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
+               .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
+               .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
                .max_pdu_size = cpu_to_le16(chan->imtu),
                .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
        };
@@ -3980,7 +3776,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
        l2cap_chan_lock(pchan);
 
        /* Check if the ACL is secure enough (if not SDP) */
-       if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
+       if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
            !hci_conn_check_link_mode(conn->hcon)) {
                conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
                result = L2CAP_CR_SEC_BLOCK;
@@ -4065,7 +3861,7 @@ sendresp:
 
        if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
                struct l2cap_info_req info;
-               info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
+               info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
 
                conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
                conn->info_ident = l2cap_get_ident(conn);
@@ -4214,7 +4010,7 @@ static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
 {
        struct l2cap_cmd_rej_cid rej;
 
-       rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
+       rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
        rej.scid = __cpu_to_le16(scid);
        rej.dcid = __cpu_to_le16(dcid);
 
@@ -4546,8 +4342,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
                u8 buf[8];
                u32 feat_mask = l2cap_feat_mask;
                struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
-               rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
-               rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
+               rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+               rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
                if (!disable_ertm)
                        feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
                                | L2CAP_FEAT_FCS;
@@ -4567,15 +4363,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
                else
                        l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
 
-               rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
-               rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
+               rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+               rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
                memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
                l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
                               buf);
        } else {
                struct l2cap_info_rsp rsp;
                rsp.type   = cpu_to_le16(type);
-               rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
+               rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
                l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
                               &rsp);
        }
@@ -4620,7 +4416,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
 
                if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
                        struct l2cap_info_req req;
-                       req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+                       req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
 
                        conn->info_ident = l2cap_get_ident(conn);
 
@@ -4714,8 +4510,8 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
 error:
        rsp.dcid = 0;
        rsp.scid = cpu_to_le16(scid);
-       rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
-       rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+       rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
+       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
 
        l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
                       sizeof(rsp), &rsp);
@@ -4779,7 +4575,7 @@ static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
        BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
 
        cfm.icid = cpu_to_le16(icid);
-       cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
+       cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
 
        l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
                       sizeof(cfm), &cfm);
@@ -4962,12 +4758,12 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
 
                if (result == L2CAP_CR_SUCCESS) {
                        /* Send successful response */
-                       rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
-                       rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+                       rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
                } else {
                        /* Send negative response */
-                       rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
-                       rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
+                       rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
+                       rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
                }
 
                l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
@@ -5095,7 +4891,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
        chan = l2cap_get_chan_by_dcid(conn, icid);
        if (!chan) {
                rsp.icid = cpu_to_le16(icid);
-               rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
+               rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
                l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
                               sizeof(rsp), &rsp);
                return 0;
@@ -5439,9 +5235,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
        err = l2cap_check_conn_param(min, max, latency, to_multiplier);
        if (err)
-               rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
        else
-               rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+               rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
 
        l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
                       sizeof(rsp), &rsp);
@@ -5709,7 +5505,7 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
 {
        struct l2cap_le_credits *pkt;
        struct l2cap_chan *chan;
-       u16 cid, credits;
+       u16 cid, credits, max_credits;
 
        if (cmd_len != sizeof(*pkt))
                return -EPROTO;
@@ -5724,6 +5520,17 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
        if (!chan)
                return -EBADSLT;
 
+       max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
+       if (credits > max_credits) {
+               BT_ERR("LE credits overflow");
+               l2cap_send_disconn_req(chan, ECONNRESET);
+
+               /* Return 0 so that we don't trigger an unnecessary
+                * command reject packet.
+                */
+               return 0;
+       }
+
        chan->tx_credits += credits;
 
        while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
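The hunk above caps how many credits a peer may hand back so that chan->tx_credits, a 16-bit counter, can never wrap past LE_FLOWCTL_MAX_CREDITS. A minimal standalone sketch of the same bounded-accumulator check, assuming a value of 65535 for the limit and a bare counter in place of the channel struct:

#include <stdint.h>
#include <stdio.h>

#define LE_FLOWCTL_MAX_CREDITS 65535	/* assumed value for this sketch */

/* Return 0 on success, -1 when the new credits would push the 16-bit
 * counter past the limit (the kernel hunk disconnects in that case).
 */
static int add_tx_credits(uint16_t *tx_credits, uint16_t credits)
{
	uint16_t max_credits = LE_FLOWCTL_MAX_CREDITS - *tx_credits;

	if (credits > max_credits)
		return -1;

	*tx_credits += credits;
	return 0;
}

int main(void)
{
	uint16_t tx_credits = 65000;
	int ret;

	ret = add_tx_credits(&tx_credits, 500);
	printf("add 500 -> %d, credits now %u\n", ret, tx_credits);

	ret = add_tx_credits(&tx_credits, 100);	/* rejected: would overflow */
	printf("add 100 -> %d, credits now %u\n", ret, tx_credits);

	return 0;
}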
@@ -5770,17 +5577,6 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
 {
        int err = 0;
 
-       if (!enable_lecoc) {
-               switch (cmd->code) {
-               case L2CAP_LE_CONN_REQ:
-               case L2CAP_LE_CONN_RSP:
-               case L2CAP_LE_CREDITS:
-               case L2CAP_DISCONN_REQ:
-               case L2CAP_DISCONN_RSP:
-                       return -EINVAL;
-               }
-       }
-
        switch (cmd->code) {
        case L2CAP_COMMAND_REJ:
                l2cap_le_command_rej(conn, cmd, cmd_len, data);
@@ -5854,7 +5650,7 @@ static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
 
                BT_ERR("Wrong link type (%d)", err);
 
-               rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+               rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
                l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
                               sizeof(rej), &rej);
        }
@@ -5899,7 +5695,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 
                        BT_ERR("Wrong link type (%d)", err);
 
-                       rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+                       rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
                        l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
                                       sizeof(rej), &rej);
                }
@@ -6871,6 +6667,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
 
        if (!chan->rx_credits) {
                BT_ERR("No credits to receive LE L2CAP data");
+               l2cap_send_disconn_req(chan, ECONNRESET);
                return -ENOBUFS;
        }
 
@@ -6995,8 +6792,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
                 * But we don't have any other choice. L2CAP doesn't
                 * provide flow control mechanism. */
 
-               if (chan->imtu < skb->len)
+               if (chan->imtu < skb->len) {
+                       BT_ERR("Dropping L2CAP data: receive buffer overflow");
                        goto drop;
+               }
 
                if (!chan->ops->recv(chan, skb))
                        goto done;
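The new BT_ERR only makes the existing policy visible: a connectionless frame larger than the channel's incoming MTU is dropped because, as the comment above notes, L2CAP offers no flow control to push back on the sender. A tiny sketch of that guard, assuming the classic default MTU of 672 bytes:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEFAULT_IMTU 672	/* classic L2CAP default MTU, assumed here */

/* Return true if the frame fits the receive MTU and may be delivered. */
static bool frame_fits_imtu(size_t frame_len, size_t imtu)
{
	if (frame_len > imtu) {
		fprintf(stderr,
			"Dropping L2CAP data: receive buffer overflow\n");
		return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", frame_fits_imtu(300, DEFAULT_IMTU));	/* 1 */
	printf("%d\n", frame_fits_imtu(1500, DEFAULT_IMTU));	/* 0 */
	return 0;
}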
@@ -7084,9 +6883,16 @@ drop:
 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
 {
        struct l2cap_hdr *lh = (void *) skb->data;
+       struct hci_conn *hcon = conn->hcon;
        u16 cid, len;
        __le16 psm;
 
+       if (hcon->state != BT_CONNECTED) {
+               BT_DBG("queueing pending rx skb");
+               skb_queue_tail(&conn->pending_rx, skb);
+               return;
+       }
+
        skb_pull(skb, L2CAP_HDR_SIZE);
        cid = __le16_to_cpu(lh->cid);
        len = __le16_to_cpu(lh->len);
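The check above parks any frame that arrives before the HCI link reaches BT_CONNECTED on conn->pending_rx; process_pending_rx(), added a few hunks below, later drains that queue from a work item. A single-threaded sketch of the same queue-then-drain shape, using a hand-rolled list in place of the sk_buff queue and workqueue (the names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct frame {
	int id;
	struct frame *next;
};

struct conn {
	int connected;			/* stands in for hcon->state */
	struct frame *pending_head;	/* stands in for conn->pending_rx */
	struct frame **pending_tail;
};

static void process_frame(struct frame *f)
{
	printf("processing frame %d\n", f->id);
	free(f);
}

/* Queue the frame if the link is not up yet, otherwise handle it now. */
static void recv_frame(struct conn *c, struct frame *f)
{
	if (!c->connected) {
		f->next = NULL;
		*c->pending_tail = f;
		c->pending_tail = &f->next;
		return;
	}
	process_frame(f);
}

/* Counterpart of process_pending_rx(): drain everything once connected. */
static void drain_pending(struct conn *c)
{
	struct frame *f;

	while ((f = c->pending_head)) {
		c->pending_head = f->next;
		process_frame(f);
	}
	c->pending_tail = &c->pending_head;
}

int main(void)
{
	struct conn c = { 0, NULL, &c.pending_head };
	struct frame *f;
	int i;

	for (i = 1; i <= 3; i++) {
		f = malloc(sizeof(*f));
		if (!f)
			return 1;
		f->id = i;
		recv_frame(&c, f);	/* queued: not connected yet */
	}

	c.connected = 1;
	drain_pending(&c);
	return 0;
}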
@@ -7132,6 +6938,247 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
        }
 }
 
+static void process_pending_rx(struct work_struct *work)
+{
+       struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+                                              pending_rx_work);
+       struct sk_buff *skb;
+
+       BT_DBG("");
+
+       while ((skb = skb_dequeue(&conn->pending_rx)))
+               l2cap_recv_frame(conn, skb);
+}
+
+static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
+{
+       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct hci_chan *hchan;
+
+       if (conn)
+               return conn;
+
+       hchan = hci_chan_create(hcon);
+       if (!hchan)
+               return NULL;
+
+       conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
+       if (!conn) {
+               hci_chan_del(hchan);
+               return NULL;
+       }
+
+       kref_init(&conn->ref);
+       hcon->l2cap_data = conn;
+       conn->hcon = hcon;
+       hci_conn_get(conn->hcon);
+       conn->hchan = hchan;
+
+       BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+
+       switch (hcon->type) {
+       case LE_LINK:
+               if (hcon->hdev->le_mtu) {
+                       conn->mtu = hcon->hdev->le_mtu;
+                       break;
+               }
+               /* fall through */
+       default:
+               conn->mtu = hcon->hdev->acl_mtu;
+               break;
+       }
+
+       conn->feat_mask = 0;
+
+       if (hcon->type == ACL_LINK)
+               conn->hs_enabled = test_bit(HCI_HS_ENABLED,
+                                           &hcon->hdev->dev_flags);
+
+       spin_lock_init(&conn->lock);
+       mutex_init(&conn->chan_lock);
+
+       INIT_LIST_HEAD(&conn->chan_l);
+       INIT_LIST_HEAD(&conn->users);
+
+       if (hcon->type == LE_LINK)
+               INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
+       else
+               INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
+
+       skb_queue_head_init(&conn->pending_rx);
+       INIT_WORK(&conn->pending_rx_work, process_pending_rx);
+
+       conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
+
+       return conn;
+}
+
+static bool is_valid_psm(u16 psm, u8 dst_type)
+{
+       if (!psm)
+               return false;
+
+       if (bdaddr_type_is_le(dst_type))
+               return (psm <= 0x00ff);
+
+       /* PSM must be odd and lsb of upper byte must be 0 */
+       return ((psm & 0x0101) == 0x0001);
+}
+
+int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
+                      bdaddr_t *dst, u8 dst_type)
+{
+       struct l2cap_conn *conn;
+       struct hci_conn *hcon;
+       struct hci_dev *hdev;
+       __u8 auth_type;
+       int err;
+
+       BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
+              dst_type, __le16_to_cpu(psm));
+
+       hdev = hci_get_route(dst, &chan->src);
+       if (!hdev)
+               return -EHOSTUNREACH;
+
+       hci_dev_lock(hdev);
+
+       l2cap_chan_lock(chan);
+
+       if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
+           chan->chan_type != L2CAP_CHAN_RAW) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
+               err = -EINVAL;
+               goto done;
+       }
+
+       switch (chan->mode) {
+       case L2CAP_MODE_BASIC:
+               break;
+       case L2CAP_MODE_LE_FLOWCTL:
+               l2cap_le_flowctl_init(chan);
+               break;
+       case L2CAP_MODE_ERTM:
+       case L2CAP_MODE_STREAMING:
+               if (!disable_ertm)
+                       break;
+               /* fall through */
+       default:
+               err = -ENOTSUPP;
+               goto done;
+       }
+
+       switch (chan->state) {
+       case BT_CONNECT:
+       case BT_CONNECT2:
+       case BT_CONFIG:
+               /* Already connecting */
+               err = 0;
+               goto done;
+
+       case BT_CONNECTED:
+               /* Already connected */
+               err = -EISCONN;
+               goto done;
+
+       case BT_OPEN:
+       case BT_BOUND:
+               /* Can connect */
+               break;
+
+       default:
+               err = -EBADFD;
+               goto done;
+       }
+
+       /* Set destination address and psm */
+       bacpy(&chan->dst, dst);
+       chan->dst_type = dst_type;
+
+       chan->psm = psm;
+       chan->dcid = cid;
+
+       auth_type = l2cap_get_auth_type(chan);
+
+       if (bdaddr_type_is_le(dst_type)) {
+               /* Convert from L2CAP channel address type to HCI address type
+                */
+               if (dst_type == BDADDR_LE_PUBLIC)
+                       dst_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       dst_type = ADDR_LE_DEV_RANDOM;
+
+               hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
+                                     auth_type);
+       } else {
+               hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
+       }
+
+       if (IS_ERR(hcon)) {
+               err = PTR_ERR(hcon);
+               goto done;
+       }
+
+       conn = l2cap_conn_add(hcon);
+       if (!conn) {
+               hci_conn_drop(hcon);
+               err = -ENOMEM;
+               goto done;
+       }
+
+       if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
+               hci_conn_drop(hcon);
+               err = -EBUSY;
+               goto done;
+       }
+
+       /* Update source addr of the socket */
+       bacpy(&chan->src, &hcon->src);
+       chan->src_type = bdaddr_type(hcon, hcon->src_type);
+
+       l2cap_chan_unlock(chan);
+       l2cap_chan_add(conn, chan);
+       l2cap_chan_lock(chan);
+
+       /* l2cap_chan_add takes its own ref so we can drop this one */
+       hci_conn_drop(hcon);
+
+       l2cap_state_change(chan, BT_CONNECT);
+       __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+
+       /* Release chan->sport so that it can be reused by other
+        * sockets (as it's only used for listening sockets).
+        */
+       write_lock(&chan_list_lock);
+       chan->sport = 0;
+       write_unlock(&chan_list_lock);
+
+       if (hcon->state == BT_CONNECTED) {
+               if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+                       __clear_chan_timer(chan);
+                       if (l2cap_chan_check_security(chan))
+                               l2cap_state_change(chan, BT_CONNECTED);
+               } else
+                       l2cap_do_start(chan);
+       }
+
+       err = 0;
+
+done:
+       l2cap_chan_unlock(chan);
+       hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
+       return err;
+}
+
 /* ---- L2CAP interface with lower layer (HCI) ---- */
 
 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
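is_valid_psm() above encodes two distinct rules: LE destinations only take PSMs up to 0x00ff, while BR/EDR PSMs must be odd with the least significant bit of the upper octet clear. A standalone sketch of that predicate with a few probe values, using a plain flag where the kernel calls bdaddr_type_is_le():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool psm_is_valid(uint16_t psm, bool dst_is_le)
{
	if (!psm)
		return false;

	if (dst_is_le)
		return psm <= 0x00ff;

	/* BR/EDR: PSM must be odd and the lsb of the upper byte must be 0 */
	return (psm & 0x0101) == 0x0001;
}

int main(void)
{
	printf("0x0001 BR/EDR: %d\n", psm_is_valid(0x0001, false)); /* 1 */
	printf("0x0002 BR/EDR: %d\n", psm_is_valid(0x0002, false)); /* 0: even */
	printf("0x0101 BR/EDR: %d\n", psm_is_valid(0x0101, false)); /* 0: bit 8 set */
	printf("0x0080 LE:     %d\n", psm_is_valid(0x0080, true));  /* 1 */
	printf("0x0100 LE:     %d\n", psm_is_valid(0x0100, true));  /* 0: > 0xff */
	return 0;
}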
@@ -7206,7 +7253,8 @@ static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
        if (encrypt == 0x00) {
                if (chan->sec_level == BT_SECURITY_MEDIUM) {
                        __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
-               } else if (chan->sec_level == BT_SECURITY_HIGH)
+               } else if (chan->sec_level == BT_SECURITY_HIGH ||
+                          chan->sec_level == BT_SECURITY_FIPS)
                        l2cap_chan_close(chan, ECONNREFUSED);
        } else {
                if (chan->sec_level == BT_SECURITY_MEDIUM)
@@ -7226,7 +7274,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 
        if (hcon->type == LE_LINK) {
                if (!status && encrypt)
-                       smp_distribute_keys(conn, 0);
+                       smp_distribute_keys(conn);
                cancel_delayed_work(&conn->security_timer);
        }
 
@@ -7238,7 +7286,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
                       state_to_string(chan->state));
 
-               if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+               if (chan->scid == L2CAP_CID_A2MP) {
                        l2cap_chan_unlock(chan);
                        continue;
                }
index d58f76bcebd1e6866183360f778881f8e84d49b3..f59e00c2daa9cb5e485cb7832cb80486660ca6bf 100644 (file)
@@ -36,8 +36,6 @@
 
 #include "smp.h"
 
-bool enable_lecoc;
-
 static struct bt_sock_list l2cap_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
 };
@@ -101,12 +99,19 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
                return -EINVAL;
 
+       if (la.l2_cid) {
+               /* When the socket gets created it defaults to
+                * CHAN_CONN_ORIENTED, so we need to overwrite the
+                * default here.
+                */
+               chan->chan_type = L2CAP_CHAN_FIXED;
+               chan->omtu = L2CAP_DEFAULT_MTU;
+       }
+
        if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
-               if (!enable_lecoc && la.l2_psm)
-                       return -EINVAL;
                /* We only allow ATT user space socket */
                if (la.l2_cid &&
-                   la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+                   la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
                        return -EINVAL;
        }
 
@@ -204,7 +209,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
                 * ATT. Anything else is an invalid combination.
                 */
                if (chan->scid != L2CAP_CID_ATT ||
-                   la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+                   la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
                        return -EINVAL;
 
                /* We don't have the hdev available here to make a
@@ -220,11 +225,9 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
-               if (!enable_lecoc && la.l2_psm)
-                       return -EINVAL;
                /* We only allow ATT user space socket */
                if (la.l2_cid &&
-                   la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+                   la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
                        return -EINVAL;
        }
 
@@ -357,17 +360,21 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       if (peer && sk->sk_state != BT_CONNECTED &&
+           sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
+               return -ENOTCONN;
+
        memset(la, 0, sizeof(struct sockaddr_l2));
        addr->sa_family = AF_BLUETOOTH;
        *len = sizeof(struct sockaddr_l2);
 
+       la->l2_psm = chan->psm;
+
        if (peer) {
-               la->l2_psm = chan->psm;
                bacpy(&la->l2_bdaddr, &chan->dst);
                la->l2_cid = cpu_to_le16(chan->dcid);
                la->l2_bdaddr_type = chan->dst_type;
        } else {
-               la->l2_psm = chan->sport;
                bacpy(&la->l2_bdaddr, &chan->src);
                la->l2_cid = cpu_to_le16(chan->scid);
                la->l2_bdaddr_type = chan->src_type;
@@ -432,6 +439,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
                        opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
                              L2CAP_LM_SECURE;
                        break;
+               case BT_SECURITY_FIPS:
+                       opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
+                             L2CAP_LM_SECURE | L2CAP_LM_FIPS;
+                       break;
                default:
                        opt = 0;
                        break;
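The new BT_SECURITY_FIPS case extends the existing security-level-to-link-mode translation with the L2CAP_LM_FIPS bit. A compact sketch of that mapping; the flag values and the lower levels are stand-ins for this sketch, since only the HIGH and FIPS combinations are visible in the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the kernel defines its own L2CAP_LM_* bits. */
#define LM_AUTH		0x0002
#define LM_ENCRYPT	0x0004
#define LM_SECURE	0x0020
#define LM_FIPS		0x0040

enum sec_level { SEC_SDP, SEC_LOW, SEC_MEDIUM, SEC_HIGH, SEC_FIPS };

static uint32_t sec_level_to_lm(enum sec_level level)
{
	switch (level) {
	case SEC_LOW:
		return LM_AUTH;
	case SEC_MEDIUM:
		return LM_AUTH | LM_ENCRYPT;
	case SEC_HIGH:
		return LM_AUTH | LM_ENCRYPT | LM_SECURE;
	case SEC_FIPS:
		return LM_AUTH | LM_ENCRYPT | LM_SECURE | LM_FIPS;
	default:
		return 0;
	}
}

int main(void)
{
	printf("FIPS -> 0x%04x\n", sec_level_to_lm(SEC_FIPS)); /* 0x0066 */
	return 0;
}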
@@ -445,6 +456,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 
                if (put_user(opt, (u32 __user *) optval))
                        err = -EFAULT;
+
                break;
 
        case L2CAP_CONNINFO:
@@ -499,6 +511,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
        switch (optname) {
        case BT_SECURITY:
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+                   chan->chan_type != L2CAP_CHAN_FIXED &&
                    chan->chan_type != L2CAP_CHAN_RAW) {
                        err = -EINVAL;
                        break;
@@ -560,11 +573,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_SNDMTU:
-               if (!enable_lecoc) {
-                       err = -EPROTONOSUPPORT;
-                       break;
-               }
-
                if (!bdaddr_type_is_le(chan->src_type)) {
                        err = -EINVAL;
                        break;
@@ -580,11 +588,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_RCVMTU:
-               if (!enable_lecoc) {
-                       err = -EPROTONOSUPPORT;
-                       break;
-               }
-
                if (!bdaddr_type_is_le(chan->src_type)) {
                        err = -EINVAL;
                        break;
@@ -699,6 +702,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
                        break;
                }
 
+               if (opt & L2CAP_LM_FIPS) {
+                       err = -EINVAL;
+                       break;
+               }
+
                if (opt & L2CAP_LM_AUTH)
                        chan->sec_level = BT_SECURITY_LOW;
                if (opt & L2CAP_LM_ENCRYPT)
@@ -750,6 +758,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
        switch (optname) {
        case BT_SECURITY:
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
+                   chan->chan_type != L2CAP_CHAN_FIXED &&
                    chan->chan_type != L2CAP_CHAN_RAW) {
                        err = -EINVAL;
                        break;
@@ -895,11 +904,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_SNDMTU:
-               if (!enable_lecoc) {
-                       err = -EPROTONOSUPPORT;
-                       break;
-               }
-
                if (!bdaddr_type_is_le(chan->src_type)) {
                        err = -EINVAL;
                        break;
@@ -912,11 +916,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_RCVMTU:
-               if (!enable_lecoc) {
-                       err = -EPROTONOSUPPORT;
-                       break;
-               }
-
                if (!bdaddr_type_is_le(chan->src_type)) {
                        err = -EINVAL;
                        break;
@@ -1449,6 +1448,11 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
                chan->tx_credits = pchan->tx_credits;
                chan->rx_credits = pchan->rx_credits;
 
+               if (chan->chan_type == L2CAP_CHAN_FIXED) {
+                       chan->scid = pchan->scid;
+                       chan->dcid = pchan->scid;
+               }
+
                security_sk_clone(parent, sk);
        } else {
                switch (sk->sk_type) {
@@ -1614,6 +1618,3 @@ void l2cap_cleanup_sockets(void)
        bt_sock_unregister(BTPROTO_L2CAP);
        proto_unregister(&l2cap_proto);
 }
-
-module_param(enable_lecoc, bool, 0644);
-MODULE_PARM_DESC(enable_lecoc, "Enable support for LE CoC");
index a03ca3ca91bfa77e2663a09f90ce2271addb2331..d2d4e0d5aed017366668bf263538baf332255d20 100644 (file)
@@ -34,7 +34,7 @@
 #include "smp.h"
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  4
+#define MGMT_REVISION  5
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -79,6 +79,10 @@ static const u16 mgmt_commands[] = {
        MGMT_OP_SET_BREDR,
        MGMT_OP_SET_STATIC_ADDRESS,
        MGMT_OP_SET_SCAN_PARAMS,
+       MGMT_OP_SET_SECURE_CONN,
+       MGMT_OP_SET_DEBUG_KEYS,
+       MGMT_OP_SET_PRIVACY,
+       MGMT_OP_LOAD_IRKS,
 };
 
 static const u16 mgmt_events[] = {
@@ -103,6 +107,8 @@ static const u16 mgmt_events[] = {
        MGMT_EV_DEVICE_UNBLOCKED,
        MGMT_EV_DEVICE_UNPAIRED,
        MGMT_EV_PASSKEY_NOTIFY,
+       MGMT_EV_NEW_IRK,
+       MGMT_EV_NEW_CSRK,
 };
 
 #define CACHE_TIMEOUT  msecs_to_jiffies(2 * 1000)
@@ -127,7 +133,7 @@ static u8 mgmt_status_table[] = {
        MGMT_STATUS_FAILED,             /* Hardware Failure */
        MGMT_STATUS_CONNECT_FAILED,     /* Page Timeout */
        MGMT_STATUS_AUTH_FAILED,        /* Authentication Failed */
-       MGMT_STATUS_NOT_PAIRED,         /* PIN or Key Missing */
+       MGMT_STATUS_AUTH_FAILED,        /* PIN or Key Missing */
        MGMT_STATUS_NO_RESOURCES,       /* Memory Full */
        MGMT_STATUS_TIMEOUT,            /* Connection Timeout */
        MGMT_STATUS_NO_RESOURCES,       /* Max Number of Connections */
@@ -207,7 +213,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
 
        hdr = (void *) skb_put(skb, sizeof(*hdr));
 
-       hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
+       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
        hdr->index = cpu_to_le16(index);
        hdr->len = cpu_to_le16(sizeof(*ev));
 
@@ -238,7 +244,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
 
        hdr = (void *) skb_put(skb, sizeof(*hdr));
 
-       hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
+       hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
        hdr->index = cpu_to_le16(index);
        hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
 
@@ -264,7 +270,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
        BT_DBG("sock %p", sk);
 
        rp.version = MGMT_VERSION;
-       rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
+       rp.revision = cpu_to_le16(MGMT_REVISION);
 
        return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
                            sizeof(rp));
@@ -288,8 +294,8 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
        if (!rp)
                return -ENOMEM;
 
-       rp->num_commands = __constant_cpu_to_le16(num_commands);
-       rp->num_events = __constant_cpu_to_le16(num_events);
+       rp->num_commands = cpu_to_le16(num_commands);
+       rp->num_events = cpu_to_le16(num_events);
 
        for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
                put_unaligned_le16(mgmt_commands[i], opcode);
@@ -363,6 +369,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
 
        settings |= MGMT_SETTING_POWERED;
        settings |= MGMT_SETTING_PAIRABLE;
+       settings |= MGMT_SETTING_DEBUG_KEYS;
 
        if (lmp_bredr_capable(hdev)) {
                settings |= MGMT_SETTING_CONNECTABLE;
@@ -376,11 +383,16 @@ static u32 get_supported_settings(struct hci_dev *hdev)
                        settings |= MGMT_SETTING_SSP;
                        settings |= MGMT_SETTING_HS;
                }
+
+               if (lmp_sc_capable(hdev) ||
+                   test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+                       settings |= MGMT_SETTING_SECURE_CONN;
        }
 
        if (lmp_le_capable(hdev)) {
                settings |= MGMT_SETTING_LE;
                settings |= MGMT_SETTING_ADVERTISING;
+               settings |= MGMT_SETTING_PRIVACY;
        }
 
        return settings;
@@ -423,6 +435,15 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                settings |= MGMT_SETTING_ADVERTISING;
 
+       if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+               settings |= MGMT_SETTING_SECURE_CONN;
+
+       if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
+               settings |= MGMT_SETTING_DEBUG_KEYS;
+
+       if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+               settings |= MGMT_SETTING_PRIVACY;
+
        return settings;
 }
 
@@ -629,14 +650,8 @@ static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
 
        flags |= get_adv_discov_flags(hdev);
 
-       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
-               if (lmp_le_br_capable(hdev))
-                       flags |= LE_AD_SIM_LE_BREDR_CTRL;
-               if (lmp_host_le_br_capable(hdev))
-                       flags |= LE_AD_SIM_LE_BREDR_HOST;
-       } else {
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                flags |= LE_AD_NO_BREDR;
-       }
 
        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);
@@ -803,6 +818,64 @@ static void update_class(struct hci_request *req)
        hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
 }
 
+static bool get_connectable(struct hci_dev *hdev)
+{
+       struct pending_cmd *cmd;
+
+       /* If there's a pending mgmt command the flag will not yet have
+        * its final value, so check for this first.
+        */
+       cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+       if (cmd) {
+               struct mgmt_mode *cp = cmd->param;
+               return cp->val;
+       }
+
+       return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+}
+
+static void enable_advertising(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_param cp;
+       u8 own_addr_type, enable = 0x01;
+       bool connectable;
+
+       /* Clear the HCI_ADVERTISING bit temporarily so that the
+        * hci_update_random_address knows that it's safe to go ahead
+        * and write a new random address. The flag will be set back on
+        * as soon as the SET_ADV_ENABLE HCI command completes.
+        */
+       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+       connectable = get_connectable(hdev);
+
+       /* Set require_privacy to true only when non-connectable
+        * advertising is used. In that case it is fine to use a
+        * non-resolvable private address.
+        */
+       if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.min_interval = cpu_to_le16(0x0800);
+       cp.max_interval = cpu_to_le16(0x0800);
+       cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+       cp.own_address_type = own_addr_type;
+       cp.channel_map = hdev->le_adv_channel_map;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static void disable_advertising(struct hci_request *req)
+{
+       u8 enable = 0x00;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
 static void service_cache_off(struct work_struct *work)
 {
        struct hci_dev *hdev = container_of(work, struct hci_dev,
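The reworked enable_advertising() picks ADV_IND when the device should be connectable and ADV_NONCONN_IND otherwise, both at a fixed 0x0800 interval. A sketch of that parameter selection plus the unit conversion (LE advertising intervals are counted in 0.625 ms slots, so 0x0800 is 1280 ms); the ADV_* values and struct layout are stand-ins for this sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ADV_IND		0x00	/* connectable, undirected */
#define ADV_NONCONN_IND	0x03	/* non-connectable, undirected */

struct adv_param {
	uint16_t min_interval;	/* in 0.625 ms slots */
	uint16_t max_interval;
	uint8_t type;
};

static struct adv_param make_adv_param(bool connectable)
{
	struct adv_param cp = {
		.min_interval = 0x0800,
		.max_interval = 0x0800,
		.type = connectable ? ADV_IND : ADV_NONCONN_IND,
	};
	return cp;
}

int main(void)
{
	struct adv_param cp = make_adv_param(false);

	printf("type 0x%02x, interval %.1f ms\n",
	       cp.type, cp.min_interval * 0.625);	/* 1280.0 ms */
	return 0;
}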
@@ -824,12 +897,39 @@ static void service_cache_off(struct work_struct *work)
        hci_req_run(&req, NULL);
 }
 
+static void rpa_expired(struct work_struct *work)
+{
+       struct hci_dev *hdev = container_of(work, struct hci_dev,
+                                           rpa_expired.work);
+       struct hci_request req;
+
+       BT_DBG("");
+
+       set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+
+       if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
+           hci_conn_num(hdev, LE_LINK) > 0)
+               return;
+
+       /* The generation of a new RPA and programming it into the
+        * controller happens in the enable_advertising() function.
+        */
+
+       hci_req_init(&req, hdev);
+
+       disable_advertising(&req);
+       enable_advertising(&req);
+
+       hci_req_run(&req, NULL);
+}
+
 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
 {
        if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
                return;
 
        INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
+       INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
 
        /* Non-mgmt controlled devices get this bit set
         * implicitly so that pairing works for them, however
@@ -935,6 +1035,71 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
                            sizeof(settings));
 }
 
+static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
+{
+       BT_DBG("%s status 0x%02x", hdev->name, status);
+
+       if (hci_conn_count(hdev) == 0) {
+               cancel_delayed_work(&hdev->power_off);
+               queue_work(hdev->req_workqueue, &hdev->power_off.work);
+       }
+}
+
+static int clean_up_hci_state(struct hci_dev *hdev)
+{
+       struct hci_request req;
+       struct hci_conn *conn;
+
+       hci_req_init(&req, hdev);
+
+       if (test_bit(HCI_ISCAN, &hdev->flags) ||
+           test_bit(HCI_PSCAN, &hdev->flags)) {
+               u8 scan = 0x00;
+               hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       }
+
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+               disable_advertising(&req);
+
+       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+               hci_req_add_le_scan_disable(&req);
+
+       list_for_each_entry(conn, &hdev->conn_hash.list, list) {
+               struct hci_cp_disconnect dc;
+               struct hci_cp_reject_conn_req rej;
+
+               switch (conn->state) {
+               case BT_CONNECTED:
+               case BT_CONFIG:
+                       dc.handle = cpu_to_le16(conn->handle);
+                       dc.reason = 0x15; /* Terminated due to Power Off */
+                       hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+                       break;
+               case BT_CONNECT:
+                       if (conn->type == LE_LINK)
+                               hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
+                                           0, NULL);
+                       else if (conn->type == ACL_LINK)
+                               hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
+                                           6, &conn->dst);
+                       break;
+               case BT_CONNECT2:
+                       bacpy(&rej.bdaddr, &conn->dst);
+                       rej.reason = 0x15; /* Terminated due to Power Off */
+                       if (conn->type == ACL_LINK)
+                               hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
+                                           sizeof(rej), &rej);
+                       else if (conn->type == SCO_LINK)
+                               hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
+                                           sizeof(rej), &rej);
+                       break;
+               }
+       }
+
+       return hci_req_run(&req, clean_up_hci_complete);
+}
+
 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                       u16 len)
 {
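clean_up_hci_state() above queues a different HCI command for each live connection depending on its state: disconnect what is established, cancel what is still connecting, reject what is waiting to be accepted. A stripped-down sketch of that per-state dispatch (the state names and action strings are illustrative, not the kernel enums):

#include <stdio.h>

enum conn_state { ST_CONNECTED, ST_CONFIG, ST_CONNECT, ST_CONNECT2 };

static const char *teardown_action(enum conn_state state)
{
	switch (state) {
	case ST_CONNECTED:
	case ST_CONFIG:
		return "disconnect (reason: power off)";
	case ST_CONNECT:
		return "cancel outgoing connection attempt";
	case ST_CONNECT2:
		return "reject incoming connection request";
	default:
		return "nothing to do";
	}
}

int main(void)
{
	enum conn_state states[] = { ST_CONNECTED, ST_CONNECT, ST_CONNECT2 };
	int i;

	for (i = 0; i < 3; i++)
		printf("state %d -> %s\n", states[i],
		       teardown_action(states[i]));
	return 0;
}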
@@ -978,12 +1143,23 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       if (cp->val)
+       if (cp->val) {
                queue_work(hdev->req_workqueue, &hdev->power_on);
-       else
-               queue_work(hdev->req_workqueue, &hdev->power_off.work);
-
-       err = 0;
+               err = 0;
+       } else {
+               /* Disconnect connections, stop scans, etc */
+               err = clean_up_hci_state(hdev);
+               if (!err)
+                       queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+                                          HCI_POWER_OFF_TIMEOUT);
+
+               /* ENODATA means there were no HCI commands queued */
+               if (err == -ENODATA) {
+                       cancel_delayed_work(&hdev->power_off);
+                       queue_work(hdev->req_workqueue, &hdev->power_off.work);
+                       err = 0;
+               }
+       }
 
 failed:
        hci_dev_unlock(hdev);
@@ -1005,7 +1181,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
        if (hdev)
                hdr->index = cpu_to_le16(hdev->id);
        else
-               hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
+               hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
        hdr->len = cpu_to_le16(data_len);
 
        if (data)
@@ -1317,15 +1493,15 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
                type = PAGE_SCAN_TYPE_INTERLACED;
 
                /* 160 msec page scan interval */
-               acp.interval = __constant_cpu_to_le16(0x0100);
+               acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */
 
                /* default 1.28 sec page scan */
-               acp.interval = __constant_cpu_to_le16(0x0800);
+               acp.interval = cpu_to_le16(0x0800);
        }
 
-       acp.window = __constant_cpu_to_le16(0x0012);
+       acp.window = cpu_to_le16(0x0012);
 
        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
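The magic numbers in write_fast_connectable() are baseband slots of 0.625 ms, which is where the "160 msec" and "1.28 sec" comments come from; the shared 0x0012 window works out to 11.25 ms. A quick conversion sketch, assuming only the 0.625 ms slot length:

#include <stdint.h>
#include <stdio.h>

/* BR/EDR page scan timing is programmed in 0.625 ms baseband slots. */
static double slots_to_ms(uint16_t slots)
{
	return slots * 0.625;
}

int main(void)
{
	printf("interval 0x0100 = %.2f ms\n", slots_to_ms(0x0100)); /* 160.00 */
	printf("interval 0x0800 = %.2f ms\n", slots_to_ms(0x0800)); /* 1280.00 */
	printf("window   0x0012 = %.2f ms\n", slots_to_ms(0x0012)); /* 11.25 */
	return 0;
}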
@@ -1336,50 +1512,6 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 }
 
-static u8 get_adv_type(struct hci_dev *hdev)
-{
-       struct pending_cmd *cmd;
-       bool connectable;
-
-       /* If there's a pending mgmt command the flag will not yet have
-        * it's final value, so check for this first.
-        */
-       cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
-       if (cmd) {
-               struct mgmt_mode *cp = cmd->param;
-               connectable = !!cp->val;
-       } else {
-               connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-       }
-
-       return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
-}
-
-static void enable_advertising(struct hci_request *req)
-{
-       struct hci_dev *hdev = req->hdev;
-       struct hci_cp_le_set_adv_param cp;
-       u8 enable = 0x01;
-
-       memset(&cp, 0, sizeof(cp));
-       cp.min_interval = __constant_cpu_to_le16(0x0800);
-       cp.max_interval = __constant_cpu_to_le16(0x0800);
-       cp.type = get_adv_type(hdev);
-       cp.own_address_type = hdev->own_addr_type;
-       cp.channel_map = 0x07;
-
-       hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
-
-       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
-static void disable_advertising(struct hci_request *req)
-{
-       u8 enable = 0x00;
-
-       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 {
        struct pending_cmd *cmd;
@@ -2065,7 +2197,7 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
-               err = hci_uuids_clear(hdev);
+               hci_uuids_clear(hdev);
 
                if (enable_service_cache(hdev)) {
                        err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
@@ -2205,6 +2337,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_cp_load_link_keys *cp = data;
        u16 key_count, expected_len;
+       bool changed;
        int i;
 
        BT_DBG("request for %s", hdev->name);
@@ -2219,7 +2352,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
                                        sizeof(struct mgmt_link_key_info);
        if (expected_len != len) {
                BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
-                      len, expected_len);
+                      expected_len, len);
                return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
                                  MGMT_STATUS_INVALID_PARAMS);
        }
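Beyond swapping the BT_ERR arguments into the right order, this hunk shows the validation pattern mgmt.c uses for every variable-length command: the expected size is the fixed header plus key_count per-record entries, and anything else is rejected before the payload is parsed. A minimal sketch of that check with invented struct layouts (the real ones live in the mgmt headers):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct key_info {		/* stand-in for struct mgmt_link_key_info */
	uint8_t addr[7];
	uint8_t type;
	uint8_t val[16];
};

struct load_keys_cmd {		/* stand-in for struct mgmt_cp_load_link_keys */
	uint8_t debug_keys;
	uint16_t key_count;
	struct key_info keys[];
};

static int check_load_keys(const void *data, size_t len)
{
	const struct load_keys_cmd *cp = data;
	size_t expected_len;

	if (len < sizeof(*cp))
		return -1;

	/* The kernel converts key_count from little endian here. */
	expected_len = sizeof(*cp) + cp->key_count * sizeof(struct key_info);

	if (expected_len != len) {
		fprintf(stderr, "expected %zu bytes, got %zu bytes\n",
			expected_len, len);
		return -1;
	}
	return 0;
}

int main(void)
{
	size_t len = sizeof(struct load_keys_cmd) + 2 * sizeof(struct key_info);
	struct load_keys_cmd *cp = calloc(1, len);

	if (!cp)
		return 1;
	cp->key_count = 2;

	printf("well-formed: %d\n", check_load_keys(cp, len));		/* 0 */
	printf("truncated:   %d\n", check_load_keys(cp, len - 1));	/* -1 */

	free(cp);
	return 0;
}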
@@ -2234,7 +2367,7 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        for (i = 0; i < key_count; i++) {
                struct mgmt_link_key_info *key = &cp->keys[i];
 
-               if (key->addr.type != BDADDR_BREDR)
+               if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
                        return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
                                          MGMT_STATUS_INVALID_PARAMS);
        }
@@ -2244,9 +2377,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_link_keys_clear(hdev);
 
        if (cp->debug_keys)
-               set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+               changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
        else
-               clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+               changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+
+       if (changed)
+               new_settings(hdev, NULL);
 
        for (i = 0; i < key_count; i++) {
                struct mgmt_link_key_info *key = &cp->keys[i];
@@ -2306,10 +2442,22 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
-       if (cp->addr.type == BDADDR_BREDR)
+       if (cp->addr.type == BDADDR_BREDR) {
                err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
-       else
-               err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
+       } else {
+               u8 addr_type;
+
+               if (cp->addr.type == BDADDR_LE_PUBLIC)
+                       addr_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       addr_type = ADDR_LE_DEV_RANDOM;
+
+               hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
+
+               hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
+
+               err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+       }
 
        if (err < 0) {
                err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
@@ -2633,6 +2781,16 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
        mgmt_pending_remove(cmd);
 }
 
+void mgmt_smp_complete(struct hci_conn *conn, bool complete)
+{
+       u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
+       struct pending_cmd *cmd;
+
+       cmd = find_pairing(conn);
+       if (cmd)
+               pairing_complete(cmd, status);
+}
+
 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
 {
        struct pending_cmd *cmd;
@@ -2646,7 +2804,7 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status)
                pairing_complete(cmd, mgmt_status(status));
 }
 
-static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
+static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
 {
        struct pending_cmd *cmd;
 
@@ -2697,12 +2855,22 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        else
                auth_type = HCI_AT_DEDICATED_BONDING_MITM;
 
-       if (cp->addr.type == BDADDR_BREDR)
-               conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
-                                  cp->addr.type, sec_level, auth_type);
-       else
-               conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
-                                  cp->addr.type, sec_level, auth_type);
+       if (cp->addr.type == BDADDR_BREDR) {
+               conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
+                                      auth_type);
+       } else {
+               u8 addr_type;
+
+               /* Convert from mgmt address type to HCI address type */
+               if (cp->addr.type == BDADDR_LE_PUBLIC)
+                       addr_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       addr_type = ADDR_LE_DEV_RANDOM;
+
+               conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
+                                     sec_level, auth_type);
+       }
 
        if (IS_ERR(conn)) {
                int status;
@@ -2733,13 +2901,16 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        }
 
        /* For LE, just connecting isn't a proof that the pairing finished */
-       if (cp->addr.type == BDADDR_BREDR)
+       if (cp->addr.type == BDADDR_BREDR) {
                conn->connect_cfm_cb = pairing_complete_cb;
-       else
-               conn->connect_cfm_cb = le_connect_complete_cb;
+               conn->security_cfm_cb = pairing_complete_cb;
+               conn->disconn_cfm_cb = pairing_complete_cb;
+       } else {
+               conn->connect_cfm_cb = le_pairing_complete_cb;
+               conn->security_cfm_cb = le_pairing_complete_cb;
+               conn->disconn_cfm_cb = le_pairing_complete_cb;
+       }
 
-       conn->security_cfm_cb = pairing_complete_cb;
-       conn->disconn_cfm_cb = pairing_complete_cb;
        conn->io_capability = cp->io_cap;
        cmd->user_data = conn;
 
@@ -3071,7 +3242,12 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+       if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
+               err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
+                                  0, NULL);
+       else
+               err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -3083,23 +3259,46 @@ unlock:
 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
                               void *data, u16 len)
 {
-       struct mgmt_cp_add_remote_oob_data *cp = data;
-       u8 status;
        int err;
 
        BT_DBG("%s ", hdev->name);
 
        hci_dev_lock(hdev);
 
-       err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
-                                     cp->randomizer);
-       if (err < 0)
-               status = MGMT_STATUS_FAILED;
-       else
-               status = MGMT_STATUS_SUCCESS;
+       if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
+               struct mgmt_cp_add_remote_oob_data *cp = data;
+               u8 status;
 
-       err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
-                          &cp->addr, sizeof(cp->addr));
+               err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
+                                             cp->hash, cp->randomizer);
+               if (err < 0)
+                       status = MGMT_STATUS_FAILED;
+               else
+                       status = MGMT_STATUS_SUCCESS;
+
+               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                  status, &cp->addr, sizeof(cp->addr));
+       } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
+               struct mgmt_cp_add_remote_oob_ext_data *cp = data;
+               u8 status;
+
+               err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
+                                                 cp->hash192,
+                                                 cp->randomizer192,
+                                                 cp->hash256,
+                                                 cp->randomizer256);
+               if (err < 0)
+                       status = MGMT_STATUS_FAILED;
+               else
+                       status = MGMT_STATUS_SUCCESS;
+
+               err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                  status, &cp->addr, sizeof(cp->addr));
+       } else {
+               BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
+               err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
+                                MGMT_STATUS_INVALID_PARAMS);
+       }
 
        hci_dev_unlock(hdev);
        return err;
@@ -3195,7 +3394,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
        struct hci_request req;
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
-       u8 status;
+       u8 status, own_addr_type;
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -3280,18 +3479,31 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                        goto failed;
                }
 
-               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
+               /* If the controller is already scanning, the background scan
+                * is running, so temporarily stop it in order to set the
+                * discovery scanning parameters.
+                */
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+                       hci_req_add_le_scan_disable(&req);
+
+               memset(&param_cp, 0, sizeof(param_cp));
+
+               /* All active scans will be done with either a resolvable
+                * private address (when the privacy feature has been enabled)
+                * or a non-resolvable private address.
+                */
+               err = hci_update_random_address(&req, true, &own_addr_type);
+               if (err < 0) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                        MGMT_STATUS_BUSY);
+                                        MGMT_STATUS_FAILED);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }
 
-               memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = LE_SCAN_ACTIVE;
                param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
                param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
-               param_cp.own_address_type = hdev->own_addr_type;
+               param_cp.own_address_type = own_addr_type;
                hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);
 
@@ -3361,7 +3573,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        struct hci_request req;
-       struct hci_cp_le_set_scan_enable enable_cp;
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -3397,10 +3608,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
                } else {
                        cancel_delayed_work(&hdev->le_scan_disable);
 
-                       memset(&enable_cp, 0, sizeof(enable_cp));
-                       enable_cp.enable = LE_SCAN_DISABLE;
-                       hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
-                                   sizeof(enable_cp), &enable_cp);
+                       hci_req_add_le_scan_disable(&req);
                }
 
                break;
@@ -3457,15 +3665,17 @@ static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hci_discovery_active(hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
-                                MGMT_STATUS_FAILED);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+                                  MGMT_STATUS_FAILED, &cp->addr,
+                                  sizeof(cp->addr));
                goto failed;
        }
 
        e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
        if (!e) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
-                                MGMT_STATUS_INVALID_PARAMS);
+               err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
+                                  MGMT_STATUS_INVALID_PARAMS, &cp->addr,
+                                  sizeof(cp->addr));
                goto failed;
        }
 
@@ -3754,6 +3964,21 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
 
        err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
 
+       /* If the background scan is running, restart it so the new
+        * parameters are loaded.
+        */
+       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
+           hdev->discovery.state == DISCOVERY_STOPPED) {
+               struct hci_request req;
+
+               hci_req_init(&req, hdev);
+
+               hci_req_add_le_scan_disable(&req);
+               hci_req_add_le_passive_scan(&req);
+
+               hci_req_run(&req, NULL);
+       }
+
        hci_dev_unlock(hdev);
 
        return err;
@@ -3999,15 +4224,269 @@ unlock:
        return err;
 }
 
+static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
+                          void *data, u16 len)
+{
+       struct mgmt_mode *cp = data;
+       struct pending_cmd *cmd;
+       u8 val, status;
+       int err;
+
+       BT_DBG("request for %s", hdev->name);
+
+       status = mgmt_bredr_support(hdev);
+       if (status)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                 status);
+
+       if (!lmp_sc_capable(hdev) &&
+           !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
+
+       if (!hdev_is_powered(hdev)) {
+               bool changed;
+
+               if (cp->val) {
+                       changed = !test_and_set_bit(HCI_SC_ENABLED,
+                                                   &hdev->dev_flags);
+                       if (cp->val == 0x02)
+                               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+                       else
+                               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               } else {
+                       changed = test_and_clear_bit(HCI_SC_ENABLED,
+                                                    &hdev->dev_flags);
+                       clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               }
+
+               err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
+               if (err < 0)
+                       goto failed;
+
+               if (changed)
+                       err = new_settings(hdev, sk);
+
+               goto failed;
+       }
+
+       if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+                                MGMT_STATUS_BUSY);
+               goto failed;
+       }
+
+       val = !!cp->val;
+
+       if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+           (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
+               err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
+               goto failed;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto failed;
+       }
+
+       err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
+       if (err < 0) {
+               mgmt_pending_remove(cmd);
+               goto failed;
+       }
+
+       if (cp->val == 0x02)
+               set_bit(HCI_SC_ONLY, &hdev->dev_flags);
+       else
+               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+
+failed:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
+                         void *data, u16 len)
+{
+       struct mgmt_mode *cp = data;
+       bool changed;
+       int err;
+
+       BT_DBG("request for %s", hdev->name);
+
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
+
+       if (cp->val)
+               changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+       else
+               changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
+
+       err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (changed)
+               err = new_settings(hdev, sk);
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+                      u16 len)
+{
+       struct mgmt_cp_set_privacy *cp = cp_data;
+       bool changed;
+       int err;
+
+       BT_DBG("request for %s", hdev->name);
+
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       if (cp->privacy != 0x00 && cp->privacy != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       if (hdev_is_powered(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
+                                 MGMT_STATUS_REJECTED);
+
+       hci_dev_lock(hdev);
+
+       /* If user space supports this command it is also expected to
+        * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
+        */
+       set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+
+       if (cp->privacy) {
+               changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
+               memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
+               set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+       } else {
+               changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
+               memset(hdev->irk, 0, sizeof(hdev->irk));
+               clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+       }
+
+       err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (changed)
+               err = new_settings(hdev, sk);
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static bool irk_is_valid(struct mgmt_irk_info *irk)
+{
+       switch (irk->addr.type) {
+       case BDADDR_LE_PUBLIC:
+               return true;
+
+       case BDADDR_LE_RANDOM:
+               /* Two most significant bits shall be set */
+               if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
+                       return false;
+               return true;
+       }
+
+       return false;
+}
+
+static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
+                    u16 len)
+{
+       struct mgmt_cp_load_irks *cp = cp_data;
+       u16 irk_count, expected_len;
+       int i, err;
+
+       BT_DBG("request for %s", hdev->name);
+
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       irk_count = __le16_to_cpu(cp->irk_count);
+
+       expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
+       if (expected_len != len) {
+               BT_ERR("load_irks: expected %u bytes, got %u bytes",
+                      expected_len, len);
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+       }
+
+       BT_DBG("%s irk_count %u", hdev->name, irk_count);
+
+       for (i = 0; i < irk_count; i++) {
+               struct mgmt_irk_info *key = &cp->irks[i];
+
+               if (!irk_is_valid(key))
+                       return cmd_status(sk, hdev->id,
+                                         MGMT_OP_LOAD_IRKS,
+                                         MGMT_STATUS_INVALID_PARAMS);
+       }
+
+       hci_dev_lock(hdev);
+
+       hci_smp_irks_clear(hdev);
+
+       for (i = 0; i < irk_count; i++) {
+               struct mgmt_irk_info *irk = &cp->irks[i];
+               u8 addr_type;
+
+               if (irk->addr.type == BDADDR_LE_PUBLIC)
+                       addr_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       addr_type = ADDR_LE_DEV_RANDOM;
+
+               hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
+                           BDADDR_ANY);
+       }
+
+       set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
+
+       err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
+
+       hci_dev_unlock(hdev);
+
+       return err;
+}
+
 static bool ltk_is_valid(struct mgmt_ltk_info *key)
 {
-       if (key->authenticated != 0x00 && key->authenticated != 0x01)
-               return false;
        if (key->master != 0x00 && key->master != 0x01)
                return false;
-       if (!bdaddr_type_is_le(key->addr.type))
-               return false;
-       return true;
+
+       switch (key->addr.type) {
+       case BDADDR_LE_PUBLIC:
+               return true;
+
+       case BDADDR_LE_RANDOM:
+               /* Two most significant bits shall be set */
+               if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
+                       return false;
+               return true;
+       }
+
+       return false;
 }
 
 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
@@ -4029,7 +4508,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                                        sizeof(struct mgmt_ltk_info);
        if (expected_len != len) {
                BT_ERR("load_keys: expected %u bytes, got %u bytes",
-                      len, expected_len);
+                      expected_len, len);
                return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
                                  MGMT_STATUS_INVALID_PARAMS);
        }
@@ -4063,9 +4542,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
                else
                        type = HCI_SMP_LTK_SLAVE;
 
-               hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
-                           type, 0, key->authenticated, key->val,
-                           key->enc_size, key->ediv, key->rand);
+               hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
+                           key->type, key->val, key->enc_size, key->ediv,
+                           key->rand);
        }
 
        err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
@@ -4115,7 +4594,7 @@ static const struct mgmt_handler {
        { user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
        { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
        { read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
-       { add_remote_oob_data,    false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
+       { add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
        { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
        { start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
        { stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
@@ -4127,6 +4606,10 @@ static const struct mgmt_handler {
        { set_bredr,              false, MGMT_SETTING_SIZE },
        { set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
        { set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
+       { set_secure_conn,        false, MGMT_SETTING_SIZE },
+       { set_debug_keys,         false, MGMT_SETTING_SIZE },
+       { set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
+       { load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
 };
 
 
@@ -4243,6 +4726,17 @@ void mgmt_index_removed(struct hci_dev *hdev)
        mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
 }
 
+/* This function requires the caller holds hdev->lock */
+static void restart_le_auto_conns(struct hci_dev *hdev)
+{
+       struct hci_conn_params *p;
+
+       list_for_each_entry(p, &hdev->le_conn_params, list) {
+               if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
+                       hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
+       }
+}
+
 static void powered_complete(struct hci_dev *hdev, u8 status)
 {
        struct cmd_lookup match = { NULL, hdev };
@@ -4251,6 +4745,8 @@ static void powered_complete(struct hci_dev *hdev, u8 status)
 
        hci_dev_lock(hdev);
 
+       restart_le_auto_conns(hdev);
+
        mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
 
        new_settings(hdev, match.sk);
@@ -4292,11 +4788,6 @@ static int powered_update_hci(struct hci_dev *hdev)
        }
 
        if (lmp_le_capable(hdev)) {
-               /* Set random address to static address if configured */
-               if (bacmp(&hdev->static_addr, BDADDR_ANY))
-                       hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
-                                   &hdev->static_addr);
-
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
@@ -4422,6 +4913,10 @@ void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
        if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
                return;
 
+       /* Powering off may clear the scan mode - don't let that interfere */
+       if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+               return;
+
        if (discoverable) {
                changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        } else {
@@ -4455,6 +4950,10 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
        if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
                return;
 
+       /* Powering off may clear the scan mode - don't let that interfere */
+       if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+               return;
+
        if (connectable)
                changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
        else
@@ -4464,6 +4963,18 @@ void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
                new_settings(hdev, NULL);
 }
 
+void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
+{
+       /* Powering off may stop advertising - don't let that interfere */
+       if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
+               return;
+
+       if (advertising)
+               set_bit(HCI_ADVERTISING, &hdev->dev_flags);
+       else
+               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+}
+
 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
        u8 mgmt_err = mgmt_status(status);
@@ -4494,28 +5005,104 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
        mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
 {
        struct mgmt_ev_new_long_term_key ev;
 
        memset(&ev, 0, sizeof(ev));
 
-       ev.store_hint = persistent;
+       /* Devices using resolvable or non-resolvable random addresses
+        * without providing an identity resolving key don't require
+        * their long term keys to be stored. Their addresses will
+        * change the next time around.
+        *
+        * Only when a remote device provides an identity address
+        * should the long term key be stored. If the remote
+        * identity is known, the long term keys are internally
+        * mapped to the identity address. So allow static random
+        * and public addresses here.
+        */
+       if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
+           (key->bdaddr.b[5] & 0xc0) != 0xc0)
+               ev.store_hint = 0x00;
+       else
+               ev.store_hint = persistent;
+
        bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
        ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
-       ev.key.authenticated = key->authenticated;
+       ev.key.type = key->authenticated;
        ev.key.enc_size = key->enc_size;
        ev.key.ediv = key->ediv;
+       ev.key.rand = key->rand;
 
        if (key->type == HCI_SMP_LTK)
                ev.key.master = 1;
 
-       memcpy(ev.key.rand, key->rand, sizeof(key->rand));
        memcpy(ev.key.val, key->val, sizeof(key->val));
 
        mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
+{
+       struct mgmt_ev_new_irk ev;
+
+       memset(&ev, 0, sizeof(ev));
+
+       /* For identity resolving keys from devices that are already
+        * using a public address or static random address, do not
+        * ask for storing this key. The identity resolving key really
+        * is only mandatory for devices using resolvable random
+        * addresses.
+        *
+        * Storing all identity resolving keys has the downside that
+        * they will also be loaded on the next boot of the system.
+        * More identity resolving keys mean more time is needed
+        * during scanning to actually resolve these addresses.
+        */
+       if (bacmp(&irk->rpa, BDADDR_ANY))
+               ev.store_hint = 0x01;
+       else
+               ev.store_hint = 0x00;
+
+       bacpy(&ev.rpa, &irk->rpa);
+       bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
+       ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
+       memcpy(ev.irk.val, irk->val, sizeof(irk->val));
+
+       mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
+                  bool persistent)
+{
+       struct mgmt_ev_new_csrk ev;
+
+       memset(&ev, 0, sizeof(ev));
+
+       /* Devices using resolvable or non-resolvable random addresses
+        * without providing an identity resolving key don't require
+        * their signature resolving keys to be stored. Their addresses
+        * will change the next time around.
+        *
+        * Only when a remote device provides an identity address
+        * should the signature resolving key be stored. So allow
+        * static random and public addresses here.
+        */
+       if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
+           (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
+               ev.store_hint = 0x00;
+       else
+               ev.store_hint = persistent;
+
+       bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
+       ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
+       ev.key.master = csrk->master;
+       memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
+
+       mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
+}
+
 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
                                  u8 data_len)
 {
@@ -4590,11 +5177,29 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
 }
 
 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                             u8 link_type, u8 addr_type, u8 reason)
+                             u8 link_type, u8 addr_type, u8 reason,
+                             bool mgmt_connected)
 {
        struct mgmt_ev_device_disconnected ev;
+       struct pending_cmd *power_off;
        struct sock *sk = NULL;
 
+       power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+       if (power_off) {
+               struct mgmt_mode *cp = power_off->param;
+
+               /* The connection is still in hci_conn_hash so test for 1
+                * instead of 0 to know if this is the last one.
+                */
+               if (!cp->val && hci_conn_count(hdev) == 1) {
+                       cancel_delayed_work(&hdev->power_off);
+                       queue_work(hdev->req_workqueue, &hdev->power_off.work);
+               }
+       }
+
+       if (!mgmt_connected)
+               return;
+
        if (link_type != ACL_LINK && link_type != LE_LINK)
                return;
 
@@ -4649,6 +5254,20 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
                         u8 addr_type, u8 status)
 {
        struct mgmt_ev_connect_failed ev;
+       struct pending_cmd *power_off;
+
+       power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+       if (power_off) {
+               struct mgmt_mode *cp = power_off->param;
+
+               /* The connection is still in hci_conn_hash so test for 1
+                * instead of 0 to know if this is the last one.
+                */
+               if (!cp->val && hci_conn_count(hdev) == 1) {
+                       cancel_delayed_work(&hdev->power_off);
+                       queue_work(hdev->req_workqueue, &hdev->power_off.work);
+               }
+       }
 
        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
@@ -4707,7 +5326,7 @@ void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 }
 
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                             u8 link_type, u8 addr_type, __le32 value,
+                             u8 link_type, u8 addr_type, u32 value,
                              u8 confirm_hint)
 {
        struct mgmt_ev_user_confirm_request ev;
@@ -4717,7 +5336,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
        bacpy(&ev.addr.bdaddr, bdaddr);
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.confirm_hint = confirm_hint;
-       ev.value = value;
+       ev.value = cpu_to_le32(value);
 
        return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
                          NULL);
@@ -4910,6 +5529,43 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
        hci_req_run(&req, NULL);
 }
 
+void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+{
+       struct cmd_lookup match = { NULL, hdev };
+       bool changed = false;
+
+       if (status) {
+               u8 mgmt_err = mgmt_status(status);
+
+               if (enable) {
+                       if (test_and_clear_bit(HCI_SC_ENABLED,
+                                              &hdev->dev_flags))
+                               new_settings(hdev, NULL);
+                       clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+               }
+
+               mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
+                                    cmd_status_rsp, &mgmt_err);
+               return;
+       }
+
+       if (enable) {
+               changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+       } else {
+               changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
+               clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
+       }
+
+       mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
+                            settings_rsp, &match);
+
+       if (changed)
+               new_settings(hdev, match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+}
+
 static void sk_lookup(struct pending_cmd *cmd, void *data)
 {
        struct cmd_lookup *match = data;
@@ -4964,8 +5620,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
                   cmd ? cmd->sk : NULL);
 }
 
-void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
-                                            u8 *randomizer, u8 status)
+void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
+                                      u8 *randomizer192, u8 *hash256,
+                                      u8 *randomizer256, u8 status)
 {
        struct pending_cmd *cmd;
 
@@ -4979,13 +5636,32 @@ void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
                cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
                           mgmt_status(status));
        } else {
-               struct mgmt_rp_read_local_oob_data rp;
+               if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
+                   hash256 && randomizer256) {
+                       struct mgmt_rp_read_local_oob_ext_data rp;
+
+                       memcpy(rp.hash192, hash192, sizeof(rp.hash192));
+                       memcpy(rp.randomizer192, randomizer192,
+                              sizeof(rp.randomizer192));
 
-               memcpy(rp.hash, hash, sizeof(rp.hash));
-               memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
+                       memcpy(rp.hash256, hash256, sizeof(rp.hash256));
+                       memcpy(rp.randomizer256, randomizer256,
+                              sizeof(rp.randomizer256));
+
+                       cmd_complete(cmd->sk, hdev->id,
+                                    MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+                                    &rp, sizeof(rp));
+               } else {
+                       struct mgmt_rp_read_local_oob_data rp;
 
-               cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                            0, &rp, sizeof(rp));
+                       memcpy(rp.hash, hash192, sizeof(rp.hash));
+                       memcpy(rp.randomizer, randomizer192,
+                              sizeof(rp.randomizer));
+
+                       cmd_complete(cmd->sk, hdev->id,
+                                    MGMT_OP_READ_LOCAL_OOB_DATA, 0,
+                                    &rp, sizeof(rp));
+               }
        }
 
        mgmt_pending_remove(cmd);
@@ -4997,6 +5673,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 {
        char buf[512];
        struct mgmt_ev_device_found *ev = (void *) buf;
+       struct smp_irk *irk;
        size_t ev_size;
 
        if (!hci_discovery_active(hdev))
@@ -5008,13 +5685,20 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 
        memset(buf, 0, sizeof(buf));
 
-       bacpy(&ev->addr.bdaddr, bdaddr);
-       ev->addr.type = link_to_bdaddr(link_type, addr_type);
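+       /* When an IRK matches this address, report the device under its
+        * identity address instead of the (typically resolvable private)
+        * address seen on the air.
+        */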
+       irk = hci_get_irk(hdev, bdaddr, addr_type);
+       if (irk) {
+               bacpy(&ev->addr.bdaddr, &irk->bdaddr);
+               ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
+       } else {
+               bacpy(&ev->addr.bdaddr, bdaddr);
+               ev->addr.type = link_to_bdaddr(link_type, addr_type);
+       }
+
        ev->rssi = rssi;
        if (cfm_name)
-               ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
+               ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
        if (!ssp)
-               ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
+               ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
 
        if (eir_len > 0)
                memcpy(ev->eir, eir, eir_len);
index facd8a79c0383eb8898fca8dc905c87ed623202a..633cceeb943eefe798bcc4e8d5a4d85af00b4514 100644 (file)
@@ -216,6 +216,7 @@ static int rfcomm_check_security(struct rfcomm_dlc *d)
 
        switch (d->sec_level) {
        case BT_SECURITY_HIGH:
+       case BT_SECURITY_FIPS:
                auth_type = HCI_AT_GENERAL_BONDING_MITM;
                break;
        case BT_SECURITY_MEDIUM:
@@ -359,6 +360,11 @@ static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci)
        return NULL;
 }
 
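+/* Note the inverted sense: a non-zero return means the channel is invalid,
+ * i.e. outside the RFCOMM server channel range of 1-30.
+ */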
+static int rfcomm_check_channel(u8 channel)
+{
+       return channel < 1 || channel > 30;
+}
+
 static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel)
 {
        struct rfcomm_session *s;
@@ -368,7 +374,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
        BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",
               d, d->state, src, dst, channel);
 
-       if (channel < 1 || channel > 30)
+       if (rfcomm_check_channel(channel))
                return -EINVAL;
 
        if (d->state != BT_OPEN && d->state != BT_CLOSED)
@@ -425,6 +431,20 @@ int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 chann
        return r;
 }
 
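+/* Start disconnecting a DLC: send DISC immediately if the tx queue is
+ * empty, otherwise queue the DISC behind the pending data and allow twice
+ * the usual disconnect timeout.
+ */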
+static void __rfcomm_dlc_disconn(struct rfcomm_dlc *d)
+{
+       struct rfcomm_session *s = d->session;
+
+       d->state = BT_DISCONN;
+       if (skb_queue_empty(&d->tx_queue)) {
+               rfcomm_send_disc(s, d->dlci);
+               rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT);
+       } else {
+               rfcomm_queue_disc(d);
+               rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2);
+       }
+}
+
 static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
 {
        struct rfcomm_session *s = d->session;
@@ -437,32 +457,29 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
        switch (d->state) {
        case BT_CONNECT:
        case BT_CONFIG:
+       case BT_OPEN:
+       case BT_CONNECT2:
                if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
                        set_bit(RFCOMM_AUTH_REJECT, &d->flags);
                        rfcomm_schedule();
-                       break;
+                       return 0;
                }
-               /* Fall through */
+       }
 
+       switch (d->state) {
+       case BT_CONNECT:
        case BT_CONNECTED:
-               d->state = BT_DISCONN;
-               if (skb_queue_empty(&d->tx_queue)) {
-                       rfcomm_send_disc(s, d->dlci);
-                       rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT);
-               } else {
-                       rfcomm_queue_disc(d);
-                       rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2);
-               }
+               __rfcomm_dlc_disconn(d);
                break;
 
-       case BT_OPEN:
-       case BT_CONNECT2:
-               if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
-                       set_bit(RFCOMM_AUTH_REJECT, &d->flags);
-                       rfcomm_schedule();
+       case BT_CONFIG:
+               if (s->state != BT_BOUND) {
+                       __rfcomm_dlc_disconn(d);
                        break;
                }
-               /* Fall through */
+               /* if closing a dlc in a session that hasn't been started,
+                * just close and unlink the dlc
+                */
 
        default:
                rfcomm_dlc_clear_timer(d);
@@ -513,6 +530,25 @@ no_session:
        return r;
 }
 
+struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel)
+{
+       struct rfcomm_session *s;
+       struct rfcomm_dlc *dlc = NULL;
+       u8 dlci;
+
+       if (rfcomm_check_channel(channel))
+               return ERR_PTR(-EINVAL);
+
+       rfcomm_lock();
+       s = rfcomm_session_get(src, dst);
+       if (s) {
+               dlci = __dlci(!s->initiator, channel);
+               dlc = rfcomm_dlc_get(s, dlci);
+       }
+       rfcomm_unlock();
+       return dlc;
+}
+
 int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
 {
        int len = skb->len;
@@ -533,6 +569,20 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
        return len;
 }
 
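+/* A no-error variant of rfcomm_dlc_send(): the frame is queued
+ * unconditionally, and transmission is only scheduled once the DLC is
+ * connected and not throttled.
+ */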
+void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb)
+{
+       int len = skb->len;
+
+       BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
+
+       rfcomm_make_uih(skb, d->addr);
+       skb_queue_tail(&d->tx_queue, skb);
+
+       if (d->state == BT_CONNECTED &&
+           !test_bit(RFCOMM_TX_THROTTLED, &d->flags))
+               rfcomm_schedule();
+}
+
 void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
        BT_DBG("dlc %p state %ld", d, d->state);
@@ -718,7 +768,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
 
        bacpy(&addr.l2_bdaddr, dst);
        addr.l2_family = AF_BLUETOOTH;
-       addr.l2_psm    = __constant_cpu_to_le16(RFCOMM_PSM);
+       addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
        addr.l2_cid    = 0;
        addr.l2_bdaddr_type = BDADDR_BREDR;
        *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK);
@@ -1943,12 +1993,11 @@ static void rfcomm_process_sessions(void)
                        continue;
                }
 
-               if (s->state == BT_LISTEN) {
+               switch (s->state) {
+               case BT_LISTEN:
                        rfcomm_accept_connection(s);
                        continue;
-               }
 
-               switch (s->state) {
                case BT_BOUND:
                        s = rfcomm_check_connection(s);
                        break;
@@ -1983,7 +2032,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
        /* Bind socket */
        bacpy(&addr.l2_bdaddr, ba);
        addr.l2_family = AF_BLUETOOTH;
-       addr.l2_psm    = __constant_cpu_to_le16(RFCOMM_PSM);
+       addr.l2_psm    = cpu_to_le16(RFCOMM_PSM);
        addr.l2_cid    = 0;
        addr.l2_bdaddr_type = BDADDR_BREDR;
        err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
@@ -2085,7 +2134,8 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
                                set_bit(RFCOMM_SEC_PENDING, &d->flags);
                                rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
                                continue;
-                       } else if (d->sec_level == BT_SECURITY_HIGH) {
+                       } else if (d->sec_level == BT_SECURITY_HIGH ||
+                                  d->sec_level == BT_SECURITY_FIPS) {
                                set_bit(RFCOMM_ENC_DROP, &d->flags);
                                continue;
                        }
index 3c2d3e4aa2f58a271bea6632451edd21b92668a9..eabd25ab5ad96a14b07624ef6fdfa48715384f45 100644 (file)
@@ -105,13 +105,18 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
 }
 
 /* ---- Socket functions ---- */
-static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
+static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src)
 {
        struct sock *sk = NULL;
 
        sk_for_each(sk, &rfcomm_sk_list.head) {
-               if (rfcomm_pi(sk)->channel == channel &&
-                               !bacmp(&rfcomm_pi(sk)->src, src))
+               if (rfcomm_pi(sk)->channel != channel)
+                       continue;
+
+               if (bacmp(&rfcomm_pi(sk)->src, src))
+                       continue;
+
+               if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN)
                        break;
        }
 
@@ -331,6 +336,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
 {
        struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
        struct sock *sk = sock->sk;
+       int chan = sa->rc_channel;
        int err = 0;
 
        BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
@@ -352,12 +358,12 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
 
        write_lock(&rfcomm_sk_list.lock);
 
-       if (sa->rc_channel && __rfcomm_get_sock_by_addr(sa->rc_channel, &sa->rc_bdaddr)) {
+       if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {
                err = -EADDRINUSE;
        } else {
                /* Save source address */
                bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
-               rfcomm_pi(sk)->channel = sa->rc_channel;
+               rfcomm_pi(sk)->channel = chan;
                sk->sk_state = BT_BOUND;
        }
 
@@ -439,7 +445,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
                write_lock(&rfcomm_sk_list.lock);
 
                for (channel = 1; channel < 31; channel++)
-                       if (!__rfcomm_get_sock_by_addr(channel, src)) {
+                       if (!__rfcomm_get_listen_sock_by_addr(channel, src)) {
                                rfcomm_pi(sk)->channel = channel;
                                err = 0;
                                break;
@@ -528,6 +534,10 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       if (peer && sk->sk_state != BT_CONNECTED &&
+           sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
+               return -ENOTCONN;
+
        memset(sa, 0, sizeof(*sa));
        sa->rc_family  = AF_BLUETOOTH;
        sa->rc_channel = rfcomm_pi(sk)->channel;
@@ -648,6 +658,11 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
                        break;
                }
 
+               if (opt & RFCOMM_LM_FIPS) {
+                       err = -EINVAL;
+                       break;
+               }
+
                if (opt & RFCOMM_LM_AUTH)
                        rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
                if (opt & RFCOMM_LM_ENCRYPT)
@@ -762,7 +777,11 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
                        break;
                case BT_SECURITY_HIGH:
                        opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
-                                                       RFCOMM_LM_SECURE;
+                             RFCOMM_LM_SECURE;
+                       break;
+               case BT_SECURITY_FIPS:
+                       opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
+                             RFCOMM_LM_SECURE | RFCOMM_LM_FIPS;
                        break;
                default:
                        opt = 0;
@@ -774,6 +793,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
 
                if (put_user(opt, (u32 __user *) optval))
                        err = -EFAULT;
+
                break;
 
        case RFCOMM_CONNINFO:
index f9c0980abeeac9eaf1d94d70f3e001fd6e1f0870..403ec09f480a2a72983b0b1b9db0222fd6e86742 100644 (file)
@@ -40,6 +40,7 @@
 #define RFCOMM_TTY_MAJOR 216           /* device node major id of the usb/bluetooth.c driver */
 #define RFCOMM_TTY_MINOR 0
 
+static DEFINE_MUTEX(rfcomm_ioctl_mutex);
 static struct tty_driver *rfcomm_tty_driver;
 
 struct rfcomm_dev {
@@ -51,6 +52,8 @@ struct rfcomm_dev {
        unsigned long           flags;
        int                     err;
 
+       unsigned long           status;         /* don't export to userspace */
+
        bdaddr_t                src;
        bdaddr_t                dst;
        u8                      channel;
@@ -58,7 +61,6 @@ struct rfcomm_dev {
        uint                    modem_status;
 
        struct rfcomm_dlc       *dlc;
-       wait_queue_head_t       conn_wait;
 
        struct device           *tty_dev;
 
@@ -83,10 +85,6 @@ static void rfcomm_dev_destruct(struct tty_port *port)
 
        BT_DBG("dev %p dlc %p", dev, dlc);
 
-       spin_lock(&rfcomm_dev_lock);
-       list_del(&dev->list);
-       spin_unlock(&rfcomm_dev_lock);
-
        rfcomm_dlc_lock(dlc);
        /* Detach DLC if it's owned by this dev */
        if (dlc->owner == dev)
@@ -95,7 +93,12 @@ static void rfcomm_dev_destruct(struct tty_port *port)
 
        rfcomm_dlc_put(dlc);
 
-       tty_unregister_device(rfcomm_tty_driver, dev->id);
+       if (dev->tty_dev)
+               tty_unregister_device(rfcomm_tty_driver, dev->id);
+
+       spin_lock(&rfcomm_dev_lock);
+       list_del(&dev->list);
+       spin_unlock(&rfcomm_dev_lock);
 
        kfree(dev);
 
@@ -104,60 +107,24 @@ static void rfcomm_dev_destruct(struct tty_port *port)
        module_put(THIS_MODULE);
 }
 
-static struct device *rfcomm_get_device(struct rfcomm_dev *dev)
-{
-       struct hci_dev *hdev;
-       struct hci_conn *conn;
-
-       hdev = hci_get_route(&dev->dst, &dev->src);
-       if (!hdev)
-               return NULL;
-
-       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
-
-       hci_dev_put(hdev);
-
-       return conn ? &conn->dev : NULL;
-}
-
 /* device-specific initialization: open the dlc */
 static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
 {
        struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
-       DEFINE_WAIT(wait);
        int err;
 
        err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
        if (err)
-               return err;
-
-       while (1) {
-               prepare_to_wait(&dev->conn_wait, &wait, TASK_INTERRUPTIBLE);
-
-               if (dev->dlc->state == BT_CLOSED) {
-                       err = -dev->err;
-                       break;
-               }
-
-               if (dev->dlc->state == BT_CONNECTED)
-                       break;
-
-               if (signal_pending(current)) {
-                       err = -ERESTARTSYS;
-                       break;
-               }
-
-               tty_unlock(tty);
-               schedule();
-               tty_lock(tty);
-       }
-       finish_wait(&dev->conn_wait, &wait);
+               set_bit(TTY_IO_ERROR, &tty->flags);
+       return err;
+}
 
-       if (!err)
-               device_move(dev->tty_dev, rfcomm_get_device(dev),
-                           DPM_ORDER_DEV_AFTER_PARENT);
+/* we block the open until the dlc->state becomes BT_CONNECTED */
+static int rfcomm_dev_carrier_raised(struct tty_port *port)
+{
+       struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
 
-       return err;
+       return (dev->dlc->state == BT_CONNECTED);
 }
 
 /* device-specific cleanup: close the dlc */
@@ -176,9 +143,10 @@ static const struct tty_port_operations rfcomm_port_ops = {
        .destruct = rfcomm_dev_destruct,
        .activate = rfcomm_dev_activate,
        .shutdown = rfcomm_dev_shutdown,
+       .carrier_raised = rfcomm_dev_carrier_raised,
 };
 
-static struct rfcomm_dev *__rfcomm_dev_get(int id)
+static struct rfcomm_dev *__rfcomm_dev_lookup(int id)
 {
        struct rfcomm_dev *dev;
 
@@ -195,20 +163,41 @@ static struct rfcomm_dev *rfcomm_dev_get(int id)
 
        spin_lock(&rfcomm_dev_lock);
 
-       dev = __rfcomm_dev_get(id);
+       dev = __rfcomm_dev_lookup(id);
 
-       if (dev) {
-               if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
-                       dev = NULL;
-               else
-                       tty_port_get(&dev->port);
-       }
+       if (dev && !tty_port_get(&dev->port))
+               dev = NULL;
 
        spin_unlock(&rfcomm_dev_lock);
 
        return dev;
 }
 
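+/* Reparent the tty device in sysfs under the ACL connection it runs over,
+ * once that connection has actually been registered.
+ */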
+static void rfcomm_reparent_device(struct rfcomm_dev *dev)
+{
+       struct hci_dev *hdev;
+       struct hci_conn *conn;
+
+       hdev = hci_get_route(&dev->dst, &dev->src);
+       if (!hdev)
+               return;
+
+       /* The lookup results are unsafe to access without the
+        * hci device lock (FIXME: why is this not documented?)
+        */
+       hci_dev_lock(hdev);
+       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst);
+
+       /* Just because the acl link is in the hash table is no
+        * guarantee the sysfs device has been added ...
+        */
+       if (conn && device_is_registered(&conn->dev))
+               device_move(dev->tty_dev, &conn->dev, DPM_ORDER_DEV_AFTER_PARENT);
+
+       hci_dev_unlock(hdev);
+       hci_dev_put(hdev);
+}
+
 static ssize_t show_address(struct device *tty_dev, struct device_attribute *attr, char *buf)
 {
        struct rfcomm_dev *dev = dev_get_drvdata(tty_dev);
@@ -224,17 +213,16 @@ static ssize_t show_channel(struct device *tty_dev, struct device_attribute *att
 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
 static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
 
-static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
+static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req,
+                                          struct rfcomm_dlc *dlc)
 {
        struct rfcomm_dev *dev, *entry;
        struct list_head *head = &rfcomm_dev_list;
        int err = 0;
 
-       BT_DBG("id %d channel %d", req->dev_id, req->channel);
-
        dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL);
        if (!dev)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        spin_lock(&rfcomm_dev_lock);
 
@@ -282,7 +270,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
 
        tty_port_init(&dev->port);
        dev->port.ops = &rfcomm_port_ops;
-       init_waitqueue_head(&dev->conn_wait);
 
        skb_queue_head_init(&dev->pending);
 
@@ -318,22 +305,37 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
           holds reference to this module. */
        __module_get(THIS_MODULE);
 
+       spin_unlock(&rfcomm_dev_lock);
+       return dev;
+
 out:
        spin_unlock(&rfcomm_dev_lock);
+       kfree(dev);
+       return ERR_PTR(err);
+}
+
+static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
+{
+       struct rfcomm_dev *dev;
+       struct device *tty;
+
+       BT_DBG("id %d channel %d", req->dev_id, req->channel);
 
-       if (err < 0)
-               goto free;
+       dev = __rfcomm_dev_add(req, dlc);
+       if (IS_ERR(dev)) {
+               rfcomm_dlc_put(dlc);
+               return PTR_ERR(dev);
+       }
 
-       dev->tty_dev = tty_port_register_device(&dev->port, rfcomm_tty_driver,
+       tty = tty_port_register_device(&dev->port, rfcomm_tty_driver,
                        dev->id, NULL);
-       if (IS_ERR(dev->tty_dev)) {
-               err = PTR_ERR(dev->tty_dev);
-               spin_lock(&rfcomm_dev_lock);
-               list_del(&dev->list);
-               spin_unlock(&rfcomm_dev_lock);
-               goto free;
+       if (IS_ERR(tty)) {
+               tty_port_put(&dev->port);
+               return PTR_ERR(tty);
        }
 
+       dev->tty_dev = tty;
+       rfcomm_reparent_device(dev);
        dev_set_drvdata(dev->tty_dev, dev);
 
        if (device_create_file(dev->tty_dev, &dev_attr_address) < 0)
@@ -343,24 +345,23 @@ out:
                BT_ERR("Failed to create channel attribute");
 
        return dev->id;
-
-free:
-       kfree(dev);
-       return err;
 }
 
 /* ---- Send buffer ---- */
-static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
+static inline unsigned int rfcomm_room(struct rfcomm_dev *dev)
 {
-       /* We can't let it be zero, because we don't get a callback
-          when tx_credits becomes nonzero, hence we'd never wake up */
-       return dlc->mtu * (dlc->tx_credits?:1);
+       struct rfcomm_dlc *dlc = dev->dlc;
+
+       /* Limit the outstanding number of packets not yet sent to 40 */
+       int pending = 40 - atomic_read(&dev->wmem_alloc);
+
+       return max(0, pending) * dlc->mtu;
 }
 
 static void rfcomm_wfree(struct sk_buff *skb)
 {
        struct rfcomm_dev *dev = (void *) skb->sk;
-       atomic_sub(skb->truesize, &dev->wmem_alloc);
+       atomic_dec(&dev->wmem_alloc);
        if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
                tty_port_tty_wakeup(&dev->port);
        tty_port_put(&dev->port);
@@ -369,28 +370,24 @@ static void rfcomm_wfree(struct sk_buff *skb)
 static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
 {
        tty_port_get(&dev->port);
-       atomic_add(skb->truesize, &dev->wmem_alloc);
+       atomic_inc(&dev->wmem_alloc);
        skb->sk = (void *) dev;
        skb->destructor = rfcomm_wfree;
 }
 
 static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority)
 {
-       if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) {
-               struct sk_buff *skb = alloc_skb(size, priority);
-               if (skb) {
-                       rfcomm_set_owner_w(skb, dev);
-                       return skb;
-               }
-       }
-       return NULL;
+       struct sk_buff *skb = alloc_skb(size, priority);
+       if (skb)
+               rfcomm_set_owner_w(skb, dev);
+       return skb;
 }
 
 /* ---- Device IOCTLs ---- */
 
 #define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP))
 
-static int rfcomm_create_dev(struct sock *sk, void __user *arg)
+static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
 {
        struct rfcomm_dev_req req;
        struct rfcomm_dlc *dlc;
@@ -412,16 +409,22 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
                dlc = rfcomm_pi(sk)->dlc;
                rfcomm_dlc_hold(dlc);
        } else {
+               /* Validate the channel is unused */
+               dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
+               if (IS_ERR(dlc))
+                       return PTR_ERR(dlc);
+               else if (dlc) {
+                       rfcomm_dlc_put(dlc);
+                       return -EBUSY;
+               }
                dlc = rfcomm_dlc_alloc(GFP_KERNEL);
                if (!dlc)
                        return -ENOMEM;
        }
 
        id = rfcomm_dev_add(&req, dlc);
-       if (id < 0) {
-               rfcomm_dlc_put(dlc);
+       if (id < 0)
                return id;
-       }
 
        if (req.flags & (1 << RFCOMM_REUSE_DLC)) {
                /* DLC is now used by device.
@@ -432,7 +435,7 @@ static int rfcomm_create_dev(struct sock *sk, void __user *arg)
        return id;
 }
 
-static int rfcomm_release_dev(void __user *arg)
+static int __rfcomm_release_dev(void __user *arg)
 {
        struct rfcomm_dev_req req;
        struct rfcomm_dev *dev;
@@ -452,6 +455,12 @@ static int rfcomm_release_dev(void __user *arg)
                return -EPERM;
        }
 
+       /* only release once */
+       if (test_and_set_bit(RFCOMM_DEV_RELEASED, &dev->status)) {
+               tty_port_put(&dev->port);
+               return -EALREADY;
+       }
+
        if (req.flags & (1 << RFCOMM_HANGUP_NOW))
                rfcomm_dlc_close(dev->dlc, 0);
 
@@ -462,14 +471,35 @@ static int rfcomm_release_dev(void __user *arg)
                tty_kref_put(tty);
        }
 
-       if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
-           !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+       if (!test_bit(RFCOMM_TTY_OWNED, &dev->status))
                tty_port_put(&dev->port);
 
        tty_port_put(&dev->port);
        return 0;
 }
 
+static int rfcomm_create_dev(struct sock *sk, void __user *arg)
+{
+       int ret;
+
+       mutex_lock(&rfcomm_ioctl_mutex);
+       ret = __rfcomm_create_dev(sk, arg);
+       mutex_unlock(&rfcomm_ioctl_mutex);
+
+       return ret;
+}
+
+static int rfcomm_release_dev(void __user *arg)
+{
+       int ret;
+
+       mutex_lock(&rfcomm_ioctl_mutex);
+       ret = __rfcomm_release_dev(arg);
+       mutex_unlock(&rfcomm_ioctl_mutex);
+
+       return ret;
+}
+
 static int rfcomm_get_dev_list(void __user *arg)
 {
        struct rfcomm_dev *dev;
@@ -497,7 +527,7 @@ static int rfcomm_get_dev_list(void __user *arg)
        spin_lock(&rfcomm_dev_lock);
 
        list_for_each_entry(dev, &rfcomm_dev_list, list) {
-               if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+               if (!tty_port_get(&dev->port))
                        continue;
                (di + n)->id      = dev->id;
                (di + n)->flags   = dev->flags;
@@ -505,6 +535,7 @@ static int rfcomm_get_dev_list(void __user *arg)
                (di + n)->channel = dev->channel;
                bacpy(&(di + n)->src, &dev->src);
                bacpy(&(di + n)->dst, &dev->dst);
+               tty_port_put(&dev->port);
                if (++n >= dev_num)
                        break;
        }
@@ -601,9 +632,11 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
        BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
 
        dev->err = err;
-       wake_up_interruptible(&dev->conn_wait);
+       if (dlc->state == BT_CONNECTED) {
+               rfcomm_reparent_device(dev);
 
-       if (dlc->state == BT_CLOSED)
+               wake_up_interruptible(&dev->port.open_wait);
+       } else if (dlc->state == BT_CLOSED)
                tty_port_tty_hangup(&dev->port, false);
 }
 
@@ -703,8 +736,10 @@ static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
         * when the last process closes the tty. The behaviour is expected by
         * userspace.
         */
-       if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
+       if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
+               set_bit(RFCOMM_TTY_OWNED, &dev->status);
                tty_port_put(&dev->port);
+       }
 
        return 0;
 }
@@ -750,7 +785,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
        struct rfcomm_dlc *dlc = dev->dlc;
        struct sk_buff *skb;
-       int err = 0, sent = 0, size;
+       int sent = 0, size;
 
        BT_DBG("tty %p count %d", tty, count);
 
@@ -758,7 +793,6 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
                size = min_t(uint, count, dlc->mtu);
 
                skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC);
-
                if (!skb)
                        break;
 
@@ -766,32 +800,24 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
 
                memcpy(skb_put(skb, size), buf + sent, size);
 
-               err = rfcomm_dlc_send(dlc, skb);
-               if (err < 0) {
-                       kfree_skb(skb);
-                       break;
-               }
+               rfcomm_dlc_send_noerror(dlc, skb);
 
                sent  += size;
                count -= size;
        }
 
-       return sent ? sent : err;
+       return sent;
 }
 
 static int rfcomm_tty_write_room(struct tty_struct *tty)
 {
        struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
-       int room;
+       int room = 0;
 
-       BT_DBG("tty %p", tty);
-
-       if (!dev || !dev->dlc)
-               return 0;
+       if (dev && dev->dlc)
+               room = rfcomm_room(dev);
 
-       room = rfcomm_room(dev->dlc) - atomic_read(&dev->wmem_alloc);
-       if (room < 0)
-               room = 0;
+       BT_DBG("tty %p room %d", tty, room);
 
        return room;
 }
@@ -1125,7 +1151,7 @@ int __init rfcomm_init_ttys(void)
        rfcomm_tty_driver->subtype      = SERIAL_TYPE_NORMAL;
        rfcomm_tty_driver->flags        = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
        rfcomm_tty_driver->init_termios = tty_std_termios;
-       rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+       rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
        rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
        tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
 
index 24fa3964b3c84da299287d6c913771c579d6a9a5..ab1e6fcca4c5b07ea6ba1020430aed6a707453d0 100644 (file)
@@ -676,20 +676,20 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
                bacpy(&cp.bdaddr, &conn->dst);
                cp.pkt_type = cpu_to_le16(conn->pkt_type);
 
-               cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
-               cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
+               cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
+               cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
                cp.content_format = cpu_to_le16(setting);
 
                switch (setting & SCO_AIRMODE_MASK) {
                case SCO_AIRMODE_TRANSP:
                        if (conn->pkt_type & ESCO_2EV3)
-                               cp.max_latency = __constant_cpu_to_le16(0x0008);
+                               cp.max_latency = cpu_to_le16(0x0008);
                        else
-                               cp.max_latency = __constant_cpu_to_le16(0x000D);
+                               cp.max_latency = cpu_to_le16(0x000D);
                        cp.retrans_effort = 0x02;
                        break;
                case SCO_AIRMODE_CVSD:
-                       cp.max_latency = __constant_cpu_to_le16(0xffff);
+                       cp.max_latency = cpu_to_le16(0xffff);
                        cp.retrans_effort = 0xff;
                        break;
                }
index 45007362683b4e4cdfcf8131d79ff38aa95f5733..dfb4e1161c10fbb62b6ac43220949992a5075dfc 100644 (file)
 
 #define AUTH_REQ_MASK   0x07
 
-static inline void swap128(u8 src[16], u8 dst[16])
+static inline void swap128(const u8 src[16], u8 dst[16])
 {
        int i;
        for (i = 0; i < 16; i++)
                dst[15 - i] = src[i];
 }
 
-static inline void swap56(u8 src[7], u8 dst[7])
+static inline void swap56(const u8 src[7], u8 dst[7])
 {
        int i;
        for (i = 0; i < 7; i++)
@@ -53,6 +53,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
 {
        struct blkcipher_desc desc;
        struct scatterlist sg;
+       uint8_t tmp[16], data[16];
        int err;
 
        if (tfm == NULL) {
@@ -63,21 +64,89 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        desc.tfm = tfm;
        desc.flags = 0;
 
-       err = crypto_blkcipher_setkey(tfm, k, 16);
+       /* The most significant octet of key corresponds to k[0] */
+       swap128(k, tmp);
+
+       err = crypto_blkcipher_setkey(tfm, tmp, 16);
        if (err) {
                BT_ERR("cipher setkey failed: %d", err);
                return err;
        }
 
-       sg_init_one(&sg, r, 16);
+       /* Most significant octet of plaintextData corresponds to data[0] */
+       swap128(r, data);
+
+       sg_init_one(&sg, data, 16);
 
        err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
        if (err)
                BT_ERR("Encrypt data error %d", err);
 
+       /* Most significant octet of encryptedData corresponds to data[0] */
+       swap128(data, r);
+
        return err;
 }
 
+static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3])
+{
+       u8 _res[16];
+       int err;
+
+       /* r' = padding || r */
+       memcpy(_res, r, 3);
+       memset(_res + 3, 0, 13);
+
+       err = smp_e(tfm, irk, _res);
+       if (err) {
+               BT_ERR("Encrypt error");
+               return err;
+       }
+
+       /* The output of the random address function ah is:
+        *      ah(k, r) = e(k, r') mod 2^24
+        * The output of the security function e is then truncated to 24 bits
+        * by taking the least significant 24 bits of the output of e as the
+        * result of ah.
+        */
+       memcpy(res, _res, 3);
+
+       return 0;
+}
+
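+/* A resolvable private address consists of prand in b[5..3] (top two bits
+ * 01) followed by hash = ah(irk, prand) in b[2..0]. Matching an IRK means
+ * recomputing the hash from prand with the candidate IRK and comparing it
+ * against the 24-bit hash carried in the address.
+ */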
+bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
+                    bdaddr_t *bdaddr)
+{
+       u8 hash[3];
+       int err;
+
+       BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
+
+       err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
+       if (err)
+               return false;
+
+       return !memcmp(bdaddr->b, hash, 3);
+}
+
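+/* Generate a fresh resolvable private address: three random octets of
+ * prand in b[5..3], with the top two bits forced to 01, followed by the
+ * 24-bit hash ah(irk, prand) in b[2..0].
+ */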
+int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa)
+{
+       int err;
+
+       get_random_bytes(&rpa->b[3], 3);
+
+       rpa->b[5] &= 0x3f;      /* Clear two most significant bits */
+       rpa->b[5] |= 0x40;      /* Set second most significant bit */
+
+       err = smp_ah(tfm, irk, &rpa->b[3], rpa->b);
+       if (err < 0)
+               return err;
+
+       BT_DBG("RPA %pMR", rpa);
+
+       return 0;
+}
+
 static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
                  u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
                  u8 _rat, bdaddr_t *ra, u8 res[16])
@@ -88,16 +157,15 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
        memset(p1, 0, 16);
 
        /* p1 = pres || preq || _rat || _iat */
-       swap56(pres, p1);
-       swap56(preq, p1 + 7);
-       p1[14] = _rat;
-       p1[15] = _iat;
-
-       memset(p2, 0, 16);
+       p1[0] = _iat;
+       p1[1] = _rat;
+       memcpy(p1 + 2, preq, 7);
+       memcpy(p1 + 9, pres, 7);
 
        /* p2 = padding || ia || ra */
-       baswap((bdaddr_t *) (p2 + 4), ia);
-       baswap((bdaddr_t *) (p2 + 10), ra);
+       memcpy(p2, ra, 6);
+       memcpy(p2 + 6, ia, 6);
+       memset(p2 + 12, 0, 4);
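+
+       /* p1 and p2 are assembled least significant octet first; the
+        * byte-order conversion required by the security function e is
+        * now done inside smp_e().
+        */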
 
        /* res = r XOR p1 */
        u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
@@ -126,8 +194,8 @@ static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16],
        int err;
 
        /* Just least significant octets from r1 and r2 are considered */
-       memcpy(_r, r1 + 8, 8);
-       memcpy(_r + 8, r2 + 8, 8);
+       memcpy(_r, r2, 8);
+       memcpy(_r + 8, r1, 8);
 
        err = smp_e(tfm, k, _r);
        if (err)
@@ -154,7 +222,7 @@ static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
 
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->len = cpu_to_le16(sizeof(code) + dlen);
-       lh->cid = __constant_cpu_to_le16(L2CAP_CID_SMP);
+       lh->cid = cpu_to_le16(L2CAP_CID_SMP);
 
        memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
 
@@ -203,31 +271,45 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
                              struct smp_cmd_pairing *req,
                              struct smp_cmd_pairing *rsp, __u8 authreq)
 {
-       u8 dist_keys = 0;
+       struct smp_chan *smp = conn->smp_chan;
+       struct hci_conn *hcon = conn->hcon;
+       struct hci_dev *hdev = hcon->hdev;
+       u8 local_dist = 0, remote_dist = 0;
 
        if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->dev_flags)) {
-               dist_keys = SMP_DIST_ENC_KEY;
+               local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
+               remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN;
                authreq |= SMP_AUTH_BONDING;
        } else {
                authreq &= ~SMP_AUTH_BONDING;
        }
 
+       if (test_bit(HCI_RPA_RESOLVING, &hdev->dev_flags))
+               remote_dist |= SMP_DIST_ID_KEY;
+
+       if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
+               local_dist |= SMP_DIST_ID_KEY;
+
        if (rsp == NULL) {
                req->io_capability = conn->hcon->io_capability;
                req->oob_flag = SMP_OOB_NOT_PRESENT;
                req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
-               req->init_key_dist = 0;
-               req->resp_key_dist = dist_keys;
+               req->init_key_dist = local_dist;
+               req->resp_key_dist = remote_dist;
                req->auth_req = (authreq & AUTH_REQ_MASK);
+
+               smp->remote_key_dist = remote_dist;
                return;
        }
 
        rsp->io_capability = conn->hcon->io_capability;
        rsp->oob_flag = SMP_OOB_NOT_PRESENT;
        rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
-       rsp->init_key_dist = 0;
-       rsp->resp_key_dist = req->resp_key_dist & dist_keys;
+       rsp->init_key_dist = req->init_key_dist & remote_dist;
+       rsp->resp_key_dist = req->resp_key_dist & local_dist;
        rsp->auth_req = (authreq & AUTH_REQ_MASK);
+
+       smp->remote_key_dist = rsp->init_key_dist;
 }
 
 static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
@@ -305,6 +387,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
        if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
                method = JUST_WORKS;
 
+       /* Don't confirm locally initiated pairing attempts */
+       if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR,
+                                          &smp->smp_flags))
+               method = JUST_WORKS;
+
        /* If Just Works, Continue with Zero TK */
        if (method == JUST_WORKS) {
                set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
@@ -325,16 +412,14 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
                        method = REQ_PASSKEY;
        }
 
-       /* Generate random passkey. Not valid until confirmed. */
+       /* Generate random passkey. */
        if (method == CFM_PASSKEY) {
-               u8 key[16];
-
-               memset(key, 0, sizeof(key));
+               memset(smp->tk, 0, sizeof(smp->tk));
                get_random_bytes(&passkey, sizeof(passkey));
                passkey %= 1000000;
-               put_unaligned_le32(passkey, key);
-               swap128(key, smp->tk);
+               put_unaligned_le32(passkey, smp->tk);
                BT_DBG("PassKey: %d", passkey);
+               set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
        }
 
        hci_dev_lock(hcon->hdev);
@@ -342,10 +427,14 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
        if (method == REQ_PASSKEY)
                ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,
                                                hcon->type, hcon->dst_type);
-       else
+       else if (method == JUST_CFM)
                ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst,
                                                hcon->type, hcon->dst_type,
-                                               cpu_to_le32(passkey), 0);
+                                               passkey, 1);
+       else
+               ret = mgmt_user_passkey_notify(hcon->hdev, &hcon->dst,
+                                               hcon->type, hcon->dst_type,
+                                               passkey, 0);
 
        hci_dev_unlock(hcon->hdev);
 
@@ -356,29 +445,24 @@ static void confirm_work(struct work_struct *work)
 {
        struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
        struct l2cap_conn *conn = smp->conn;
-       struct crypto_blkcipher *tfm;
+       struct hci_dev *hdev = conn->hcon->hdev;
+       struct crypto_blkcipher *tfm = hdev->tfm_aes;
        struct smp_cmd_pairing_confirm cp;
        int ret;
-       u8 res[16], reason;
+       u8 reason;
 
        BT_DBG("conn %p", conn);
 
-       tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(tfm)) {
-               reason = SMP_UNSPECIFIED;
-               goto error;
-       }
+       /* Prevent concurrent access to hdev->tfm_aes */
+       hci_dev_lock(hdev);
 
-       smp->tfm = tfm;
+       ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+                    conn->hcon->init_addr_type, &conn->hcon->init_addr,
+                    conn->hcon->resp_addr_type, &conn->hcon->resp_addr,
+                    cp.confirm_val);
+
+       hci_dev_unlock(hdev);
 
-       if (conn->hcon->out)
-               ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
-                            conn->hcon->src_type, &conn->hcon->src,
-                            conn->hcon->dst_type, &conn->hcon->dst, res);
-       else
-               ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
-                            conn->hcon->dst_type, &conn->hcon->dst,
-                            conn->hcon->src_type, &conn->hcon->src, res);
        if (ret) {
                reason = SMP_UNSPECIFIED;
                goto error;
@@ -386,7 +470,6 @@ static void confirm_work(struct work_struct *work)
 
        clear_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
 
-       swap128(res, cp.confirm_val);
        smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
 
        return;
@@ -400,8 +483,9 @@ static void random_work(struct work_struct *work)
        struct smp_chan *smp = container_of(work, struct smp_chan, random);
        struct l2cap_conn *conn = smp->conn;
        struct hci_conn *hcon = conn->hcon;
-       struct crypto_blkcipher *tfm = smp->tfm;
-       u8 reason, confirm[16], res[16], key[16];
+       struct hci_dev *hdev = hcon->hdev;
+       struct crypto_blkcipher *tfm = hdev->tfm_aes;
+       u8 reason, confirm[16];
        int ret;
 
        if (IS_ERR_OR_NULL(tfm)) {
@@ -411,21 +495,20 @@ static void random_work(struct work_struct *work)
 
        BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
 
-       if (hcon->out)
-               ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
-                            hcon->src_type, &hcon->src,
-                            hcon->dst_type, &hcon->dst, res);
-       else
-               ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
-                            hcon->dst_type, &hcon->dst,
-                            hcon->src_type, &hcon->src, res);
+       /* Prevent concurrent access to hdev->tfm_aes */
+       hci_dev_lock(hdev);
+
+       ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+                    hcon->init_addr_type, &hcon->init_addr,
+                    hcon->resp_addr_type, &hcon->resp_addr, confirm);
+
+       hci_dev_unlock(hdev);
+
        if (ret) {
                reason = SMP_UNSPECIFIED;
                goto error;
        }
 
-       swap128(res, confirm);
-
        if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
                BT_ERR("Pairing failed (confirmation values mismatch)");
                reason = SMP_CONFIRM_FAILED;
@@ -433,14 +516,11 @@ static void random_work(struct work_struct *work)
        }
 
        if (hcon->out) {
-               u8 stk[16], rand[8];
-               __le16 ediv;
-
-               memset(rand, 0, sizeof(rand));
-               ediv = 0;
+               u8 stk[16];
+               __le64 rand = 0;
+               __le16 ediv = 0;
 
-               smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
-               swap128(key, stk);
+               smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, stk);
 
                memset(stk + smp->enc_key_size, 0,
                       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
@@ -453,23 +533,20 @@ static void random_work(struct work_struct *work)
                hci_le_start_enc(hcon, ediv, rand, stk);
                hcon->enc_key_size = smp->enc_key_size;
        } else {
-               u8 stk[16], r[16], rand[8];
-               __le16 ediv;
-
-               memset(rand, 0, sizeof(rand));
-               ediv = 0;
+               u8 stk[16];
+               __le64 rand = 0;
+               __le16 ediv = 0;
 
-               swap128(smp->prnd, r);
-               smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+               smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+                            smp->prnd);
 
-               smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
-               swap128(key, stk);
+               smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, stk);
 
                memset(stk + smp->enc_key_size, 0,
                       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
                hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-                           HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
+                           HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size,
                            ediv, rand);
        }
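
The memset() after smp_s1() is what enforces the negotiated encryption key size: only enc_key_size bytes of the STK are kept, the tail is zeroed. A small, self-contained illustration follows; the STK bytes and the negotiated size of 7 are made up for the example.

    #include <stdio.h>
    #include <string.h>

    #define SMP_MAX_ENC_KEY_SIZE 16

    int main(void)
    {
        unsigned char stk[SMP_MAX_ENC_KEY_SIZE] = {
            0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88,
            0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00,
        };
        unsigned int enc_key_size = 7;   /* example negotiated size */
        unsigned int i;

        memset(stk + enc_key_size, 0, SMP_MAX_ENC_KEY_SIZE - enc_key_size);

        for (i = 0; i < sizeof(stk); i++)
            printf("%02x", stk[i]);
        printf("\n");   /* prints 11223344556677 followed by eighteen zeros */
        return 0;
    }
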
 
@@ -502,11 +579,33 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
 void smp_chan_destroy(struct l2cap_conn *conn)
 {
        struct smp_chan *smp = conn->smp_chan;
+       bool complete;
 
        BUG_ON(!smp);
 
-       if (smp->tfm)
-               crypto_free_blkcipher(smp->tfm);
+       complete = test_bit(SMP_FLAG_COMPLETE, &smp->smp_flags);
+       mgmt_smp_complete(conn->hcon, complete);
+
+       kfree(smp->csrk);
+       kfree(smp->slave_csrk);
+
+       /* If pairing failed clean up any keys we might have */
+       if (!complete) {
+               if (smp->ltk) {
+                       list_del(&smp->ltk->list);
+                       kfree(smp->ltk);
+               }
+
+               if (smp->slave_ltk) {
+                       list_del(&smp->slave_ltk->list);
+                       kfree(smp->slave_ltk);
+               }
+
+               if (smp->remote_irk) {
+                       list_del(&smp->remote_irk->list);
+                       kfree(smp->remote_irk);
+               }
+       }
 
        kfree(smp);
        conn->smp_chan = NULL;
@@ -519,7 +618,6 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
        struct l2cap_conn *conn = hcon->smp_conn;
        struct smp_chan *smp;
        u32 value;
-       u8 key[16];
 
        BT_DBG("");
 
@@ -531,10 +629,9 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
        switch (mgmt_op) {
        case MGMT_OP_USER_PASSKEY_REPLY:
                value = le32_to_cpu(passkey);
-               memset(key, 0, sizeof(key));
+               memset(smp->tk, 0, sizeof(smp->tk));
                BT_DBG("PassKey: %d", value);
-               put_unaligned_le32(value, key);
-               swap128(key, smp->tk);
+               put_unaligned_le32(value, smp->tk);
                /* Fall Through */
        case MGMT_OP_USER_CONFIRM_REPLY:
                set_bit(SMP_FLAG_TK_VALID, &smp->smp_flags);
@@ -565,6 +662,9 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        BT_DBG("conn %p", conn);
 
+       if (skb->len < sizeof(*req))
+               return SMP_UNSPECIFIED;
+
        if (conn->hcon->link_mode & HCI_LM_MASTER)
                return SMP_CMD_NOTSUPP;
 
@@ -604,6 +704,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (ret)
                return SMP_UNSPECIFIED;
 
+       clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags);
+
        return 0;
 }
 
@@ -617,6 +719,9 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
 
        BT_DBG("conn %p", conn);
 
+       if (skb->len < sizeof(*rsp))
+               return SMP_UNSPECIFIED;
+
        if (!(conn->hcon->link_mode & HCI_LM_MASTER))
                return SMP_CMD_NOTSUPP;
 
@@ -633,6 +738,11 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
        smp->prsp[0] = SMP_CMD_PAIRING_RSP;
        memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
 
+       /* Update remote key distribution in case the remote cleared
+        * some bits that we had enabled in our request.
+        */
+       smp->remote_key_dist &= rsp->resp_key_dist;
+
        if ((req->auth_req & SMP_AUTH_BONDING) &&
            (rsp->auth_req & SMP_AUTH_BONDING))
                auth = SMP_AUTH_BONDING;
@@ -646,10 +756,8 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
        set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
 
        /* Can't compose response until we have been confirmed */
-       if (!test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
-               return 0;
-
-       queue_work(hdev->workqueue, &smp->confirm);
+       if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
+               queue_work(hdev->workqueue, &smp->confirm);
 
        return 0;
 }
@@ -661,20 +769,19 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
 
        BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
 
+       if (skb->len < sizeof(smp->pcnf))
+               return SMP_UNSPECIFIED;
+
        memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
        skb_pull(skb, sizeof(smp->pcnf));
 
-       if (conn->hcon->out) {
-               u8 random[16];
-
-               swap128(smp->prnd, random);
-               smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
-                            random);
-       } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
+       if (conn->hcon->out)
+               smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+                            smp->prnd);
+       else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags))
                queue_work(hdev->workqueue, &smp->confirm);
-       } else {
+       else
                set_bit(SMP_FLAG_CFM_PENDING, &smp->smp_flags);
-       }
 
        return 0;
 }
@@ -686,7 +793,10 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 
        BT_DBG("conn %p", conn);
 
-       swap128(skb->data, smp->rrnd);
+       if (skb->len < sizeof(smp->rrnd))
+               return SMP_UNSPECIFIED;
+
+       memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd));
        skb_pull(skb, sizeof(smp->rrnd));
 
        queue_work(hdev->workqueue, &smp->random);
@@ -699,7 +809,8 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
        struct smp_ltk *key;
        struct hci_conn *hcon = conn->hcon;
 
-       key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type);
+       key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type,
+                                  hcon->out);
        if (!key)
                return 0;
 
@@ -724,6 +835,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        BT_DBG("conn %p", conn);
 
+       if (skb->len < sizeof(*rp))
+               return SMP_UNSPECIFIED;
+
        if (!(conn->hcon->link_mode & HCI_LM_MASTER))
                return SMP_CMD_NOTSUPP;
 
@@ -747,6 +861,8 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 
        smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
 
+       clear_bit(SMP_FLAG_INITIATOR, &smp->smp_flags);
+
        return 0;
 }
 
@@ -764,11 +880,15 @@ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level)
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 {
        struct l2cap_conn *conn = hcon->l2cap_data;
-       struct smp_chan *smp = conn->smp_chan;
+       struct smp_chan *smp;
        __u8 authreq;
 
        BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
 
+       /* This may be NULL if there's an unexpected disconnection */
+       if (!conn)
+               return 1;
+
        if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
                return 1;
 
@@ -788,6 +908,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 
        authreq = seclevel_to_authreq(sec_level);
 
+       /* hcon->auth_type is set by pair_device in mgmt.c. If the MITM
+        * flag is set, we should also set it for the SMP request.
+        */
+       if ((hcon->auth_type & 0x01))
+               authreq |= SMP_AUTH_MITM;
+
        if (hcon->link_mode & HCI_LM_MASTER) {
                struct smp_cmd_pairing cp;
 
@@ -802,6 +928,8 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
                smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
        }
 
+       set_bit(SMP_FLAG_INITIATOR, &smp->smp_flags);
+
 done:
        hcon->pending_sec_level = sec_level;
 
@@ -813,6 +941,15 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
        struct smp_cmd_encrypt_info *rp = (void *) skb->data;
        struct smp_chan *smp = conn->smp_chan;
 
+       BT_DBG("conn %p", conn);
+
+       if (skb->len < sizeof(*rp))
+               return SMP_UNSPECIFIED;
+
+       /* Ignore this PDU if it wasn't requested */
+       if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
+               return 0;
+
        skb_pull(skb, sizeof(*rp));
 
        memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
@@ -826,16 +963,138 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
        struct smp_chan *smp = conn->smp_chan;
        struct hci_dev *hdev = conn->hcon->hdev;
        struct hci_conn *hcon = conn->hcon;
+       struct smp_ltk *ltk;
        u8 authenticated;
 
+       BT_DBG("conn %p", conn);
+
+       if (skb->len < sizeof(*rp))
+               return SMP_UNSPECIFIED;
+
+       /* Ignore this PDU if it wasn't requested */
+       if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
+               return 0;
+
+       /* Mark the information as received */
+       smp->remote_key_dist &= ~SMP_DIST_ENC_KEY;
+
        skb_pull(skb, sizeof(*rp));
 
        hci_dev_lock(hdev);
        authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
-       hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, 1,
-                   authenticated, smp->tk, smp->enc_key_size,
-                   rp->ediv, rp->rand);
-       smp_distribute_keys(conn, 1);
+       ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK,
+                         authenticated, smp->tk, smp->enc_key_size,
+                         rp->ediv, rp->rand);
+       smp->ltk = ltk;
+       if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
+               smp_distribute_keys(conn);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+       struct smp_cmd_ident_info *info = (void *) skb->data;
+       struct smp_chan *smp = conn->smp_chan;
+
+       BT_DBG("");
+
+       if (skb->len < sizeof(*info))
+               return SMP_UNSPECIFIED;
+
+       /* Ignore this PDU if it wasn't requested */
+       if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
+               return 0;
+
+       skb_pull(skb, sizeof(*info));
+
+       memcpy(smp->irk, info->irk, 16);
+
+       return 0;
+}
+
+static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
+                                  struct sk_buff *skb)
+{
+       struct smp_cmd_ident_addr_info *info = (void *) skb->data;
+       struct smp_chan *smp = conn->smp_chan;
+       struct hci_conn *hcon = conn->hcon;
+       bdaddr_t rpa;
+
+       BT_DBG("");
+
+       if (skb->len < sizeof(*info))
+               return SMP_UNSPECIFIED;
+
+       /* Ignore this PDU if it wasn't requested */
+       if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
+               return 0;
+
+       /* Mark the information as received */
+       smp->remote_key_dist &= ~SMP_DIST_ID_KEY;
+
+       skb_pull(skb, sizeof(*info));
+
+       /* Strictly speaking, the Core Specification (4.1) allows sending
+        * an empty address, which would force us to rely on just the IRK
+        * as "identity information". However, since no such
+        * implementations are known and to avoid overcomplicating our
+        * own implementation, simply pretend that we never received an
+        * IRK for such a device.
+        */
+       if (!bacmp(&info->bdaddr, BDADDR_ANY)) {
+               BT_ERR("Ignoring IRK with no identity address");
+               smp_distribute_keys(conn);
+               return 0;
+       }
+
+       bacpy(&smp->id_addr, &info->bdaddr);
+       smp->id_addr_type = info->addr_type;
+
+       if (hci_bdaddr_is_rpa(&hcon->dst, hcon->dst_type))
+               bacpy(&rpa, &hcon->dst);
+       else
+               bacpy(&rpa, BDADDR_ANY);
+
+       smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr,
+                                     smp->id_addr_type, smp->irk, &rpa);
+
+       smp_distribute_keys(conn);
+
+       return 0;
+}
+
+static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+       struct smp_cmd_sign_info *rp = (void *) skb->data;
+       struct smp_chan *smp = conn->smp_chan;
+       struct hci_dev *hdev = conn->hcon->hdev;
+       struct smp_csrk *csrk;
+
+       BT_DBG("conn %p", conn);
+
+       if (skb->len < sizeof(*rp))
+               return SMP_UNSPECIFIED;
+
+       /* Ignore this PDU if it wasn't requested */
+       if (!(smp->remote_key_dist & SMP_DIST_SIGN))
+               return 0;
+
+       /* Mark the information as received */
+       smp->remote_key_dist &= ~SMP_DIST_SIGN;
+
+       skb_pull(skb, sizeof(*rp));
+
+       hci_dev_lock(hdev);
+       csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
+       if (csrk) {
+               csrk->master = 0x01;
+               memcpy(csrk->val, rp->csrk, sizeof(csrk->val));
+       }
+       smp->csrk = csrk;
+       if (!(smp->remote_key_dist & SMP_DIST_SIGN))
+               smp_distribute_keys(conn);
        hci_dev_unlock(hdev);
 
        return 0;
@@ -915,10 +1174,15 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
                break;
 
        case SMP_CMD_IDENT_INFO:
+               reason = smp_cmd_ident_info(conn, skb);
+               break;
+
        case SMP_CMD_IDENT_ADDR_INFO:
+               reason = smp_cmd_ident_addr_info(conn, skb);
+               break;
+
        case SMP_CMD_SIGN_INFO:
-               /* Just ignored */
-               reason = 0;
+               reason = smp_cmd_sign_info(conn, skb);
                break;
 
        default:
@@ -937,26 +1201,78 @@ done:
        return err;
 }
 
-int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
+static void smp_notify_keys(struct l2cap_conn *conn)
+{
+       struct smp_chan *smp = conn->smp_chan;
+       struct hci_conn *hcon = conn->hcon;
+       struct hci_dev *hdev = hcon->hdev;
+       struct smp_cmd_pairing *req = (void *) &smp->preq[1];
+       struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
+       bool persistent;
+
+       if (smp->remote_irk) {
+               mgmt_new_irk(hdev, smp->remote_irk);
+               /* Now that user space can be considered to know the
+                * identity address, track the connection based on it
+                * from now on.
+                */
+               bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
+               hcon->dst_type = smp->remote_irk->addr_type;
+               l2cap_conn_update_id_addr(hcon);
+       }
+
+       /* The LTKs and CSRKs should be persistent only if both sides
+        * had the bonding bit set in their authentication requests.
+        */
+       persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING);
+
+       if (smp->csrk) {
+               smp->csrk->bdaddr_type = hcon->dst_type;
+               bacpy(&smp->csrk->bdaddr, &hcon->dst);
+               mgmt_new_csrk(hdev, smp->csrk, persistent);
+       }
+
+       if (smp->slave_csrk) {
+               smp->slave_csrk->bdaddr_type = hcon->dst_type;
+               bacpy(&smp->slave_csrk->bdaddr, &hcon->dst);
+               mgmt_new_csrk(hdev, smp->slave_csrk, persistent);
+       }
+
+       if (smp->ltk) {
+               smp->ltk->bdaddr_type = hcon->dst_type;
+               bacpy(&smp->ltk->bdaddr, &hcon->dst);
+               mgmt_new_ltk(hdev, smp->ltk, persistent);
+       }
+
+       if (smp->slave_ltk) {
+               smp->slave_ltk->bdaddr_type = hcon->dst_type;
+               bacpy(&smp->slave_ltk->bdaddr, &hcon->dst);
+               mgmt_new_ltk(hdev, smp->slave_ltk, persistent);
+       }
+}
+
+int smp_distribute_keys(struct l2cap_conn *conn)
 {
        struct smp_cmd_pairing *req, *rsp;
        struct smp_chan *smp = conn->smp_chan;
+       struct hci_conn *hcon = conn->hcon;
+       struct hci_dev *hdev = hcon->hdev;
        __u8 *keydist;
 
-       BT_DBG("conn %p force %d", conn, force);
+       BT_DBG("conn %p", conn);
 
-       if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+       if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
                return 0;
 
        rsp = (void *) &smp->prsp[1];
 
        /* The responder sends its keys first */
-       if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
+       if (hcon->out && (smp->remote_key_dist & 0x07))
                return 0;
 
        req = (void *) &smp->preq[1];
 
-       if (conn->hcon->out) {
+       if (hcon->out) {
                keydist = &rsp->init_key_dist;
                *keydist &= req->init_key_dist;
        } else {
@@ -964,28 +1280,30 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
                *keydist &= req->resp_key_dist;
        }
 
-
        BT_DBG("keydist 0x%x", *keydist);
 
        if (*keydist & SMP_DIST_ENC_KEY) {
                struct smp_cmd_encrypt_info enc;
                struct smp_cmd_master_ident ident;
-               struct hci_conn *hcon = conn->hcon;
+               struct smp_ltk *ltk;
                u8 authenticated;
                __le16 ediv;
+               __le64 rand;
 
                get_random_bytes(enc.ltk, sizeof(enc.ltk));
                get_random_bytes(&ediv, sizeof(ediv));
-               get_random_bytes(ident.rand, sizeof(ident.rand));
+               get_random_bytes(&rand, sizeof(rand));
 
                smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
 
                authenticated = hcon->sec_level == BT_SECURITY_HIGH;
-               hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-                           HCI_SMP_LTK_SLAVE, 1, authenticated,
-                           enc.ltk, smp->enc_key_size, ediv, ident.rand);
+               ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
+                                 HCI_SMP_LTK_SLAVE, authenticated, enc.ltk,
+                                 smp->enc_key_size, ediv, rand);
+               smp->slave_ltk = ltk;
 
                ident.ediv = ediv;
+               ident.rand = rand;
 
                smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
 
@@ -996,14 +1314,18 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
                struct smp_cmd_ident_addr_info addrinfo;
                struct smp_cmd_ident_info idinfo;
 
-               /* Send a dummy key */
-               get_random_bytes(idinfo.irk, sizeof(idinfo.irk));
+               memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk));
 
                smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
 
-               /* Just public address */
-               memset(&addrinfo, 0, sizeof(addrinfo));
-               bacpy(&addrinfo.bdaddr, &conn->hcon->src);
+               /* The hci_conn contains the local identity address
+                * after the connection has been established.
+                *
+                * This is true even when the connection has been
+                * established using a resolvable random address.
+                */
+               bacpy(&addrinfo.bdaddr, &hcon->src);
+               addrinfo.addr_type = hcon->src_type;
 
                smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
                             &addrinfo);
@@ -1013,20 +1335,33 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
 
        if (*keydist & SMP_DIST_SIGN) {
                struct smp_cmd_sign_info sign;
+               struct smp_csrk *csrk;
 
-               /* Send a dummy key */
+               /* Generate a new random key */
                get_random_bytes(sign.csrk, sizeof(sign.csrk));
 
+               csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
+               if (csrk) {
+                       csrk->master = 0x00;
+                       memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
+               }
+               smp->slave_csrk = csrk;
+
                smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
 
                *keydist &= ~SMP_DIST_SIGN;
        }
 
-       if (conn->hcon->out || force) {
-               clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
-               cancel_delayed_work_sync(&conn->security_timer);
-               smp_chan_destroy(conn);
-       }
+       /* If there are still keys to be received, wait for them */
+       if ((smp->remote_key_dist & 0x07))
+               return 0;
+
+       clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags);
+       cancel_delayed_work_sync(&conn->security_timer);
+       set_bit(SMP_FLAG_COMPLETE, &smp->smp_flags);
+       smp_notify_keys(conn);
+
+       smp_chan_destroy(conn);
 
        return 0;
 }
index a700bcb490d7bedb24c55b0b97b58bc6280c5552..1277147a915070e3a5256debdd98316fa1d916c5 100644 (file)
@@ -78,7 +78,7 @@ struct smp_cmd_encrypt_info {
 #define SMP_CMD_MASTER_IDENT   0x07
 struct smp_cmd_master_ident {
        __le16  ediv;
-       __u8    rand[8];
+       __le64  rand;
 } __packed;
 
 #define SMP_CMD_IDENT_INFO     0x08
@@ -118,6 +118,8 @@ struct smp_cmd_security_req {
 #define SMP_FLAG_TK_VALID      1
 #define SMP_FLAG_CFM_PENDING   2
 #define SMP_FLAG_MITM_AUTH     3
+#define SMP_FLAG_COMPLETE      4
+#define SMP_FLAG_INITIATOR     5
 
 struct smp_chan {
        struct l2cap_conn *conn;
@@ -128,20 +130,31 @@ struct smp_chan {
        u8              pcnf[16]; /* SMP Pairing Confirm */
        u8              tk[16]; /* SMP Temporary Key */
        u8              enc_key_size;
+       u8              remote_key_dist;
+       bdaddr_t        id_addr;
+       u8              id_addr_type;
+       u8              irk[16];
+       struct smp_csrk *csrk;
+       struct smp_csrk *slave_csrk;
+       struct smp_ltk  *ltk;
+       struct smp_ltk  *slave_ltk;
+       struct smp_irk  *remote_irk;
        unsigned long   smp_flags;
-       struct crypto_blkcipher *tfm;
        struct work_struct confirm;
        struct work_struct random;
-
 };
 
 /* SMP Commands */
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
 int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
-int smp_distribute_keys(struct l2cap_conn *conn, __u8 force);
+int smp_distribute_keys(struct l2cap_conn *conn);
 int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
 
 void smp_chan_destroy(struct l2cap_conn *conn);
 
+bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
+                    bdaddr_t *bdaddr);
+int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa);
+
 #endif /* __SMP_H */
index 8fe8b71b487add263a711d39e52f683675708dd6..3e2da2cb72db1725f064ec21d3ce6ab8765532c1 100644 (file)
@@ -88,18 +88,11 @@ out:
 static int br_dev_init(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
-       int i;
 
-       br->stats = alloc_percpu(struct pcpu_sw_netstats);
+       br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!br->stats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *br_dev_stats;
-               br_dev_stats = per_cpu_ptr(br->stats, i);
-               u64_stats_init(&br_dev_stats->syncp);
-       }
-
        return 0;
 }
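
netdev_alloc_pcpu_stats() folds the open-coded per-CPU allocation and the u64_stats_init() loop into a single helper. Roughly what the removed code amounted to, kept here as a hedged sketch rather than a quote of the macro; example_alloc_sw_netstats is an invented name.

    static struct pcpu_sw_netstats __percpu *example_alloc_sw_netstats(void)
    {
            struct pcpu_sw_netstats __percpu *stats;
            int cpu;

            stats = alloc_percpu(struct pcpu_sw_netstats);
            if (!stats)
                    return NULL;

            /* Seed each CPU's u64_stats sync point, as the old loop did */
            for_each_possible_cpu(cpu)
                    u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);

            return stats;
    }
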
 
@@ -143,9 +136,9 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
                const struct pcpu_sw_netstats *bstats
                        = per_cpu_ptr(br->stats, cpu);
                do {
-                       start = u64_stats_fetch_begin_bh(&bstats->syncp);
+                       start = u64_stats_fetch_begin_irq(&bstats->syncp);
                        memcpy(&tmp, bstats, sizeof(tmp));
-               } while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
                sum.tx_bytes   += tmp.tx_bytes;
                sum.tx_packets += tmp.tx_packets;
                sum.rx_bytes   += tmp.rx_bytes;
@@ -225,16 +218,16 @@ static void br_netpoll_cleanup(struct net_device *dev)
                br_netpoll_disable(p);
 }
 
-static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static int __br_netpoll_enable(struct net_bridge_port *p)
 {
        struct netpoll *np;
        int err;
 
-       np = kzalloc(sizeof(*p->np), gfp);
+       np = kzalloc(sizeof(*p->np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;
 
-       err = __netpoll_setup(np, p->dev, gfp);
+       err = __netpoll_setup(np, p->dev);
        if (err) {
                kfree(np);
                return err;
@@ -244,16 +237,15 @@ static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
        return err;
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+int br_netpoll_enable(struct net_bridge_port *p)
 {
        if (!p->br->dev->npinfo)
                return 0;
 
-       return __br_netpoll_enable(p, gfp);
+       return __br_netpoll_enable(p);
 }
 
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
-                           gfp_t gfp)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p;
@@ -262,7 +254,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
        list_for_each_entry(p, &br->port_list, list) {
                if (!p->dev)
                        continue;
-               err = __br_netpoll_enable(p, gfp);
+               err = __br_netpoll_enable(p);
                if (err)
                        goto fail;
        }
@@ -374,7 +366,7 @@ void br_dev_setup(struct net_device *dev)
        br->bridge_id.prio[0] = 0x80;
        br->bridge_id.prio[1] = 0x00;
 
-       memcpy(br->group_addr, eth_reserved_addr_base, ETH_ALEN);
+       ether_addr_copy(br->group_addr, eth_reserved_addr_base);
 
        br->stp_enabled = BR_NO_STP;
        br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
index d3409e6b5453e3e8f90b26ec51bb398033a865ac..056b67b0e2778fdce7bd80a2bf4ede2d552c0e1d 100644 (file)
@@ -35,16 +35,11 @@ static inline int should_deliver(const struct net_bridge_port *p,
                p->state == BR_STATE_FORWARDING;
 }
 
-static inline unsigned int packet_length(const struct sk_buff *skb)
-{
-       return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
-}
-
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
        /* ip_fragment doesn't copy the MAC header */
        if (nf_bridge_maybe_copy_header(skb) ||
-           (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
+           !is_skb_forwardable(skb->dev, skb)) {
                kfree_skb(skb);
        } else {
                skb_push(skb, ETH_HLEN);
@@ -71,7 +66,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
        skb->dev = to->dev;
 
        if (unlikely(netpoll_tx_running(to->br->dev))) {
-               if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+               if (!is_skb_forwardable(skb->dev, skb))
                        kfree_skb(skb);
                else {
                        skb_push(skb, ETH_HLEN);
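
br_dev_queue_push_xmit() and __br_deliver() now share the generic helper instead of the local packet_length() check. Roughly the shape of that helper, offered as a hedged sketch rather than a quote of net/core/dev.c; example_forwardable is an illustrative name.

    static bool example_forwardable(const struct net_device *dev,
                                    const struct sk_buff *skb)
    {
            unsigned int len;

            if (!(dev->flags & IFF_UP))
                    return false;

            /* Allow for the link-layer header plus one VLAN tag */
            len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
            if (skb->len <= len)
                    return true;

            /* GSO frames will be segmented down to the MTU later on */
            return skb_is_gso(skb);
    }
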
index 54d207d3a31ced4e2d23e0e4fba132f52bb9dc6c..5262b8617eb9cc21b1070e48d1c1efda584aef6f 100644 (file)
@@ -366,7 +366,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
        if (err)
                goto err2;
 
-       err = br_netpoll_enable(p, GFP_KERNEL);
+       err = br_netpoll_enable(p);
        if (err)
                goto err3;
 
index 93067ecdb9a212321c8780bd65153d60e8203907..7b757b5dc773fc2dcaedfa8f7c97e7884a622d89 100644 (file)
@@ -363,7 +363,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(eth->h_source, br->dev->dev_addr);
        eth->h_dest[0] = 1;
        eth->h_dest[1] = 0;
        eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(eth->h_source, br->dev->dev_addr);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));
 
index b008c59a92c4be8dbc5812606ad76777e17d90ae..80e1b0f60a30214002684a42b1bab1a02e9d9962 100644 (file)
@@ -167,7 +167,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE;
+       rt->dst.flags   = DST_NOXFRM | DST_FAKE_RTABLE;
        rt->dst.ops = &fake_dst_ops;
 }
 
@@ -506,7 +506,7 @@ bridged_dnat:
                                               1);
                                return 0;
                        }
-                       memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
+                       ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
                        skb->pkt_type = PACKET_HOST;
                }
        } else {
index 3ba11bc99b65db2b14754dc84deeed5274623f6f..06811d79f89f9e7712344d99fdc97194c62f0aef 100644 (file)
@@ -46,12 +46,12 @@ typedef __u16 port_id;
 struct bridge_id
 {
        unsigned char   prio[2];
-       unsigned char   addr[6];
+       unsigned char   addr[ETH_ALEN];
 };
 
 struct mac_addr
 {
-       unsigned char   addr[6];
+       unsigned char   addr[ETH_ALEN];
 };
 
 struct br_ip
@@ -349,7 +349,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                netpoll_send_skb(np, skb);
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+int br_netpoll_enable(struct net_bridge_port *p);
 void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
@@ -357,7 +357,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 {
 }
 
-static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static inline int br_netpoll_enable(struct net_bridge_port *p)
 {
        return 0;
 }
index f23c74b3a95327722916405cee92d5d61048c8a2..91510712c7a729df3d57c3f118ece9eb3da3fe20 100644 (file)
@@ -99,9 +99,9 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
        v->num_vlans--;
        if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
                if (v->port_idx)
-                       rcu_assign_pointer(v->parent.port->vlan_info, NULL);
+                       RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
                else
-                       rcu_assign_pointer(v->parent.br->vlan_info, NULL);
+                       RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
                kfree_rcu(v, rcu);
        }
        return 0;
@@ -113,9 +113,9 @@ static void __vlan_flush(struct net_port_vlans *v)
        v->pvid = 0;
        bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
        if (v->port_idx)
-               rcu_assign_pointer(v->parent.port->vlan_info, NULL);
+               RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
        else
-               rcu_assign_pointer(v->parent.br->vlan_info, NULL);
+               RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
        kfree_rcu(v, rcu);
 }
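
The switch from rcu_assign_pointer() to RCU_INIT_POINTER() is safe here because the published value is NULL: there is no freshly initialized object whose stores must be ordered before publication, so no memory barrier is needed. A hedged sketch of that convention; example_publish and its parameters are invented for illustration.

    static void example_publish(struct net_port_vlans __rcu **slot,
                                struct net_port_vlans *newv)
    {
            if (newv)
                    /* Orders newv's initialization before readers can see it */
                    rcu_assign_pointer(*slot, newv);
            else
                    /* NULL carries no data, so no barrier is required */
                    RCU_INIT_POINTER(*slot, NULL);
    }
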
 
index 3fb3c848affef74249a1fd9ed610aea7d1db5764..9024283d2bca8206d6976bb7bb72523b514bd68d 100644 (file)
@@ -28,7 +28,7 @@ static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
        uint32_t cmp[2] = { 0, 0 };
        int key = ((const unsigned char *)mac)[5];
 
-       memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
+       ether_addr_copy(((char *) cmp) + 2, mac);
        start = wh->table[key];
        limit = wh->table[key + 1];
        if (ip) {
index c59f7bfae6e2c3dc8a2e4f3725f43498ae2542a2..4e0b0c3593250bd8a1be0cdafca49ce7e4684f94 100644 (file)
@@ -22,7 +22,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
        if (!skb_make_writable(skb, 0))
                return EBT_DROP;
 
-       memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN);
+       ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
        return info->target;
 }
 
index 46624bb6d9be5f0ca26b2b845f44f1a857499291..203964997a515a6bc4d961e59b365a5354df6518 100644 (file)
@@ -25,10 +25,10 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
 
        if (par->hooknum != NF_BR_BROUTING)
                /* rcu_read_lock()ed by nf_hook_slow */
-               memcpy(eth_hdr(skb)->h_dest,
-                      br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
+               ether_addr_copy(eth_hdr(skb)->h_dest,
+                               br_port_get_rcu(par->in)->br->dev->dev_addr);
        else
-               memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
+               ether_addr_copy(eth_hdr(skb)->h_dest, par->in->dev_addr);
        skb->pkt_type = PACKET_HOST;
        return info->target;
 }
index 0f6b118d6cb21cc19c891c37d974ddd4ad2bcd6a..e56ccd060d2680da042322cea43177a27744b7ca 100644 (file)
@@ -24,7 +24,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
        if (!skb_make_writable(skb, 0))
                return EBT_DROP;
 
-       memcpy(eth_hdr(skb)->h_source, info->mac, ETH_ALEN);
+       ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
        if (!(info->target & NAT_ARP_BIT) &&
            eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
                const struct arphdr *ap;
index 0676f2b199d672eaf61157cdf78b75ee74afb434..82750f9158655225ad7dab9e903932d38f97b8a5 100644 (file)
@@ -2082,7 +2082,6 @@ bad:
        pr_err("osdc handle_map corrupt msg\n");
        ceph_msg_dump(msg);
        up_write(&osdc->map_sem);
-       return;
 }
 
 /*
@@ -2281,7 +2280,6 @@ done_err:
 
 bad:
        pr_err("osdc handle_watch_notify corrupt msg\n");
-       return;
 }
 
 /*
index 45fa2f11f84dcc7f0efe12711c2d51526360a08d..48d81e4a256e2c06f0018e5d6b9e005ecf942332 100644 (file)
@@ -1245,7 +1245,7 @@ static int __dev_open(struct net_device *dev)
         * If we don't do this there is a chance ndo_poll_controller
         * or ndo_poll may be running while we open the device
         */
-       netpoll_rx_disable(dev);
+       netpoll_poll_disable(dev);
 
        ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
        ret = notifier_to_errno(ret);
@@ -1260,7 +1260,7 @@ static int __dev_open(struct net_device *dev)
        if (!ret && ops->ndo_open)
                ret = ops->ndo_open(dev);
 
-       netpoll_rx_enable(dev);
+       netpoll_poll_enable(dev);
 
        if (ret)
                clear_bit(__LINK_STATE_START, &dev->state);
@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
        might_sleep();
 
        list_for_each_entry(dev, head, close_list) {
+               /* Temporarily disable netpoll until the interface is down */
+               netpoll_poll_disable(dev);
+
                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
                clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
                dev->flags &= ~IFF_UP;
                net_dmaengine_put();
+               netpoll_poll_enable(dev);
        }
 
        return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
        int retval;
        LIST_HEAD(single);
 
-       /* Temporarily disable netpoll until the interface is down */
-       netpoll_rx_disable(dev);
-
        list_add(&dev->close_list, &single);
        retval = __dev_close_many(&single);
        list_del(&single);
 
-       netpoll_rx_enable(dev);
        return retval;
 }
 
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
        if (dev->flags & IFF_UP) {
                LIST_HEAD(single);
 
-               /* Block netpoll rx while the interface is going down */
-               netpoll_rx_disable(dev);
-
                list_add(&dev->close_list, &single);
                dev_close_many(&single);
                list_del(&single);
-
-               netpoll_rx_enable(dev);
        }
        return 0;
 }
@@ -1645,8 +1640,7 @@ static inline void net_timestamp_set(struct sk_buff *skb)
                        __net_timestamp(SKB);           \
        }                                               \
 
-static inline bool is_skb_forwardable(struct net_device *dev,
-                                     struct sk_buff *skb)
+bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
 {
        unsigned int len;
 
@@ -1665,6 +1659,7 @@ static inline bool is_skb_forwardable(struct net_device *dev,
 
        return false;
 }
+EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 /**
  * dev_forward_skb - loopback an skb to another netif
@@ -2885,6 +2880,7 @@ recursion_alert:
        rc = -ENETDOWN;
        rcu_read_unlock_bh();
 
+       atomic_long_inc(&dev->tx_dropped);
        kfree_skb(skb);
        return rc;
 out:
@@ -2957,7 +2953,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                flow_table = rcu_dereference(rxqueue->rps_flow_table);
                if (!flow_table)
                        goto out;
-               flow_id = skb->rxhash & flow_table->mask;
+               flow_id = skb_get_hash(skb) & flow_table->mask;
                rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
                                                        rxq_index, flow_id);
                if (rc < 0)
@@ -2991,6 +2987,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        struct rps_sock_flow_table *sock_flow_table;
        int cpu = -1;
        u16 tcpu;
+       u32 hash;
 
        if (skb_rx_queue_recorded(skb)) {
                u16 index = skb_get_rx_queue(skb);
@@ -3019,7 +3016,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        }
 
        skb_reset_network_header(skb);
-       if (!skb_get_hash(skb))
+       hash = skb_get_hash(skb);
+       if (!hash)
                goto done;
 
        flow_table = rcu_dereference(rxqueue->rps_flow_table);
@@ -3028,11 +3026,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                u16 next_cpu;
                struct rps_dev_flow *rflow;
 
-               rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
+               rflow = &flow_table->flows[hash & flow_table->mask];
                tcpu = rflow->cpu;
 
-               next_cpu = sock_flow_table->ents[skb->rxhash &
-                   sock_flow_table->mask];
+               next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
 
                /*
                 * If the desired CPU (where last recvmsg was done) is
@@ -3061,7 +3058,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        }
 
        if (map) {
-               tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
+               tcpu = map->cpus[((u64) hash * map->len) >> 32];
 
                if (cpu_online(tcpu)) {
                        cpu = tcpu;
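
The map->cpus[((u64) hash * map->len) >> 32] lookup above uses the multiply-shift trick to map a 32-bit hash uniformly onto [0, len) without a modulo. A self-contained illustration; the hash value and map length are arbitrary.

    #include <stdio.h>

    int main(void)
    {
        unsigned int hash = 0xdeadbeefu;
        unsigned int len = 8;    /* e.g. number of CPUs in an rps_map */
        unsigned int idx = (unsigned int)(((unsigned long long)hash * len) >> 32);

        printf("slot %u of %u\n", idx, len);    /* prints "slot 6 of 8" */
        return 0;
    }
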
@@ -3236,10 +3233,6 @@ static int netif_rx_internal(struct sk_buff *skb)
 {
        int ret;
 
-       /* if netpoll wants it, pretend we never saw it */
-       if (netpoll_rx(skb))
-               return NET_RX_DROP;
-
        net_timestamp_check(netdev_tstamp_prequeue, skb);
 
        trace_netif_rx(skb);
@@ -3500,11 +3493,11 @@ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
 {
        switch (skb->protocol) {
-       case __constant_htons(ETH_P_ARP):
-       case __constant_htons(ETH_P_IP):
-       case __constant_htons(ETH_P_IPV6):
-       case __constant_htons(ETH_P_8021Q):
-       case __constant_htons(ETH_P_8021AD):
+       case htons(ETH_P_ARP):
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+       case htons(ETH_P_8021Q):
+       case htons(ETH_P_8021AD):
                return true;
        default:
                return false;
@@ -3525,10 +3518,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 
        trace_netif_receive_skb(skb);
 
-       /* if we've gotten here through NAPI, check netpoll */
-       if (netpoll_receive_skb(skb))
-               goto out;
-
        orig_dev = skb->dev;
 
        skb_reset_network_header(skb);
@@ -3655,7 +3644,6 @@ drop:
 
 unlock:
        rcu_read_unlock();
-out:
        return ret;
 }
 
@@ -3845,10 +3833,10 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
                diffs |= p->vlan_tci ^ skb->vlan_tci;
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
-                                                     skb_gro_mac_header(skb));
+                                                     skb_mac_header(skb));
                else if (!diffs)
                        diffs = memcmp(skb_mac_header(p),
-                                      skb_gro_mac_header(skb),
+                                      skb_mac_header(skb),
                                       maclen);
                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
@@ -3871,6 +3859,27 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
        }
 }
 
+static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
+{
+       struct skb_shared_info *pinfo = skb_shinfo(skb);
+
+       BUG_ON(skb->end - skb->tail < grow);
+
+       memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+       skb->data_len -= grow;
+       skb->tail += grow;
+
+       pinfo->frags[0].page_offset += grow;
+       skb_frag_size_sub(&pinfo->frags[0], grow);
+
+       if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
+               skb_frag_unref(skb, 0);
+               memmove(pinfo->frags, pinfo->frags + 1,
+                       --pinfo->nr_frags * sizeof(pinfo->frags[0]));
+       }
+}
+
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff **pp = NULL;
@@ -3879,14 +3888,14 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        struct list_head *head = &offload_base;
        int same_flow;
        enum gro_result ret;
+       int grow;
 
-       if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
+       if (!(skb->dev->features & NETIF_F_GRO))
                goto normal;
 
        if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
 
-       skb_gro_reset_offset(skb);
        gro_list_prepare(napi, skb);
        NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
 
@@ -3950,27 +3959,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        ret = GRO_HELD;
 
 pull:
-       if (skb_headlen(skb) < skb_gro_offset(skb)) {
-               int grow = skb_gro_offset(skb) - skb_headlen(skb);
-
-               BUG_ON(skb->end - skb->tail < grow);
-
-               memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
-
-               skb->tail += grow;
-               skb->data_len -= grow;
-
-               skb_shinfo(skb)->frags[0].page_offset += grow;
-               skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
-
-               if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
-                       skb_frag_unref(skb, 0);
-                       memmove(skb_shinfo(skb)->frags,
-                               skb_shinfo(skb)->frags + 1,
-                               --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
-               }
-       }
-
+       grow = skb_gro_offset(skb) - skb_headlen(skb);
+       if (grow > 0)
+               gro_pull_from_frag0(skb, grow);
 ok:
        return ret;
 
@@ -4038,6 +4029,8 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        trace_napi_gro_receive_entry(skb);
 
+       skb_gro_reset_offset(skb);
+
        return napi_skb_finish(dev_gro_receive(napi, skb), skb);
 }
 EXPORT_SYMBOL(napi_gro_receive);
@@ -4066,12 +4059,16 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
-                              gro_result_t ret)
+static gro_result_t napi_frags_finish(struct napi_struct *napi,
+                                     struct sk_buff *skb,
+                                     gro_result_t ret)
 {
        switch (ret) {
        case GRO_NORMAL:
-               if (netif_receive_skb_internal(skb))
+       case GRO_HELD:
+               __skb_push(skb, ETH_HLEN);
+               skb->protocol = eth_type_trans(skb, skb->dev);
+               if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
                        ret = GRO_DROP;
                break;
 
@@ -4080,7 +4077,6 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
                napi_reuse_skb(napi, skb);
                break;
 
-       case GRO_HELD:
        case GRO_MERGED:
                break;
        }
@@ -4088,17 +4084,41 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *
        return ret;
 }
 
+/* The upper GRO stack assumes the network header starts at gro_offset=0.
+ * Drivers may call both napi_gro_frags() and napi_gro_receive(), so we
+ * copy the Ethernet header into skb->data to have a common layout.
+ */
 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 {
        struct sk_buff *skb = napi->skb;
+       const struct ethhdr *eth;
+       unsigned int hlen = sizeof(*eth);
 
        napi->skb = NULL;
 
-       if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
-               napi_reuse_skb(napi, skb);
-               return NULL;
+       skb_reset_mac_header(skb);
+       skb_gro_reset_offset(skb);
+
+       eth = skb_gro_header_fast(skb, 0);
+       if (unlikely(skb_gro_header_hard(skb, hlen))) {
+               eth = skb_gro_header_slow(skb, hlen, 0);
+               if (unlikely(!eth)) {
+                       napi_reuse_skb(napi, skb);
+                       return NULL;
+               }
+       } else {
+               gro_pull_from_frag0(skb, hlen);
+               NAPI_GRO_CB(skb)->frag0 += hlen;
+               NAPI_GRO_CB(skb)->frag0_len -= hlen;
        }
-       skb->protocol = eth_type_trans(skb, skb->dev);
+       __skb_pull(skb, hlen);
+
+       /*
+        * This works because the only protocols we care about don't require
+        * special handling.
+        * We'll fix it up properly in napi_frags_finish().
+        */
+       skb->protocol = eth->h_proto;
 
        return skb;
 }
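
With napi_frags_skb() now pulling the Ethernet header itself, a frags-based driver only attaches page fragments before calling napi_gro_frags(). A hedged sketch of that calling pattern; example_rx_frame and the bookkeeping details are illustrative and not taken from any particular driver.

    static void example_rx_frame(struct napi_struct *napi, struct page *page,
                                 unsigned int offset, unsigned int len)
    {
            struct sk_buff *skb = napi_get_frags(napi);

            if (!skb)
                    return;         /* out of memory: drop in this sketch */

            /* No Ethernet header is built in skb->data; only a fragment */
            skb_fill_page_desc(skb, 0, page, offset, len);
            skb->len += len;
            skb->data_len += len;
            skb->truesize += PAGE_SIZE;

            napi_gro_frags(napi);   /* hands the frame to dev_gro_receive() */
    }
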
@@ -6251,6 +6271,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
                netdev_stats_to_stats64(storage, &dev->stats);
        }
        storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
+       storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
index ad30d626a5bd3f5cbd585d0641ab79aa29c3c446..3733381190ec23c5b0e0a5fd181cab27a8bdb312 100644 (file)
@@ -1,11 +1,16 @@
 /*
  * Linux Socket Filter - Kernel level socket filtering
  *
- * Author:
- *     Jay Schulist <jschlst@samba.org>
+ * Based on the design of the Berkeley Packet Filter. The new
+ * internal format has been designed by PLUMgrid:
  *
- * Based on the design of:
- *     - The Berkeley Packet Filter
+ *     Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
+ *
+ * Authors:
+ *
+ *     Jay Schulist <jschlst@samba.org>
+ *     Alexei Starovoitov <ast@plumgrid.com>
+ *     Daniel Borkmann <dborkman@redhat.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -108,304 +113,1045 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sk_filter);
 
+/* Base function for offset calculation. It needs to go into the .text
+ * section and is therefore kept non-static; it will also be used by
+ * JITs later on, so do not let the compiler omit it.
+ */
+noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       return 0;
+}
+
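+
A note on the interpreter that follows: it dispatches instructions with GCC's labels-as-values ("computed goto") through a 256-entry jumptable rather than a switch statement. A minimal, self-contained illustration of the technique; the toy opcodes and program are made up, and it requires GCC or clang.

    #include <stdio.h>

    enum { OP_ADD, OP_MUL, OP_RET };

    static long run(const int *prog, long a, long b)
    {
            /* Jumptable of label addresses, indexed by opcode */
            static const void *jumptable[] = {
                    [OP_ADD] = &&do_add,
                    [OP_MUL] = &&do_mul,
                    [OP_RET] = &&do_ret,
            };

            goto *jumptable[*prog];

    do_add: a += b; goto *jumptable[*++prog];
    do_mul: a *= b; goto *jumptable[*++prog];
    do_ret: return a;
    }

    int main(void)
    {
            const int prog[] = { OP_ADD, OP_MUL, OP_RET };

            printf("%ld\n", run(prog, 2, 3));       /* (2 + 3) * 3 = 15 */
            return 0;
    }
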
 /**
- *     sk_run_filter - run a filter on a socket
- *     @skb: buffer to run the filter on
+ *     __sk_run_filter - run a filter on a given context
+ *     @ctx: buffer to run the filter on
  *     @fentry: filter to apply
  *
- * Decode and apply filter instructions to the skb->data.
- * Return length to keep, 0 for none. @skb is the data we are
- * filtering, @filter is the array of filter instructions.
- * Because all jumps are guaranteed to be before last instruction,
- * and last instruction guaranteed to be a RET, we dont need to check
- * flen. (We used to pass to this function the length of filter)
+ * Decode and apply filter instructions to the skb->data. Return length to
+ * keep, 0 for none. @ctx is the data we are operating on, @insn is the
+ * array of filter instructions.
  */
-unsigned int sk_run_filter(const struct sk_buff *skb,
-                          const struct sock_filter *fentry)
+unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
 {
+       u64 stack[MAX_BPF_STACK / sizeof(u64)];
+       u64 regs[MAX_BPF_REG], tmp;
        void *ptr;
-       u32 A = 0;                      /* Accumulator */
-       u32 X = 0;                      /* Index Register */
-       u32 mem[BPF_MEMWORDS];          /* Scratch Memory Store */
-       u32 tmp;
-       int k;
+       int off;
 
-       /*
-        * Process array of filter instructions.
-        */
-       for (;; fentry++) {
-#if defined(CONFIG_X86_32)
-#define        K (fentry->k)
-#else
-               const u32 K = fentry->k;
-#endif
-
-               switch (fentry->code) {
-               case BPF_S_ALU_ADD_X:
-                       A += X;
-                       continue;
-               case BPF_S_ALU_ADD_K:
-                       A += K;
-                       continue;
-               case BPF_S_ALU_SUB_X:
-                       A -= X;
-                       continue;
-               case BPF_S_ALU_SUB_K:
-                       A -= K;
-                       continue;
-               case BPF_S_ALU_MUL_X:
-                       A *= X;
-                       continue;
-               case BPF_S_ALU_MUL_K:
-                       A *= K;
-                       continue;
-               case BPF_S_ALU_DIV_X:
-                       if (X == 0)
-                               return 0;
-                       A /= X;
-                       continue;
-               case BPF_S_ALU_DIV_K:
-                       A /= K;
-                       continue;
-               case BPF_S_ALU_MOD_X:
-                       if (X == 0)
-                               return 0;
-                       A %= X;
-                       continue;
-               case BPF_S_ALU_MOD_K:
-                       A %= K;
-                       continue;
-               case BPF_S_ALU_AND_X:
-                       A &= X;
-                       continue;
-               case BPF_S_ALU_AND_K:
-                       A &= K;
-                       continue;
-               case BPF_S_ALU_OR_X:
-                       A |= X;
-                       continue;
-               case BPF_S_ALU_OR_K:
-                       A |= K;
-                       continue;
-               case BPF_S_ANC_ALU_XOR_X:
-               case BPF_S_ALU_XOR_X:
-                       A ^= X;
-                       continue;
-               case BPF_S_ALU_XOR_K:
-                       A ^= K;
-                       continue;
-               case BPF_S_ALU_LSH_X:
-                       A <<= X;
-                       continue;
-               case BPF_S_ALU_LSH_K:
-                       A <<= K;
-                       continue;
-               case BPF_S_ALU_RSH_X:
-                       A >>= X;
-                       continue;
-               case BPF_S_ALU_RSH_K:
-                       A >>= K;
-                       continue;
-               case BPF_S_ALU_NEG:
-                       A = -A;
-                       continue;
-               case BPF_S_JMP_JA:
-                       fentry += K;
-                       continue;
-               case BPF_S_JMP_JGT_K:
-                       fentry += (A > K) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JGE_K:
-                       fentry += (A >= K) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JEQ_K:
-                       fentry += (A == K) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JSET_K:
-                       fentry += (A & K) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JGT_X:
-                       fentry += (A > X) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JGE_X:
-                       fentry += (A >= X) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JEQ_X:
-                       fentry += (A == X) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_JMP_JSET_X:
-                       fentry += (A & X) ? fentry->jt : fentry->jf;
-                       continue;
-               case BPF_S_LD_W_ABS:
-                       k = K;
-load_w:
-                       ptr = load_pointer(skb, k, 4, &tmp);
-                       if (ptr != NULL) {
-                               A = get_unaligned_be32(ptr);
-                               continue;
-                       }
-                       return 0;
-               case BPF_S_LD_H_ABS:
-                       k = K;
-load_h:
-                       ptr = load_pointer(skb, k, 2, &tmp);
-                       if (ptr != NULL) {
-                               A = get_unaligned_be16(ptr);
-                               continue;
+#define K  insn->imm
+#define A  regs[insn->a_reg]
+#define X  regs[insn->x_reg]
+#define R0 regs[0]
+
+#define CONT    ({insn++; goto select_insn; })
+#define CONT_JMP ({insn++; goto select_insn; })
+
+       static const void *jumptable[256] = {
+               [0 ... 255] = &&default_label,
+               /* Now overwrite non-defaults ... */
+#define DL(A, B, C)    [A|B|C] = &&A##_##B##_##C
+               DL(BPF_ALU, BPF_ADD, BPF_X),
+               DL(BPF_ALU, BPF_ADD, BPF_K),
+               DL(BPF_ALU, BPF_SUB, BPF_X),
+               DL(BPF_ALU, BPF_SUB, BPF_K),
+               DL(BPF_ALU, BPF_AND, BPF_X),
+               DL(BPF_ALU, BPF_AND, BPF_K),
+               DL(BPF_ALU, BPF_OR, BPF_X),
+               DL(BPF_ALU, BPF_OR, BPF_K),
+               DL(BPF_ALU, BPF_LSH, BPF_X),
+               DL(BPF_ALU, BPF_LSH, BPF_K),
+               DL(BPF_ALU, BPF_RSH, BPF_X),
+               DL(BPF_ALU, BPF_RSH, BPF_K),
+               DL(BPF_ALU, BPF_XOR, BPF_X),
+               DL(BPF_ALU, BPF_XOR, BPF_K),
+               DL(BPF_ALU, BPF_MUL, BPF_X),
+               DL(BPF_ALU, BPF_MUL, BPF_K),
+               DL(BPF_ALU, BPF_MOV, BPF_X),
+               DL(BPF_ALU, BPF_MOV, BPF_K),
+               DL(BPF_ALU, BPF_DIV, BPF_X),
+               DL(BPF_ALU, BPF_DIV, BPF_K),
+               DL(BPF_ALU, BPF_MOD, BPF_X),
+               DL(BPF_ALU, BPF_MOD, BPF_K),
+               DL(BPF_ALU, BPF_NEG, 0),
+               DL(BPF_ALU, BPF_END, BPF_TO_BE),
+               DL(BPF_ALU, BPF_END, BPF_TO_LE),
+               DL(BPF_ALU64, BPF_ADD, BPF_X),
+               DL(BPF_ALU64, BPF_ADD, BPF_K),
+               DL(BPF_ALU64, BPF_SUB, BPF_X),
+               DL(BPF_ALU64, BPF_SUB, BPF_K),
+               DL(BPF_ALU64, BPF_AND, BPF_X),
+               DL(BPF_ALU64, BPF_AND, BPF_K),
+               DL(BPF_ALU64, BPF_OR, BPF_X),
+               DL(BPF_ALU64, BPF_OR, BPF_K),
+               DL(BPF_ALU64, BPF_LSH, BPF_X),
+               DL(BPF_ALU64, BPF_LSH, BPF_K),
+               DL(BPF_ALU64, BPF_RSH, BPF_X),
+               DL(BPF_ALU64, BPF_RSH, BPF_K),
+               DL(BPF_ALU64, BPF_XOR, BPF_X),
+               DL(BPF_ALU64, BPF_XOR, BPF_K),
+               DL(BPF_ALU64, BPF_MUL, BPF_X),
+               DL(BPF_ALU64, BPF_MUL, BPF_K),
+               DL(BPF_ALU64, BPF_MOV, BPF_X),
+               DL(BPF_ALU64, BPF_MOV, BPF_K),
+               DL(BPF_ALU64, BPF_ARSH, BPF_X),
+               DL(BPF_ALU64, BPF_ARSH, BPF_K),
+               DL(BPF_ALU64, BPF_DIV, BPF_X),
+               DL(BPF_ALU64, BPF_DIV, BPF_K),
+               DL(BPF_ALU64, BPF_MOD, BPF_X),
+               DL(BPF_ALU64, BPF_MOD, BPF_K),
+               DL(BPF_ALU64, BPF_NEG, 0),
+               DL(BPF_JMP, BPF_CALL, 0),
+               DL(BPF_JMP, BPF_JA, 0),
+               DL(BPF_JMP, BPF_JEQ, BPF_X),
+               DL(BPF_JMP, BPF_JEQ, BPF_K),
+               DL(BPF_JMP, BPF_JNE, BPF_X),
+               DL(BPF_JMP, BPF_JNE, BPF_K),
+               DL(BPF_JMP, BPF_JGT, BPF_X),
+               DL(BPF_JMP, BPF_JGT, BPF_K),
+               DL(BPF_JMP, BPF_JGE, BPF_X),
+               DL(BPF_JMP, BPF_JGE, BPF_K),
+               DL(BPF_JMP, BPF_JSGT, BPF_X),
+               DL(BPF_JMP, BPF_JSGT, BPF_K),
+               DL(BPF_JMP, BPF_JSGE, BPF_X),
+               DL(BPF_JMP, BPF_JSGE, BPF_K),
+               DL(BPF_JMP, BPF_JSET, BPF_X),
+               DL(BPF_JMP, BPF_JSET, BPF_K),
+               DL(BPF_JMP, BPF_EXIT, 0),
+               DL(BPF_STX, BPF_MEM, BPF_B),
+               DL(BPF_STX, BPF_MEM, BPF_H),
+               DL(BPF_STX, BPF_MEM, BPF_W),
+               DL(BPF_STX, BPF_MEM, BPF_DW),
+               DL(BPF_STX, BPF_XADD, BPF_W),
+               DL(BPF_STX, BPF_XADD, BPF_DW),
+               DL(BPF_ST, BPF_MEM, BPF_B),
+               DL(BPF_ST, BPF_MEM, BPF_H),
+               DL(BPF_ST, BPF_MEM, BPF_W),
+               DL(BPF_ST, BPF_MEM, BPF_DW),
+               DL(BPF_LDX, BPF_MEM, BPF_B),
+               DL(BPF_LDX, BPF_MEM, BPF_H),
+               DL(BPF_LDX, BPF_MEM, BPF_W),
+               DL(BPF_LDX, BPF_MEM, BPF_DW),
+               DL(BPF_LD, BPF_ABS, BPF_W),
+               DL(BPF_LD, BPF_ABS, BPF_H),
+               DL(BPF_LD, BPF_ABS, BPF_B),
+               DL(BPF_LD, BPF_IND, BPF_W),
+               DL(BPF_LD, BPF_IND, BPF_H),
+               DL(BPF_LD, BPF_IND, BPF_B),
+#undef DL
+       };
+
+       regs[FP_REG]  = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+       regs[ARG1_REG] = (u64) (unsigned long) ctx;
+
+select_insn:
+       goto *jumptable[insn->code];
+
+       /* ALU */
+#define ALU(OPCODE, OP)                        \
+       BPF_ALU64_##OPCODE##_BPF_X:     \
+               A = A OP X;             \
+               CONT;                   \
+       BPF_ALU_##OPCODE##_BPF_X:       \
+               A = (u32) A OP (u32) X; \
+               CONT;                   \
+       BPF_ALU64_##OPCODE##_BPF_K:     \
+               A = A OP K;             \
+               CONT;                   \
+       BPF_ALU_##OPCODE##_BPF_K:       \
+               A = (u32) A OP (u32) K; \
+               CONT;
+
+       ALU(BPF_ADD,  +)
+       ALU(BPF_SUB,  -)
+       ALU(BPF_AND,  &)
+       ALU(BPF_OR,   |)
+       ALU(BPF_LSH, <<)
+       ALU(BPF_RSH, >>)
+       ALU(BPF_XOR,  ^)
+       ALU(BPF_MUL,  *)
+#undef ALU
+       BPF_ALU_BPF_NEG_0:
+               A = (u32) -A;
+               CONT;
+       BPF_ALU64_BPF_NEG_0:
+               A = -A;
+               CONT;
+       BPF_ALU_BPF_MOV_BPF_X:
+               A = (u32) X;
+               CONT;
+       BPF_ALU_BPF_MOV_BPF_K:
+               A = (u32) K;
+               CONT;
+       BPF_ALU64_BPF_MOV_BPF_X:
+               A = X;
+               CONT;
+       BPF_ALU64_BPF_MOV_BPF_K:
+               A = K;
+               CONT;
+       BPF_ALU64_BPF_ARSH_BPF_X:
+               (*(s64 *) &A) >>= X;
+               CONT;
+       BPF_ALU64_BPF_ARSH_BPF_K:
+               (*(s64 *) &A) >>= K;
+               CONT;
+       BPF_ALU64_BPF_MOD_BPF_X:
+               tmp = A;
+               if (X)
+                       A = do_div(tmp, X);
+               CONT;
+       BPF_ALU_BPF_MOD_BPF_X:
+               tmp = (u32) A;
+               if (X)
+                       A = do_div(tmp, (u32) X);
+               CONT;
+       BPF_ALU64_BPF_MOD_BPF_K:
+               tmp = A;
+               if (K)
+                       A = do_div(tmp, K);
+               CONT;
+       BPF_ALU_BPF_MOD_BPF_K:
+               tmp = (u32) A;
+               if (K)
+                       A = do_div(tmp, (u32) K);
+               CONT;
+       BPF_ALU64_BPF_DIV_BPF_X:
+               if (X)
+                       do_div(A, X);
+               CONT;
+       BPF_ALU_BPF_DIV_BPF_X:
+               tmp = (u32) A;
+               if (X)
+                       do_div(tmp, (u32) X);
+               A = (u32) tmp;
+               CONT;
+       BPF_ALU64_BPF_DIV_BPF_K:
+               if (K)
+                       do_div(A, K);
+               CONT;
+       BPF_ALU_BPF_DIV_BPF_K:
+               tmp = (u32) A;
+               if (K)
+                       do_div(tmp, (u32) K);
+               A = (u32) tmp;
+               CONT;
+       BPF_ALU_BPF_END_BPF_TO_BE:
+               switch (K) {
+               case 16:
+                       A = (__force u16) cpu_to_be16(A);
+                       break;
+               case 32:
+                       A = (__force u32) cpu_to_be32(A);
+                       break;
+               case 64:
+                       A = (__force u64) cpu_to_be64(A);
+                       break;
+               }
+               CONT;
+       BPF_ALU_BPF_END_BPF_TO_LE:
+               switch (K) {
+               case 16:
+                       A = (__force u16) cpu_to_le16(A);
+                       break;
+               case 32:
+                       A = (__force u32) cpu_to_le32(A);
+                       break;
+               case 64:
+                       A = (__force u64) cpu_to_le64(A);
+                       break;
+               }
+               CONT;
+
+       /* CALL */
+       BPF_JMP_BPF_CALL_0:
+               /* Function call scratches R1-R5 registers, preserves R6-R9,
+                * and stores return value into R0.
+                */
+               R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3],
+                                                  regs[4], regs[5]);
+               CONT;
+
+       /* JMP */
+       BPF_JMP_BPF_JA_0:
+               insn += insn->off;
+               CONT;
+       BPF_JMP_BPF_JEQ_BPF_X:
+               if (A == X) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JEQ_BPF_K:
+               if (A == K) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JNE_BPF_X:
+               if (A != X) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JNE_BPF_K:
+               if (A != K) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JGT_BPF_X:
+               if (A > X) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JGT_BPF_K:
+               if (A > K) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JGE_BPF_X:
+               if (A >= X) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JGE_BPF_K:
+               if (A >= K) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JSGT_BPF_X:
+               if (((s64)A) > ((s64)X)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JSGT_BPF_K:
+               if (((s64)A) > ((s64)K)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JSGE_BPF_X:
+               if (((s64)A) >= ((s64)X)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JSGE_BPF_K:
+               if (((s64)A) >= ((s64)K)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JSET_BPF_X:
+               if (A & X) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_JSET_BPF_K:
+               if (A & K) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       BPF_JMP_BPF_EXIT_0:
+               return R0;
+
+       /* STX and ST and LDX */
+#define LDST(SIZEOP, SIZE)                                     \
+       BPF_STX_BPF_MEM_##SIZEOP:                               \
+               *(SIZE *)(unsigned long) (A + insn->off) = X;   \
+               CONT;                                           \
+       BPF_ST_BPF_MEM_##SIZEOP:                                \
+               *(SIZE *)(unsigned long) (A + insn->off) = K;   \
+               CONT;                                           \
+       BPF_LDX_BPF_MEM_##SIZEOP:                               \
+               A = *(SIZE *)(unsigned long) (X + insn->off);   \
+               CONT;
+
+       LDST(BPF_B,   u8)
+       LDST(BPF_H,  u16)
+       LDST(BPF_W,  u32)
+       LDST(BPF_DW, u64)
+#undef LDST
+       BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */
+               atomic_add((u32) X, (atomic_t *)(unsigned long)
+                          (A + insn->off));
+               CONT;
+       BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */
+               atomic64_add((u64) X, (atomic64_t *)(unsigned long)
+                            (A + insn->off));
+               CONT;
+       BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
+               off = K;
+load_word:
+               /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only appear
+                * in programs where ctx == skb. All programs keep 'ctx' in
+                * regs[CTX_REG] == R6; sk_convert_filter() saves it in R6,
+                * and the internal BPF verifier will check that R6 == ctx.
+                *
+                * BPF_ABS and BPF_IND are wrappers of function calls, so
+                * they scratch R1-R5 registers, preserve R6-R9, and store
+                * return value into R0.
+                *
+                * Implicit input:
+                *   ctx
+                *
+                * Explicit input:
+                *   X == any register
+                *   K == 32-bit immediate
+                *
+                * Output:
+                *   R0 - 8/16/32-bit skb data converted to cpu endianness
+                */
+               ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
+               if (likely(ptr != NULL)) {
+                       R0 = get_unaligned_be32(ptr);
+                       CONT;
+               }
+               return 0;
+       BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
+               off = K;
+load_half:
+               ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
+               if (likely(ptr != NULL)) {
+                       R0 = get_unaligned_be16(ptr);
+                       CONT;
+               }
+               return 0;
+       BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (skb->data + K) */
+               off = K;
+load_byte:
+               ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
+               if (likely(ptr != NULL)) {
+                       R0 = *(u8 *)ptr;
+                       CONT;
+               }
+               return 0;
+       BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
+               off = K + X;
+               goto load_word;
+       BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
+               off = K + X;
+               goto load_half;
+       BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */
+               off = K + X;
+               goto load_byte;
+
+       default_label:
+               /* If we ever reach this, we have a bug somewhere. */
+               WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+               return 0;
+#undef CONT_JMP
+#undef CONT
+
+#undef R0
+#undef X
+#undef A
+#undef K
+}
+
+u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
+                             const struct sock_filter_int *insni)
+    __attribute__ ((alias ("__sk_run_filter")));
+
+u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
+                         const struct sock_filter_int *insni)
+    __attribute__ ((alias ("__sk_run_filter")));
+EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
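
For readers new to the internal instruction format, here is a minimal sketch (illustration only, not part of the patch) of a two-instruction internal BPF program in the sock_filter_int encoding, using only field names and opcode macros that appear in the hunk above; passing a NULL context to __sk_run_filter() is assumed to be harmless here because the program never dereferences ctx.

/* Sketch only: "R0 = 42; return R0" in the internal representation.
 * a_reg = 0 addresses R0, the return-value register of __sk_run_filter().
 */
static struct sock_filter_int demo_prog[] = {
	{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .a_reg = 0, .imm = 42 },
	{ .code = BPF_JMP | BPF_EXIT },
};

static unsigned int demo_run(void)
{
	/* ctx may be NULL since demo_prog never touches it. */
	return __sk_run_filter(NULL, demo_prog);	/* returns 42 */
}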
+
+/* Helper to find the offset of pkt_type in the sk_buff structure. We want
+ * to make sure it's still a 3-bit field starting at a byte boundary;
+ * taken from arch/x86/net/bpf_jit_comp.c.
+ */
+#define PKT_TYPE_MAX   7
+static unsigned int pkt_type_offset(void)
+{
+       struct sk_buff skb_probe = { .pkt_type = ~0, };
+       u8 *ct = (u8 *) &skb_probe;
+       unsigned int off;
+
+       for (off = 0; off < sizeof(struct sk_buff); off++) {
+               if (ct[off] == PKT_TYPE_MAX)
+                       return off;
+       }
+
+       pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
+       return -1;
+}
+
+static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+
+       return __skb_get_poff(skb);
+}
+
+static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct nlattr *nla;
+
+       if (skb_is_nonlinear(skb))
+               return 0;
+
+       if (A > skb->len - sizeof(struct nlattr))
+               return 0;
+
+       nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X);
+       if (nla)
+               return (void *) nla - (void *) skb->data;
+
+       return 0;
+}
+
+static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *)(long) ctx;
+       struct nlattr *nla;
+
+       if (skb_is_nonlinear(skb))
+               return 0;
+
+       if (A > skb->len - sizeof(struct nlattr))
+               return 0;
+
+       nla = (struct nlattr *) &skb->data[A];
+       if (nla->nla_len > A - skb->len)
+               return 0;
+
+       nla = nla_find_nested(nla, X);
+       if (nla)
+               return (void *) nla - (void *) skb->data;
+
+       return 0;
+}
+
+static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5)
+{
+       return raw_smp_processor_id();
+}
+
+/* Register mappings for user programs. */
+#define A_REG          0
+#define X_REG          7
+#define TMP_REG                8
+#define ARG2_REG       2
+#define ARG3_REG       3
+
+static bool convert_bpf_extensions(struct sock_filter *fp,
+                                  struct sock_filter_int **insnp)
+{
+       struct sock_filter_int *insn = *insnp;
+
+       switch (fp->k) {
+       case SKF_AD_OFF + SKF_AD_PROTOCOL:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
+
+               insn->code = BPF_LDX | BPF_MEM | BPF_H;
+               insn->a_reg = A_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = offsetof(struct sk_buff, protocol);
+               insn++;
+
+               /* A = ntohs(A) [emitting a nop or swap16] */
+               insn->code = BPF_ALU | BPF_END | BPF_FROM_BE;
+               insn->a_reg = A_REG;
+               insn->imm = 16;
+               break;
+
+       case SKF_AD_OFF + SKF_AD_PKTTYPE:
+               insn->code = BPF_LDX | BPF_MEM | BPF_B;
+               insn->a_reg = A_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = pkt_type_offset();
+               if (insn->off < 0)
+                       return false;
+               insn++;
+
+               insn->code = BPF_ALU | BPF_AND | BPF_K;
+               insn->a_reg = A_REG;
+               insn->imm = PKT_TYPE_MAX;
+               break;
+
+       case SKF_AD_OFF + SKF_AD_IFINDEX:
+       case SKF_AD_OFF + SKF_AD_HATYPE:
+               if (FIELD_SIZEOF(struct sk_buff, dev) == 8)
+                       insn->code = BPF_LDX | BPF_MEM | BPF_DW;
+               else
+                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
+               insn->a_reg = TMP_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = offsetof(struct sk_buff, dev);
+               insn++;
+
+               insn->code = BPF_JMP | BPF_JNE | BPF_K;
+               insn->a_reg = TMP_REG;
+               insn->imm = 0;
+               insn->off = 1;
+               insn++;
+
+               insn->code = BPF_JMP | BPF_EXIT;
+               insn++;
+
+               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
+
+               insn->a_reg = A_REG;
+               insn->x_reg = TMP_REG;
+
+               if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) {
+                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
+                       insn->off = offsetof(struct net_device, ifindex);
+               } else {
+                       insn->code = BPF_LDX | BPF_MEM | BPF_H;
+                       insn->off = offsetof(struct net_device, type);
+               }
+               break;
+
+       case SKF_AD_OFF + SKF_AD_MARK:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+
+               insn->code = BPF_LDX | BPF_MEM | BPF_W;
+               insn->a_reg = A_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = offsetof(struct sk_buff, mark);
+               break;
+
+       case SKF_AD_OFF + SKF_AD_RXHASH:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+               insn->code = BPF_LDX | BPF_MEM | BPF_W;
+               insn->a_reg = A_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = offsetof(struct sk_buff, hash);
+               break;
+
+       case SKF_AD_OFF + SKF_AD_QUEUE:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
+
+               insn->code = BPF_LDX | BPF_MEM | BPF_H;
+               insn->a_reg = A_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = offsetof(struct sk_buff, queue_mapping);
+               break;
+
+       case SKF_AD_OFF + SKF_AD_VLAN_TAG:
+       case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+
+               insn->code = BPF_LDX | BPF_MEM | BPF_H;
+               insn->a_reg = A_REG;
+               insn->x_reg = CTX_REG;
+               insn->off = offsetof(struct sk_buff, vlan_tci);
+               insn++;
+
+               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
+               if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) {
+                       insn->code = BPF_ALU | BPF_AND | BPF_K;
+                       insn->a_reg = A_REG;
+                       insn->imm = ~VLAN_TAG_PRESENT;
+               } else {
+                       insn->code = BPF_ALU | BPF_RSH | BPF_K;
+                       insn->a_reg = A_REG;
+                       insn->imm = 12;
+                       insn++;
+
+                       insn->code = BPF_ALU | BPF_AND | BPF_K;
+                       insn->a_reg = A_REG;
+                       insn->imm = 1;
+               }
+               break;
+
+       case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
+       case SKF_AD_OFF + SKF_AD_NLATTR:
+       case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
+       case SKF_AD_OFF + SKF_AD_CPU:
+               /* arg1 = ctx */
+               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+               insn->a_reg = ARG1_REG;
+               insn->x_reg = CTX_REG;
+               insn++;
+
+               /* arg2 = A */
+               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+               insn->a_reg = ARG2_REG;
+               insn->x_reg = A_REG;
+               insn++;
+
+               /* arg3 = X */
+               insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+               insn->a_reg = ARG3_REG;
+               insn->x_reg = X_REG;
+               insn++;
+
+               /* Emit call(ctx, arg2=A, arg3=X) */
+               insn->code = BPF_JMP | BPF_CALL;
+               switch (fp->k) {
+               case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
+                       insn->imm = __skb_get_pay_offset - __bpf_call_base;
+                       break;
+               case SKF_AD_OFF + SKF_AD_NLATTR:
+                       insn->imm = __skb_get_nlattr - __bpf_call_base;
+                       break;
+               case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
+                       insn->imm = __skb_get_nlattr_nest - __bpf_call_base;
+                       break;
+               case SKF_AD_OFF + SKF_AD_CPU:
+                       insn->imm = __get_raw_cpu_id - __bpf_call_base;
+                       break;
+               }
+               break;
+
+       case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
+               insn->code = BPF_ALU | BPF_XOR | BPF_X;
+               insn->a_reg = A_REG;
+               insn->x_reg = X_REG;
+               break;
+
+       default:
+               /* This is just a dummy call to avoid letting the compiler
+                * evict __bpf_call_base() as an optimization. Placed here
+                * where no-one bothers.
+                */
+               BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
+               return false;
+       }
+
+       *insnp = insn;
+       return true;
+}
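
To make the extension mapping concrete, the sketch below (illustration only, not part of the patch) spells out what convert_bpf_extensions() emits for a classic ancillary load of the protocol field, i.e. BPF_LD | BPF_ABS with k == SKF_AD_OFF + SKF_AD_PROTOCOL; the register names A_REG and CTX_REG are the defines from this file.

/* Sketch: the two internal insns produced for SKF_AD_PROTOCOL. */
struct sock_filter_int proto_ext[2] = {
	/* A = *(u16 *) (ctx + offsetof(struct sk_buff, protocol)) */
	{ .code  = BPF_LDX | BPF_MEM | BPF_H,
	  .a_reg = A_REG, .x_reg = CTX_REG,
	  .off   = offsetof(struct sk_buff, protocol) },
	/* A = ntohs(A), emitted as a nop or a 16-bit swap depending
	 * on host endianness
	 */
	{ .code  = BPF_ALU | BPF_END | BPF_FROM_BE,
	  .a_reg = A_REG, .imm = 16 },
};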
+
+/**
+ *     sk_convert_filter - convert filter program
+ *     @prog: the user passed filter program
+ *     @len: the length of the user passed filter program
+ *     @new_prog: buffer where converted program will be stored
+ *     @new_len: pointer to store length of converted program
+ *
+ * Remap 'sock_filter' style BPF instruction set to 'sock_filter_int' style.
+ * Conversion workflow:
+ *
+ * 1) First pass for calculating the new program length:
+ *   sk_convert_filter(old_prog, old_len, NULL, &new_len)
+ *
+ * 2) 2nd call to remap the program, which internally runs two passes:
+ *    the 1st pass finds the new jump offsets, the 2nd pass does the
+ *    actual remapping:
+ *   new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
+ *   sk_convert_filter(old_prog, old_len, new_prog, &new_len);
+ *
+ * User BPF's register A is mapped to our BPF register 0 (which also holds
+ * the return value), user BPF register X is mapped to BPF register 7; the
+ * frame pointer is always register 10. The context 'void *ctx' is passed in
+ * register 1 and kept in register 6, that is, for socket filters:
+ * ctx == 'struct sk_buff *', for seccomp: ctx == 'struct seccomp_data *'.
+ */
+int sk_convert_filter(struct sock_filter *prog, int len,
+                     struct sock_filter_int *new_prog, int *new_len)
+{
+       int new_flen = 0, pass = 0, target, i;
+       struct sock_filter_int *new_insn;
+       struct sock_filter *fp;
+       int *addrs = NULL;
+       u8 bpf_src;
+
+       BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
+       BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG);
+
+       if (len <= 0 || len >= BPF_MAXINSNS)
+               return -EINVAL;
+
+       if (new_prog) {
+               addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+               if (!addrs)
+                       return -ENOMEM;
+       }
+
+do_pass:
+       new_insn = new_prog;
+       fp = prog;
+
+       if (new_insn) {
+               new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+               new_insn->a_reg = CTX_REG;
+               new_insn->x_reg = ARG1_REG;
+       }
+       new_insn++;
+
+       for (i = 0; i < len; fp++, i++) {
+               struct sock_filter_int tmp_insns[6] = { };
+               struct sock_filter_int *insn = tmp_insns;
+
+               if (addrs)
+                       addrs[i] = new_insn - new_prog;
+
+               switch (fp->code) {
+               /* All arithmetic insns and skb loads map as-is. */
+               case BPF_ALU | BPF_ADD | BPF_X:
+               case BPF_ALU | BPF_ADD | BPF_K:
+               case BPF_ALU | BPF_SUB | BPF_X:
+               case BPF_ALU | BPF_SUB | BPF_K:
+               case BPF_ALU | BPF_AND | BPF_X:
+               case BPF_ALU | BPF_AND | BPF_K:
+               case BPF_ALU | BPF_OR | BPF_X:
+               case BPF_ALU | BPF_OR | BPF_K:
+               case BPF_ALU | BPF_LSH | BPF_X:
+               case BPF_ALU | BPF_LSH | BPF_K:
+               case BPF_ALU | BPF_RSH | BPF_X:
+               case BPF_ALU | BPF_RSH | BPF_K:
+               case BPF_ALU | BPF_XOR | BPF_X:
+               case BPF_ALU | BPF_XOR | BPF_K:
+               case BPF_ALU | BPF_MUL | BPF_X:
+               case BPF_ALU | BPF_MUL | BPF_K:
+               case BPF_ALU | BPF_DIV | BPF_X:
+               case BPF_ALU | BPF_DIV | BPF_K:
+               case BPF_ALU | BPF_MOD | BPF_X:
+               case BPF_ALU | BPF_MOD | BPF_K:
+               case BPF_ALU | BPF_NEG:
+               case BPF_LD | BPF_ABS | BPF_W:
+               case BPF_LD | BPF_ABS | BPF_H:
+               case BPF_LD | BPF_ABS | BPF_B:
+               case BPF_LD | BPF_IND | BPF_W:
+               case BPF_LD | BPF_IND | BPF_H:
+               case BPF_LD | BPF_IND | BPF_B:
+                       /* Check for overloaded BPF extension and
+                        * directly convert it if found, otherwise
+                        * just move on with mapping.
+                        */
+                       if (BPF_CLASS(fp->code) == BPF_LD &&
+                           BPF_MODE(fp->code) == BPF_ABS &&
+                           convert_bpf_extensions(fp, &insn))
+                               break;
+
+                       insn->code = fp->code;
+                       insn->a_reg = A_REG;
+                       insn->x_reg = X_REG;
+                       insn->imm = fp->k;
+                       break;
+
+               /* Jump opcodes map as-is, but offsets need adjustment. */
+               case BPF_JMP | BPF_JA:
+                       target = i + fp->k + 1;
+                       insn->code = fp->code;
+#define EMIT_JMP                                                       \
+       do {                                                            \
+               if (target >= len || target < 0)                        \
+                       goto err;                                       \
+               insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;   \
+               /* Adjust pc relative offset for 2nd or 3rd insn. */    \
+               insn->off -= insn - tmp_insns;                          \
+       } while (0)
+
+                       EMIT_JMP;
+                       break;
+
+               case BPF_JMP | BPF_JEQ | BPF_K:
+               case BPF_JMP | BPF_JEQ | BPF_X:
+               case BPF_JMP | BPF_JSET | BPF_K:
+               case BPF_JMP | BPF_JSET | BPF_X:
+               case BPF_JMP | BPF_JGT | BPF_K:
+               case BPF_JMP | BPF_JGT | BPF_X:
+               case BPF_JMP | BPF_JGE | BPF_K:
+               case BPF_JMP | BPF_JGE | BPF_X:
+                       if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
+                               /* BPF immediates are signed, zero extend
+                                * immediate into tmp register and use it
+                                * in compare insn.
+                                */
+                               insn->code = BPF_ALU | BPF_MOV | BPF_K;
+                               insn->a_reg = TMP_REG;
+                               insn->imm = fp->k;
+                               insn++;
+
+                               insn->a_reg = A_REG;
+                               insn->x_reg = TMP_REG;
+                               bpf_src = BPF_X;
+                       } else {
+                               insn->a_reg = A_REG;
+                               insn->x_reg = X_REG;
+                               insn->imm = fp->k;
+                               bpf_src = BPF_SRC(fp->code);
                        }
-                       return 0;
-               case BPF_S_LD_B_ABS:
-                       k = K;
-load_b:
-                       ptr = load_pointer(skb, k, 1, &tmp);
-                       if (ptr != NULL) {
-                               A = *(u8 *)ptr;
-                               continue;
+
+                       /* Common case where 'jump_false' is next insn. */
+                       if (fp->jf == 0) {
+                               insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
+                               target = i + fp->jt + 1;
+                               EMIT_JMP;
+                               break;
                        }
-                       return 0;
-               case BPF_S_LD_W_LEN:
-                       A = skb->len;
-                       continue;
-               case BPF_S_LDX_W_LEN:
-                       X = skb->len;
-                       continue;
-               case BPF_S_LD_W_IND:
-                       k = X + K;
-                       goto load_w;
-               case BPF_S_LD_H_IND:
-                       k = X + K;
-                       goto load_h;
-               case BPF_S_LD_B_IND:
-                       k = X + K;
-                       goto load_b;
-               case BPF_S_LDX_B_MSH:
-                       ptr = load_pointer(skb, K, 1, &tmp);
-                       if (ptr != NULL) {
-                               X = (*(u8 *)ptr & 0xf) << 2;
-                               continue;
+
+                       /* Convert JEQ into JNE when 'jump_true' is next insn. */
+                       if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
+                               insn->code = BPF_JMP | BPF_JNE | bpf_src;
+                               target = i + fp->jf + 1;
+                               EMIT_JMP;
+                               break;
                        }
-                       return 0;
-               case BPF_S_LD_IMM:
-                       A = K;
-                       continue;
-               case BPF_S_LDX_IMM:
-                       X = K;
-                       continue;
-               case BPF_S_LD_MEM:
-                       A = mem[K];
-                       continue;
-               case BPF_S_LDX_MEM:
-                       X = mem[K];
-                       continue;
-               case BPF_S_MISC_TAX:
-                       X = A;
-                       continue;
-               case BPF_S_MISC_TXA:
-                       A = X;
-                       continue;
-               case BPF_S_RET_K:
-                       return K;
-               case BPF_S_RET_A:
-                       return A;
-               case BPF_S_ST:
-                       mem[K] = A;
-                       continue;
-               case BPF_S_STX:
-                       mem[K] = X;
-                       continue;
-               case BPF_S_ANC_PROTOCOL:
-                       A = ntohs(skb->protocol);
-                       continue;
-               case BPF_S_ANC_PKTTYPE:
-                       A = skb->pkt_type;
-                       continue;
-               case BPF_S_ANC_IFINDEX:
-                       if (!skb->dev)
-                               return 0;
-                       A = skb->dev->ifindex;
-                       continue;
-               case BPF_S_ANC_MARK:
-                       A = skb->mark;
-                       continue;
-               case BPF_S_ANC_QUEUE:
-                       A = skb->queue_mapping;
-                       continue;
-               case BPF_S_ANC_HATYPE:
-                       if (!skb->dev)
-                               return 0;
-                       A = skb->dev->type;
-                       continue;
-               case BPF_S_ANC_RXHASH:
-                       A = skb->rxhash;
-                       continue;
-               case BPF_S_ANC_CPU:
-                       A = raw_smp_processor_id();
-                       continue;
-               case BPF_S_ANC_VLAN_TAG:
-                       A = vlan_tx_tag_get(skb);
-                       continue;
-               case BPF_S_ANC_VLAN_TAG_PRESENT:
-                       A = !!vlan_tx_tag_present(skb);
-                       continue;
-               case BPF_S_ANC_PAY_OFFSET:
-                       A = __skb_get_poff(skb);
-                       continue;
-               case BPF_S_ANC_NLATTR: {
-                       struct nlattr *nla;
-
-                       if (skb_is_nonlinear(skb))
-                               return 0;
-                       if (A > skb->len - sizeof(struct nlattr))
-                               return 0;
-
-                       nla = nla_find((struct nlattr *)&skb->data[A],
-                                      skb->len - A, X);
-                       if (nla)
-                               A = (void *)nla - (void *)skb->data;
-                       else
-                               A = 0;
-                       continue;
-               }
-               case BPF_S_ANC_NLATTR_NEST: {
-                       struct nlattr *nla;
-
-                       if (skb_is_nonlinear(skb))
-                               return 0;
-                       if (A > skb->len - sizeof(struct nlattr))
-                               return 0;
-
-                       nla = (struct nlattr *)&skb->data[A];
-                       if (nla->nla_len > A - skb->len)
-                               return 0;
-
-                       nla = nla_find_nested(nla, X);
-                       if (nla)
-                               A = (void *)nla - (void *)skb->data;
-                       else
-                               A = 0;
-                       continue;
-               }
-#ifdef CONFIG_SECCOMP_FILTER
-               case BPF_S_ANC_SECCOMP_LD_W:
-                       A = seccomp_bpf_load(fentry->k);
-                       continue;
-#endif
+
+                       /* Other jumps are mapped into two insns: Jxx and JA. */
+                       target = i + fp->jt + 1;
+                       insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
+                       EMIT_JMP;
+                       insn++;
+
+                       insn->code = BPF_JMP | BPF_JA;
+                       target = i + fp->jf + 1;
+                       EMIT_JMP;
+                       break;
+
+               /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
+               case BPF_LDX | BPF_MSH | BPF_B:
+                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+                       insn->a_reg = TMP_REG;
+                       insn->x_reg = A_REG;
+                       insn++;
+
+                       insn->code = BPF_LD | BPF_ABS | BPF_B;
+                       insn->a_reg = A_REG;
+                       insn->imm = fp->k;
+                       insn++;
+
+                       insn->code = BPF_ALU | BPF_AND | BPF_K;
+                       insn->a_reg = A_REG;
+                       insn->imm = 0xf;
+                       insn++;
+
+                       insn->code = BPF_ALU | BPF_LSH | BPF_K;
+                       insn->a_reg = A_REG;
+                       insn->imm = 2;
+                       insn++;
+
+                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+                       insn->a_reg = X_REG;
+                       insn->x_reg = A_REG;
+                       insn++;
+
+                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+                       insn->a_reg = A_REG;
+                       insn->x_reg = TMP_REG;
+                       break;
+
+               /* RET_K, RET_A are remapped into 2 insns. */
+               case BPF_RET | BPF_A:
+               case BPF_RET | BPF_K:
+                       insn->code = BPF_ALU | BPF_MOV |
+                                    (BPF_RVAL(fp->code) == BPF_K ?
+                                     BPF_K : BPF_X);
+                       insn->a_reg = 0;
+                       insn->x_reg = A_REG;
+                       insn->imm = fp->k;
+                       insn++;
+
+                       insn->code = BPF_JMP | BPF_EXIT;
+                       break;
+
+               /* Store to stack. */
+               case BPF_ST:
+               case BPF_STX:
+                       insn->code = BPF_STX | BPF_MEM | BPF_W;
+                       insn->a_reg = FP_REG;
+                       insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG;
+                       insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+                       break;
+
+               /* Load from stack. */
+               case BPF_LD | BPF_MEM:
+               case BPF_LDX | BPF_MEM:
+                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
+                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
+                                     A_REG : X_REG;
+                       insn->x_reg = FP_REG;
+                       insn->off = -(BPF_MEMWORDS - fp->k) * 4;
+                       break;
+
+               /* A = K or X = K */
+               case BPF_LD | BPF_IMM:
+               case BPF_LDX | BPF_IMM:
+                       insn->code = BPF_ALU | BPF_MOV | BPF_K;
+                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
+                                     A_REG : X_REG;
+                       insn->imm = fp->k;
+                       break;
+
+               /* X = A */
+               case BPF_MISC | BPF_TAX:
+                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+                       insn->a_reg = X_REG;
+                       insn->x_reg = A_REG;
+                       break;
+
+               /* A = X */
+               case BPF_MISC | BPF_TXA:
+                       insn->code = BPF_ALU64 | BPF_MOV | BPF_X;
+                       insn->a_reg = A_REG;
+                       insn->x_reg = X_REG;
+                       break;
+
+               /* A = skb->len or X = skb->len */
+               case BPF_LD | BPF_W | BPF_LEN:
+               case BPF_LDX | BPF_W | BPF_LEN:
+                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
+                       insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ?
+                                     A_REG : X_REG;
+                       insn->x_reg = CTX_REG;
+                       insn->off = offsetof(struct sk_buff, len);
+                       break;
+
+               /* access seccomp_data fields */
+               case BPF_LDX | BPF_ABS | BPF_W:
+                       insn->code = BPF_LDX | BPF_MEM | BPF_W;
+                       insn->a_reg = A_REG;
+                       insn->x_reg = CTX_REG;
+                       insn->off = fp->k;
+                       break;
+
                default:
-                       WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
-                                      fentry->code, fentry->jt,
-                                      fentry->jf, fentry->k);
-                       return 0;
+                       goto err;
                }
+
+               insn++;
+               if (new_prog)
+                       memcpy(new_insn, tmp_insns,
+                              sizeof(*insn) * (insn - tmp_insns));
+
+               new_insn += insn - tmp_insns;
+       }
+
+       if (!new_prog) {
+               /* Only calculating new length. */
+               *new_len = new_insn - new_prog;
+               return 0;
        }
 
+       pass++;
+       if (new_flen != new_insn - new_prog) {
+               new_flen = new_insn - new_prog;
+               if (pass > 2)
+                       goto err;
+
+               goto do_pass;
+       }
+
+       kfree(addrs);
+       BUG_ON(*new_len != new_flen);
        return 0;
+err:
+       kfree(addrs);
+       return -EINVAL;
 }
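
As an illustration of the remapping (a sketch, not part of the patch), the smallest possible classic program, a single "ret #0" (BPF_RET | BPF_K, k = 0), is converted by sk_convert_filter() into the three internal instructions below; the leading move is the prologue emitted at the top of do_pass, and the register names follow the defines in this file.

/* Sketch: result of converting the classic one-insn program "ret #0". */
struct sock_filter_int converted_ret0[3] = {
	/* prologue: move ctx from ARG1 (R1) into CTX (R6) */
	{ .code = BPF_ALU64 | BPF_MOV | BPF_X,
	  .a_reg = CTX_REG, .x_reg = ARG1_REG },
	/* RET_K becomes "R0 = K" ... */
	{ .code = BPF_ALU | BPF_MOV | BPF_K, .a_reg = 0, .imm = 0 },
	/* ... followed by EXIT, which returns R0 */
	{ .code = BPF_JMP | BPF_EXIT },
};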
-EXPORT_SYMBOL(sk_run_filter);
 
-/*
- * Security :
+/* Security:
+ *
  * A BPF program is able to use 16 cells of memory to store intermediate
- * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter())
+ * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
+ *
  * As we don't want to clear the mem[] array for each packet going through
  * sk_run_filter(), we check that a filter loaded by the user never tries to
  * read a cell if not previously written, and we check all branches to be sure
@@ -629,30 +1375,197 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 }
 EXPORT_SYMBOL(sk_chk_filter);
 
+static int sk_store_orig_filter(struct sk_filter *fp,
+                               const struct sock_fprog *fprog)
+{
+       unsigned int fsize = sk_filter_proglen(fprog);
+       struct sock_fprog_kern *fkprog;
+
+       fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
+       if (!fp->orig_prog)
+               return -ENOMEM;
+
+       fkprog = fp->orig_prog;
+       fkprog->len = fprog->len;
+       fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
+       if (!fkprog->filter) {
+               kfree(fp->orig_prog);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void sk_release_orig_filter(struct sk_filter *fp)
+{
+       struct sock_fprog_kern *fprog = fp->orig_prog;
+
+       if (fprog) {
+               kfree(fprog->filter);
+               kfree(fprog);
+       }
+}
+
 /**
  *     sk_filter_release_rcu - Release a socket filter by rcu_head
  *     @rcu: rcu_head that contains the sk_filter to free
  */
-void sk_filter_release_rcu(struct rcu_head *rcu)
+static void sk_filter_release_rcu(struct rcu_head *rcu)
 {
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
+       sk_release_orig_filter(fp);
        bpf_jit_free(fp);
 }
-EXPORT_SYMBOL(sk_filter_release_rcu);
 
-static int __sk_prepare_filter(struct sk_filter *fp)
+/**
+ *     sk_filter_release - release a socket filter
+ *     @fp: filter to remove
+ *
+ *     Remove a filter from a socket and release its resources.
+ */
+static void sk_filter_release(struct sk_filter *fp)
+{
+       if (atomic_dec_and_test(&fp->refcnt))
+               call_rcu(&fp->rcu, sk_filter_release_rcu);
+}
+
+void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
+{
+       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
+       sk_filter_release(fp);
+}
+
+void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+{
+       atomic_inc(&fp->refcnt);
+       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
+}
+
+static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
+                                             struct sock *sk,
+                                             unsigned int len)
+{
+       struct sk_filter *fp_new;
+
+       if (sk == NULL)
+               return krealloc(fp, len, GFP_KERNEL);
+
+       fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
+       if (fp_new) {
+               memcpy(fp_new, fp, sizeof(struct sk_filter));
+               /* As we're keeping orig_prog along in fp_new,
+                * we need to make sure we're not evicting it
+                * from the old fp.
+                */
+               fp->orig_prog = NULL;
+               sk_filter_uncharge(sk, fp);
+       }
+
+       return fp_new;
+}
+
+static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
+                                            struct sock *sk)
+{
+       struct sock_filter *old_prog;
+       struct sk_filter *old_fp;
+       int i, err, new_len, old_len = fp->len;
+
+       /* We are free to overwrite insns et al right here as they
+        * won't be used anymore internally after the migration to
+        * the internal BPF instruction representation.
+        */
+       BUILD_BUG_ON(sizeof(struct sock_filter) !=
+                    sizeof(struct sock_filter_int));
+
+       /* For now, we need to unfiddle BPF_S_* identifiers in place.
+        * This can be removed sooner or later, e.g. when the
+        * JITs have been converted.
+        */
+       for (i = 0; i < fp->len; i++)
+               sk_decode_filter(&fp->insns[i], &fp->insns[i]);
+
+       /* Conversion cannot happen on overlapping memory areas,
+        * so we need to keep the user BPF around until the 2nd
+        * pass. At this time, the user BPF is stored in fp->insns.
+        */
+       old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
+                          GFP_KERNEL);
+       if (!old_prog) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+
+       /* 1st pass: calculate the new program length. */
+       err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
+       if (err)
+               goto out_err_free;
+
+       /* Expand fp for appending the new filter representation. */
+       old_fp = fp;
+       fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
+       if (!fp) {
+               /* The old_fp is still around in case we couldn't
+                * allocate new memory, so uncharge on that one.
+                */
+               fp = old_fp;
+               err = -ENOMEM;
+               goto out_err_free;
+       }
+
+       fp->bpf_func = sk_run_filter_int_skb;
+       fp->len = new_len;
+
+       /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
+       err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
+       if (err)
+               /* The 2nd sk_convert_filter() can fail only if it fails
+                * to allocate memory; remapping itself must succeed.
+                * Note that at this time old_fp has already been released
+                * by __sk_migrate_realloc().
+                */
+               goto out_err_free;
+
+       kfree(old_prog);
+       return fp;
+
+out_err_free:
+       kfree(old_prog);
+out_err:
+       /* Rollback filter setup. */
+       if (sk != NULL)
+               sk_filter_uncharge(sk, fp);
+       else
+               kfree(fp);
+       return ERR_PTR(err);
+}
+
+static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
+                                            struct sock *sk)
 {
        int err;
 
-       fp->bpf_func = sk_run_filter;
+       fp->bpf_func = NULL;
+       fp->jited = 0;
 
        err = sk_chk_filter(fp->insns, fp->len);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
+       /* Probe if we can JIT compile the filter and if so, do
+        * the compilation of the filter.
+        */
        bpf_jit_compile(fp);
-       return 0;
+
+       /* JIT compiler couldn't process this filter, so do the
+        * internal BPF translation for the optimized interpreter.
+        */
+       if (!fp->jited)
+               fp = __sk_migrate_filter(fp, sk);
+
+       return fp;
 }
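
For orientation, this is how a filter that went through __sk_prepare_filter() is typically invoked later on; a sketch only, assuming fp->insnsi is the internal-program view of the filter (the buffer passed to the 2nd sk_convert_filter() call above) and that fp->bpf_func points either at a JIT image or at sk_run_filter_int_skb().

/* Sketch: running a prepared filter on a packet; 0 means drop,
 * any other value is the number of bytes to keep.
 */
static inline unsigned int demo_run_prepared(const struct sk_filter *fp,
					     const struct sk_buff *skb)
{
	return fp->bpf_func(skb, fp->insnsi);
}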
 
 /**
@@ -668,9 +1581,8 @@ static int __sk_prepare_filter(struct sk_filter *fp)
 int sk_unattached_filter_create(struct sk_filter **pfp,
                                struct sock_fprog *fprog)
 {
+       unsigned int fsize = sk_filter_proglen(fprog);
        struct sk_filter *fp;
-       unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
-       int err;
 
        /* Make sure new filter is there and in the right amounts. */
        if (fprog->filter == NULL)
@@ -679,20 +1591,26 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
        fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
+
        memcpy(fp->insns, fprog->filter, fsize);
 
        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;
+       /* Since unattached filters are not copied back to user
+        * space through sk_get_filter(), we do not need to hold
+        * a copy here, and can spare us the work.
+        */
+       fp->orig_prog = NULL;
 
-       err = __sk_prepare_filter(fp);
-       if (err)
-               goto free_mem;
+       /* __sk_prepare_filter() already takes care of uncharging
+        * memory in case something goes wrong.
+        */
+       fp = __sk_prepare_filter(fp, NULL);
+       if (IS_ERR(fp))
+               return PTR_ERR(fp);
 
        *pfp = fp;
        return 0;
-free_mem:
-       kfree(fp);
-       return err;
 }
 EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
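
A usage sketch for the unattached-filter API exported above (not part of the patch; the __user annotation on sock_fprog::filter is glossed over here, since in-kernel callers of sk_unattached_filter_create() pass kernel memory that is memcpy()'d rather than copied from user space):

/* Sketch: create and destroy an unattached filter that accepts everything. */
static struct sock_filter demo_accept_all[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* ret #-1: keep whole packet */
};

static int demo_unattached(void)
{
	struct sock_fprog fprog = {
		.len	= ARRAY_SIZE(demo_accept_all),
		.filter	= demo_accept_all,
	};
	struct sk_filter *fp;
	int err;

	err = sk_unattached_filter_create(&fp, &fprog);
	if (err)
		return err;

	/* ... run fp on skbs via fp->bpf_func ... */

	sk_unattached_filter_destroy(fp);
	return 0;
}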
 
@@ -715,7 +1633,7 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        struct sk_filter *fp, *old_fp;
-       unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+       unsigned int fsize = sk_filter_proglen(fprog);
        unsigned int sk_fsize = sk_filter_size(fprog->len);
        int err;
 
@@ -729,6 +1647,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
+
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
                sock_kfree_s(sk, fp, sk_fsize);
                return -EFAULT;
@@ -737,18 +1656,26 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;
 
-       err = __sk_prepare_filter(fp);
+       err = sk_store_orig_filter(fp, fprog);
        if (err) {
                sk_filter_uncharge(sk, fp);
-               return err;
+               return -ENOMEM;
        }
 
+       /* __sk_prepare_filter() already takes care of uncharging
+        * memory in case something goes wrong.
+        */
+       fp = __sk_prepare_filter(fp, sk);
+       if (IS_ERR(fp))
+               return PTR_ERR(fp);
+
        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
        rcu_assign_pointer(sk->sk_filter, fp);
 
        if (old_fp)
                sk_filter_uncharge(sk, old_fp);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
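
From user space, sk_attach_filter() is reached through setsockopt(SO_ATTACH_FILTER); the stand-alone sketch below (not part of the patch, modelled on the classic example in Documentation/networking/filter.txt) attaches an accept-all filter to a packet socket. Creating the PF_PACKET socket requires CAP_NET_RAW.

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* ret #-1: accept */
	};
	struct sock_fprog bpf = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};
	int sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (sock < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf))) {
		perror("SO_ATTACH_FILTER");
		return 1;
	}
	/* ... recv() now only sees packets the filter accepts ... */
	return 0;
}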
@@ -768,6 +1695,7 @@ int sk_detach_filter(struct sock *sk)
                sk_filter_uncharge(sk, filter);
                ret = 0;
        }
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(sk_detach_filter);
@@ -850,34 +1778,41 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
        to->k = filt->k;
 }
 
-int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
+                 unsigned int len)
 {
+       struct sock_fprog_kern *fprog;
        struct sk_filter *filter;
-       int i, ret;
+       int ret = 0;
 
        lock_sock(sk);
        filter = rcu_dereference_protected(sk->sk_filter,
-                       sock_owned_by_user(sk));
-       ret = 0;
+                                          sock_owned_by_user(sk));
        if (!filter)
                goto out;
-       ret = filter->len;
+
+       /* We're copying the filter that was originally attached,
+        * so no conversion/decode needed anymore.
+        */
+       fprog = filter->orig_prog;
+
+       ret = fprog->len;
        if (!len)
+               /* User space only enquires about the number of filter blocks. */
                goto out;
+
        ret = -EINVAL;
-       if (len < filter->len)
+       if (len < fprog->len)
                goto out;
 
        ret = -EFAULT;
-       for (i = 0; i < filter->len; i++) {
-               struct sock_filter fb;
-
-               sk_decode_filter(&filter->insns[i], &fb);
-               if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
-                       goto out;
-       }
+       if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
+               goto out;
 
-       ret = filter->len;
+       /* Instead of bytes, the API expects the number of filter
+        * blocks to be returned.
+        */
+       ret = fprog->len;
 out:
        release_sock(sk);
        return ret;
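
The user-space counterpart is getsockopt(SO_GET_FILTER), which ends up in sk_get_filter() above and now returns the originally attached program verbatim. A sketch follows (not part of the patch; it assumes, per the code above, that optlen carries the number of filter blocks rather than a byte count):

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

static int demo_dump_filter(int sock)
{
	struct sock_filter *insns;
	socklen_t optlen = 0;
	unsigned int i;

	/* First call: learn how many filter blocks are attached. */
	if (getsockopt(sock, SOL_SOCKET, SO_GET_FILTER, NULL, &optlen))
		return -1;

	insns = calloc(optlen, sizeof(*insns));
	if (!insns)
		return -1;

	/* Second call: copy back the original, unconverted program. */
	if (getsockopt(sock, SOL_SOCKET, SO_GET_FILTER, insns, &optlen)) {
		free(insns);
		return -1;
	}

	for (i = 0; i < optlen; i++)
		printf("{ 0x%02x, %u, %u, 0x%08x },\n", insns[i].code,
		       insns[i].jt, insns[i].jf, insns[i].k);

	free(insns);
	return 0;
}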
index dfa602ceb8cd846a2fdbbd24ac5367356ebb3906..31cfb365e0c689ffa528bf2f96072c6bcbc82799 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/flow.h>
 #include <linux/atomic.h>
 #include <linux/security.h>
+#include <net/net_namespace.h>
 
 struct flow_cache_entry {
        union {
@@ -38,37 +39,14 @@ struct flow_cache_entry {
        struct flow_cache_object        *object;
 };
 
-struct flow_cache_percpu {
-       struct hlist_head               *hash_table;
-       int                             hash_count;
-       u32                             hash_rnd;
-       int                             hash_rnd_recalc;
-       struct tasklet_struct           flush_tasklet;
-};
-
 struct flow_flush_info {
        struct flow_cache               *cache;
        atomic_t                        cpuleft;
        struct completion               completion;
 };
 
-struct flow_cache {
-       u32                             hash_shift;
-       struct flow_cache_percpu __percpu *percpu;
-       struct notifier_block           hotcpu_notifier;
-       int                             low_watermark;
-       int                             high_watermark;
-       struct timer_list               rnd_timer;
-};
-
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-EXPORT_SYMBOL(flow_cache_genid);
-static struct flow_cache flow_cache_global;
 static struct kmem_cache *flow_cachep __read_mostly;
 
-static DEFINE_SPINLOCK(flow_cache_gc_lock);
-static LIST_HEAD(flow_cache_gc_list);
-
 #define flow_cache_hash_size(cache)    (1 << (cache)->hash_shift)
 #define FLOW_HASH_RND_PERIOD           (10 * 60 * HZ)
 
@@ -84,16 +62,18 @@ static void flow_cache_new_hashrnd(unsigned long arg)
        add_timer(&fc->rnd_timer);
 }
 
-static int flow_entry_valid(struct flow_cache_entry *fle)
+static int flow_entry_valid(struct flow_cache_entry *fle,
+                               struct netns_xfrm *xfrm)
 {
-       if (atomic_read(&flow_cache_genid) != fle->genid)
+       if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
 }
 
-static void flow_entry_kill(struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache_entry *fle,
+                               struct netns_xfrm *xfrm)
 {
        if (fle->object)
                fle->object->ops->delete(fle->object);
@@ -104,26 +84,28 @@ static void flow_cache_gc_task(struct work_struct *work)
 {
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;
+       struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+                                               flow_cache_gc_work);
 
        INIT_LIST_HEAD(&gc_list);
-       spin_lock_bh(&flow_cache_gc_lock);
-       list_splice_tail_init(&flow_cache_gc_list, &gc_list);
-       spin_unlock_bh(&flow_cache_gc_lock);
+       spin_lock_bh(&xfrm->flow_cache_gc_lock);
+       list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
+       spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
-               flow_entry_kill(fce);
+               flow_entry_kill(fce, xfrm);
 }
-static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
-                                    int deleted, struct list_head *gc_list)
+                                    int deleted, struct list_head *gc_list,
+                                    struct netns_xfrm *xfrm)
 {
        if (deleted) {
                fcp->hash_count -= deleted;
-               spin_lock_bh(&flow_cache_gc_lock);
-               list_splice_tail(gc_list, &flow_cache_gc_list);
-               spin_unlock_bh(&flow_cache_gc_lock);
-               schedule_work(&flow_cache_gc_work);
+               spin_lock_bh(&xfrm->flow_cache_gc_lock);
+               list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
+               spin_unlock_bh(&xfrm->flow_cache_gc_lock);
+               schedule_work(&xfrm->flow_cache_gc_work);
        }
 }
 
@@ -135,6 +117,8 @@ static void __flow_cache_shrink(struct flow_cache *fc,
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
+       struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
+                                               flow_cache_global);
 
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;
@@ -142,7 +126,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
-                           flow_entry_valid(fle)) {
+                           flow_entry_valid(fle, xfrm)) {
                                saved++;
                        } else {
                                deleted++;
@@ -152,7 +136,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
                }
        }
 
-       flow_cache_queue_garbage(fcp, deleted, &gc_list);
+       flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
 }
 
 static void flow_cache_shrink(struct flow_cache *fc,
@@ -208,7 +192,7 @@ struct flow_cache_object *
 flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                  flow_resolve_t resolver, void *ctx)
 {
-       struct flow_cache *fc = &flow_cache_global;
+       struct flow_cache *fc = &net->xfrm.flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
@@ -258,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                        hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
-       } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+       } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
@@ -279,7 +263,7 @@ nocache:
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
-               fle->genid = atomic_read(&flow_cache_genid);
+               fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
@@ -303,12 +287,14 @@ static void flow_cache_flush_tasklet(unsigned long data)
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
+       struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
+                                               flow_cache_global);
 
        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
-                       if (flow_entry_valid(fle))
+                       if (flow_entry_valid(fle, xfrm))
                                continue;
 
                        deleted++;
@@ -317,7 +303,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
                }
        }
 
-       flow_cache_queue_garbage(fcp, deleted, &gc_list);
+       flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
 
        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
@@ -351,10 +337,9 @@ static void flow_cache_flush_per_cpu(void *data)
        tasklet_schedule(tasklet);
 }
 
-void flow_cache_flush(void)
+void flow_cache_flush(struct net *net)
 {
        struct flow_flush_info info;
-       static DEFINE_MUTEX(flow_flush_sem);
        cpumask_var_t mask;
        int i, self;
 
@@ -365,8 +350,8 @@ void flow_cache_flush(void)
 
        /* Don't want cpus going down or up during this. */
        get_online_cpus();
-       mutex_lock(&flow_flush_sem);
-       info.cache = &flow_cache_global;
+       mutex_lock(&net->xfrm.flow_flush_sem);
+       info.cache = &net->xfrm.flow_cache_global;
        for_each_online_cpu(i)
                if (!flow_cache_percpu_empty(info.cache, i))
                        cpumask_set_cpu(i, mask);
@@ -386,21 +371,23 @@ void flow_cache_flush(void)
        wait_for_completion(&info.completion);
 
 done:
-       mutex_unlock(&flow_flush_sem);
+       mutex_unlock(&net->xfrm.flow_flush_sem);
        put_online_cpus();
        free_cpumask_var(mask);
 }
 
 static void flow_cache_flush_task(struct work_struct *work)
 {
-       flow_cache_flush();
-}
+       struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+                                               flow_cache_gc_work);
+       struct net *net = container_of(xfrm, struct net, xfrm);
 
-static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+       flow_cache_flush(net);
+}
 
-void flow_cache_flush_deferred(void)
+void flow_cache_flush_deferred(struct net *net)
 {
-       schedule_work(&flow_cache_flush_work);
+       schedule_work(&net->xfrm.flow_cache_flush_work);
 }
 
 static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
@@ -425,7 +412,8 @@ static int flow_cache_cpu(struct notifier_block *nfb,
                          unsigned long action,
                          void *hcpu)
 {
-       struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+       struct flow_cache *fc = container_of(nfb, struct flow_cache,
+                                               hotcpu_notifier);
        int res, cpu = (unsigned long) hcpu;
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
 
@@ -444,9 +432,20 @@ static int flow_cache_cpu(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static int __init flow_cache_init(struct flow_cache *fc)
+int flow_cache_init(struct net *net)
 {
        int i;
+       struct flow_cache *fc = &net->xfrm.flow_cache_global;
+
+       if (!flow_cachep)
+               flow_cachep = kmem_cache_create("flow_cache",
+                                               sizeof(struct flow_cache_entry),
+                                               0, SLAB_PANIC, NULL);
+       spin_lock_init(&net->xfrm.flow_cache_gc_lock);
+       INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
+       INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
+       INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
+       mutex_init(&net->xfrm.flow_flush_sem);
 
        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
@@ -484,14 +483,23 @@ err:
 
        return -ENOMEM;
 }
+EXPORT_SYMBOL(flow_cache_init);
 
-static int __init flow_cache_init_global(void)
+void flow_cache_fini(struct net *net)
 {
-       flow_cachep = kmem_cache_create("flow_cache",
-                                       sizeof(struct flow_cache_entry),
-                                       0, SLAB_PANIC, NULL);
+       int i;
+       struct flow_cache *fc = &net->xfrm.flow_cache_global;
 
-       return flow_cache_init(&flow_cache_global);
-}
+       del_timer_sync(&fc->rnd_timer);
+       unregister_hotcpu_notifier(&fc->hotcpu_notifier);
 
-module_init(flow_cache_init_global);
+       for_each_possible_cpu(i) {
+               struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
+               kfree(fcp->hash_table);
+               fcp->hash_table = NULL;
+       }
+
+       free_percpu(fc->percpu);
+       fc->percpu = NULL;
+}
+EXPORT_SYMBOL(flow_cache_fini);
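
flow_cache_init() and flow_cache_fini() are now exported and take a struct net, since the cache state lives in netns_xfrm. In the series itself the xfrm netns init/exit path makes these calls; the pernet wrapper below is only a hedged sketch of the pairing (names are mine), assuming the prototypes are exposed through net/flow.h:

#include <net/net_namespace.h>
#include <net/flow.h>

static int __net_init example_flow_net_init(struct net *net)
{
	/* Sets up this namespace's hash tables, GC list/work, flush work,
	 * rnd timer and hotcpu notifier.
	 */
	return flow_cache_init(net);
}

static void __net_exit example_flow_net_exit(struct net *net)
{
	/* Tears down the timer, notifier and per-cpu hash tables again. */
	flow_cache_fini(net);
}

static struct pernet_operations example_flow_net_ops = {
	.init = example_flow_net_init,
	.exit = example_flow_net_exit,
};
/* The owner would register this with register_pernet_subsys() at init time. */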
index e29e810663d777ecee281b61fda8ec8dbdcbfb92..107ed12a5323ab20e796042ae671e7b60ac0b488 100644 (file)
@@ -61,7 +61,7 @@ bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 
 again:
        switch (proto) {
-       case __constant_htons(ETH_P_IP): {
+       case htons(ETH_P_IP): {
                const struct iphdr *iph;
                struct iphdr _iph;
 ip:
@@ -77,7 +77,7 @@ ip:
                iph_to_flow_copy_addrs(flow, iph);
                break;
        }
-       case __constant_htons(ETH_P_IPV6): {
+       case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
 ipv6:
@@ -91,8 +91,8 @@ ipv6:
                nhoff += sizeof(struct ipv6hdr);
                break;
        }
-       case __constant_htons(ETH_P_8021AD):
-       case __constant_htons(ETH_P_8021Q): {
+       case htons(ETH_P_8021AD):
+       case htons(ETH_P_8021Q): {
                const struct vlan_hdr *vlan;
                struct vlan_hdr _vlan;
 
@@ -104,7 +104,7 @@ ipv6:
                nhoff += sizeof(*vlan);
                goto again;
        }
-       case __constant_htons(ETH_P_PPP_SES): {
+       case htons(ETH_P_PPP_SES): {
                struct {
                        struct pppoe_hdr hdr;
                        __be16 proto;
@@ -115,9 +115,9 @@ ipv6:
                proto = hdr->proto;
                nhoff += PPPOE_SES_HLEN;
                switch (proto) {
-               case __constant_htons(PPP_IP):
+               case htons(PPP_IP):
                        goto ip;
-               case __constant_htons(PPP_IPV6):
+               case htons(PPP_IPV6):
                        goto ipv6;
                default:
                        return false;
@@ -203,8 +203,8 @@ static __always_inline u32 __flow_hash_1word(u32 a)
 
 /*
  * __skb_get_hash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
- * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
+ * and src/dst port numbers.  Sets hash in skb to non-zero hash value
+ * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
  * if hash is a canonical 4-tuple hash over transport ports.
  */
 void __skb_get_hash(struct sk_buff *skb)
@@ -216,7 +216,7 @@ void __skb_get_hash(struct sk_buff *skb)
                return;
 
        if (keys.ports)
-               skb->l4_rxhash = 1;
+               skb->l4_hash = 1;
 
        /* get a consistent hash (same value on both flow directions) */
        if (((__force u32)keys.dst < (__force u32)keys.src) ||
@@ -232,7 +232,7 @@ void __skb_get_hash(struct sk_buff *skb)
        if (!hash)
                hash = 1;
 
-       skb->rxhash = hash;
+       skb->hash = hash;
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -344,7 +344,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
                                        hash = skb->sk->sk_hash;
                                else
                                        hash = (__force u16) skb->protocol ^
-                                           skb->rxhash;
+                                           skb->hash;
                                hash = __flow_hash_1word(hash);
                                queue_index = map->queues[
                                    ((u64)hash * map->len) >> 32];
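
The __constant_htons() to htons() conversions above are purely cosmetic: with a compile-time constant argument, htons() already folds to an integer constant expression, so it is legal in case labels. A minimal illustration of the same pattern (the helper name is mine):

#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>

/* Returns true for the two protocols the dissector dispatches on first. */
static bool example_is_ip_proto(__be16 proto)
{
	switch (proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		return true;
	default:
		return false;
	}
}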
index e16129019c6658ae7b1ab697692f8e6484c8cd90..8f8a96ef9f3f64ba519fe4c872d46c7b7c680ec9 100644 (file)
@@ -836,10 +836,10 @@ out:
 static __inline__ int neigh_max_probes(struct neighbour *n)
 {
        struct neigh_parms *p = n->parms;
-       return (n->nud_state & NUD_PROBE) ?
-               NEIGH_VAR(p, UCAST_PROBES) :
-               NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
-               NEIGH_VAR(p, MCAST_PROBES);
+       int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES);
+       if (!(n->nud_state & NUD_PROBE))
+               max_probes += NEIGH_VAR(p, MCAST_PROBES);
+       return max_probes;
 }
 
 static void neigh_invalidate(struct neighbour *neigh)
@@ -945,6 +945,7 @@ static void neigh_timer_handler(unsigned long arg)
                neigh->nud_state = NUD_FAILED;
                notify = 1;
                neigh_invalidate(neigh);
+               goto out;
        }
 
        if (neigh->nud_state & NUD_IN_TIMER) {
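
The neigh_max_probes() rewrite is a behavioural change, not just a cleanup: app_solicit probes are now counted on top of ucast_solicit even in NUD_PROBE, while mcast_solicit is still added only outside that state. A worked sketch with the documented sysctl defaults (ucast_solicit=3, app_solicit=0, mcast_solicit=3); the values are illustrative, not taken from the patch:

/* Mirrors the new accounting for a quick sanity check. */
static int example_max_probes(int in_probe_state)
{
	int ucast = 3, app = 0, mcast = 3;
	int max_probes = ucast + app;	/* 3 probes once in NUD_PROBE */

	if (!in_probe_state)
		max_probes += mcast;	/* 6 attempts before reaching NUD_PROBE */
	return max_probes;
}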
index 93886246a0b42a4f02e8387e758216582dd6d54f..46239627848436938cbd1efb743f60ffb0b15289 100644 (file)
@@ -104,6 +104,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
 }
 
 NETDEVICE_SHOW_RO(dev_id, fmt_hex);
+NETDEVICE_SHOW_RO(dev_port, fmt_dec);
 NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW_RO(addr_len, fmt_dec);
 NETDEVICE_SHOW_RO(iflink, fmt_dec);
@@ -252,6 +253,16 @@ static ssize_t operstate_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(operstate);
 
+static ssize_t carrier_changes_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct net_device *netdev = to_net_dev(dev);
+       return sprintf(buf, fmt_dec,
+                      atomic_read(&netdev->carrier_changes));
+}
+static DEVICE_ATTR_RO(carrier_changes);
+
 /* read-write attributes */
 
 static int change_mtu(struct net_device *net, unsigned long new_mtu)
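
The new read-only attribute exposes the carrier flap counter as a decimal value at /sys/class/net/<iface>/carrier_changes. A small user-space sketch; the interface name is an illustrative assumption:

#include <stdio.h>

int main(void)
{
	unsigned long changes;
	FILE *f = fopen("/sys/class/net/eth0/carrier_changes", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu", &changes) == 1)
		printf("carrier changed %lu time(s)\n", changes);
	fclose(f);
	return 0;
}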
@@ -373,6 +384,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
+       &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_addr_assign_type.attr,
@@ -384,6 +396,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
+       &dev_attr_carrier_changes.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
@@ -996,15 +1009,12 @@ static struct attribute_group dql_group = {
 #endif /* CONFIG_BQL */
 
 #ifdef CONFIG_XPS
-static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
 {
        struct net_device *dev = queue->dev;
-       int i;
-
-       for (i = 0; i < dev->num_tx_queues; i++)
-               if (queue == &dev->_tx[i])
-                       break;
+       unsigned int i;
 
+       i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);
 
        return i;
index df9e6b1a975920f47fbd68b540ccb4d7f8511759..ed7740f7a94d538d617ced2c0f8174f1eb1dd3ed 100644 (file)
 
 static struct sk_buff_head skb_pool;
 
-static atomic_t trapped;
-
 DEFINE_STATIC_SRCU(netpoll_srcu);
 
 #define USEC_PER_POLL  50
-#define NETPOLL_RX_ENABLED  1
-#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE                                                   \
        (sizeof(struct ethhdr) +                                        \
@@ -61,7 +57,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
         MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
@@ -74,6 +69,37 @@ module_param(carrier_timeout, uint, 0644);
 #define np_notice(np, fmt, ...)                                \
        pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 
+static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
+                             struct netdev_queue *txq)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+       int status = NETDEV_TX_OK;
+       netdev_features_t features;
+
+       features = netif_skb_features(skb);
+
+       if (vlan_tx_tag_present(skb) &&
+           !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+               skb = __vlan_put_tag(skb, skb->vlan_proto,
+                                    vlan_tx_tag_get(skb));
+               if (unlikely(!skb)) {
+                       /* This is actually a packet drop, but we
+                        * don't want the code that calls this
+                        * function to try and operate on a NULL skb.
+                        */
+                       goto out;
+               }
+               skb->vlan_tci = 0;
+       }
+
+       status = ops->ndo_start_xmit(skb, dev);
+       if (status == NETDEV_TX_OK)
+               txq_trans_update(txq);
+
+out:
+       return status;
+}
+
 static void queue_process(struct work_struct *work)
 {
        struct netpoll_info *npinfo =
@@ -83,51 +109,31 @@ static void queue_process(struct work_struct *work)
 
        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
-               const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;
 
                if (!netif_device_present(dev) || !netif_running(dev)) {
-                       __kfree_skb(skb);
+                       kfree_skb(skb);
                        continue;
                }
 
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
                local_irq_save(flags);
-               __netif_tx_lock(txq, smp_processor_id());
+               HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (netif_xmit_frozen_or_stopped(txq) ||
-                   ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
+                   netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
-                       __netif_tx_unlock(txq);
+                       HARD_TX_UNLOCK(dev, txq);
                        local_irq_restore(flags);
 
                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
-               __netif_tx_unlock(txq);
+               HARD_TX_UNLOCK(dev, txq);
                local_irq_restore(flags);
        }
 }
 
-static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
-                           unsigned short ulen, __be32 saddr, __be32 daddr)
-{
-       __wsum psum;
-
-       if (uh->check == 0 || skb_csum_unnecessary(skb))
-               return 0;
-
-       psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
-
-       if (skb->ip_summed == CHECKSUM_COMPLETE &&
-           !csum_fold(csum_add(psum, skb->csum)))
-               return 0;
-
-       skb->csum = psum;
-
-       return __skb_checksum_complete(skb);
-}
-
 /*
  * Check whether delayed processing was scheduled for our NIC. If so,
  * we attempt to grab the poll lock and use ->poll() to pump the card.
@@ -138,14 +144,8 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * trylock here and interrupts are already disabled in the softirq
  * case. Further, we test the poll_owner to avoid recursion on UP
  * systems where the lock doesn't exist.
- *
- * In cases where there is bi-directional communications, reading only
- * one message at a time can lead to packets being dropped by the
- * network adapter, forcing superfluous retries and possibly timeouts.
- * Thus, we set our budget to greater than 1.
  */
-static int poll_one_napi(struct netpoll_info *npinfo,
-                        struct napi_struct *napi, int budget)
+static int poll_one_napi(struct napi_struct *napi, int budget)
 {
        int work;
 
@@ -156,52 +156,35 @@ static int poll_one_napi(struct netpoll_info *npinfo,
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;
 
-       npinfo->rx_flags |= NETPOLL_RX_DROP;
-       atomic_inc(&trapped);
        set_bit(NAPI_STATE_NPSVC, &napi->state);
 
        work = napi->poll(napi, budget);
+       WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
        trace_napi_poll(napi);
 
        clear_bit(NAPI_STATE_NPSVC, &napi->state);
-       atomic_dec(&trapped);
-       npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
        return budget - work;
 }
 
-static void poll_napi(struct net_device *dev)
+static void poll_napi(struct net_device *dev, int budget)
 {
        struct napi_struct *napi;
-       int budget = 16;
 
        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
-                       budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
-                                              napi, budget);
+                       budget = poll_one_napi(napi, budget);
                        spin_unlock(&napi->poll_lock);
-
-                       if (!budget)
-                               break;
                }
        }
 }
 
-static void service_neigh_queue(struct netpoll_info *npi)
-{
-       if (npi) {
-               struct sk_buff *skb;
-
-               while ((skb = skb_dequeue(&npi->neigh_tx)))
-                       netpoll_neigh_reply(skb, npi);
-       }
-}
-
 static void netpoll_poll_dev(struct net_device *dev)
 {
        const struct net_device_ops *ops;
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+       int budget = 0;
 
        /* Don't do any rx activity if the dev_lock mutex is held
         * the dev_open/close paths use this to block netpoll activity
@@ -224,31 +207,14 @@ static void netpoll_poll_dev(struct net_device *dev)
        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);
 
-       poll_napi(dev);
+       poll_napi(dev, budget);
 
        up(&ni->dev_lock);
 
-       if (dev->flags & IFF_SLAVE) {
-               if (ni) {
-                       struct net_device *bond_dev;
-                       struct sk_buff *skb;
-                       struct netpoll_info *bond_ni;
-
-                       bond_dev = netdev_master_upper_dev_get_rcu(dev);
-                       bond_ni = rcu_dereference_bh(bond_dev->npinfo);
-                       while ((skb = skb_dequeue(&ni->neigh_tx))) {
-                               skb->dev = bond_dev;
-                               skb_queue_tail(&bond_ni->neigh_tx, skb);
-                       }
-               }
-       }
-
-       service_neigh_queue(ni);
-
        zap_completion_queue();
 }
 
-void netpoll_rx_disable(struct net_device *dev)
+void netpoll_poll_disable(struct net_device *dev)
 {
        struct netpoll_info *ni;
        int idx;
@@ -259,9 +225,9 @@ void netpoll_rx_disable(struct net_device *dev)
                down(&ni->dev_lock);
        srcu_read_unlock(&netpoll_srcu, idx);
 }
-EXPORT_SYMBOL(netpoll_rx_disable);
+EXPORT_SYMBOL(netpoll_poll_disable);
 
-void netpoll_rx_enable(struct net_device *dev)
+void netpoll_poll_enable(struct net_device *dev)
 {
        struct netpoll_info *ni;
        rcu_read_lock();
@@ -270,7 +236,7 @@ void netpoll_rx_enable(struct net_device *dev)
                up(&ni->dev_lock);
        rcu_read_unlock();
 }
-EXPORT_SYMBOL(netpoll_rx_enable);
+EXPORT_SYMBOL(netpoll_poll_enable);
 
 static void refill_skbs(void)
 {
@@ -359,7 +325,6 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 {
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
-       const struct net_device_ops *ops = dev->netdev_ops;
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;
 
@@ -367,7 +332,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
        npinfo = rcu_dereference_bh(np->dev->npinfo);
        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
-               __kfree_skb(skb);
+               dev_kfree_skb_irq(skb);
                return;
        }
 
@@ -380,29 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
-                       if (__netif_tx_trylock(txq)) {
-                               if (!netif_xmit_stopped(txq)) {
-                                       if (vlan_tx_tag_present(skb) &&
-                                           !vlan_hw_offload_capable(netif_skb_features(skb),
-                                                                    skb->vlan_proto)) {
-                                               skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-                                               if (unlikely(!skb)) {
-                                                       /* This is actually a packet drop, but we
-                                                        * don't want the code at the end of this
-                                                        * function to try and re-queue a NULL skb.
-                                                        */
-                                                       status = NETDEV_TX_OK;
-                                                       goto unlock_txq;
-                                               }
-                                               skb->vlan_tci = 0;
-                                       }
-
-                                       status = ops->ndo_start_xmit(skb, dev);
-                                       if (status == NETDEV_TX_OK)
-                                               txq_trans_update(txq);
-                               }
-                       unlock_txq:
-                               __netif_tx_unlock(txq);
+                       if (HARD_TX_TRYLOCK(dev, txq)) {
+                               if (!netif_xmit_stopped(txq))
+                                       status = netpoll_start_xmit(skb, dev, txq);
+
+                               HARD_TX_UNLOCK(dev, txq);
 
                                if (status == NETDEV_TX_OK)
                                        break;
@@ -417,7 +364,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
                WARN_ONCE(!irqs_disabled(),
                        "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
-                       dev->name, ops->ndo_start_xmit);
+                       dev->name, dev->netdev_ops->ndo_start_xmit);
 
        }
 
@@ -529,384 +476,6 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
-static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
-{
-       int size, type = ARPOP_REPLY;
-       __be32 sip, tip;
-       unsigned char *sha;
-       struct sk_buff *send_skb;
-       struct netpoll *np, *tmp;
-       unsigned long flags;
-       int hlen, tlen;
-       int hits = 0, proto;
-
-       if (list_empty(&npinfo->rx_np))
-               return;
-
-       /* Before checking the packet, we do some early
-          inspection whether this is interesting at all */
-       spin_lock_irqsave(&npinfo->rx_lock, flags);
-       list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-               if (np->dev == skb->dev)
-                       hits++;
-       }
-       spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
-       /* No netpoll struct is using this dev */
-       if (!hits)
-               return;
-
-       proto = ntohs(eth_hdr(skb)->h_proto);
-       if (proto == ETH_P_ARP) {
-               struct arphdr *arp;
-               unsigned char *arp_ptr;
-               /* No arp on this interface */
-               if (skb->dev->flags & IFF_NOARP)
-                       return;
-
-               if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
-                       return;
-
-               skb_reset_network_header(skb);
-               skb_reset_transport_header(skb);
-               arp = arp_hdr(skb);
-
-               if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
-                    arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
-                   arp->ar_pro != htons(ETH_P_IP) ||
-                   arp->ar_op != htons(ARPOP_REQUEST))
-                       return;
-
-               arp_ptr = (unsigned char *)(arp+1);
-               /* save the location of the src hw addr */
-               sha = arp_ptr;
-               arp_ptr += skb->dev->addr_len;
-               memcpy(&sip, arp_ptr, 4);
-               arp_ptr += 4;
-               /* If we actually cared about dst hw addr,
-                  it would get copied here */
-               arp_ptr += skb->dev->addr_len;
-               memcpy(&tip, arp_ptr, 4);
-
-               /* Should we ignore arp? */
-               if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
-                       return;
-
-               size = arp_hdr_len(skb->dev);
-
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-                       if (tip != np->local_ip.ip)
-                               continue;
-
-                       hlen = LL_RESERVED_SPACE(np->dev);
-                       tlen = np->dev->needed_tailroom;
-                       send_skb = find_skb(np, size + hlen + tlen, hlen);
-                       if (!send_skb)
-                               continue;
-
-                       skb_reset_network_header(send_skb);
-                       arp = (struct arphdr *) skb_put(send_skb, size);
-                       send_skb->dev = skb->dev;
-                       send_skb->protocol = htons(ETH_P_ARP);
-
-                       /* Fill the device header for the ARP frame */
-                       if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
-                                           sha, np->dev->dev_addr,
-                                           send_skb->len) < 0) {
-                               kfree_skb(send_skb);
-                               continue;
-                       }
-
-                       /*
-                        * Fill out the arp protocol part.
-                        *
-                        * we only support ethernet device type,
-                        * which (according to RFC 1390) should
-                        * always equal 1 (Ethernet).
-                        */
-
-                       arp->ar_hrd = htons(np->dev->type);
-                       arp->ar_pro = htons(ETH_P_IP);
-                       arp->ar_hln = np->dev->addr_len;
-                       arp->ar_pln = 4;
-                       arp->ar_op = htons(type);
-
-                       arp_ptr = (unsigned char *)(arp + 1);
-                       memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
-                       arp_ptr += np->dev->addr_len;
-                       memcpy(arp_ptr, &tip, 4);
-                       arp_ptr += 4;
-                       memcpy(arp_ptr, sha, np->dev->addr_len);
-                       arp_ptr += np->dev->addr_len;
-                       memcpy(arp_ptr, &sip, 4);
-
-                       netpoll_send_skb(np, send_skb);
-
-                       /* If there are several rx_skb_hooks for the same
-                        * address we're fine by sending a single reply
-                        */
-                       break;
-               }
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-       } else if( proto == ETH_P_IPV6) {
-#if IS_ENABLED(CONFIG_IPV6)
-               struct nd_msg *msg;
-               u8 *lladdr = NULL;
-               struct ipv6hdr *hdr;
-               struct icmp6hdr *icmp6h;
-               const struct in6_addr *saddr;
-               const struct in6_addr *daddr;
-               struct inet6_dev *in6_dev = NULL;
-               struct in6_addr *target;
-
-               in6_dev = in6_dev_get(skb->dev);
-               if (!in6_dev || !in6_dev->cnf.accept_ra)
-                       return;
-
-               if (!pskb_may_pull(skb, skb->len))
-                       return;
-
-               msg = (struct nd_msg *)skb_transport_header(skb);
-
-               __skb_push(skb, skb->data - skb_transport_header(skb));
-
-               if (ipv6_hdr(skb)->hop_limit != 255)
-                       return;
-               if (msg->icmph.icmp6_code != 0)
-                       return;
-               if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
-                       return;
-
-               saddr = &ipv6_hdr(skb)->saddr;
-               daddr = &ipv6_hdr(skb)->daddr;
-
-               size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
-
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-                       if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
-                               continue;
-
-                       hlen = LL_RESERVED_SPACE(np->dev);
-                       tlen = np->dev->needed_tailroom;
-                       send_skb = find_skb(np, size + hlen + tlen, hlen);
-                       if (!send_skb)
-                               continue;
-
-                       send_skb->protocol = htons(ETH_P_IPV6);
-                       send_skb->dev = skb->dev;
-
-                       skb_reset_network_header(send_skb);
-                       hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
-                       *(__be32*)hdr = htonl(0x60000000);
-                       hdr->payload_len = htons(size);
-                       hdr->nexthdr = IPPROTO_ICMPV6;
-                       hdr->hop_limit = 255;
-                       hdr->saddr = *saddr;
-                       hdr->daddr = *daddr;
-
-                       icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
-                       icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
-                       icmp6h->icmp6_router = 0;
-                       icmp6h->icmp6_solicited = 1;
-
-                       target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
-                       *target = msg->target;
-                       icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
-                                                             IPPROTO_ICMPV6,
-                                                             csum_partial(icmp6h,
-                                                                          size, 0));
-
-                       if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
-                                           lladdr, np->dev->dev_addr,
-                                           send_skb->len) < 0) {
-                               kfree_skb(send_skb);
-                               continue;
-                       }
-
-                       netpoll_send_skb(np, send_skb);
-
-                       /* If there are several rx_skb_hooks for the same
-                        * address, we're fine by sending a single reply
-                        */
-                       break;
-               }
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-#endif
-       }
-}
-
-static bool pkt_is_ns(struct sk_buff *skb)
-{
-       struct nd_msg *msg;
-       struct ipv6hdr *hdr;
-
-       if (skb->protocol != htons(ETH_P_IPV6))
-               return false;
-       if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
-               return false;
-
-       msg = (struct nd_msg *)skb_transport_header(skb);
-       __skb_push(skb, skb->data - skb_transport_header(skb));
-       hdr = ipv6_hdr(skb);
-
-       if (hdr->nexthdr != IPPROTO_ICMPV6)
-               return false;
-       if (hdr->hop_limit != 255)
-               return false;
-       if (msg->icmph.icmp6_code != 0)
-               return false;
-       if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
-               return false;
-
-       return true;
-}
-
-int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
-{
-       int proto, len, ulen, data_len;
-       int hits = 0, offset;
-       const struct iphdr *iph;
-       struct udphdr *uh;
-       struct netpoll *np, *tmp;
-       uint16_t source;
-
-       if (list_empty(&npinfo->rx_np))
-               goto out;
-
-       if (skb->dev->type != ARPHRD_ETHER)
-               goto out;
-
-       /* check if netpoll clients need ARP */
-       if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
-               skb_queue_tail(&npinfo->neigh_tx, skb);
-               return 1;
-       } else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
-               skb_queue_tail(&npinfo->neigh_tx, skb);
-               return 1;
-       }
-
-       if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
-               skb = vlan_untag(skb);
-               if (unlikely(!skb))
-                       goto out;
-       }
-
-       proto = ntohs(eth_hdr(skb)->h_proto);
-       if (proto != ETH_P_IP && proto != ETH_P_IPV6)
-               goto out;
-       if (skb->pkt_type == PACKET_OTHERHOST)
-               goto out;
-       if (skb_shared(skb))
-               goto out;
-
-       if (proto == ETH_P_IP) {
-               if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-                       goto out;
-               iph = (struct iphdr *)skb->data;
-               if (iph->ihl < 5 || iph->version != 4)
-                       goto out;
-               if (!pskb_may_pull(skb, iph->ihl*4))
-                       goto out;
-               iph = (struct iphdr *)skb->data;
-               if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
-                       goto out;
-
-               len = ntohs(iph->tot_len);
-               if (skb->len < len || len < iph->ihl*4)
-                       goto out;
-
-               /*
-                * Our transport medium may have padded the buffer out.
-                * Now We trim to the true length of the frame.
-                */
-               if (pskb_trim_rcsum(skb, len))
-                       goto out;
-
-               iph = (struct iphdr *)skb->data;
-               if (iph->protocol != IPPROTO_UDP)
-                       goto out;
-
-               len -= iph->ihl*4;
-               uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
-               offset = (unsigned char *)(uh + 1) - skb->data;
-               ulen = ntohs(uh->len);
-               data_len = skb->len - offset;
-               source = ntohs(uh->source);
-
-               if (ulen != len)
-                       goto out;
-               if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
-                       goto out;
-               list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-                       if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
-                               continue;
-                       if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
-                               continue;
-                       if (np->local_port && np->local_port != ntohs(uh->dest))
-                               continue;
-
-                       np->rx_skb_hook(np, source, skb, offset, data_len);
-                       hits++;
-               }
-       } else {
-#if IS_ENABLED(CONFIG_IPV6)
-               const struct ipv6hdr *ip6h;
-
-               if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-                       goto out;
-               ip6h = (struct ipv6hdr *)skb->data;
-               if (ip6h->version != 6)
-                       goto out;
-               len = ntohs(ip6h->payload_len);
-               if (!len)
-                       goto out;
-               if (len + sizeof(struct ipv6hdr) > skb->len)
-                       goto out;
-               if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
-                       goto out;
-               ip6h = ipv6_hdr(skb);
-               if (!pskb_may_pull(skb, sizeof(struct udphdr)))
-                       goto out;
-               uh = udp_hdr(skb);
-               offset = (unsigned char *)(uh + 1) - skb->data;
-               ulen = ntohs(uh->len);
-               data_len = skb->len - offset;
-               source = ntohs(uh->source);
-               if (ulen != skb->len)
-                       goto out;
-               if (udp6_csum_init(skb, uh, IPPROTO_UDP))
-                       goto out;
-               list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
-                       if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
-                               continue;
-                       if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
-                               continue;
-                       if (np->local_port && np->local_port != ntohs(uh->dest))
-                               continue;
-
-                       np->rx_skb_hook(np, source, skb, offset, data_len);
-                       hits++;
-               }
-#endif
-       }
-
-       if (!hits)
-               goto out;
-
-       kfree_skb(skb);
-       return 1;
-
-out:
-       if (atomic_read(&trapped)) {
-               kfree_skb(skb);
-               return 1;
-       }
-
-       return 0;
-}
-
 void netpoll_print_options(struct netpoll *np)
 {
        np_info(np, "local port %d\n", np->local_port);
@@ -1026,11 +595,10 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
        struct netpoll_info *npinfo;
        const struct net_device_ops *ops;
-       unsigned long flags;
        int err;
 
        np->dev = ndev;
@@ -1046,18 +614,13 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
        }
 
        if (!ndev->npinfo) {
-               npinfo = kmalloc(sizeof(*npinfo), gfp);
+               npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto out;
                }
 
-               npinfo->rx_flags = 0;
-               INIT_LIST_HEAD(&npinfo->rx_np);
-
-               spin_lock_init(&npinfo->rx_lock);
                sema_init(&npinfo->dev_lock, 1);
-               skb_queue_head_init(&npinfo->neigh_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
@@ -1065,7 +628,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
                ops = np->dev->netdev_ops;
                if (ops->ndo_netpoll_setup) {
-                       err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
+                       err = ops->ndo_netpoll_setup(ndev, npinfo);
                        if (err)
                                goto free_npinfo;
                }
@@ -1076,13 +639,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
        npinfo->netpoll = np;
 
-       if (np->rx_skb_hook) {
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-               list_add_tail(&np->rx, &npinfo->rx_np);
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-       }
-
        /* last thing to do is link it to the net device structure */
        rcu_assign_pointer(ndev->npinfo, npinfo);
 
@@ -1204,7 +760,7 @@ int netpoll_setup(struct netpoll *np)
        /* fill up the skb queue */
        refill_skbs();
 
-       err = __netpoll_setup(np, ndev, GFP_KERNEL);
+       err = __netpoll_setup(np, ndev);
        if (err)
                goto put;
 
@@ -1231,7 +787,6 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
        struct netpoll_info *npinfo =
                        container_of(rcu_head, struct netpoll_info, rcu);
 
-       skb_queue_purge(&npinfo->neigh_tx);
        skb_queue_purge(&npinfo->txq);
 
        /* we can't call cancel_delayed_work_sync here, as we are in softirq */
@@ -1247,7 +802,6 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 void __netpoll_cleanup(struct netpoll *np)
 {
        struct netpoll_info *npinfo;
-       unsigned long flags;
 
        /* rtnl_dereference would be preferable here but
         * rcu_cleanup_netpoll path can put us in here safely without
@@ -1257,14 +811,6 @@ void __netpoll_cleanup(struct netpoll *np)
        if (!npinfo)
                return;
 
-       if (!list_empty(&npinfo->rx_np)) {
-               spin_lock_irqsave(&npinfo->rx_lock, flags);
-               list_del(&np->rx);
-               if (list_empty(&npinfo->rx_np))
-                       npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-               spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-       }
-
        synchronize_srcu(&netpoll_srcu);
 
        if (atomic_dec_and_test(&npinfo->refcnt)) {
@@ -1274,7 +820,7 @@ void __netpoll_cleanup(struct netpoll *np)
                if (ops->ndo_netpoll_cleanup)
                        ops->ndo_netpoll_cleanup(np->dev);
 
-               rcu_assign_pointer(np->dev->npinfo, NULL);
+               RCU_INIT_POINTER(np->dev->npinfo, NULL);
                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
        }
 }
@@ -1308,18 +854,3 @@ out:
        rtnl_unlock();
 }
 EXPORT_SYMBOL(netpoll_cleanup);
-
-int netpoll_trap(void)
-{
-       return atomic_read(&trapped);
-}
-EXPORT_SYMBOL(netpoll_trap);
-
-void netpoll_set_trap(int trap)
-{
-       if (trap)
-               atomic_inc(&trapped);
-       else
-               atomic_dec(&trapped);
-}
-EXPORT_SYMBOL(netpoll_set_trap);
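
With the RX/trap machinery gone, the old netpoll_rx_disable()/netpoll_rx_enable() pair survives only as netpoll_poll_disable()/netpoll_poll_enable(); the semantics (quiesce netpoll while the device is reconfigured) are unchanged. A hedged sketch of the calling pattern, assuming the renamed helpers keep their declarations in linux/netpoll.h; the reconfiguration step and function name are illustrative:

#include <linux/netdevice.h>
#include <linux/netpoll.h>

static int example_reconfigure(struct net_device *dev, int new_mtu)
{
	netpoll_poll_disable(dev);	/* block netpoll_poll_dev() on this device */
	dev->mtu = new_mtu;		/* stand-in for the real reconfiguration */
	netpoll_poll_enable(dev);
	return 0;
}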
index fdac61cac1bd11b15a69f78584029731e9310e4e..d0dac57291afab943db9719aa5818ebb6611cf65 100644 (file)
@@ -476,23 +476,22 @@ static int pgctrl_show(struct seq_file *seq, void *v)
 static ssize_t pgctrl_write(struct file *file, const char __user *buf,
                            size_t count, loff_t *ppos)
 {
-       int err = 0;
        char data[128];
        struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);
 
-       if (!capable(CAP_NET_ADMIN)) {
-               err = -EPERM;
-               goto out;
-       }
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (count == 0)
+               return -EINVAL;
 
        if (count > sizeof(data))
                count = sizeof(data);
 
-       if (copy_from_user(data, buf, count)) {
-               err = -EFAULT;
-               goto out;
-       }
-       data[count - 1] = 0;    /* Make string */
+       if (copy_from_user(data, buf, count))
+               return -EFAULT;
+
+       data[count - 1] = 0;    /* Strip trailing '\n' and terminate string */
 
        if (!strcmp(data, "stop"))
                pktgen_stop_all_threads_ifs(pn);
@@ -506,10 +505,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
        else
                pr_warning("Unknown command: %s\n", data);
 
-       err = count;
-
-out:
-       return err;
+       return count;
 }
 
 static int pgctrl_open(struct inode *inode, struct file *file)
@@ -1251,7 +1247,13 @@ static ssize_t pktgen_if_write(struct file *file,
                                "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
                                f,
                                "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
-                               "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
+                               "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
+                               "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
+                               "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
+#ifdef CONFIG_XFRM
+                               "IPSEC, "
+#endif
+                               "NODE_ALLOC\n");
                        return count;
                }
                sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
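
Because pgctrl_write() NUL-terminates the buffer by overwriting its last byte, a control command must carry a trailing newline, exactly as echo(1) supplies one. A hedged user-space sketch (needs root and the pktgen module loaded; besides "stop" shown above, pktgen's control parser also accepts "start" and "reset"):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char cmd[] = "start\n";	/* trailing '\n' becomes the NUL */
	FILE *f = fopen("/proc/net/pktgen/pgctrl", "w");

	if (!f)
		return 1;
	if (fwrite(cmd, 1, strlen(cmd), f) != strlen(cmd)) {
		fclose(f);
		return 1;
	}
	fclose(f);
	return 0;
}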
index 4425148d2b51592626b92a1451d9bc1208213fb8..467f326126e0eb913416c9bd2f847816d6d98948 100644 (file)
@@ -221,5 +221,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 out:
        spin_unlock_bh(&fastopenq->lock);
        sock_put(lsk);
-       return;
 }
index 120eecc0f5a471f0157894f48ee33f1d1f6a9af5..d4ff41739b0f23fcb572905dd34288cb1d8ebd49 100644 (file)
@@ -822,6 +822,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
               + nla_total_size(1) /* IFLA_OPERSTATE */
               + nla_total_size(1) /* IFLA_LINKMODE */
+              + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
               + nla_total_size(ext_filter_mask
                                & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
               + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -970,7 +971,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
            (dev->qdisc &&
             nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
            (dev->ifalias &&
-            nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
+            nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
+           nla_put_u32(skb, IFLA_CARRIER_CHANGES,
+                       atomic_read(&dev->carrier_changes)))
                goto nla_put_failure;
 
        if (1) {
@@ -1121,56 +1124,7 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
-{
-       struct net *net = sock_net(skb->sk);
-       int h, s_h;
-       int idx = 0, s_idx;
-       struct net_device *dev;
-       struct hlist_head *head;
-       struct nlattr *tb[IFLA_MAX+1];
-       u32 ext_filter_mask = 0;
-
-       s_h = cb->args[0];
-       s_idx = cb->args[1];
-
-       rcu_read_lock();
-       cb->seq = net->dev_base_seq;
-
-       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
-                       ifla_policy) >= 0) {
-
-               if (tb[IFLA_EXT_MASK])
-                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
-       }
-
-       for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
-               idx = 0;
-               head = &net->dev_index_head[h];
-               hlist_for_each_entry_rcu(dev, head, index_hlist) {
-                       if (idx < s_idx)
-                               goto cont;
-                       if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
-                                            NETLINK_CB(cb->skb).portid,
-                                            cb->nlh->nlmsg_seq, 0,
-                                            NLM_F_MULTI,
-                                            ext_filter_mask) <= 0)
-                               goto out;
-
-                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
-                       idx++;
-               }
-       }
-out:
-       rcu_read_unlock();
-       cb->args[1] = idx;
-       cb->args[0] = h;
-
-       return skb->len;
-}
-
-const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_IFNAME]           = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
        [IFLA_ADDRESS]          = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
        [IFLA_BROADCAST]        = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
@@ -1196,8 +1150,8 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_NUM_TX_QUEUES]    = { .type = NLA_U32 },
        [IFLA_NUM_RX_QUEUES]    = { .type = NLA_U32 },
        [IFLA_PHYS_PORT_ID]     = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
+       [IFLA_CARRIER_CHANGES]  = { .type = NLA_U32 },  /* ignored */
 };
-EXPORT_SYMBOL(ifla_policy);
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
        [IFLA_INFO_KIND]        = { .type = NLA_STRING },
@@ -1235,6 +1189,61 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
        [IFLA_PORT_RESPONSE]    = { .type = NLA_U16, },
 };
 
+static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       int h, s_h;
+       int idx = 0, s_idx;
+       struct net_device *dev;
+       struct hlist_head *head;
+       struct nlattr *tb[IFLA_MAX+1];
+       u32 ext_filter_mask = 0;
+
+       s_h = cb->args[0];
+       s_idx = cb->args[1];
+
+       rcu_read_lock();
+       cb->seq = net->dev_base_seq;
+
+       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+                       ifla_policy) >= 0) {
+
+               if (tb[IFLA_EXT_MASK])
+                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       }
+
+       for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+               idx = 0;
+               head = &net->dev_index_head[h];
+               hlist_for_each_entry_rcu(dev, head, index_hlist) {
+                       if (idx < s_idx)
+                               goto cont;
+                       if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+                                            NETLINK_CB(cb->skb).portid,
+                                            cb->nlh->nlmsg_seq, 0,
+                                            NLM_F_MULTI,
+                                            ext_filter_mask) <= 0)
+                               goto out;
+
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+                       idx++;
+               }
+       }
+out:
+       rcu_read_unlock();
+       cb->args[1] = idx;
+       cb->args[0] = h;
+
+       return skb->len;
+}
+
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
+{
+       return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
+}
+EXPORT_SYMBOL(rtnl_nla_parse_ifla);
+
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
 {
        struct net *net;
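
With ifla_policy now static, out-of-file users are expected to validate IFLA_* attribute streams through the exported rtnl_nla_parse_ifla() helper rather than referencing the policy table directly. A minimal sketch of such a caller (the function and its use of pr_debug are illustrative, not part of the change above):

        static int example_validate_ifla(const struct nlmsghdr *nlh)
        {
                struct nlattr *tb[IFLA_MAX + 1];
                int err;

                /* The attribute stream starts after the struct ifinfomsg payload. */
                err = rtnl_nla_parse_ifla(tb,
                                          nlmsg_attrdata(nlh, sizeof(struct ifinfomsg)),
                                          nlmsg_attrlen(nlh, sizeof(struct ifinfomsg)));
                if (err < 0)
                        return err;

                if (tb[IFLA_IFNAME])
                        pr_debug("ifname: %s\n", (char *)nla_data(tb[IFLA_IFNAME]));

                return 0;
        }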
index 90b96a11b974d2697defcc9ccf4b14b65d704c49..30c7d35dd862a090c691732af63af1e8fdbad41a 100644 (file)
@@ -3300,6 +3300,32 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
        return elt;
 }
 
+/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
+ * given sglist without marking the sg entry that contains the last skb data as
+ * the end. So the caller can manipulate the sg list as needed when appending
+ * new data after the first call, without calling sg_unmark_end to extend it.
+ *
+ * Scenario to use skb_to_sgvec_nomark:
+ * 1. sg_init_table
+ * 2. skb_to_sgvec_nomark(payload1)
+ * 3. skb_to_sgvec_nomark(payload2)
+ *
+ * This is equivalent to:
+ * 1. sg_init_table
+ * 2. skb_to_sgvec(payload1)
+ * 3. sg_unmark_end
+ * 4. skb_to_sgvec(payload2)
+ *
+ * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
+ * is preferable.
+ */
+int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+                       int offset, int len)
+{
+       return __skb_to_sgvec(skb, sg, offset, len);
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
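
A short sketch of the scenario described in the comment above: mapping two ranges of the same skb into one scatterlist and letting the caller decide where the list ends. The helper name, offsets and the assumption that nents is large enough are illustrative only:

        /* Illustrative only: maps two skb ranges into one sg table. */
        static int example_map_two_ranges(struct sk_buff *skb,
                                          struct scatterlist *sg, int nents,
                                          int off1, int len1, int off2, int len2)
        {
                int n1, n2;

                sg_init_table(sg, nents);

                n1 = skb_to_sgvec_nomark(skb, sg, off1, len1);      /* payload1 */
                n2 = skb_to_sgvec_nomark(skb, sg + n1, off2, len2); /* payload2 */

                /* Nothing was marked above, so the caller picks the end entry. */
                sg_mark_end(&sg[n1 + n2 - 1]);

                return n1 + n2;
        }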
+
 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 {
        int nsg = __skb_to_sgvec(skb, sg, offset, len);
@@ -3562,15 +3588,47 @@ static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
        return 0;
 }
 
+#define MAX_TCP_HDR_LEN (15 * 4)
+
+static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
+                                     typeof(IPPROTO_IP) proto,
+                                     unsigned int off)
+{
+       switch (proto) {
+               int err;
+
+       case IPPROTO_TCP:
+               err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
+                                         off + MAX_TCP_HDR_LEN);
+               if (!err && !skb_partial_csum_set(skb, off,
+                                                 offsetof(struct tcphdr,
+                                                          check)))
+                       err = -EPROTO;
+               return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
+
+       case IPPROTO_UDP:
+               err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
+                                         off + sizeof(struct udphdr));
+               if (!err && !skb_partial_csum_set(skb, off,
+                                                 offsetof(struct udphdr,
+                                                          check)))
+                       err = -EPROTO;
+               return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
+       }
+
+       return ERR_PTR(-EPROTO);
+}
+
 /* This value should be large enough to cover a tagged ethernet header plus
  * maximally sized IP and TCP or UDP headers.
  */
 #define MAX_IP_HDR_LEN 128
 
-static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
+static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
 {
        unsigned int off;
        bool fragment;
+       __sum16 *csum;
        int err;
 
        fragment = false;
@@ -3591,51 +3649,15 @@ static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
        if (fragment)
                goto out;
 
-       switch (ip_hdr(skb)->protocol) {
-       case IPPROTO_TCP:
-               err = skb_maybe_pull_tail(skb,
-                                         off + sizeof(struct tcphdr),
-                                         MAX_IP_HDR_LEN);
-               if (err < 0)
-                       goto out;
-
-               if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct tcphdr, check))) {
-                       err = -EPROTO;
-                       goto out;
-               }
-
-               if (recalculate)
-                       tcp_hdr(skb)->check =
-                               ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
-                                                  ip_hdr(skb)->daddr,
-                                                  skb->len - off,
-                                                  IPPROTO_TCP, 0);
-               break;
-       case IPPROTO_UDP:
-               err = skb_maybe_pull_tail(skb,
-                                         off + sizeof(struct udphdr),
-                                         MAX_IP_HDR_LEN);
-               if (err < 0)
-                       goto out;
-
-               if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct udphdr, check))) {
-                       err = -EPROTO;
-                       goto out;
-               }
-
-               if (recalculate)
-                       udp_hdr(skb)->check =
-                               ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
-                                                  ip_hdr(skb)->daddr,
-                                                  skb->len - off,
-                                                  IPPROTO_UDP, 0);
-               break;
-       default:
-               goto out;
-       }
+       csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
+       if (IS_ERR(csum))
+               return PTR_ERR(csum);
 
+       if (recalculate)
+               *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+                                          ip_hdr(skb)->daddr,
+                                          skb->len - off,
+                                          ip_hdr(skb)->protocol, 0);
        err = 0;
 
 out:
@@ -3658,6 +3680,7 @@ static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
        unsigned int len;
        bool fragment;
        bool done;
+       __sum16 *csum;
 
        fragment = false;
        done = false;
@@ -3735,51 +3758,14 @@ static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
        if (!done || fragment)
                goto out;
 
-       switch (nexthdr) {
-       case IPPROTO_TCP:
-               err = skb_maybe_pull_tail(skb,
-                                         off + sizeof(struct tcphdr),
-                                         MAX_IPV6_HDR_LEN);
-               if (err < 0)
-                       goto out;
-
-               if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct tcphdr, check))) {
-                       err = -EPROTO;
-                       goto out;
-               }
-
-               if (recalculate)
-                       tcp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr,
-                                                skb->len - off,
-                                                IPPROTO_TCP, 0);
-               break;
-       case IPPROTO_UDP:
-               err = skb_maybe_pull_tail(skb,
-                                         off + sizeof(struct udphdr),
-                                         MAX_IPV6_HDR_LEN);
-               if (err < 0)
-                       goto out;
-
-               if (!skb_partial_csum_set(skb, off,
-                                         offsetof(struct udphdr, check))) {
-                       err = -EPROTO;
-                       goto out;
-               }
-
-               if (recalculate)
-                       udp_hdr(skb)->check =
-                               ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                                &ipv6_hdr(skb)->daddr,
-                                                skb->len - off,
-                                                IPPROTO_UDP, 0);
-               break;
-       default:
-               goto out;
-       }
+       csum = skb_checksum_setup_ip(skb, nexthdr, off);
+       if (IS_ERR(csum))
+               return PTR_ERR(csum);
 
+       if (recalculate)
+               *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+                                        &ipv6_hdr(skb)->daddr,
+                                        skb->len - off, nexthdr, 0);
        err = 0;
 
 out:
@@ -3797,7 +3783,7 @@ int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
 
        switch (skb->protocol) {
        case htons(ETH_P_IP):
-               err = skb_checksum_setup_ip(skb, recalculate);
+               err = skb_checksum_setup_ipv4(skb, recalculate);
                break;
 
        case htons(ETH_P_IPV6):
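
skb_checksum_setup() is consumed by paravirtual receive paths (for example xen-netback) that see partial-checksum packets. A hedged sketch of such a fixup; the driver context is invented for illustration and is not part of this patch:

        static int example_rx_checksum_fixup(struct sk_buff *skb)
        {
                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        return 0;

                /* Locate the TCP/UDP checksum field and recompute the
                 * pseudo-header checksum, since the sender left it unset.
                 */
                return skb_checksum_setup(skb, true);
        }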
index a0e9cf6379de3eac8ac1182c3be73e8d140b4a1b..d7af1885932269eb9f4196fe1211a6d09e298b97 100644 (file)
@@ -52,9 +52,10 @@ EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
 int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
                             struct sk_buff *skb, int attrtype)
 {
-       struct nlattr *attr;
+       struct sock_fprog_kern *fprog;
        struct sk_filter *filter;
-       unsigned int len;
+       struct nlattr *attr;
+       unsigned int flen;
        int err = 0;
 
        if (!ns_capable(user_ns, CAP_NET_ADMIN)) {
@@ -63,24 +64,20 @@ int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
        }
 
        rcu_read_lock();
-
        filter = rcu_dereference(sk->sk_filter);
-       len = filter ? filter->len * sizeof(struct sock_filter) : 0;
+       if (!filter)
+               goto out;
 
-       attr = nla_reserve(skb, attrtype, len);
+       fprog = filter->orig_prog;
+       flen = sk_filter_proglen(fprog);
+
+       attr = nla_reserve(skb, attrtype, flen);
        if (attr == NULL) {
                err = -EMSGSIZE;
                goto out;
        }
 
-       if (filter) {
-               struct sock_filter *fb = (struct sock_filter *)nla_data(attr);
-               int i;
-
-               for (i = 0; i < filter->len; i++, fb++)
-                       sk_decode_filter(&filter->insns[i], fb);
-       }
-
+       memcpy(nla_data(attr), fprog->filter, flen);
 out:
        rcu_read_unlock();
        return err;
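
Because the attribute now carries the saved original program verbatim, a diag consumer can interpret the payload directly as an array of struct sock_filter. A hedged userspace-side sketch (how the attribute is retrieved is omitted; names are illustrative):

        #include <stdio.h>
        #include <linux/filter.h>

        /* payload/payload_len come from the received netlink attribute. */
        static void dump_classic_bpf(const void *payload, unsigned int payload_len)
        {
                const struct sock_filter *insns = payload;
                unsigned int i, n = payload_len / sizeof(*insns);

                for (i = 0; i < n; i++)
                        printf("code=0x%04x jt=%u jf=%u k=0x%08x\n",
                               insns[i].code, insns[i].jt, insns[i].jf, insns[i].k);
        }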
index 661b5a40ec1029016a4a2e70579c690c40110512..9ff26b3cc021b1c3e80d80f5dfff7d726a0c46e9 100644 (file)
 #include <linux/skbuff.h>
 #include <linux/export.h>
 
-static struct sock_filter ptp_filter[] = {
-       PTP_FILTER
-};
+static struct sk_filter *ptp_insns __read_mostly;
+
+unsigned int ptp_classify_raw(const struct sk_buff *skb)
+{
+       return SK_RUN_FILTER(ptp_insns, skb);
+}
+EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
 static unsigned int classify(const struct sk_buff *skb)
 {
-       if (likely(skb->dev &&
-                  skb->dev->phydev &&
+       if (likely(skb->dev && skb->dev->phydev &&
                   skb->dev->phydev->drv))
-               return sk_run_filter(skb, ptp_filter);
+               return ptp_classify_raw(skb);
        else
                return PTP_CLASS_NONE;
 }
@@ -60,11 +63,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
                if (likely(phydev->drv->txtstamp)) {
                        if (!atomic_inc_not_zero(&sk->sk_refcnt))
                                return;
+
                        clone = skb_clone(skb, GFP_ATOMIC);
                        if (!clone) {
                                sock_put(sk);
                                return;
                        }
+
                        clone->sk = sk;
                        phydev->drv->txtstamp(phydev, clone, type);
                }
@@ -89,12 +94,15 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
        }
 
        *skb_hwtstamps(skb) = *hwtstamps;
+
        serr = SKB_EXT_ERR(skb);
        memset(serr, 0, sizeof(*serr));
        serr->ee.ee_errno = ENOMSG;
        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
        skb->sk = NULL;
+
        err = sock_queue_err_skb(sk, skb);
+
        sock_put(sk);
        if (err)
                kfree_skb(skb);
@@ -135,5 +143,10 @@ EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
 
 void __init skb_timestamping_init(void)
 {
-       BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter)));
+       static struct sock_filter ptp_filter[] = { PTP_FILTER };
+       struct sock_fprog ptp_prog = {
+               .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
+       };
+
+       BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
 }
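
The same unattached-filter pattern generalizes: any code with a fixed classic BPF program can build it once at init time and run it per skb without a socket. A hedged sketch with an illustrative always-accept program (names are not from the tree):

        static struct sk_filter *example_filter __read_mostly;

        static int __init example_filter_init(void)
        {
                /* Illustrative program: accept the whole packet. */
                static struct sock_filter insns[] = {
                        BPF_STMT(BPF_RET | BPF_K, 0xffff),
                };
                struct sock_fprog prog = {
                        .len = ARRAY_SIZE(insns), .filter = insns,
                };

                return sk_unattached_filter_create(&example_filter, &prog);
        }

        static unsigned int example_classify(const struct sk_buff *skb)
        {
                return SK_RUN_FILTER(example_filter, skb);
        }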
index cac505f166d51133a32dac0e08c12358871fe6f6..e5302b7f7ca9f0f6f67954689572d6a5b09a3c74 100644 (file)
@@ -209,7 +209,7 @@ static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
        /* Address substitution (IEC62439-3 pp 26, 50): replace mac
         * address of outgoing frame with that of the outgoing slave's.
         */
-       memcpy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr);
 
        return dev_queue_xmit(skb);
 }
@@ -346,7 +346,7 @@ static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
 
        /* Payload: MacAddressA */
        hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
-       memcpy(hsr_sp->MacAddressA, hsr_dev->dev_addr, ETH_ALEN);
+       ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);
 
        dev_queue_xmit(skb);
        return;
@@ -493,7 +493,7 @@ static int check_slave_ok(struct net_device *dev)
 
 
 /* Default multicast address for HSR Supervision frames */
-static const unsigned char def_multicast_addr[ETH_ALEN] = {
+static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
        0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
 };
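
The __aligned(2) annotation matters because ether_addr_copy(), unlike memcpy(), may copy in 16-bit chunks and assumes both pointers are 2-byte aligned. A hedged one-off sketch of a destination buffer that satisfies the same constraint (the function is illustrative only):

        /* Both buffers must be 16-bit aligned for ether_addr_copy(). */
        static void example_copy_hw_addr(const struct net_device *dev)
        {
                unsigned char addr[ETH_ALEN] __aligned(2);

                ether_addr_copy(addr, dev->dev_addr); /* dev_addr is suitably aligned */
        }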
 
@@ -519,7 +519,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        hsr_priv->announce_timer.function = hsr_announce;
        hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
 
-       memcpy(hsr_priv->sup_multicast_addr, def_multicast_addr, ETH_ALEN);
+       ether_addr_copy(hsr_priv->sup_multicast_addr, def_multicast_addr);
        hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
 
 /* FIXME: should I modify the value of these?
@@ -547,7 +547,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
 
        /* Set hsr_dev's MAC address to that of mac_slave1 */
-       memcpy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+       ether_addr_copy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr);
 
        /* Set required header length */
        for (i = 0; i < HSR_MAX_SLAVE; i++) {
index 7ae0d7f6dbd0bff10516e9415050d7aa41a07da0..83e58449366a92bcc4e349241337914bca8dcd10 100644 (file)
@@ -108,8 +108,8 @@ int hsr_create_self_node(struct list_head *self_node_db,
        if (!node)
                return -ENOMEM;
 
-       memcpy(node->MacAddressA, addr_a, ETH_ALEN);
-       memcpy(node->MacAddressB, addr_b, ETH_ALEN);
+       ether_addr_copy(node->MacAddressA, addr_a);
+       ether_addr_copy(node->MacAddressB, addr_b);
 
        rcu_read_lock();
        oldnode = list_first_or_null_rcu(self_node_db,
@@ -199,7 +199,7 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
                /* Node is known, but frame was received from an unknown
                 * address. Node is PICS_SUBS capable; merge its AddrB.
                 */
-               memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+               ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
                node->AddrB_if = dev_idx;
                return node;
        }
@@ -208,8 +208,8 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
        if (!node)
                return NULL;
 
-       memcpy(node->MacAddressA, hsr_sp->MacAddressA, ETH_ALEN);
-       memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+       ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA);
+       ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
        if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
                node->AddrB_if = dev_idx;
        else
@@ -250,7 +250,7 @@ void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
        rcu_read_lock();
        node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
        if (node)
-               memcpy(ethhdr->h_source, node->MacAddressA, ETH_ALEN);
+               ether_addr_copy(ethhdr->h_source, node->MacAddressA);
        rcu_read_unlock();
 }
 
@@ -272,7 +272,7 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
        rcu_read_lock();
        node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
        if (node && (node->AddrB_if == dev_idx))
-               memcpy(ethhdr->h_dest, node->MacAddressB, ETH_ALEN);
+               ether_addr_copy(ethhdr->h_dest, node->MacAddressB);
        rcu_read_unlock();
 }
 
@@ -428,13 +428,13 @@ void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
                node = list_first_or_null_rcu(&hsr_priv->node_db,
                                                struct node_entry, mac_list);
                if (node)
-                       memcpy(addr, node->MacAddressA, ETH_ALEN);
+                       ether_addr_copy(addr, node->MacAddressA);
                return node;
        }
 
        node = _pos;
        list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
-               memcpy(addr, node->MacAddressA, ETH_ALEN);
+               ether_addr_copy(addr, node->MacAddressA);
                return node;
        }
 
@@ -462,7 +462,7 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                return -ENOENT; /* No such entry */
        }
 
-       memcpy(addr_b, node->MacAddressB, ETH_ALEN);
+       ether_addr_copy(addr_b, node->MacAddressB);
 
        tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
        if (node->time_in_stale[HSR_DEV_SLAVE_A])
index af68dd83a4e3df0ed12b18d5361d436fa3edd9f6..3fee5218a691f20c8b028e7ed2738febc7dfd0b7 100644 (file)
@@ -138,8 +138,8 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                        break;
 
                if (dev == hsr_priv->slave[0])
-                       memcpy(hsr_priv->dev->dev_addr,
-                              hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+                       ether_addr_copy(hsr_priv->dev->dev_addr,
+                                       hsr_priv->slave[0]->dev_addr);
 
                /* Make sure we recognize frames from ourselves in hsr_rcv() */
                res = hsr_create_self_node(&hsr_priv->self_node_db,
@@ -459,7 +459,7 @@ static int __init hsr_init(void)
 static void __exit hsr_exit(void)
 {
        unregister_netdevice_notifier(&hsr_nb);
-       del_timer(&prune_timer);
+       del_timer_sync(&prune_timer);
        hsr_netlink_exit();
        dev_remove_pack(&hsr_pt);
 }
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
deleted file mode 100644 (file)
index 8edfea5..0000000
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- * Copyright 2011, Siemens AG
- * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-/*
- * Based on patches from Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/* Jon's code is based on 6lowpan implementation for Contiki which is:
- * Copyright (c) 2008, Swedish Institute of Computer Science.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Institute nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <linux/bitops.h>
-#include <linux/if_arp.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <net/af_ieee802154.h>
-#include <net/ieee802154.h>
-#include <net/ieee802154_netdev.h>
-#include <net/ipv6.h>
-
-#include "6lowpan.h"
-
-static LIST_HEAD(lowpan_devices);
-
-/* private device info */
-struct lowpan_dev_info {
-       struct net_device       *real_dev; /* real WPAN device ptr */
-       struct mutex            dev_list_mtx; /* mutex for list ops */
-       unsigned short          fragment_tag;
-};
-
-struct lowpan_dev_record {
-       struct net_device *ldev;
-       struct list_head list;
-};
-
-struct lowpan_fragment {
-       struct sk_buff          *skb;           /* skb to be assembled */
-       u16                     length;         /* length to be assemled */
-       u32                     bytes_rcv;      /* bytes received */
-       u16                     tag;            /* current fragment tag */
-       struct timer_list       timer;          /* assembling timer */
-       struct list_head        list;           /* fragments list */
-};
-
-static LIST_HEAD(lowpan_fragments);
-static DEFINE_SPINLOCK(flist_lock);
-
-static inline struct
-lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
-{
-       return netdev_priv(dev);
-}
-
-static inline void lowpan_address_flip(u8 *src, u8 *dest)
-{
-       int i;
-       for (i = 0; i < IEEE802154_ADDR_LEN; i++)
-               (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
-}
-
-static int lowpan_header_create(struct sk_buff *skb,
-                          struct net_device *dev,
-                          unsigned short type, const void *_daddr,
-                          const void *_saddr, unsigned int len)
-{
-       const u8 *saddr = _saddr;
-       const u8 *daddr = _daddr;
-       struct ieee802154_addr sa, da;
-
-       /* TODO:
-        * if this package isn't ipv6 one, where should it be routed?
-        */
-       if (type != ETH_P_IPV6)
-               return 0;
-
-       if (!saddr)
-               saddr = dev->dev_addr;
-
-       raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
-       raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
-
-       lowpan_header_compress(skb, dev, type, daddr, saddr, len);
-
-       /*
-        * NOTE1: I'm still unsure about the fact that compression and WPAN
-        * header are created here and not later in the xmit. So wait for
-        * an opinion of net maintainers.
-        */
-       /*
-        * NOTE2: to be absolutely correct, we must derive PANid information
-        * from MAC subif of the 'dev' and 'real_dev' network devices, but
-        * this isn't implemented in mainline yet, so currently we assign 0xff
-        */
-       mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
-       mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
-
-       /* prepare wpan address data */
-       sa.addr_type = IEEE802154_ADDR_LONG;
-       sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-
-       memcpy(&(sa.hwaddr), saddr, 8);
-       /* intra-PAN communications */
-       da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-
-       /*
-        * if the destination address is the broadcast address, use the
-        * corresponding short address
-        */
-       if (lowpan_is_addr_broadcast(daddr)) {
-               da.addr_type = IEEE802154_ADDR_SHORT;
-               da.short_addr = IEEE802154_ADDR_BROADCAST;
-       } else {
-               da.addr_type = IEEE802154_ADDR_LONG;
-               memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);
-
-               /* request acknowledgment */
-               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
-       }
-
-       return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
-                       type, (void *)&da, (void *)&sa, skb->len);
-}
-
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                       struct net_device *dev)
-{
-       struct lowpan_dev_record *entry;
-       struct sk_buff *skb_cp;
-       int stat = NET_RX_SUCCESS;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(entry, &lowpan_devices, list)
-               if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-                       skb_cp = skb_copy(skb, GFP_ATOMIC);
-                       if (!skb_cp) {
-                               stat = -ENOMEM;
-                               break;
-                       }
-
-                       skb_cp->dev = entry->ldev;
-                       stat = netif_rx(skb_cp);
-               }
-       rcu_read_unlock();
-
-       return stat;
-}
-
-static void lowpan_fragment_timer_expired(unsigned long entry_addr)
-{
-       struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
-
-       pr_debug("timer expired for frame with tag %d\n", entry->tag);
-
-       list_del(&entry->list);
-       dev_kfree_skb(entry->skb);
-       kfree(entry);
-}
-
-static struct lowpan_fragment *
-lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
-{
-       struct lowpan_fragment *frame;
-
-       frame = kzalloc(sizeof(struct lowpan_fragment),
-                       GFP_ATOMIC);
-       if (!frame)
-               goto frame_err;
-
-       INIT_LIST_HEAD(&frame->list);
-
-       frame->length = len;
-       frame->tag = tag;
-
-       /* allocate buffer for frame assembling */
-       frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
-                                              sizeof(struct ipv6hdr));
-
-       if (!frame->skb)
-               goto skb_err;
-
-       frame->skb->priority = skb->priority;
-
-       /* reserve headroom for uncompressed ipv6 header */
-       skb_reserve(frame->skb, sizeof(struct ipv6hdr));
-       skb_put(frame->skb, frame->length);
-
-       /* copy the first control block to keep a
-        * trace of the link-layer addresses in case
-        * of a link-local compressed address
-        */
-       memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
-
-       init_timer(&frame->timer);
-       /* time out is the same as for ipv6 - 60 sec */
-       frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
-       frame->timer.data = (unsigned long)frame;
-       frame->timer.function = lowpan_fragment_timer_expired;
-
-       add_timer(&frame->timer);
-
-       list_add_tail(&frame->list, &lowpan_fragments);
-
-       return frame;
-
-skb_err:
-       kfree(frame);
-frame_err:
-       return NULL;
-}
-
-static int process_data(struct sk_buff *skb)
-{
-       u8 iphc0, iphc1;
-       const struct ieee802154_addr *_saddr, *_daddr;
-
-       raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
-       /* at least two bytes will be used for the encoding */
-       if (skb->len < 2)
-               goto drop;
-
-       if (lowpan_fetch_skb_u8(skb, &iphc0))
-               goto drop;
-
-       /* fragments assembling */
-       switch (iphc0 & LOWPAN_DISPATCH_MASK) {
-       case LOWPAN_DISPATCH_FRAG1:
-       case LOWPAN_DISPATCH_FRAGN:
-       {
-               struct lowpan_fragment *frame;
-               /* slen stores the rightmost 8 bits of the 11 bits length */
-               u8 slen, offset = 0;
-               u16 len, tag;
-               bool found = false;
-
-               if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
-                   lowpan_fetch_skb_u16(skb, &tag))  /* fragment tag */
-                       goto drop;
-
-               /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
-               len = ((iphc0 & 7) << 8) | slen;
-
-               if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
-                       pr_debug("%s received a FRAG1 packet (tag: %d, "
-                                "size of the entire IP packet: %d)",
-                                __func__, tag, len);
-               } else { /* FRAGN */
-                       if (lowpan_fetch_skb_u8(skb, &offset))
-                               goto unlock_and_drop;
-                       pr_debug("%s received a FRAGN packet (tag: %d, "
-                                "size of the entire IP packet: %d, "
-                                "offset: %d)", __func__, tag, len, offset * 8);
-               }
-
-               /*
-                * check if frame assembling with the same tag is
-                * already in progress
-                */
-               spin_lock_bh(&flist_lock);
-
-               list_for_each_entry(frame, &lowpan_fragments, list)
-                       if (frame->tag == tag) {
-                               found = true;
-                               break;
-                       }
-
-               /* alloc new frame structure */
-               if (!found) {
-                       pr_debug("%s first fragment received for tag %d, "
-                                "begin packet reassembly", __func__, tag);
-                       frame = lowpan_alloc_new_frame(skb, len, tag);
-                       if (!frame)
-                               goto unlock_and_drop;
-               }
-
-               /* if payload fits buffer, copy it */
-               if (likely((offset * 8 + skb->len) <= frame->length))
-                       skb_copy_to_linear_data_offset(frame->skb, offset * 8,
-                                                       skb->data, skb->len);
-               else
-                       goto unlock_and_drop;
-
-               frame->bytes_rcv += skb->len;
-
-               /* frame assembling complete */
-               if ((frame->bytes_rcv == frame->length) &&
-                    frame->timer.expires > jiffies) {
-                       /* if timer haven't expired - first of all delete it */
-                       del_timer_sync(&frame->timer);
-                       list_del(&frame->list);
-                       spin_unlock_bh(&flist_lock);
-
-                       pr_debug("%s successfully reassembled fragment "
-                                "(tag %d)", __func__, tag);
-
-                       dev_kfree_skb(skb);
-                       skb = frame->skb;
-                       kfree(frame);
-
-                       if (lowpan_fetch_skb_u8(skb, &iphc0))
-                               goto drop;
-
-                       break;
-               }
-               spin_unlock_bh(&flist_lock);
-
-               return kfree_skb(skb), 0;
-       }
-       default:
-               break;
-       }
-
-       if (lowpan_fetch_skb_u8(skb, &iphc1))
-               goto drop;
-
-       _saddr = &mac_cb(skb)->sa;
-       _daddr = &mac_cb(skb)->da;
-
-       return lowpan_process_data(skb, skb->dev, (u8 *)_saddr->hwaddr,
-                               _saddr->addr_type, IEEE802154_ADDR_LEN,
-                               (u8 *)_daddr->hwaddr, _daddr->addr_type,
-                               IEEE802154_ADDR_LEN, iphc0, iphc1,
-                               lowpan_give_skb_to_devices);
-
-unlock_and_drop:
-       spin_unlock_bh(&flist_lock);
-drop:
-       kfree_skb(skb);
-       return -EINVAL;
-}
-
-static int lowpan_set_address(struct net_device *dev, void *p)
-{
-       struct sockaddr *sa = p;
-
-       if (netif_running(dev))
-               return -EBUSY;
-
-       /* TODO: validate addr */
-       memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
-
-       return 0;
-}
-
-static int
-lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
-                       int mlen, int plen, int offset, int type)
-{
-       struct sk_buff *frag;
-       int hlen;
-
-       hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
-                       LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
-
-       raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
-
-       frag = netdev_alloc_skb(skb->dev,
-                               hlen + mlen + plen + IEEE802154_MFR_SIZE);
-       if (!frag)
-               return -ENOMEM;
-
-       frag->priority = skb->priority;
-
-       /* copy header, MFR and payload */
-       skb_put(frag, mlen);
-       skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
-
-       skb_put(frag, hlen);
-       skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
-
-       skb_put(frag, plen);
-       skb_copy_to_linear_data_offset(frag, mlen + hlen,
-                                      skb_network_header(skb) + offset, plen);
-
-       raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
-
-       return dev_queue_xmit(frag);
-}
-
-static int
-lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
-{
-       int  err, header_length, payload_length, tag, offset = 0;
-       u8 head[5];
-
-       header_length = skb->mac_len;
-       payload_length = skb->len - header_length;
-       tag = lowpan_dev_info(dev)->fragment_tag++;
-
-       /* first fragment header */
-       head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
-       head[1] = payload_length & 0xff;
-       head[2] = tag >> 8;
-       head[3] = tag & 0xff;
-
-       err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
-                                  0, LOWPAN_DISPATCH_FRAG1);
-
-       if (err) {
-               pr_debug("%s unable to send FRAG1 packet (tag: %d)",
-                        __func__, tag);
-               goto exit;
-       }
-
-       offset = LOWPAN_FRAG_SIZE;
-
-       /* next fragment header */
-       head[0] &= ~LOWPAN_DISPATCH_FRAG1;
-       head[0] |= LOWPAN_DISPATCH_FRAGN;
-
-       while (payload_length - offset > 0) {
-               int len = LOWPAN_FRAG_SIZE;
-
-               head[4] = offset / 8;
-
-               if (payload_length - offset < len)
-                       len = payload_length - offset;
-
-               err = lowpan_fragment_xmit(skb, head, header_length,
-                                          len, offset, LOWPAN_DISPATCH_FRAGN);
-               if (err) {
-                       pr_debug("%s unable to send a subsequent FRAGN packet "
-                                "(tag: %d, offset: %d", __func__, tag, offset);
-                       goto exit;
-               }
-
-               offset += len;
-       }
-
-exit:
-       return err;
-}
-
-static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       int err = -1;
-
-       pr_debug("package xmit\n");
-
-       skb->dev = lowpan_dev_info(dev)->real_dev;
-       if (skb->dev == NULL) {
-               pr_debug("ERROR: no real wpan device found\n");
-               goto error;
-       }
-
-       /* Send directly if less than the MTU minus the 2 checksum bytes. */
-       if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
-               err = dev_queue_xmit(skb);
-               goto out;
-       }
-
-       pr_debug("frame is too big, fragmentation is needed\n");
-       err = lowpan_skb_fragmentation(skb, dev);
-error:
-       dev_kfree_skb(skb);
-out:
-       if (err)
-               pr_debug("ERROR: xmit failed\n");
-
-       return (err < 0) ? NET_XMIT_DROP : err;
-}
-
-static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-       return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
-}
-
-static u16 lowpan_get_pan_id(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-       return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static u16 lowpan_get_short_addr(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-       return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-       return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
-static struct header_ops lowpan_header_ops = {
-       .create = lowpan_header_create,
-};
-
-static struct lock_class_key lowpan_tx_busylock;
-static struct lock_class_key lowpan_netdev_xmit_lock_key;
-
-static void lowpan_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &lowpan_netdev_xmit_lock_key);
-}
-
-
-static int lowpan_dev_init(struct net_device *dev)
-{
-       netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
-       dev->qdisc_tx_busylock = &lowpan_tx_busylock;
-       return 0;
-}
-
-static const struct net_device_ops lowpan_netdev_ops = {
-       .ndo_init               = lowpan_dev_init,
-       .ndo_start_xmit         = lowpan_xmit,
-       .ndo_set_mac_address    = lowpan_set_address,
-};
-
-static struct ieee802154_mlme_ops lowpan_mlme = {
-       .get_pan_id = lowpan_get_pan_id,
-       .get_phy = lowpan_get_phy,
-       .get_short_addr = lowpan_get_short_addr,
-       .get_dsn = lowpan_get_dsn,
-};
-
-static void lowpan_setup(struct net_device *dev)
-{
-       dev->addr_len           = IEEE802154_ADDR_LEN;
-       memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-       dev->type               = ARPHRD_IEEE802154;
-       /* Frame Control + Sequence Number + Address fields + Security Header */
-       dev->hard_header_len    = 2 + 1 + 20 + 14;
-       dev->needed_tailroom    = 2; /* FCS */
-       dev->mtu                = 1281;
-       dev->tx_queue_len       = 0;
-       dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
-       dev->watchdog_timeo     = 0;
-
-       dev->netdev_ops         = &lowpan_netdev_ops;
-       dev->header_ops         = &lowpan_header_ops;
-       dev->ml_priv            = &lowpan_mlme;
-       dev->destructor         = free_netdev;
-}
-
-static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
-{
-       if (tb[IFLA_ADDRESS]) {
-               if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
-                       return -EINVAL;
-       }
-       return 0;
-}
-
-static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
-{
-       struct sk_buff *local_skb;
-
-       if (!netif_running(dev))
-               goto drop;
-
-       if (dev->type != ARPHRD_IEEE802154)
-               goto drop;
-
-       /* check that it's our buffer */
-       if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
-               /* Copy the packet so that the IPv6 header is
-                * properly aligned.
-                */
-               local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
-                                           skb_tailroom(skb), GFP_ATOMIC);
-               if (!local_skb)
-                       goto drop;
-
-               local_skb->protocol = htons(ETH_P_IPV6);
-               local_skb->pkt_type = PACKET_HOST;
-
-               /* Pull off the 1-byte of 6lowpan header. */
-               skb_pull(local_skb, 1);
-
-               lowpan_give_skb_to_devices(local_skb, NULL);
-
-               kfree_skb(local_skb);
-               kfree_skb(skb);
-       } else {
-               switch (skb->data[0] & 0xe0) {
-               case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
-               case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
-               case LOWPAN_DISPATCH_FRAGN:     /* next fragments headers */
-                       local_skb = skb_clone(skb, GFP_ATOMIC);
-                       if (!local_skb)
-                               goto drop;
-                       process_data(local_skb);
-
-                       kfree_skb(skb);
-                       break;
-               default:
-                       break;
-               }
-       }
-
-       return NET_RX_SUCCESS;
-
-drop:
-       kfree_skb(skb);
-       return NET_RX_DROP;
-}
-
-static int lowpan_newlink(struct net *src_net, struct net_device *dev,
-                         struct nlattr *tb[], struct nlattr *data[])
-{
-       struct net_device *real_dev;
-       struct lowpan_dev_record *entry;
-
-       pr_debug("adding new link\n");
-
-       if (!tb[IFLA_LINK])
-               return -EINVAL;
-       /* find and hold real wpan device */
-       real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
-       if (!real_dev)
-               return -ENODEV;
-       if (real_dev->type != ARPHRD_IEEE802154) {
-               dev_put(real_dev);
-               return -EINVAL;
-       }
-
-       lowpan_dev_info(dev)->real_dev = real_dev;
-       lowpan_dev_info(dev)->fragment_tag = 0;
-       mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
-       if (!entry) {
-               dev_put(real_dev);
-               lowpan_dev_info(dev)->real_dev = NULL;
-               return -ENOMEM;
-       }
-
-       entry->ldev = dev;
-
-       /* Set the lowpan harware address to the wpan hardware address. */
-       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
-
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       INIT_LIST_HEAD(&entry->list);
-       list_add_tail(&entry->list, &lowpan_devices);
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       register_netdevice(dev);
-
-       return 0;
-}
-
-static void lowpan_dellink(struct net_device *dev, struct list_head *head)
-{
-       struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
-       struct net_device *real_dev = lowpan_dev->real_dev;
-       struct lowpan_dev_record *entry, *tmp;
-
-       ASSERT_RTNL();
-
-       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-               if (entry->ldev == dev) {
-                       list_del(&entry->list);
-                       kfree(entry);
-               }
-       }
-       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
-       unregister_netdevice_queue(dev, head);
-
-       dev_put(real_dev);
-}
-
-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
-       .kind           = "lowpan",
-       .priv_size      = sizeof(struct lowpan_dev_info),
-       .setup          = lowpan_setup,
-       .newlink        = lowpan_newlink,
-       .dellink        = lowpan_dellink,
-       .validate       = lowpan_validate,
-};
-
-static inline int __init lowpan_netlink_init(void)
-{
-       return rtnl_link_register(&lowpan_link_ops);
-}
-
-static inline void lowpan_netlink_fini(void)
-{
-       rtnl_link_unregister(&lowpan_link_ops);
-}
-
-static int lowpan_device_event(struct notifier_block *unused,
-                              unsigned long event, void *ptr)
-{
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       LIST_HEAD(del_list);
-       struct lowpan_dev_record *entry, *tmp;
-
-       if (dev->type != ARPHRD_IEEE802154)
-               goto out;
-
-       if (event == NETDEV_UNREGISTER) {
-               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
-                               lowpan_dellink(entry->ldev, &del_list);
-               }
-
-               unregister_netdevice_many(&del_list);
-       }
-
-out:
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block lowpan_dev_notifier = {
-       .notifier_call = lowpan_device_event,
-};
-
-static struct packet_type lowpan_packet_type = {
-       .type = __constant_htons(ETH_P_IEEE802154),
-       .func = lowpan_rcv,
-};
-
-static int __init lowpan_init_module(void)
-{
-       int err = 0;
-
-       err = lowpan_netlink_init();
-       if (err < 0)
-               goto out;
-
-       dev_add_pack(&lowpan_packet_type);
-
-       err = register_netdevice_notifier(&lowpan_dev_notifier);
-       if (err < 0) {
-               dev_remove_pack(&lowpan_packet_type);
-               lowpan_netlink_fini();
-       }
-out:
-       return err;
-}
-
-static void __exit lowpan_cleanup_module(void)
-{
-       struct lowpan_fragment *frame, *tframe;
-
-       lowpan_netlink_fini();
-
-       dev_remove_pack(&lowpan_packet_type);
-
-       unregister_netdevice_notifier(&lowpan_dev_notifier);
-
-       /* Now 6lowpan packet_type is removed, so no new fragments are
-        * expected on RX, therefore that's the time to clean incomplete
-        * fragments.
-        */
-       spin_lock_bh(&flist_lock);
-       list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
-               del_timer_sync(&frame->timer);
-               list_del(&frame->list);
-               dev_kfree_skb(frame->skb);
-               kfree(frame);
-       }
-       spin_unlock_bh(&flist_lock);
-}
-
-module_init(lowpan_init_module);
-module_exit(lowpan_cleanup_module);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_RTNL_LINK("lowpan");
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
deleted file mode 100644 (file)
index 2b835db..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright 2011, Siemens AG
- * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
- */
-
-/*
- * Based on patches from Jon Smirl <jonsmirl@gmail.com>
- * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/* Jon's code is based on 6lowpan implementation for Contiki which is:
- * Copyright (c) 2008, Swedish Institute of Computer Science.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Institute nor the names of its contributors
- *    may be used to endorse or promote products derived from this software
- *    without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef __6LOWPAN_H__
-#define __6LOWPAN_H__
-
-#define UIP_802154_SHORTADDR_LEN       2  /* compressed ipv6 address length */
-#define UIP_IPH_LEN                    40 /* ipv6 fixed header size */
-#define UIP_PROTO_UDP                  17 /* ipv6 next header value for UDP */
-#define UIP_FRAGH_LEN                  8  /* ipv6 fragment header size */
-
-/*
- * ipv6 address based on mac
- * second bit-flip (Universe/Local) is done according RFC2464
- */
-#define is_addr_mac_addr_based(a, m) \
-       ((((a)->s6_addr[8])  == (((m)[0]) ^ 0x02)) &&   \
-        (((a)->s6_addr[9])  == (m)[1]) &&              \
-        (((a)->s6_addr[10]) == (m)[2]) &&              \
-        (((a)->s6_addr[11]) == (m)[3]) &&              \
-        (((a)->s6_addr[12]) == (m)[4]) &&              \
-        (((a)->s6_addr[13]) == (m)[5]) &&              \
-        (((a)->s6_addr[14]) == (m)[6]) &&              \
-        (((a)->s6_addr[15]) == (m)[7]))
-
-/* ipv6 address is unspecified */
-#define is_addr_unspecified(a)         \
-       ((((a)->s6_addr32[0]) == 0) &&  \
-        (((a)->s6_addr32[1]) == 0) &&  \
-        (((a)->s6_addr32[2]) == 0) &&  \
-        (((a)->s6_addr32[3]) == 0))
-
-/* compare ipv6 addresses prefixes */
-#define ipaddr_prefixcmp(addr1, addr2, length) \
-       (memcmp(addr1, addr2, length >> 3) == 0)
-
-/* local link, i.e. FE80::/10 */
-#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))
-
-/*
- * check whether we can compress the IID to 16 bits,
- * it's possible for unicast adresses with first 49 bits are zero only.
- */
-#define lowpan_is_iid_16_bit_compressable(a)   \
-       ((((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr[10]) == 0) &&           \
-        (((a)->s6_addr[11]) == 0xff) &&        \
-        (((a)->s6_addr[12]) == 0xfe) &&        \
-        (((a)->s6_addr[13]) == 0))
-
-/* multicast address */
-#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
-
-/* check whether the 112-bit gid of the multicast address is mappable to: */
-
-/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
-#define lowpan_is_mcast_addr_compressable(a)   \
-       ((((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr16[5]) == 0) &&          \
-        (((a)->s6_addr16[6]) == 0) &&          \
-        (((a)->s6_addr[14])  == 0) &&          \
-        ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
-
-/* 48 bits, FFXX::00XX:XXXX:XXXX */
-#define lowpan_is_mcast_addr_compressable48(a) \
-       ((((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr[10]) == 0))
-
-/* 32 bits, FFXX::00XX:XXXX */
-#define lowpan_is_mcast_addr_compressable32(a) \
-       ((((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr16[5]) == 0) &&          \
-        (((a)->s6_addr[12]) == 0))
-
-/* 8 bits, FF02::00XX */
-#define lowpan_is_mcast_addr_compressable8(a)  \
-       ((((a)->s6_addr[1])  == 2) &&           \
-        (((a)->s6_addr16[1]) == 0) &&          \
-        (((a)->s6_addr16[2]) == 0) &&          \
-        (((a)->s6_addr16[3]) == 0) &&          \
-        (((a)->s6_addr16[4]) == 0) &&          \
-        (((a)->s6_addr16[5]) == 0) &&          \
-        (((a)->s6_addr16[6]) == 0) &&          \
-        (((a)->s6_addr[14]) == 0))
-
-#define lowpan_is_addr_broadcast(a)    \
-       ((((a)[0]) == 0xFF) &&  \
-        (((a)[1]) == 0xFF) &&  \
-        (((a)[2]) == 0xFF) &&  \
-        (((a)[3]) == 0xFF) &&  \
-        (((a)[4]) == 0xFF) &&  \
-        (((a)[5]) == 0xFF) &&  \
-        (((a)[6]) == 0xFF) &&  \
-        (((a)[7]) == 0xFF))
-
-#define LOWPAN_DISPATCH_IPV6   0x41 /* 01000001 = 65 */
-#define LOWPAN_DISPATCH_HC1    0x42 /* 01000010 = 66 */
-#define LOWPAN_DISPATCH_IPHC   0x60 /* 011xxxxx = ... */
-#define LOWPAN_DISPATCH_FRAG1  0xc0 /* 11000xxx */
-#define LOWPAN_DISPATCH_FRAGN  0xe0 /* 11100xxx */
-
-#define LOWPAN_DISPATCH_MASK   0xf8 /* 11111000 */
-
-#define LOWPAN_FRAG_TIMEOUT    (HZ * 60)       /* time-out 60 sec */
-
-#define LOWPAN_FRAG1_HEAD_SIZE 0x4
-#define LOWPAN_FRAGN_HEAD_SIZE 0x5
-
-/*
- * According IEEE802.15.4 standard:
- *   - MTU is 127 octets
- *   - maximum MHR size is 37 octets
- *   - MFR size is 2 octets
- *
- * so minimal payload size that we may guarantee is:
- *   MTU - MHR - MFR = 88 octets
- */
-#define LOWPAN_FRAG_SIZE       88
-
-/*
- * Values of fields within the IPHC encoding first byte
- * (C stands for compressed and I for inline)
- */
-#define LOWPAN_IPHC_TF         0x18
-
-#define LOWPAN_IPHC_FL_C       0x10
-#define LOWPAN_IPHC_TC_C       0x08
-#define LOWPAN_IPHC_NH_C       0x04
-#define LOWPAN_IPHC_TTL_1      0x01
-#define LOWPAN_IPHC_TTL_64     0x02
-#define LOWPAN_IPHC_TTL_255    0x03
-#define LOWPAN_IPHC_TTL_I      0x00
-
-
-/* Values of fields within the IPHC encoding second byte */
-#define LOWPAN_IPHC_CID                0x80
-
-#define LOWPAN_IPHC_ADDR_00    0x00
-#define LOWPAN_IPHC_ADDR_01    0x01
-#define LOWPAN_IPHC_ADDR_02    0x02
-#define LOWPAN_IPHC_ADDR_03    0x03
-
-#define LOWPAN_IPHC_SAC                0x40
-#define LOWPAN_IPHC_SAM                0x30
-
-#define LOWPAN_IPHC_SAM_BIT    4
-
-#define LOWPAN_IPHC_M          0x08
-#define LOWPAN_IPHC_DAC                0x04
-#define LOWPAN_IPHC_DAM_00     0x00
-#define LOWPAN_IPHC_DAM_01     0x01
-#define LOWPAN_IPHC_DAM_10     0x02
-#define LOWPAN_IPHC_DAM_11     0x03
-
-#define LOWPAN_IPHC_DAM_BIT    0
-/*
- * LOWPAN_UDP encoding (works together with IPHC)
- */
-#define LOWPAN_NHC_UDP_MASK            0xF8
-#define LOWPAN_NHC_UDP_ID              0xF0
-#define LOWPAN_NHC_UDP_CHECKSUMC       0x04
-#define LOWPAN_NHC_UDP_CHECKSUMI       0x00
-
-#define LOWPAN_NHC_UDP_4BIT_PORT       0xF0B0
-#define LOWPAN_NHC_UDP_4BIT_MASK       0xFFF0
-#define LOWPAN_NHC_UDP_8BIT_PORT       0xF000
-#define LOWPAN_NHC_UDP_8BIT_MASK       0xFF00
-
-/* values for port compression, _with checksum_ ie bit 5 set to 0 */
-#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
-#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
-                                       dest = 0xF0 + 8 bit inline */
-#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
-                                       dest = 16 bit inline */
-#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
-#define LOWPAN_NHC_UDP_CS_C    0x04 /* checksum elided */
-
-#ifdef DEBUG
-/* print data in line */
-static inline void raw_dump_inline(const char *caller, char *msg,
-                                  unsigned char *buf, int len)
-{
-       if (msg)
-               pr_debug("%s():%s: ", caller, msg);
-
-       print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, buf, len, false);
-}
-
-/* print data in a table format:
- *
- * addr: xx xx xx xx xx xx
- * addr: xx xx xx xx xx xx
- * ...
- */
-static inline void raw_dump_table(const char *caller, char *msg,
-                                 unsigned char *buf, int len)
-{
-       if (msg)
-               pr_debug("%s():%s:\n", caller, msg);
-
-       print_hex_dump_debug("\t", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false);
-}
-#else
-static inline void raw_dump_table(const char *caller, char *msg,
-                                 unsigned char *buf, int len) { }
-static inline void raw_dump_inline(const char *caller, char *msg,
-                                  unsigned char *buf, int len) { }
-#endif
-
-static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
-{
-       if (unlikely(!pskb_may_pull(skb, 1)))
-               return -EINVAL;
-
-       *val = skb->data[0];
-       skb_pull(skb, 1);
-
-       return 0;
-}
-
-static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
-{
-       if (unlikely(!pskb_may_pull(skb, 2)))
-               return -EINVAL;
-
-       *val = (skb->data[0] << 8) | skb->data[1];
-       skb_pull(skb, 2);
-
-       return 0;
-}
-
-static inline bool lowpan_fetch_skb(struct sk_buff *skb,
-               void *data, const unsigned int len)
-{
-       if (unlikely(!pskb_may_pull(skb, len)))
-               return true;
-
-       skb_copy_from_linear_data(skb, data, len);
-       skb_pull(skb, len);
-
-       return false;
-}
-
-static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
-                                      const size_t len)
-{
-       memcpy(*hc_ptr, data, len);
-       *hc_ptr += len;
-}
-
-typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
-
-int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
-               const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
-               const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
-               u8 iphc0, u8 iphc1, skb_delivery_cb skb_deliver);
-int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-                       unsigned short type, const void *_daddr,
-                       const void *_saddr, unsigned int len);
-
-#endif /* __6LOWPAN_H__ */
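These dispatch, fragmentation and IPHC definitions move into the shared <net/6lowpan.h> header that is included below in place of the local "6lowpan.h". As a rough sketch of how the dispatch values and LOWPAN_DISPATCH_MASK are combined when classifying the first byte of a 6LoWPAN payload (the helper name is hypothetical and the code is not part of the patch):

static const char *lowpan_dispatch_name(u8 dispatch)
{
        /* 01000001: an uncompressed IPv6 header follows */
        if (dispatch == LOWPAN_DISPATCH_IPV6)
                return "uncompressed IPv6";
        /* 011xxxxx: IPHC-compressed IPv6 header */
        if ((dispatch & 0xe0) == LOWPAN_DISPATCH_IPHC)
                return "IPHC-compressed IPv6";
        /* 11000xxx and 11100xxx: RFC 4944 fragmentation headers */
        if ((dispatch & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
                return "first fragment";
        if ((dispatch & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAGN)
                return "subsequent fragment";
        return "unknown dispatch";
}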
index 860aa2d445bae361d5d588a6c1a4e41310d0d629..211b5686d719679242d6a7b5e0cd65547ae37cd3 100644 (file)
 #include <linux/if_arp.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <net/6lowpan.h>
 #include <net/ipv6.h>
 #include <net/af_ieee802154.h>
 
-#include "6lowpan.h"
-
 /*
  * Uncompress address function for source and
  * destination address (non-multicast).
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
new file mode 100644 (file)
index 0000000..0f5a69e
--- /dev/null
@@ -0,0 +1,674 @@
+/* Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/6lowpan.h>
+#include <net/ipv6.h>
+
+#include "reassembly.h"
+
+static LIST_HEAD(lowpan_devices);
+
+/* private device info */
+struct lowpan_dev_info {
+       struct net_device       *real_dev; /* real WPAN device ptr */
+       struct mutex            dev_list_mtx; /* mutex for list ops */
+       __be16                  fragment_tag;
+};
+
+struct lowpan_dev_record {
+       struct net_device *ldev;
+       struct list_head list;
+};
+
+static inline struct
+lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+{
+       return netdev_priv(dev);
+}
+
+static inline void lowpan_address_flip(u8 *src, u8 *dest)
+{
+       int i;
+       for (i = 0; i < IEEE802154_ADDR_LEN; i++)
+               (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
+}
+
+static int lowpan_header_create(struct sk_buff *skb,
+                          struct net_device *dev,
+                          unsigned short type, const void *_daddr,
+                          const void *_saddr, unsigned int len)
+{
+       const u8 *saddr = _saddr;
+       const u8 *daddr = _daddr;
+       struct ieee802154_addr sa, da;
+
+       /* TODO:
+        * if this packet isn't an IPv6 one, where should it be routed?
+        */
+       if (type != ETH_P_IPV6)
+               return 0;
+
+       if (!saddr)
+               saddr = dev->dev_addr;
+
+       raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
+       raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
+
+       lowpan_header_compress(skb, dev, type, daddr, saddr, len);
+
+       /* NOTE1: it is still unclear whether compression and the WPAN header
+        * should be created here rather than later in xmit, so this awaits an
+        * opinion from the net maintainers.
+        */
+       /* NOTE2: to be absolutely correct, we must derive the PAN id from the
+        * MAC subif of the 'dev' and 'real_dev' network devices, but this
+        * isn't implemented in mainline yet, so currently we assign 0xff.
+        */
+       mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+       mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+
+       /* prepare wpan address data */
+       sa.mode = IEEE802154_ADDR_LONG;
+       sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
+
+       /* intra-PAN communications */
+       da.pan_id = sa.pan_id;
+
+       /* if the destination address is the broadcast address, use the
+        * corresponding short address
+        */
+       if (lowpan_is_addr_broadcast(daddr)) {
+               da.mode = IEEE802154_ADDR_SHORT;
+               da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+       } else {
+               da.mode = IEEE802154_ADDR_LONG;
+               da.extended_addr = ieee802154_devaddr_from_raw(daddr);
+
+               /* request acknowledgment */
+               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
+       }
+
+       return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+                       type, (void *)&da, (void *)&sa, 0);
+}
+
+static int lowpan_give_skb_to_devices(struct sk_buff *skb,
+                                       struct net_device *dev)
+{
+       struct lowpan_dev_record *entry;
+       struct sk_buff *skb_cp;
+       int stat = NET_RX_SUCCESS;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(entry, &lowpan_devices, list)
+               if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
+                       skb_cp = skb_copy(skb, GFP_ATOMIC);
+                       if (!skb_cp) {
+                               stat = -ENOMEM;
+                               break;
+                       }
+
+                       skb_cp->dev = entry->ldev;
+                       stat = netif_rx(skb_cp);
+               }
+       rcu_read_unlock();
+
+       return stat;
+}
+
+static int process_data(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+{
+       u8 iphc0, iphc1;
+       struct ieee802154_addr_sa sa, da;
+       void *sap, *dap;
+
+       raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
+       /* at least two bytes will be used for the encoding */
+       if (skb->len < 2)
+               goto drop;
+
+       if (lowpan_fetch_skb_u8(skb, &iphc0))
+               goto drop;
+
+       if (lowpan_fetch_skb_u8(skb, &iphc1))
+               goto drop;
+
+       ieee802154_addr_to_sa(&sa, &hdr->source);
+       ieee802154_addr_to_sa(&da, &hdr->dest);
+
+       if (sa.addr_type == IEEE802154_ADDR_SHORT)
+               sap = &sa.short_addr;
+       else
+               sap = &sa.hwaddr;
+
+       if (da.addr_type == IEEE802154_ADDR_SHORT)
+               dap = &da.short_addr;
+       else
+               dap = &da.hwaddr;
+
+       return lowpan_process_data(skb, skb->dev, sap, sa.addr_type,
+                                  IEEE802154_ADDR_LEN, dap, da.addr_type,
+                                  IEEE802154_ADDR_LEN, iphc0, iphc1,
+                                  lowpan_give_skb_to_devices);
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static int lowpan_set_address(struct net_device *dev, void *p)
+{
+       struct sockaddr *sa = p;
+
+       if (netif_running(dev))
+               return -EBUSY;
+
+       /* TODO: validate addr */
+       memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+       return 0;
+}
+
+static int
+lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
+                    int mlen, int plen, int offset, int type)
+{
+       struct sk_buff *frag;
+       int hlen;
+
+       hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
+                       LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
+
+       raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
+
+       frag = netdev_alloc_skb(skb->dev,
+                               hlen + mlen + plen + IEEE802154_MFR_SIZE);
+       if (!frag)
+               return -ENOMEM;
+
+       frag->priority = skb->priority;
+
+       /* copy the MAC header, the fragment header and the payload */
+       skb_put(frag, mlen);
+       skb_copy_to_linear_data(frag, skb_mac_header(skb), mlen);
+
+       skb_put(frag, hlen);
+       skb_copy_to_linear_data_offset(frag, mlen, head, hlen);
+
+       skb_put(frag, plen);
+       skb_copy_to_linear_data_offset(frag, mlen + hlen,
+                                      skb_network_header(skb) + offset, plen);
+
+       raw_dump_table(__func__, " raw fragment dump", frag->data, frag->len);
+
+       return dev_queue_xmit(frag);
+}
+
+static int
+lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
+{
+       int err;
+       u16 dgram_offset, dgram_size, payload_length, header_length,
+           lowpan_size, frag_plen, offset;
+       __be16 tag;
+       u8 head[5];
+
+       header_length = skb->mac_len;
+       payload_length = skb->len - header_length;
+       tag = lowpan_dev_info(dev)->fragment_tag++;
+       lowpan_size = skb_network_header_len(skb);
+       dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
+                    header_length;
+
+       /* first fragment header */
+       head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
+       head[1] = dgram_size & 0xff;
+       memcpy(head + 2, &tag, sizeof(tag));
+
+       /* calculate the largest payload length (a multiple of 8) for the
+        * first fragment that still fits into an IEEE802154_MTU
+        */
+       frag_plen = round_down(IEEE802154_MTU - header_length -
+                              LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
+                              IEEE802154_MFR_SIZE, 8);
+
+       err = lowpan_fragment_xmit(skb, head, header_length,
+                                  frag_plen + lowpan_size, 0,
+                                  LOWPAN_DISPATCH_FRAG1);
+       if (err) {
+               pr_debug("%s unable to send FRAG1 packet (tag: %d)\n",
+                        __func__, tag);
+               goto exit;
+       }
+
+       offset = lowpan_size + frag_plen;
+       dgram_offset += frag_plen;
+
+       /* next fragment header */
+       head[0] &= ~LOWPAN_DISPATCH_FRAG1;
+       head[0] |= LOWPAN_DISPATCH_FRAGN;
+
+       frag_plen = round_down(IEEE802154_MTU - header_length -
+                              LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);
+
+       while (payload_length - offset > 0) {
+               int len = frag_plen;
+
+               head[4] = dgram_offset >> 3;
+
+               if (payload_length - offset < len)
+                       len = payload_length - offset;
+
+               err = lowpan_fragment_xmit(skb, head, header_length, len,
+                                          offset, LOWPAN_DISPATCH_FRAGN);
+               if (err) {
+                       pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
+                                __func__, tag, offset);
+                       goto exit;
+               }
+
+               offset += len;
+               dgram_offset += len;
+       }
+
+exit:
+       return err;
+}
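The 4- and 5-byte fragment headers built above follow RFC 4944: a dispatch pattern plus an 11-bit uncompressed datagram size, a 16-bit tag, and, for FRAGN only, the datagram offset in 8-octet units. A minimal sketch of the FRAGN packing, mirroring the code above (the helper name is hypothetical and the code is not part of the patch):

static void lowpan_fragn_hdr_sketch(u8 head[5], u16 dgram_size, __be16 tag,
                                    u16 dgram_offset)
{
        /* 11100xxx dispatch plus the top 3 bits of the datagram size */
        head[0] = LOWPAN_DISPATCH_FRAGN | ((dgram_size >> 8) & 0x7);
        head[1] = dgram_size & 0xff;
        /* the tag is already big endian (__be16), so it is copied as-is */
        memcpy(head + 2, &tag, sizeof(tag));
        /* the offset is carried in units of 8 octets */
        head[4] = dgram_offset >> 3;
}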
+
+static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       int err = -1;
+
+       pr_debug("package xmit\n");
+
+       skb->dev = lowpan_dev_info(dev)->real_dev;
+       if (skb->dev == NULL) {
+               pr_debug("ERROR: no real wpan device found\n");
+               goto error;
+       }
+
+       /* Send directly if it fits within the MTU minus the 2 checksum bytes. */
+       if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
+               err = dev_queue_xmit(skb);
+               goto out;
+       }
+
+       pr_debug("frame is too big, fragmentation is needed\n");
+       err = lowpan_skb_fragmentation(skb, dev);
+error:
+       dev_kfree_skb(skb);
+out:
+       if (err)
+               pr_debug("ERROR: xmit failed\n");
+
+       return (err < 0) ? NET_XMIT_DROP : err;
+}
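In other words, a frame is handed straight to the real WPAN device only while skb->len fits within IEEE802154_MTU (127 octets) minus IEEE802154_MFR_SIZE (the 2-octet FCS), i.e. at most 125 octets of MAC header plus payload; anything larger is split up by lowpan_skb_fragmentation() above.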
+
+static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+       return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
+}
+
+static __le16 lowpan_get_pan_id(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+       return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
+}
+
+static __le16 lowpan_get_short_addr(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+       return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
+}
+
+static u8 lowpan_get_dsn(const struct net_device *dev)
+{
+       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+       return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
+}
+
+static struct header_ops lowpan_header_ops = {
+       .create = lowpan_header_create,
+};
+
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+                                        struct netdev_queue *txq,
+                                        void *_unused)
+{
+       lockdep_set_class(&txq->_xmit_lock,
+                         &lowpan_netdev_xmit_lock_key);
+}
+
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+       netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+       dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+       return 0;
+}
+
+static const struct net_device_ops lowpan_netdev_ops = {
+       .ndo_init               = lowpan_dev_init,
+       .ndo_start_xmit         = lowpan_xmit,
+       .ndo_set_mac_address    = lowpan_set_address,
+};
+
+static struct ieee802154_mlme_ops lowpan_mlme = {
+       .get_pan_id = lowpan_get_pan_id,
+       .get_phy = lowpan_get_phy,
+       .get_short_addr = lowpan_get_short_addr,
+       .get_dsn = lowpan_get_dsn,
+};
+
+static void lowpan_setup(struct net_device *dev)
+{
+       dev->addr_len           = IEEE802154_ADDR_LEN;
+       memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+       dev->type               = ARPHRD_IEEE802154;
+       /* Frame Control + Sequence Number + Address fields + Security Header */
+       dev->hard_header_len    = 2 + 1 + 20 + 14;
+       dev->needed_tailroom    = 2; /* FCS */
+       dev->mtu                = 1281;
+       dev->tx_queue_len       = 0;
+       dev->flags              = IFF_BROADCAST | IFF_MULTICAST;
+       dev->watchdog_timeo     = 0;
+
+       dev->netdev_ops         = &lowpan_netdev_ops;
+       dev->header_ops         = &lowpan_header_ops;
+       dev->ml_priv            = &lowpan_mlme;
+       dev->destructor         = free_netdev;
+}
+
+static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
+                       return -EINVAL;
+       }
+       return 0;
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
+       struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct ieee802154_hdr hdr;
+       int ret;
+
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               goto drop;
+
+       if (!netif_running(dev))
+               goto drop_skb;
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto drop_skb;
+
+       if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+               goto drop_skb;
+
+       /* check that it's our buffer */
+       if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+               skb->protocol = htons(ETH_P_IPV6);
+               skb->pkt_type = PACKET_HOST;
+
+               /* Pull off the 1-byte 6lowpan dispatch header. */
+               skb_pull(skb, 1);
+
+               ret = lowpan_give_skb_to_devices(skb, NULL);
+               if (ret == NET_RX_DROP)
+                       goto drop;
+       } else {
+               switch (skb->data[0] & 0xe0) {
+               case LOWPAN_DISPATCH_IPHC:      /* ipv6 datagram */
+                       ret = process_data(skb, &hdr);
+                       if (ret == NET_RX_DROP)
+                               goto drop;
+                       break;
+               case LOWPAN_DISPATCH_FRAG1:     /* first fragment header */
+                       ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
+                       if (ret == 1) {
+                               ret = process_data(skb, &hdr);
+                               if (ret == NET_RX_DROP)
+                                       goto drop;
+                       }
+                       break;
+               case LOWPAN_DISPATCH_FRAGN:     /* subsequent fragment header */
+                       ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
+                       if (ret == 1) {
+                               ret = process_data(skb, &hdr);
+                               if (ret == NET_RX_DROP)
+                                       goto drop;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return NET_RX_SUCCESS;
+drop_skb:
+       kfree_skb(skb);
+drop:
+       return NET_RX_DROP;
+}
+
+static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_device *real_dev;
+       struct lowpan_dev_record *entry;
+
+       pr_debug("adding new link\n");
+
+       if (!tb[IFLA_LINK])
+               return -EINVAL;
+       /* find and hold real wpan device */
+       real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+       if (!real_dev)
+               return -ENODEV;
+       if (real_dev->type != ARPHRD_IEEE802154) {
+               dev_put(real_dev);
+               return -EINVAL;
+       }
+
+       lowpan_dev_info(dev)->real_dev = real_dev;
+       mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               dev_put(real_dev);
+               lowpan_dev_info(dev)->real_dev = NULL;
+               return -ENOMEM;
+       }
+
+       entry->ldev = dev;
+
+       /* Set the lowpan hardware address to the wpan hardware address. */
+       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
+       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+       INIT_LIST_HEAD(&entry->list);
+       list_add_tail(&entry->list, &lowpan_devices);
+       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       register_netdevice(dev);
+
+       return 0;
+}
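For context (not part of the patch): the "lowpan" rtnl kind registered below is what userspace targets when stacking a 6LoWPAN interface on a WPAN device, for example with iproute2 as "ip link add link wpan0 name lowpan0 type lowpan"; the interface names here are illustrative.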
+
+static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
+       struct net_device *real_dev = lowpan_dev->real_dev;
+       struct lowpan_dev_record *entry, *tmp;
+
+       ASSERT_RTNL();
+
+       mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+       list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+               if (entry->ldev == dev) {
+                       list_del(&entry->list);
+                       kfree(entry);
+               }
+       }
+       mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
+
+       unregister_netdevice_queue(dev, head);
+
+       dev_put(real_dev);
+}
+
+static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
+       .kind           = "lowpan",
+       .priv_size      = sizeof(struct lowpan_dev_info),
+       .setup          = lowpan_setup,
+       .newlink        = lowpan_newlink,
+       .dellink        = lowpan_dellink,
+       .validate       = lowpan_validate,
+};
+
+static inline int __init lowpan_netlink_init(void)
+{
+       return rtnl_link_register(&lowpan_link_ops);
+}
+
+static inline void lowpan_netlink_fini(void)
+{
+       rtnl_link_unregister(&lowpan_link_ops);
+}
+
+static int lowpan_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       LIST_HEAD(del_list);
+       struct lowpan_dev_record *entry, *tmp;
+
+       if (dev->type != ARPHRD_IEEE802154)
+               goto out;
+
+       if (event == NETDEV_UNREGISTER) {
+               list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+                       if (lowpan_dev_info(entry->ldev)->real_dev == dev)
+                               lowpan_dellink(entry->ldev, &del_list);
+               }
+
+               unregister_netdevice_many(&del_list);
+       }
+
+out:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block lowpan_dev_notifier = {
+       .notifier_call = lowpan_device_event,
+};
+
+static struct packet_type lowpan_packet_type = {
+       .type = htons(ETH_P_IEEE802154),
+       .func = lowpan_rcv,
+};
+
+static int __init lowpan_init_module(void)
+{
+       int err = 0;
+
+       err = lowpan_net_frag_init();
+       if (err < 0)
+               goto out;
+
+       err = lowpan_netlink_init();
+       if (err < 0)
+               goto out_frag;
+
+       dev_add_pack(&lowpan_packet_type);
+
+       err = register_netdevice_notifier(&lowpan_dev_notifier);
+       if (err < 0)
+               goto out_pack;
+
+       return 0;
+
+out_pack:
+       dev_remove_pack(&lowpan_packet_type);
+       lowpan_netlink_fini();
+out_frag:
+       lowpan_net_frag_exit();
+out:
+       return err;
+}
+
+static void __exit lowpan_cleanup_module(void)
+{
+       lowpan_netlink_fini();
+
+       dev_remove_pack(&lowpan_packet_type);
+
+       lowpan_net_frag_exit();
+
+       unregister_netdevice_notifier(&lowpan_dev_notifier);
+}
+
+module_init(lowpan_init_module);
+module_exit(lowpan_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("lowpan");
index 9c9879d5ea64e7e3e6fe04e1de3675d3ed3c256c..8af1330b3137b0282d316190770d77150f336d56 100644 (file)
@@ -15,7 +15,7 @@ config IEEE802154_6LOWPAN
        depends on IEEE802154 && IPV6
        select 6LOWPAN_IPHC
        ---help---
-       IPv6 compression over IEEE 802.15.4.
+         IPv6 compression over IEEE 802.15.4.
 
 config 6LOWPAN_IPHC
        tristate
index e8f05885ced6806f1139a053ee8ea4fa1b228823..bf1b51497a41048442640cf02d8de1f62c18ef6a 100644 (file)
@@ -2,5 +2,9 @@ obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
 obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
 obj-$(CONFIG_6LOWPAN_IPHC) += 6lowpan_iphc.o
 
-ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
+6lowpan-y := 6lowpan_rtnl.o reassembly.o
+ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o \
+                header_ops.o
 af_802154-y := af_ieee802154.o raw.o dgram.o
+
+ccflags-y += -D__CHECK_ENDIAN__
index b1ec5253752217f0f411383b9a61ad710ab74816..8330a09bfc95e6877351029cff36b542af11c66f 100644 (file)
 #define AF802154_H
 
 struct sk_buff;
-struct net_devce;
+struct net_device;
+struct ieee802154_addr;
 extern struct proto ieee802154_raw_prot;
 extern struct proto ieee802154_dgram_prot;
 void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb);
 int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb);
 struct net_device *ieee802154_get_dev(struct net *net,
-               struct ieee802154_addr *addr);
+                                     const struct ieee802154_addr *addr);
 
 #endif
index 40e606f3788f11f5dcdeb59420ff18a4ec24eab1..351d9a94ec2faa429612d6b7da26b09b63e49a25 100644 (file)
 /*
  * Utility function for families
  */
-struct net_device *ieee802154_get_dev(struct net *net,
-               struct ieee802154_addr *addr)
+struct net_device*
+ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
 {
        struct net_device *dev = NULL;
        struct net_device *tmp;
-       u16 pan_id, short_addr;
+       __le16 pan_id, short_addr;
+       u8 hwaddr[IEEE802154_ADDR_LEN];
 
-       switch (addr->addr_type) {
+       switch (addr->mode) {
        case IEEE802154_ADDR_LONG:
+               ieee802154_devaddr_to_raw(hwaddr, addr->extended_addr);
                rcu_read_lock();
-               dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr);
+               dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, hwaddr);
                if (dev)
                        dev_hold(dev);
                rcu_read_unlock();
                break;
        case IEEE802154_ADDR_SHORT:
-               if (addr->pan_id == 0xffff ||
-                   addr->short_addr == IEEE802154_ADDR_UNDEF ||
-                   addr->short_addr == 0xffff)
+               if (addr->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST) ||
+                   addr->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
+                   addr->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST))
                        break;
 
                rtnl_lock();
@@ -86,7 +88,7 @@ struct net_device *ieee802154_get_dev(struct net *net,
                break;
        default:
                pr_warning("Unsupported ieee802154 address type: %d\n",
-                               addr->addr_type);
+                               addr->mode);
                break;
        }
 
@@ -326,7 +328,7 @@ drop:
 
 
 static struct packet_type ieee802154_packet_type = {
-       .type = __constant_htons(ETH_P_IEEE802154),
+       .type = htons(ETH_P_IEEE802154),
        .func = ieee802154_rcv,
 };
 
index 1846c1fe0d06a1f788c04b977b11f8eb70f84ce8..786437bc0c08531785d3f5fa1deb5bff8efe9e62 100644 (file)
@@ -73,10 +73,10 @@ static int dgram_init(struct sock *sk)
 {
        struct dgram_sock *ro = dgram_sk(sk);
 
-       ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
-       ro->dst_addr.pan_id = 0xffff;
+       ro->dst_addr.mode = IEEE802154_ADDR_LONG;
+       ro->dst_addr.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
        ro->want_ack = 1;
-       memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
+       memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
        return 0;
 }
 
@@ -88,6 +88,7 @@ static void dgram_close(struct sock *sk, long timeout)
 static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
 {
        struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
+       struct ieee802154_addr haddr;
        struct dgram_sock *ro = dgram_sk(sk);
        int err = -EINVAL;
        struct net_device *dev;
@@ -102,7 +103,8 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
        if (addr->family != AF_IEEE802154)
                goto out;
 
-       dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
+       ieee802154_addr_from_sa(&haddr, &addr->addr);
+       dev = ieee802154_get_dev(sock_net(sk), &haddr);
        if (!dev) {
                err = -ENODEV;
                goto out;
@@ -113,7 +115,7 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
                goto out_put;
        }
 
-       memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr));
+       ro->src_addr = haddr;
 
        ro->bound = 1;
        err = 0;
@@ -149,8 +151,7 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
                         * of this packet since that is all
                         * that will be read.
                         */
-                       /* FIXME: parse the header for more correct value */
-                       amount = skb->len - (3+8+8);
+                       amount = skb->len - ieee802154_hdr_length(skb);
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
                return put_user(amount, (int __user *)arg);
@@ -181,7 +182,7 @@ static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
                goto out;
        }
 
-       memcpy(&ro->dst_addr, &addr->addr, sizeof(struct ieee802154_addr));
+       ieee802154_addr_from_sa(&ro->dst_addr, &addr->addr);
 
 out:
        release_sock(sk);
@@ -194,8 +195,8 @@ static int dgram_disconnect(struct sock *sk, int flags)
 
        lock_sock(sk);
 
-       ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
-       memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
+       ro->dst_addr.mode = IEEE802154_ADDR_LONG;
+       memset(&ro->dst_addr.extended_addr, 0xff, IEEE802154_ADDR_LEN);
 
        release_sock(sk);
 
@@ -232,7 +233,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 
        if (size > mtu) {
                pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
+               err = -EMSGSIZE;
                goto out_dev;
        }
 
@@ -312,7 +313,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
 
        if (saddr) {
                saddr->family = AF_IEEE802154;
-               saddr->addr = mac_cb(skb)->sa;
+               ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
                *addr_len = sizeof(*saddr);
        }
 
@@ -328,6 +329,10 @@ out:
 
 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return NET_RX_DROP;
+
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
@@ -336,40 +341,43 @@ static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
        return NET_RX_SUCCESS;
 }
 
-static inline int ieee802154_match_sock(u8 *hw_addr, u16 pan_id,
-               u16 short_addr, struct dgram_sock *ro)
+static inline bool
+ieee802154_match_sock(__le64 hw_addr, __le16 pan_id, __le16 short_addr,
+                     struct dgram_sock *ro)
 {
        if (!ro->bound)
-               return 1;
+               return true;
 
-       if (ro->src_addr.addr_type == IEEE802154_ADDR_LONG &&
-           !memcmp(ro->src_addr.hwaddr, hw_addr, IEEE802154_ADDR_LEN))
-               return 1;
+       if (ro->src_addr.mode == IEEE802154_ADDR_LONG &&
+           hw_addr == ro->src_addr.extended_addr)
+               return true;
 
-       if (ro->src_addr.addr_type == IEEE802154_ADDR_SHORT &&
-                    pan_id == ro->src_addr.pan_id &&
-                    short_addr == ro->src_addr.short_addr)
-               return 1;
+       if (ro->src_addr.mode == IEEE802154_ADDR_SHORT &&
+           pan_id == ro->src_addr.pan_id &&
+           short_addr == ro->src_addr.short_addr)
+               return true;
 
-       return 0;
+       return false;
 }
 
 int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 {
        struct sock *sk, *prev = NULL;
        int ret = NET_RX_SUCCESS;
-       u16 pan_id, short_addr;
+       __le16 pan_id, short_addr;
+       __le64 hw_addr;
 
        /* Data frame processing */
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
        pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
        short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
+       hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
 
        read_lock(&dgram_lock);
        sk_for_each(sk, &dgram_head) {
-               if (ieee802154_match_sock(dev->dev_addr, pan_id, short_addr,
-                                       dgram_sk(sk))) {
+               if (ieee802154_match_sock(hw_addr, pan_id, short_addr,
+                                         dgram_sk(sk))) {
                        if (prev) {
                                struct sk_buff *clone;
                                clone = skb_clone(skb, GFP_ATOMIC);
diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c
new file mode 100644 (file)
index 0000000..bed42a4
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2014 Fraunhofer ITWM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Phoebe Buckheister <phoebe.buckheister@itwm.fraunhofer.de>
+ */
+
+#include <net/mac802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
+
+static int
+ieee802154_hdr_push_addr(u8 *buf, const struct ieee802154_addr *addr,
+                        bool omit_pan)
+{
+       int pos = 0;
+
+       if (addr->mode == IEEE802154_ADDR_NONE)
+               return 0;
+
+       if (!omit_pan) {
+               memcpy(buf + pos, &addr->pan_id, 2);
+               pos += 2;
+       }
+
+       switch (addr->mode) {
+       case IEEE802154_ADDR_SHORT:
+               memcpy(buf + pos, &addr->short_addr, 2);
+               pos += 2;
+               break;
+
+       case IEEE802154_ADDR_LONG:
+               memcpy(buf + pos, &addr->extended_addr, IEEE802154_ADDR_LEN);
+               pos += IEEE802154_ADDR_LEN;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return pos;
+}
+
+static int
+ieee802154_hdr_push_sechdr(u8 *buf, const struct ieee802154_sechdr *hdr)
+{
+       int pos = 5;
+
+       memcpy(buf, hdr, 1);
+       memcpy(buf + 1, &hdr->frame_counter, 4);
+
+       switch (hdr->key_id_mode) {
+       case IEEE802154_SCF_KEY_IMPLICIT:
+               return pos;
+
+       case IEEE802154_SCF_KEY_INDEX:
+               break;
+
+       case IEEE802154_SCF_KEY_SHORT_INDEX:
+               memcpy(buf + pos, &hdr->short_src, 4);
+               pos += 4;
+               break;
+
+       case IEEE802154_SCF_KEY_HW_INDEX:
+               memcpy(buf + pos, &hdr->extended_src, IEEE802154_ADDR_LEN);
+               pos += IEEE802154_ADDR_LEN;
+               break;
+       }
+
+       buf[pos++] = hdr->key_id;
+
+       return pos;
+}
+
+int
+ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+{
+       u8 buf[MAC802154_FRAME_HARD_HEADER_LEN];
+       int pos = 2;
+       int rc;
+       struct ieee802154_hdr_fc fc = hdr->fc;
+
+       buf[pos++] = hdr->seq;
+
+       fc.dest_addr_mode = hdr->dest.mode;
+
+       rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false);
+       if (rc < 0)
+               return -EINVAL;
+       pos += rc;
+
+       fc.source_addr_mode = hdr->source.mode;
+
+       if (hdr->source.pan_id == hdr->dest.pan_id &&
+           hdr->dest.mode != IEEE802154_ADDR_NONE)
+               fc.intra_pan = true;
+
+       rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan);
+       if (rc < 0)
+               return -EINVAL;
+       pos += rc;
+
+       if (fc.security_enabled) {
+               fc.version = 1;
+
+               rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec);
+               if (rc < 0)
+                       return -EINVAL;
+
+               pos += rc;
+       }
+
+       memcpy(buf, &fc, 2);
+
+       memcpy(skb_push(skb, pos), buf, pos);
+
+       return pos;
+}
+EXPORT_SYMBOL_GPL(ieee802154_hdr_push);
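A minimal caller sketch for ieee802154_hdr_push(), assuming an intra-PAN data frame with long addresses on both sides. The struct fields follow those used above; hdr.fc.type and the helper itself are assumptions for illustration, not part of the patch:

static int push_data_hdr_sketch(struct sk_buff *skb, __le16 pan_id,
                                __le64 src, __le64 dst, u8 seq)
{
        struct ieee802154_hdr hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.fc.type = IEEE802154_FC_TYPE_DATA; /* assumed fc field name */
        hdr.seq = seq;
        hdr.dest.mode = IEEE802154_ADDR_LONG;
        hdr.dest.pan_id = pan_id;
        hdr.dest.extended_addr = dst;
        hdr.source.mode = IEEE802154_ADDR_LONG;
        hdr.source.pan_id = pan_id;
        hdr.source.extended_addr = src;

        /* returns the number of header octets pushed, or -EINVAL */
        return ieee802154_hdr_push(skb, &hdr);
}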
+
+static int
+ieee802154_hdr_get_addr(const u8 *buf, int mode, bool omit_pan,
+                       struct ieee802154_addr *addr)
+{
+       int pos = 0;
+
+       addr->mode = mode;
+
+       if (mode == IEEE802154_ADDR_NONE)
+               return 0;
+
+       if (!omit_pan) {
+               memcpy(&addr->pan_id, buf + pos, 2);
+               pos += 2;
+       }
+
+       if (mode == IEEE802154_ADDR_SHORT) {
+               memcpy(&addr->short_addr, buf + pos, 2);
+               return pos + 2;
+       } else {
+               memcpy(&addr->extended_addr, buf + pos, IEEE802154_ADDR_LEN);
+               return pos + IEEE802154_ADDR_LEN;
+       }
+}
+
+static int ieee802154_hdr_addr_len(int mode, bool omit_pan)
+{
+       int pan_len = omit_pan ? 0 : 2;
+
+       switch (mode) {
+       case IEEE802154_ADDR_NONE: return 0;
+       case IEEE802154_ADDR_SHORT: return 2 + pan_len;
+       case IEEE802154_ADDR_LONG: return IEEE802154_ADDR_LEN + pan_len;
+       default: return -EINVAL;
+       }
+}
+
+static int
+ieee802154_hdr_get_sechdr(const u8 *buf, struct ieee802154_sechdr *hdr)
+{
+       int pos = 5;
+
+       memcpy(hdr, buf, 1);
+       memcpy(&hdr->frame_counter, buf + 1, 4);
+
+       switch (hdr->key_id_mode) {
+       case IEEE802154_SCF_KEY_IMPLICIT:
+               return pos;
+
+       case IEEE802154_SCF_KEY_INDEX:
+               break;
+
+       case IEEE802154_SCF_KEY_SHORT_INDEX:
+               memcpy(&hdr->short_src, buf + pos, 4);
+               pos += 4;
+               break;
+
+       case IEEE802154_SCF_KEY_HW_INDEX:
+               memcpy(&hdr->extended_src, buf + pos, IEEE802154_ADDR_LEN);
+               pos += IEEE802154_ADDR_LEN;
+               break;
+       }
+
+       hdr->key_id = buf[pos++];
+
+       return pos;
+}
+
+static int ieee802154_hdr_sechdr_len(u8 sc)
+{
+       switch (IEEE802154_SCF_KEY_ID_MODE(sc)) {
+       case IEEE802154_SCF_KEY_IMPLICIT: return 5;
+       case IEEE802154_SCF_KEY_INDEX: return 6;
+       case IEEE802154_SCF_KEY_SHORT_INDEX: return 10;
+       case IEEE802154_SCF_KEY_HW_INDEX: return 14;
+       default: return -EINVAL;
+       }
+}
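These lengths follow directly from the auxiliary security header handled above: one security-control octet plus a four-octet frame counter, followed by a key identifier of 0, 1, 5 or 9 octets depending on the key id mode, giving 5, 6, 10 and 14 octets respectively.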
+
+static int ieee802154_hdr_minlen(const struct ieee802154_hdr *hdr)
+{
+       int dlen, slen;
+
+       dlen = ieee802154_hdr_addr_len(hdr->fc.dest_addr_mode, false);
+       slen = ieee802154_hdr_addr_len(hdr->fc.source_addr_mode,
+                                      hdr->fc.intra_pan);
+
+       if (slen < 0 || dlen < 0)
+               return -EINVAL;
+
+       return 3 + dlen + slen + hdr->fc.security_enabled;
+}
+
+static int
+ieee802154_hdr_get_addrs(const u8 *buf, struct ieee802154_hdr *hdr)
+{
+       int pos = 0;
+
+       pos += ieee802154_hdr_get_addr(buf + pos, hdr->fc.dest_addr_mode,
+                                      false, &hdr->dest);
+       pos += ieee802154_hdr_get_addr(buf + pos, hdr->fc.source_addr_mode,
+                                      hdr->fc.intra_pan, &hdr->source);
+
+       if (hdr->fc.intra_pan)
+               hdr->source.pan_id = hdr->dest.pan_id;
+
+       return pos;
+}
+
+int
+ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr)
+{
+       int pos = 3, rc;
+
+       if (!pskb_may_pull(skb, 3))
+               return -EINVAL;
+
+       memcpy(hdr, skb->data, 3);
+
+       rc = ieee802154_hdr_minlen(hdr);
+       if (rc < 0 || !pskb_may_pull(skb, rc))
+               return -EINVAL;
+
+       pos += ieee802154_hdr_get_addrs(skb->data + pos, hdr);
+
+       if (hdr->fc.security_enabled) {
+               int want = pos + ieee802154_hdr_sechdr_len(skb->data[pos]);
+
+               if (!pskb_may_pull(skb, want))
+                       return -EINVAL;
+
+               pos += ieee802154_hdr_get_sechdr(skb->data + pos, &hdr->sec);
+       }
+
+       skb_pull(skb, pos);
+       return pos;
+}
+EXPORT_SYMBOL_GPL(ieee802154_hdr_pull);
+
+int
+ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr)
+{
+       const u8 *buf = skb_mac_header(skb);
+       int pos = 3, rc;
+
+       if (buf + 3 > skb_tail_pointer(skb))
+               return -EINVAL;
+
+       memcpy(hdr, buf, 3);
+
+       rc = ieee802154_hdr_minlen(hdr);
+       if (rc < 0 || buf + rc > skb_tail_pointer(skb))
+               return -EINVAL;
+
+       pos += ieee802154_hdr_get_addrs(buf + pos, hdr);
+       return pos;
+}
+EXPORT_SYMBOL_GPL(ieee802154_hdr_peek_addrs);
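A small usage sketch for the non-consuming accessor, mirroring how lowpan_rcv() in 6lowpan_rtnl.c above peeks at the header (the helper is illustrative only, not part of the patch):

static int peek_source_mode_sketch(const struct sk_buff *skb)
{
        struct ieee802154_hdr hdr;

        /* parses fc, seq and both addresses without pulling skb data */
        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
                return -EINVAL;

        return hdr.fc.source_addr_mode; /* IEEE802154_ADDR_{NONE,SHORT,LONG} */
}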
index cee4425b995689702375435f0be7dd85eca3ed73..6cbc8965be9181ce27314973532968dd7caab181 100644 (file)
@@ -53,6 +53,7 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
 int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
 int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
 int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_set_phyparams(struct sk_buff *skb, struct genl_info *info);
 
 enum ieee802154_mcgrp_ids {
        IEEE802154_COORD_MCGRP,
index 43f1b2bf469f40938a431582b305c45587049a04..67c151bf4b91b610835ffc60185428a9a84be8d8 100644 (file)
@@ -115,6 +115,7 @@ static const struct genl_ops ieee8021154_ops[] = {
                        ieee802154_dump_phy),
        IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
        IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
+       IEEE802154_OP(IEEE802154_SET_PHYPARAMS, ieee802154_set_phyparams),
        /* see nl-mac.c */
        IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
        IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
index ba5c1e002f37b2630c53a0284736f5d177bded76..bda8dba4f993b0ce981888627cd3099dc2f13642 100644 (file)
 
 #include "ieee802154.h"
 
+static int nla_put_hwaddr(struct sk_buff *msg, int type, __le64 hwaddr)
+{
+       return nla_put_u64(msg, type, swab64((__force u64)hwaddr));
+}
+
+static __le64 nla_get_hwaddr(const struct nlattr *nla)
+{
+       return ieee802154_devaddr_from_raw(nla_data(nla));
+}
+
+static int nla_put_shortaddr(struct sk_buff *msg, int type, __le16 addr)
+{
+       return nla_put_u16(msg, type, le16_to_cpu(addr));
+}
+
+static __le16 nla_get_shortaddr(const struct nlattr *nla)
+{
+       return cpu_to_le16(nla_get_u16(nla));
+}
+
 int ieee802154_nl_assoc_indic(struct net_device *dev,
                struct ieee802154_addr *addr, u8 cap)
 {
@@ -46,7 +66,7 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
 
        pr_debug("%s\n", __func__);
 
-       if (addr->addr_type != IEEE802154_ADDR_LONG) {
+       if (addr->mode != IEEE802154_ADDR_LONG) {
                pr_err("%s: received non-long source address!\n", __func__);
                return -EINVAL;
        }
@@ -59,8 +79,8 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
            nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
            nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
                    dev->dev_addr) ||
-           nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-                   addr->hwaddr) ||
+           nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR,
+                          addr->extended_addr) ||
            nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
                goto nla_put_failure;
 
@@ -72,7 +92,7 @@ nla_put_failure:
 }
 EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
 
-int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
+int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr,
                u8 status)
 {
        struct sk_buff *msg;
@@ -87,7 +107,7 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
            nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
            nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
                    dev->dev_addr) ||
-           nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
+           nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
            nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
                goto nla_put_failure;
        return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
@@ -114,13 +134,13 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev,
            nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
                    dev->dev_addr))
                goto nla_put_failure;
-       if (addr->addr_type == IEEE802154_ADDR_LONG) {
-               if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
-                           addr->hwaddr))
+       if (addr->mode == IEEE802154_ADDR_LONG) {
+               if (nla_put_hwaddr(msg, IEEE802154_ATTR_SRC_HW_ADDR,
+                                  addr->extended_addr))
                        goto nla_put_failure;
        } else {
-               if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
-                               addr->short_addr))
+               if (nla_put_shortaddr(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
+                                     addr->short_addr))
                        goto nla_put_failure;
        }
        if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
@@ -157,8 +177,8 @@ nla_put_failure:
 }
 EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm);
 
-int ieee802154_nl_beacon_indic(struct net_device *dev,
-               u16 panid, u16 coord_addr)
+int ieee802154_nl_beacon_indic(struct net_device *dev, __le16 panid,
+                              __le16 coord_addr)
 {
        struct sk_buff *msg;
 
@@ -172,8 +192,9 @@ int ieee802154_nl_beacon_indic(struct net_device *dev,
            nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
            nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
                    dev->dev_addr) ||
-           nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
-           nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
+           nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_SHORT_ADDR,
+                             coord_addr) ||
+           nla_put_shortaddr(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
                goto nla_put_failure;
        return ieee802154_nl_mcast(msg, IEEE802154_COORD_MCGRP);
 
@@ -243,6 +264,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
 {
        void *hdr;
        struct wpan_phy *phy;
+       __le16 short_addr, pan_id;
 
        pr_debug("%s\n", __func__);
 
@@ -254,15 +276,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        phy = ieee802154_mlme_ops(dev)->get_phy(dev);
        BUG_ON(!phy);
 
+       short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
+       pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+
        if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
            nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
            nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
            nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
                    dev->dev_addr) ||
-           nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
-                       ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
-           nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
-                       ieee802154_mlme_ops(dev)->get_pan_id(dev)))
+           nla_put_shortaddr(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
+           nla_put_shortaddr(msg, IEEE802154_ATTR_PAN_ID, pan_id))
                goto nla_put_failure;
        wpan_phy_put(phy);
        return genlmsg_end(msg, hdr);
@@ -322,16 +345,16 @@ int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info)
                goto out;
 
        if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) {
-               addr.addr_type = IEEE802154_ADDR_LONG;
-               nla_memcpy(addr.hwaddr,
-                               info->attrs[IEEE802154_ATTR_COORD_HW_ADDR],
-                               IEEE802154_ADDR_LEN);
+               addr.mode = IEEE802154_ADDR_LONG;
+               addr.extended_addr = nla_get_hwaddr(
+                               info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]);
        } else {
-               addr.addr_type = IEEE802154_ADDR_SHORT;
-               addr.short_addr = nla_get_u16(
+               addr.mode = IEEE802154_ADDR_SHORT;
+               addr.short_addr = nla_get_shortaddr(
                                info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
        }
-       addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
+       addr.pan_id = nla_get_shortaddr(
+                       info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
 
        if (info->attrs[IEEE802154_ATTR_PAGE])
                page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
@@ -365,14 +388,13 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
        if (!ieee802154_mlme_ops(dev)->assoc_resp)
                goto out;
 
-       addr.addr_type = IEEE802154_ADDR_LONG;
-       nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
-                       IEEE802154_ADDR_LEN);
+       addr.mode = IEEE802154_ADDR_LONG;
+       addr.extended_addr = nla_get_hwaddr(
+                       info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
        addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
 
-
        ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
-               nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
+               nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
                nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS]));
 
 out:
@@ -398,13 +420,12 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
                goto out;
 
        if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) {
-               addr.addr_type = IEEE802154_ADDR_LONG;
-               nla_memcpy(addr.hwaddr,
-                               info->attrs[IEEE802154_ATTR_DEST_HW_ADDR],
-                               IEEE802154_ADDR_LEN);
+               addr.mode = IEEE802154_ADDR_LONG;
+               addr.extended_addr = nla_get_hwaddr(
+                               info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
        } else {
-               addr.addr_type = IEEE802154_ADDR_SHORT;
-               addr.short_addr = nla_get_u16(
+               addr.mode = IEEE802154_ADDR_SHORT;
+               addr.short_addr = nla_get_shortaddr(
                                info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
        }
        addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
@@ -449,10 +470,11 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
        if (!ieee802154_mlme_ops(dev)->start_req)
                goto out;
 
-       addr.addr_type = IEEE802154_ADDR_SHORT;
-       addr.short_addr = nla_get_u16(
+       addr.mode = IEEE802154_ADDR_SHORT;
+       addr.short_addr = nla_get_shortaddr(
                        info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]);
-       addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
+       addr.pan_id = nla_get_shortaddr(
+                       info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
 
        channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]);
        bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]);
@@ -467,7 +489,7 @@ int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
                page = 0;
 
 
-       if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
+       if (addr.short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
                ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
                dev_put(dev);
                return -EINVAL;
index 89b265aea151eaf7f301c28f391d064b7d22edfc..222310a07762237cee3f1d339348dc73ab8f6dda 100644 (file)
@@ -55,7 +55,15 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
        mutex_lock(&phy->pib_lock);
        if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
            nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
-           nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+           nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel) ||
+           nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, phy->transmit_power) ||
+           nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, phy->lbt) ||
+           nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, phy->cca_mode) ||
+           nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, phy->cca_ed_level) ||
+           nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, phy->csma_retries) ||
+           nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, phy->min_be) ||
+           nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE, phy->max_be) ||
+           nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES, phy->frame_retries))
                goto nla_put_failure;
        for (i = 0; i < 32; i++) {
                if (phy->channels_supported[i])
@@ -354,3 +362,193 @@ out_dev:
 
        return rc;
 }
+
+static int phy_set_txpower(struct wpan_phy *phy, struct genl_info *info)
+{
+       int txpower = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+       int rc;
+
+       rc = phy->set_txpower(phy, txpower);
+       if (rc < 0)
+               return rc;
+
+       phy->transmit_power = txpower;
+
+       return 0;
+}
+
+static int phy_set_lbt(struct wpan_phy *phy, struct genl_info *info)
+{
+       u8 on = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
+       int rc;
+
+       rc = phy->set_lbt(phy, on);
+       if (rc < 0)
+               return rc;
+
+       phy->lbt = on;
+
+       return 0;
+}
+
+static int phy_set_cca_mode(struct wpan_phy *phy, struct genl_info *info)
+{
+       u8 mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
+       int rc;
+
+       if (mode > 3)
+               return -EINVAL;
+
+       rc = phy->set_cca_mode(phy, mode);
+       if (rc < 0)
+               return rc;
+
+       phy->cca_mode = mode;
+
+       return 0;
+}
+
+static int phy_set_cca_ed_level(struct wpan_phy *phy, struct genl_info *info)
+{
+       s32 level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+       int rc;
+
+       rc = phy->set_cca_ed_level(phy, level);
+       if (rc < 0)
+               return rc;
+
+       phy->cca_ed_level = level;
+
+       return 0;
+}
+
+static int phy_set_csma_params(struct wpan_phy *phy, struct genl_info *info)
+{
+       int rc;
+       u8 min_be = phy->min_be;
+       u8 max_be = phy->max_be;
+       u8 retries = phy->csma_retries;
+
+       if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
+               retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
+       if (info->attrs[IEEE802154_ATTR_CSMA_MIN_BE])
+               min_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]);
+       if (info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])
+               max_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]);
+
+       if (retries > 5 || max_be < 3 || max_be > 8 || min_be > max_be)
+               return -EINVAL;
+
+       rc = phy->set_csma_params(phy, min_be, max_be, retries);
+       if (rc < 0)
+               return rc;
+
+       phy->min_be = min_be;
+       phy->max_be = max_be;
+       phy->csma_retries = retries;
+
+       return 0;
+}
+
+static int phy_set_frame_retries(struct wpan_phy *phy, struct genl_info *info)
+{
+       s8 retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]);
+       int rc;
+
+       if (retries < -1 || retries > 7)
+               return -EINVAL;
+
+       rc = phy->set_frame_retries(phy, retries);
+       if (rc < 0)
+               return rc;
+
+       phy->frame_retries = retries;
+
+       return 0;
+}
+
+int ieee802154_set_phyparams(struct sk_buff *skb, struct genl_info *info)
+{
+       struct wpan_phy *phy;
+       const char *name;
+       int rc = -ENOTSUPP;
+
+       pr_debug("%s\n", __func__);
+
+       if (!info->attrs[IEEE802154_ATTR_PHY_NAME] &&
+           !info->attrs[IEEE802154_ATTR_LBT_ENABLED] &&
+           !info->attrs[IEEE802154_ATTR_CCA_MODE] &&
+           !info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL] &&
+           !info->attrs[IEEE802154_ATTR_CSMA_RETRIES] &&
+           !info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] &&
+           !info->attrs[IEEE802154_ATTR_CSMA_MAX_BE] &&
+           !info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
+               return -EINVAL;
+
+       name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
+       if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
+               return -EINVAL; /* phy name should be null-terminated */
+
+       phy = wpan_phy_find(name);
+       if (!phy)
+               return -ENODEV;
+
+       if ((!phy->set_txpower && info->attrs[IEEE802154_ATTR_TXPOWER]) ||
+           (!phy->set_lbt && info->attrs[IEEE802154_ATTR_LBT_ENABLED]) ||
+           (!phy->set_cca_mode && info->attrs[IEEE802154_ATTR_CCA_MODE]) ||
+           (!phy->set_cca_ed_level &&
+            info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]))
+               goto out;
+
+       mutex_lock(&phy->pib_lock);
+
+       if (info->attrs[IEEE802154_ATTR_TXPOWER]) {
+               rc = phy_set_txpower(phy, info);
+               if (rc < 0)
+                       goto error;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_LBT_ENABLED]) {
+               rc = phy_set_lbt(phy, info);
+               if (rc < 0)
+                       goto error;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_CCA_MODE]) {
+               rc = phy_set_cca_mode(phy, info);
+               if (rc < 0)
+                       goto error;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) {
+               rc = phy_set_cca_ed_level(phy, info);
+               if (rc < 0)
+                       goto error;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES] ||
+           info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] ||
+           info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]) {
+               rc = phy_set_csma_params(phy, info);
+               if (rc < 0)
+                       goto error;
+       }
+
+       if (info->attrs[IEEE802154_ATTR_FRAME_RETRIES]) {
+               rc = phy_set_frame_retries(phy, info);
+               if (rc < 0)
+                       goto error;
+       }
+
+       mutex_unlock(&phy->pib_lock);
+
+       wpan_phy_put(phy);
+
+       return 0;
+
+error:
+       mutex_unlock(&phy->pib_lock);
+out:
+       wpan_phy_put(phy);
+       return rc;
+}
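
For reference, the bounds enforced by the setters above can be collapsed into a few standalone checks. This is an illustrative userspace sketch only, not part of the patch, and the function names are invented:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors phy_set_csma_params(): at most 5 CSMA retries, backoff
 * exponents with 3 <= max_be <= 8 and min_be <= max_be.
 */
static bool csma_params_valid(unsigned int min_be, unsigned int max_be,
                              unsigned int retries)
{
        return retries <= 5 && max_be >= 3 && max_be <= 8 && min_be <= max_be;
}

/* Mirrors phy_set_frame_retries(): -1 through 7 (cf. the
 * wpan_phy_alloc() default further down).
 */
static bool frame_retries_valid(int retries)
{
        return retries >= -1 && retries <= 7;
}

/* Mirrors phy_set_cca_mode(): modes 0..3. */
static bool cca_mode_valid(unsigned int mode)
{
        return mode <= 3;
}

int main(void)
{
        printf("%d %d %d\n",
               csma_params_valid(3, 5, 4),      /* 802.15.4-2011 defaults */
               frame_retries_valid(8),          /* rejected: out of range */
               cca_mode_valid(2));
        return 0;
}
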
index 6adda4d46f95255d6540f639e383c9985a949138..fd7be5e45cefb99d37df9d173c9bf7c0ab1d7dc3 100644 (file)
@@ -52,5 +52,15 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
        [IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
        [IEEE802154_ATTR_ED_LIST] = { .len = 27 },
        [IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, },
+
+       [IEEE802154_ATTR_TXPOWER] = { .type = NLA_S8, },
+       [IEEE802154_ATTR_LBT_ENABLED] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
+       [IEEE802154_ATTR_CSMA_RETRIES] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_CSMA_MIN_BE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
+
+       [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
 };
 
index 41f538b8e59c9d7912cdf51287d9afa7a116c646..74d54fae33d74a58ca2628f7547250906bea663e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
 
 #include "af802154.h"
 
@@ -55,21 +56,24 @@ static void raw_close(struct sock *sk, long timeout)
        sk_common_release(sk);
 }
 
-static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len)
+static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
 {
-       struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
+       struct ieee802154_addr addr;
+       struct sockaddr_ieee802154 *uaddr = (struct sockaddr_ieee802154 *)_uaddr;
        int err = 0;
        struct net_device *dev = NULL;
 
-       if (len < sizeof(*addr))
+       if (len < sizeof(*uaddr))
                return -EINVAL;
 
-       if (addr->family != AF_IEEE802154)
+       uaddr = (struct sockaddr_ieee802154 *)_uaddr;
+       if (uaddr->family != AF_IEEE802154)
                return -EINVAL;
 
        lock_sock(sk);
 
-       dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
+       ieee802154_addr_from_sa(&addr, &uaddr->addr);
+       dev = ieee802154_get_dev(sock_net(sk), &addr);
        if (!dev) {
                err = -ENODEV;
                goto out;
@@ -209,6 +213,10 @@ out:
 
 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
+       skb = skb_share_check(skb, GFP_ATOMIC);
+       if (!skb)
+               return NET_RX_DROP;
+
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
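
The skb_share_check() added above gives the raw socket its own copy when the skb is shared with other receive paths, instead of queueing a buffer someone else may still modify. Roughly, the helper (from <linux/skbuff.h>, not shown in this diff) behaves like the following kernel-context sketch:

/* Approximate behaviour of skb_share_check(): clone a shared skb so the
 * caller owns a private copy, releasing its reference on the original.
 */
static struct sk_buff *share_check_sketch(struct sk_buff *skb, gfp_t pri)
{
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);

                if (likely(nskb))
                        consume_skb(skb);
                else
                        kfree_skb(skb);
                skb = nskb;             /* NULL if the clone failed */
        }

        return skb;
}
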
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
new file mode 100644 (file)
index 0000000..ef2d543
--- /dev/null
@@ -0,0 +1,571 @@
+/*     6LoWPAN fragment reassembly
+ *
+ *     Authors:
+ *     Alexander Aring         <aar@pengutronix.de>
+ *
+ *     Based on: net/ipv6/reassembly.c
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "6LoWPAN: " fmt
+
+#include <linux/net.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/6lowpan.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+#include "reassembly.h"
+
+struct lowpan_frag_info {
+       __be16 d_tag;
+       u16 d_size;
+       u8 d_offset;
+};
+
+static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
+{
+       return (struct lowpan_frag_info *)skb->cb;
+}
+
+static struct inet_frags lowpan_frags;
+
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
+                            struct sk_buff *prev, struct net_device *dev);
+
+static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
+                                    const struct ieee802154_addr *saddr,
+                                    const struct ieee802154_addr *daddr)
+{
+       u32 c;
+
+       net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
+       c = jhash_3words(ieee802154_addr_hash(saddr),
+                        ieee802154_addr_hash(daddr),
+                        (__force u32)(tag + (d_size << 16)),
+                        lowpan_frags.rnd);
+
+       return c & (INETFRAGS_HASHSZ - 1);
+}
+
+static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
+{
+       struct lowpan_frag_queue *fq;
+
+       fq = container_of(q, struct lowpan_frag_queue, q);
+       return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
+}
+
+static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
+{
+       struct lowpan_frag_queue *fq;
+       struct lowpan_create_arg *arg = a;
+
+       fq = container_of(q, struct lowpan_frag_queue, q);
+       return  fq->tag == arg->tag && fq->d_size == arg->d_size &&
+               ieee802154_addr_equal(&fq->saddr, arg->src) &&
+               ieee802154_addr_equal(&fq->daddr, arg->dst);
+}
+
+static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
+{
+       struct lowpan_frag_queue *fq;
+       struct lowpan_create_arg *arg = a;
+
+       fq = container_of(q, struct lowpan_frag_queue, q);
+
+       fq->tag = arg->tag;
+       fq->d_size = arg->d_size;
+       fq->saddr = *arg->src;
+       fq->daddr = *arg->dst;
+}
+
+static void lowpan_frag_expire(unsigned long data)
+{
+       struct frag_queue *fq;
+       struct net *net;
+
+       fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+       net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
+
+       spin_lock(&fq->q.lock);
+
+       if (fq->q.last_in & INET_FRAG_COMPLETE)
+               goto out;
+
+       inet_frag_kill(&fq->q, &lowpan_frags);
+out:
+       spin_unlock(&fq->q.lock);
+       inet_frag_put(&fq->q, &lowpan_frags);
+}
+
+static inline struct lowpan_frag_queue *
+fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
+       const struct ieee802154_addr *src,
+       const struct ieee802154_addr *dst)
+{
+       struct inet_frag_queue *q;
+       struct lowpan_create_arg arg;
+       unsigned int hash;
+
+       arg.tag = frag_info->d_tag;
+       arg.d_size = frag_info->d_size;
+       arg.src = src;
+       arg.dst = dst;
+
+       read_lock(&lowpan_frags.lock);
+       hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
+
+       q = inet_frag_find(&net->ieee802154_lowpan.frags,
+                          &lowpan_frags, &arg, hash);
+       if (IS_ERR_OR_NULL(q)) {
+               inet_frag_maybe_warn_overflow(q, pr_fmt());
+               return NULL;
+       }
+       return container_of(q, struct lowpan_frag_queue, q);
+}
+
+static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
+                            struct sk_buff *skb, const u8 frag_type)
+{
+       struct sk_buff *prev, *next;
+       struct net_device *dev;
+       int end, offset;
+
+       if (fq->q.last_in & INET_FRAG_COMPLETE)
+               goto err;
+
+       offset = lowpan_cb(skb)->d_offset << 3;
+       end = lowpan_cb(skb)->d_size;
+
+       /* Is this the final fragment? */
+       if (offset + skb->len == end) {
+               /* If we already have some bits beyond end
+                * or have different end, the segment is corrupted.
+                */
+               if (end < fq->q.len ||
+                   ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
+                       goto err;
+               fq->q.last_in |= INET_FRAG_LAST_IN;
+               fq->q.len = end;
+       } else {
+               if (end > fq->q.len) {
+                       /* Some bits beyond end -> corruption. */
+                       if (fq->q.last_in & INET_FRAG_LAST_IN)
+                               goto err;
+                       fq->q.len = end;
+               }
+       }
+
+       /* Find out which fragments are in front of and behind us in the
+        * chain of fragments collected so far, so we know where to
+        * insert this one.
+        */
+       prev = fq->q.fragments_tail;
+       if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
+               next = NULL;
+               goto found;
+       }
+       prev = NULL;
+       for (next = fq->q.fragments; next != NULL; next = next->next) {
+               if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
+                       break;  /* bingo! */
+               prev = next;
+       }
+
+found:
+       /* Insert this fragment in the chain of fragments. */
+       skb->next = next;
+       if (!next)
+               fq->q.fragments_tail = skb;
+       if (prev)
+               prev->next = skb;
+       else
+               fq->q.fragments = skb;
+
+       dev = skb->dev;
+       if (dev)
+               skb->dev = NULL;
+
+       fq->q.stamp = skb->tstamp;
+       if (frag_type == LOWPAN_DISPATCH_FRAG1) {
+               /* Calculate uncomp. 6lowpan header to estimate full size */
+               fq->q.meat += lowpan_uncompress_size(skb, NULL);
+               fq->q.last_in |= INET_FRAG_FIRST_IN;
+       } else {
+               fq->q.meat += skb->len;
+       }
+       add_frag_mem_limit(&fq->q, skb->truesize);
+
+       if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+           fq->q.meat == fq->q.len) {
+               int res;
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               res = lowpan_frag_reasm(fq, prev, dev);
+               skb->_skb_refdst = orefdst;
+               return res;
+       }
+
+       inet_frag_lru_move(&fq->q);
+       return -1;
+err:
+       kfree_skb(skb);
+       return -1;
+}
+
+/*     Check if this packet is complete.
+ *     Returns 1 once the datagram has been reassembled into the head
+ *     skb, or -1 on failure.
+ *
+ *     It is called with locked fq, and caller must check that
+ *     queue is eligible for reassembly i.e. it is not COMPLETE,
+ *     the last and the first frames arrived and all the bits are here.
+ */
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+                            struct net_device *dev)
+{
+       struct sk_buff *fp, *head = fq->q.fragments;
+       int sum_truesize;
+
+       inet_frag_kill(&fq->q, &lowpan_frags);
+
+       /* Make the one we just received the head. */
+       if (prev) {
+               head = prev->next;
+               fp = skb_clone(head, GFP_ATOMIC);
+
+               if (!fp)
+                       goto out_oom;
+
+               fp->next = head->next;
+               if (!fp->next)
+                       fq->q.fragments_tail = fp;
+               prev->next = fp;
+
+               skb_morph(head, fq->q.fragments);
+               head->next = fq->q.fragments->next;
+
+               consume_skb(fq->q.fragments);
+               fq->q.fragments = head;
+       }
+
+       /* Head of list must not be cloned. */
+       if (skb_unclone(head, GFP_ATOMIC))
+               goto out_oom;
+
+       /* If the first fragment is fragmented itself, we split
+        * it into two chunks: the first with the data and paged part,
+        * and the second holding only the fragment list.
+        */
+       if (skb_has_frag_list(head)) {
+               struct sk_buff *clone;
+               int i, plen = 0;
+
+               clone = alloc_skb(0, GFP_ATOMIC);
+               if (!clone)
+                       goto out_oom;
+               clone->next = head->next;
+               head->next = clone;
+               skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+               skb_frag_list_init(head);
+               for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+                       plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+               clone->len = head->data_len - plen;
+               clone->data_len = clone->len;
+               head->data_len -= clone->len;
+               head->len -= clone->len;
+               add_frag_mem_limit(&fq->q, clone->truesize);
+       }
+
+       WARN_ON(head == NULL);
+
+       sum_truesize = head->truesize;
+       for (fp = head->next; fp;) {
+               bool headstolen;
+               int delta;
+               struct sk_buff *next = fp->next;
+
+               sum_truesize += fp->truesize;
+               if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+                       kfree_skb_partial(fp, headstolen);
+               } else {
+                       if (!skb_shinfo(head)->frag_list)
+                               skb_shinfo(head)->frag_list = fp;
+                       head->data_len += fp->len;
+                       head->len += fp->len;
+                       head->truesize += fp->truesize;
+               }
+               fp = next;
+       }
+       sub_frag_mem_limit(&fq->q, sum_truesize);
+
+       head->next = NULL;
+       head->dev = dev;
+       head->tstamp = fq->q.stamp;
+
+       fq->q.fragments = NULL;
+       fq->q.fragments_tail = NULL;
+
+       return 1;
+out_oom:
+       net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
+       return -1;
+}
+
+static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
+                               struct lowpan_frag_info *frag_info)
+{
+       bool fail;
+       u8 pattern = 0, low = 0;
+
+       fail = lowpan_fetch_skb(skb, &pattern, 1);
+       fail |= lowpan_fetch_skb(skb, &low, 1);
+       frag_info->d_size = (pattern & 7) << 8 | low;
+       fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);
+
+       if (frag_type == LOWPAN_DISPATCH_FRAGN) {
+               fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
+       } else {
+               skb_reset_network_header(skb);
+               frag_info->d_offset = 0;
+       }
+
+       if (unlikely(fail))
+               return -EIO;
+
+       return 0;
+}
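
lowpan_get_frag_info() above pulls apart the RFC 4944 fragmentation header: the low three bits of the dispatch byte plus the next byte form the 11-bit datagram_size, the following two bytes carry the datagram_tag, and FRAGN frames add one offset byte counted in 8-octet units. A userspace sketch of the same parsing, for illustration only (names invented):

#include <stdint.h>
#include <stdio.h>

struct frag_hdr {
        uint16_t d_size;        /* total datagram size in bytes */
        uint16_t d_tag;         /* datagram tag */
        uint8_t  d_offset;      /* FRAGN only, in 8-octet units */
};

static void parse_frag_hdr(const uint8_t *buf, int is_fragn,
                           struct frag_hdr *out)
{
        out->d_size   = (uint16_t)((buf[0] & 0x07) << 8 | buf[1]);
        out->d_tag    = (uint16_t)(buf[2] << 8 | buf[3]);
        out->d_offset = is_fragn ? buf[4] : 0;
}

int main(void)
{
        /* FRAGN header: size 1024, tag 0x1234, offset 5 * 8 = 40 bytes */
        const uint8_t fragn[] = { 0xe4, 0x00, 0x12, 0x34, 0x05 };
        struct frag_hdr h;

        parse_frag_hdr(fragn, 1, &h);
        printf("size=%u tag=0x%04x offset=%u bytes\n",
               h.d_size, h.d_tag, h.d_offset * 8u);
        return 0;
}
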
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
+{
+       struct lowpan_frag_queue *fq;
+       struct net *net = dev_net(skb->dev);
+       struct lowpan_frag_info *frag_info = lowpan_cb(skb);
+       struct ieee802154_addr source, dest;
+       int err;
+
+       source = mac_cb(skb)->source;
+       dest = mac_cb(skb)->dest;
+
+       err = lowpan_get_frag_info(skb, frag_type, frag_info);
+       if (err < 0)
+               goto err;
+
+       if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
+               goto err;
+
+       inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);
+
+       fq = fq_find(net, frag_info, &source, &dest);
+       if (fq != NULL) {
+               int ret;
+
+               spin_lock(&fq->q.lock);
+               ret = lowpan_frag_queue(fq, skb, frag_type);
+               spin_unlock(&fq->q.lock);
+
+               inet_frag_put(&fq->q, &lowpan_frags);
+               return ret;
+       }
+
+err:
+       kfree_skb(skb);
+       return -1;
+}
+EXPORT_SYMBOL(lowpan_frag_rcv);
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table lowpan_frags_ns_ctl_table[] = {
+       {
+               .procname       = "6lowpanfrag_high_thresh",
+               .data           = &init_net.ieee802154_lowpan.frags.high_thresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "6lowpanfrag_low_thresh",
+               .data           = &init_net.ieee802154_lowpan.frags.low_thresh,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "6lowpanfrag_time",
+               .data           = &init_net.ieee802154_lowpan.frags.timeout,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "6lowpanfrag_max_datagram_size",
+               .data           = &init_net.ieee802154_lowpan.max_dsize,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       { }
+};
+
+static struct ctl_table lowpan_frags_ctl_table[] = {
+       {
+               .procname       = "6lowpanfrag_secret_interval",
+               .data           = &lowpan_frags.secret_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       { }
+};
+
+static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
+{
+       struct ctl_table *table;
+       struct ctl_table_header *hdr;
+
+       table = lowpan_frags_ns_ctl_table;
+       if (!net_eq(net, &init_net)) {
+               table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
+                               GFP_KERNEL);
+               if (table == NULL)
+                       goto err_alloc;
+
+               table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
+               table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
+               table[2].data = &net->ieee802154_lowpan.frags.timeout;
+               table[3].data = &net->ieee802154_lowpan.max_dsize;
+
+               /* Don't export sysctls to unprivileged users */
+               if (net->user_ns != &init_user_ns)
+                       table[0].procname = NULL;
+       }
+
+       hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+       if (hdr == NULL)
+               goto err_reg;
+
+       net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
+       return 0;
+
+err_reg:
+       if (!net_eq(net, &init_net))
+               kfree(table);
+err_alloc:
+       return -ENOMEM;
+}
+
+static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
+{
+       struct ctl_table *table;
+
+       table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
+       if (!net_eq(net, &init_net))
+               kfree(table);
+}
+
+static struct ctl_table_header *lowpan_ctl_header;
+
+static int lowpan_frags_sysctl_register(void)
+{
+       lowpan_ctl_header = register_net_sysctl(&init_net,
+                                               "net/ieee802154/6lowpan",
+                                               lowpan_frags_ctl_table);
+       return lowpan_ctl_header == NULL ? -ENOMEM : 0;
+}
+
+static void lowpan_frags_sysctl_unregister(void)
+{
+       unregister_net_sysctl_table(lowpan_ctl_header);
+}
+#else
+static inline int lowpan_frags_ns_sysctl_register(struct net *net)
+{
+       return 0;
+}
+
+static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
+{
+}
+
+static inline int lowpan_frags_sysctl_register(void)
+{
+       return 0;
+}
+
+static inline void lowpan_frags_sysctl_unregister(void)
+{
+}
+#endif
+
+static int __net_init lowpan_frags_init_net(struct net *net)
+{
+       net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+       net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+       net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
+       net->ieee802154_lowpan.max_dsize = 0xFFFF;
+
+       inet_frags_init_net(&net->ieee802154_lowpan.frags);
+
+       return lowpan_frags_ns_sysctl_register(net);
+}
+
+static void __net_exit lowpan_frags_exit_net(struct net *net)
+{
+       lowpan_frags_ns_sysctl_unregister(net);
+       inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
+}
+
+static struct pernet_operations lowpan_frags_ops = {
+       .init = lowpan_frags_init_net,
+       .exit = lowpan_frags_exit_net,
+};
+
+int __init lowpan_net_frag_init(void)
+{
+       int ret;
+
+       ret = lowpan_frags_sysctl_register();
+       if (ret)
+               return ret;
+
+       ret = register_pernet_subsys(&lowpan_frags_ops);
+       if (ret)
+               goto err_pernet;
+
+       lowpan_frags.hashfn = lowpan_hashfn;
+       lowpan_frags.constructor = lowpan_frag_init;
+       lowpan_frags.destructor = NULL;
+       lowpan_frags.skb_free = NULL;
+       lowpan_frags.qsize = sizeof(struct frag_queue);
+       lowpan_frags.match = lowpan_frag_match;
+       lowpan_frags.frag_expire = lowpan_frag_expire;
+       lowpan_frags.secret_interval = 10 * 60 * HZ;
+       inet_frags_init(&lowpan_frags);
+
+       return ret;
+err_pernet:
+       lowpan_frags_sysctl_unregister();
+       return ret;
+}
+
+void lowpan_net_frag_exit(void)
+{
+       inet_frags_fini(&lowpan_frags);
+       lowpan_frags_sysctl_unregister();
+       unregister_pernet_subsys(&lowpan_frags_ops);
+}
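
For readers tracing the receive path: lowpan_frag_rcv() returns 1 once the datagram has been fully reassembled into the skb that was passed in, and -1 while fragments are still being collected or after an error; in the -1 case the skb has been queued or freed and must not be touched again. A hypothetical caller sketch (the real dispatch lives in the 6LoWPAN receive code, not in this file):

/* Hypothetical caller; frag_type is LOWPAN_DISPATCH_FRAG1 or
 * LOWPAN_DISPATCH_FRAGN as determined by the dispatch byte.
 */
static int handle_fragment_sketch(struct sk_buff *skb, u8 frag_type)
{
        int ret = lowpan_frag_rcv(skb, frag_type);

        if (ret == 1) {
                /* skb now holds the complete datagram; continue with
                 * header decompression and normal delivery.
                 */
                return 0;
        }

        /* Fragment consumed (queued or dropped); nothing else to do. */
        return 0;
}
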
diff --git a/net/ieee802154/reassembly.h b/net/ieee802154/reassembly.h
new file mode 100644 (file)
index 0000000..74e4a7c
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef __IEEE802154_6LOWPAN_REASSEMBLY_H__
+#define __IEEE802154_6LOWPAN_REASSEMBLY_H__
+
+#include <net/inet_frag.h>
+
+struct lowpan_create_arg {
+       __be16 tag;
+       u16 d_size;
+       const struct ieee802154_addr *src;
+       const struct ieee802154_addr *dst;
+};
+
+/* Equivalent of the ipv4 fragment queue (struct ipq) */
+struct lowpan_frag_queue {
+       struct inet_frag_queue  q;
+
+       __be16                  tag;
+       u16                     d_size;
+       struct ieee802154_addr  saddr;
+       struct ieee802154_addr  daddr;
+};
+
+static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
+{
+       switch (a->mode) {
+       case IEEE802154_ADDR_LONG:
+               return (((__force u64)a->extended_addr) >> 32) ^
+                       (((__force u64)a->extended_addr) & 0xffffffff);
+       case IEEE802154_ADDR_SHORT:
+               return (__force u32)(a->short_addr);
+       default:
+               return 0;
+       }
+}
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+void lowpan_net_frag_exit(void);
+int lowpan_net_frag_init(void);
+
+#endif /* __IEEE802154_6LOWPAN_REASSEMBLY_H__ */
index 4dd37615a749c6f1191d5ce35bec09ca9ac05e81..edd0962d55f9a14a18c8901ffa0fa32432c1fcf6 100644 (file)
@@ -44,9 +44,7 @@ static DEVICE_ATTR_RO(name);
 
 MASTER_SHOW(current_channel, "%d");
 MASTER_SHOW(current_page, "%d");
-MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
-       ((signed char) (phy->transmit_power << 2)) >> 2,
-       (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1);
+MASTER_SHOW(transmit_power, "%d +- 1 dB");
 MASTER_SHOW(cca_mode, "%d");
 
 static ssize_t channels_supported_show(struct device *dev,
@@ -171,6 +169,12 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
        phy->current_channel = -1; /* not initialised */
        phy->current_page = 0; /* for compatibility */
 
+       /* defaults per 802.15.4-2011 */
+       phy->min_be = 3;
+       phy->max_be = 5;
+       phy->csma_retries = 4;
+       phy->frame_retries = -1; /* for compatibility, actual default is 3 */
+
        return phy;
 
 out:
index f8c49ce5b2839f96b5147185cfbb0402bba23da8..f032688d20d308412694cbc7ef29567a86264b4b 100644 (file)
@@ -55,4 +55,4 @@ obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
-                     xfrm4_output.o
+                     xfrm4_output.o xfrm4_protocol.o
index 19ab78aca547fc7fb45e56607e7adafd0036d947..8c54870db792cab059bff464d29776510ec3e5ec 100644 (file)
@@ -1505,9 +1505,9 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
                bhptr = per_cpu_ptr(mib[0], cpu);
                syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
                do {
-                       start = u64_stats_fetch_begin_bh(syncp);
+                       start = u64_stats_fetch_begin_irq(syncp);
                        v = *(((u64 *) bhptr) + offt);
-               } while (u64_stats_fetch_retry_bh(syncp, start));
+               } while (u64_stats_fetch_retry_irq(syncp, start));
 
                res += v;
        }
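
This hunk, like the ip_tunnel_core.c one further down, moves the statistics snapshot from the _bh to the _irq flavour of the u64_stats helpers. The read side stays the usual retry loop; a kernel-context sketch with invented names:

/* Re-read the per-CPU counters until the writer-side sequence count is
 * stable; the struct and function names here are illustrative only.
 */
struct example_pcpu_stats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

static void example_stats_read(struct example_pcpu_stats *s,
                               u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *packets = s->packets;
                *bytes   = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
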
index 717902669d2f2ef34714c215c4695ffe59ddd283..a2afa89513a06d43ecdac6033dfcf89c16ea8ffc 100644 (file)
@@ -155,6 +155,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
        struct iphdr *iph, *top_iph;
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
+       int seqhi_len = 0;
+       __be32 *seqhi;
+       int sglists = 0;
+       struct scatterlist *seqhisg;
 
        ahp = x->data;
        ahash = ahp->ahash;
@@ -167,14 +171,19 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
        ah = ip_auth_hdr(skb);
        ihl = ip_hdrlen(skb);
 
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists = 1;
+               seqhi_len = sizeof(*seqhi);
+       }
        err = -ENOMEM;
-       iph = ah_alloc_tmp(ahash, nfrags, ihl);
+       iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
        if (!iph)
                goto out;
-
-       icv = ah_tmp_icv(ahash, iph, ihl);
+       seqhi = (__be32 *)((char *)iph + ihl);
+       icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
+       seqhisg = sg + nfrags;
 
        memset(ah->auth_data, 0, ahp->icv_trunc_len);
 
@@ -210,10 +219,15 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
        ah->spi = x->id.spi;
        ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
-       sg_init_table(sg, nfrags);
-       skb_to_sgvec(skb, sg, 0, skb->len);
+       sg_init_table(sg, nfrags + sglists);
+       skb_to_sgvec_nomark(skb, sg, 0, skb->len);
 
-       ahash_request_set_crypt(req, sg, icv, skb->len);
+       if (x->props.flags & XFRM_STATE_ESN) {
+               /* Attach seqhi sg right after packet payload */
+               *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+               sg_set_buf(seqhisg, seqhi, seqhi_len);
+       }
+       ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah_output_done, skb);
 
        AH_SKB_CB(skb)->tmp = iph;
@@ -295,6 +309,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        int err = -ENOMEM;
+       int seqhi_len = 0;
+       __be32 *seqhi;
+       int sglists = 0;
+       struct scatterlist *seqhisg;
 
        if (!pskb_may_pull(skb, sizeof(*ah)))
                goto out;
@@ -335,14 +353,22 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
        iph = ip_hdr(skb);
        ihl = ip_hdrlen(skb);
 
-       work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists = 1;
+               seqhi_len = sizeof(*seqhi);
+       }
+
+       work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
+                               ahp->icv_trunc_len + seqhi_len);
        if (!work_iph)
                goto out;
 
-       auth_data = ah_tmp_auth(work_iph, ihl);
+       seqhi = (__be32 *)((char *)work_iph + ihl);
+       auth_data = ah_tmp_auth(seqhi, seqhi_len);
        icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
+       seqhisg = sg + nfrags;
 
        memcpy(work_iph, iph, ihl);
        memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
@@ -361,10 +387,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_push(skb, ihl);
 
-       sg_init_table(sg, nfrags);
-       skb_to_sgvec(skb, sg, 0, skb->len);
+       sg_init_table(sg, nfrags + sglists);
+       skb_to_sgvec_nomark(skb, sg, 0, skb->len);
 
-       ahash_request_set_crypt(req, sg, icv, skb->len);
+       if (x->props.flags & XFRM_STATE_ESN) {
+               /* Attach seqhi sg right after packet payload */
+               *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+               sg_set_buf(seqhisg, seqhi, seqhi_len);
+       }
+       ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah_input_done, skb);
 
        AH_SKB_CB(skb)->tmp = work_iph;
@@ -397,7 +428,7 @@ out:
        return err;
 }
 
-static void ah4_err(struct sk_buff *skb, u32 info)
+static int ah4_err(struct sk_buff *skb, u32 info)
 {
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
@@ -407,23 +438,25 @@ static void ah4_err(struct sk_buff *skb, u32 info)
        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
-                       return;
+                       return 0;
        case ICMP_REDIRECT:
                break;
        default:
-               return;
+               return 0;
        }
 
        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              ah->spi, IPPROTO_AH, AF_INET);
        if (!x)
-               return;
+               return 0;
 
        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
        xfrm_state_put(x);
+
+       return 0;
 }
 
 static int ah_init_state(struct xfrm_state *x)
@@ -505,6 +538,10 @@ static void ah_destroy(struct xfrm_state *x)
        kfree(ahp);
 }
 
+static int ah4_rcv_cb(struct sk_buff *skb, int err)
+{
+       return 0;
+}
 
 static const struct xfrm_type ah_type =
 {
@@ -518,11 +555,12 @@ static const struct xfrm_type ah_type =
        .output         = ah_output
 };
 
-static const struct net_protocol ah4_protocol = {
+static struct xfrm4_protocol ah4_protocol = {
        .handler        =       xfrm4_rcv,
+       .input_handler  =       xfrm_input,
+       .cb_handler     =       ah4_rcv_cb,
        .err_handler    =       ah4_err,
-       .no_policy      =       1,
-       .netns_ok       =       1,
+       .priority       =       0,
 };
 
 static int __init ah4_init(void)
@@ -531,7 +569,7 @@ static int __init ah4_init(void)
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
-       if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
+       if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ah_type, AF_INET);
                return -EAGAIN;
@@ -541,7 +579,7 @@ static int __init ah4_init(void)
 
 static void __exit ah4_fini(void)
 {
-       if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
+       if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
index 7785b28061acfecd59e4ece70815edb3dcc58977..360b565918c4b524d561a010ed7ec5df02312cdb 100644 (file)
@@ -473,7 +473,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
 }
 
-static void esp4_err(struct sk_buff *skb, u32 info)
+static int esp4_err(struct sk_buff *skb, u32 info)
 {
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
@@ -483,23 +483,25 @@ static void esp4_err(struct sk_buff *skb, u32 info)
        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
-                       return;
+                       return 0;
        case ICMP_REDIRECT:
                break;
        default:
-               return;
+               return 0;
        }
 
        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET);
        if (!x)
-               return;
+               return 0;
 
        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
        xfrm_state_put(x);
+
+       return 0;
 }
 
 static void esp_destroy(struct xfrm_state *x)
@@ -672,6 +674,11 @@ error:
        return err;
 }
 
+static int esp4_rcv_cb(struct sk_buff *skb, int err)
+{
+       return 0;
+}
+
 static const struct xfrm_type esp_type =
 {
        .description    = "ESP4",
@@ -685,11 +692,12 @@ static const struct xfrm_type esp_type =
        .output         = esp_output
 };
 
-static const struct net_protocol esp4_protocol = {
+static struct xfrm4_protocol esp4_protocol = {
        .handler        =       xfrm4_rcv,
+       .input_handler  =       xfrm_input,
+       .cb_handler     =       esp4_rcv_cb,
        .err_handler    =       esp4_err,
-       .no_policy      =       1,
-       .netns_ok       =       1,
+       .priority       =       0,
 };
 
 static int __init esp4_init(void)
@@ -698,7 +706,7 @@ static int __init esp4_init(void)
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
-       if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
+       if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
@@ -708,7 +716,7 @@ static int __init esp4_init(void)
 
 static void __exit esp4_fini(void)
 {
-       if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
+       if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
index c7539e22868b37d59018730aca786176df5408ee..1a629f870274de3c1b70910ec5285f7baae139eb 100644 (file)
@@ -659,7 +659,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 
        if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
            ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
-               return ip_rt_dump(skb, cb);
+               return skb->len;
 
        s_h = cb->args[0];
        s_e = cb->args[1];
index f3869c186d975e5a0f80f067fb461069f9ba2689..be8abe73bb9f464a2e68679255acde3b708ce84b 100644 (file)
@@ -127,6 +127,10 @@ int ip_forward(struct sk_buff *skb)
        struct rtable *rt;      /* Route we use */
        struct ip_options *opt  = &(IPCB(skb)->opt);
 
+       /* Packets not addressed to this host (pkt_type != PACKET_HOST)
+        * should never reach the forwarding path; drop them early.
+        */
+       if (skb->pkt_type != PACKET_HOST)
+               goto drop;
+
        if (skb_warn_if_lro(skb))
                goto drop;
 
@@ -136,9 +140,6 @@ int ip_forward(struct sk_buff *skb)
        if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
                return NET_RX_SUCCESS;
 
-       if (skb->pkt_type != PACKET_HOST)
-               goto drop;
-
        skb_forward_csum(skb);
 
        /*
index 73c6b63bba74e57b70589ff548fccf0db79cc9a4..1a0755fea4914c20f95d036f30638ea2677b764f 100644 (file)
@@ -446,7 +446,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;
-       bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 
        dev = rt->dst.dev;
 
@@ -456,7 +455,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 
        iph = ip_hdr(skb);
 
-       mtu = ip_dst_mtu_maybe_forward(&rt->dst, forwarding);
+       mtu = ip_skb_dst_mtu(skb);
        if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
@@ -822,8 +821,7 @@ static int __ip_append_data(struct sock *sk,
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-       maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
-                        mtu : 0xFFFF;
+       maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
 
        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1146,8 +1144,7 @@ ssize_t   ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 
        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
-       maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
-                        mtu : 0xFFFF;
+       maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
 
        if (cork->length + size > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1308,8 +1305,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
         * to fragment the frame generated here. No matter how transforms
         * change the size of the packet, it will come out.
         */
-       if (inet->pmtudisc < IP_PMTUDISC_DO)
-               skb->local_df = 1;
+       skb->local_df = ip_sk_local_df(sk);
 
        /* DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow to fragment this frame
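
The three ip_output.c hunks above replace open-coded pmtudisc comparisons with ip_sk_local_df(), so the new IP_PMTUDISC_OMIT mode only has to be handled in one place. The helper itself is not part of this excerpt; based on the call sites it presumably looks something like this (an assumption, not a quote from the patch):

/* Assumed shape of ip_sk_local_df(): local fragmentation is allowed
 * unless the socket insists on path MTU discovery; IP_PMTUDISC_OMIT
 * opts back in even though its value is above IP_PMTUDISC_DO.
 */
static inline bool ip_sk_local_df(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
               inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}
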
index 580dd96666e09b9c88a7e5f9b57f613ce7027a2d..64741b9386329ba6a76b00ea660d78983d2efb8a 100644 (file)
@@ -186,7 +186,8 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ip_cmsg_recv);
 
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+                bool allow_ipv6)
 {
        int err, val;
        struct cmsghdr *cmsg;
@@ -194,6 +195,22 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
+#if defined(CONFIG_IPV6)
+               if (allow_ipv6 &&
+                   cmsg->cmsg_level == SOL_IPV6 &&
+                   cmsg->cmsg_type == IPV6_PKTINFO) {
+                       struct in6_pktinfo *src_info;
+
+                       if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
+                               return -EINVAL;
+                       src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+                       if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
+                               return -EINVAL;
+                       ipc->oif = src_info->ipi6_ifindex;
+                       ipc->addr = src_info->ipi6_addr.s6_addr32[3];
+                       continue;
+               }
+#endif
                if (cmsg->cmsg_level != SOL_IP)
                        continue;
                switch (cmsg->cmsg_type) {
@@ -626,7 +643,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                inet->nodefrag = val ? 1 : 0;
                break;
        case IP_MTU_DISCOVER:
-               if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_INTERFACE)
+               if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
                        goto e_inval;
                inet->pmtudisc = val;
                break;
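
The ip_cmsg_send() change lets IPv4 sendmsg() accept an IPV6_PKTINFO ancillary message whose address is v4-mapped, so applications can use one control-message path for both families. A userspace sketch of building such a cmsg is shown below; the surrounding socket setup is omitted and the address/interface values are purely illustrative:

#define _GNU_SOURCE
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Fill a control buffer (of size CMSG_SPACE(sizeof(struct in6_pktinfo)))
 * with an IPV6_PKTINFO carrying the v4-mapped address ::ffff:192.0.2.1.
 */
static void fill_v4mapped_pktinfo(struct msghdr *msg, void *cbuf,
                                  size_t cbuf_space, int ifindex)
{
        struct in6_pktinfo pi;
        struct cmsghdr *cmsg;

        memset(&pi, 0, sizeof(pi));
        pi.ipi6_ifindex = ifindex;
        pi.ipi6_addr.s6_addr[10] = 0xff;        /* ::ffff: prefix */
        pi.ipi6_addr.s6_addr[11] = 0xff;
        pi.ipi6_addr.s6_addr[12] = 192;         /* 192.0.2.1 */
        pi.ipi6_addr.s6_addr[13] = 0;
        pi.ipi6_addr.s6_addr[14] = 2;
        pi.ipi6_addr.s6_addr[15] = 1;

        msg->msg_control = cbuf;
        msg->msg_controllen = cbuf_space;

        cmsg = CMSG_FIRSTHDR(msg);
        cmsg->cmsg_level = SOL_IPV6;
        cmsg->cmsg_type = IPV6_PKTINFO;
        cmsg->cmsg_len = CMSG_LEN(sizeof(pi));
        memcpy(CMSG_DATA(cmsg), &pi, sizeof(pi));
}
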
index a82a22d8f77fdca5f496e9d7cb45f40b70d194ca..e77381d1df9a044ff6a8d01e051b8f885776cf43 100644 (file)
@@ -235,13 +235,17 @@ static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
 {
        unsigned int h;
        __be32 remote;
+       __be32 i_key = parms->i_key;
 
        if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
                remote = parms->iph.daddr;
        else
                remote = 0;
 
-       h = ip_tunnel_hash(parms->i_key, remote);
+       if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
+               i_key = 0;
+
+       h = ip_tunnel_hash(i_key, remote);
        return &itn->tunnels[h];
 }
 
@@ -398,7 +402,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
        fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
-               return NULL;
+               return ERR_CAST(dev);
 
        dev->mtu = ip_tunnel_bind_dev(dev);
 
@@ -748,9 +752,13 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 
                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 
-               if (!t && (cmd == SIOCADDTUNNEL))
+               if (!t && (cmd == SIOCADDTUNNEL)) {
                        t = ip_tunnel_create(net, itn, p);
-
+                       if (IS_ERR(t)) {
+                               err = PTR_ERR(t);
+                               break;
+                       }
+               }
                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
@@ -777,8 +785,9 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
                if (t) {
                        err = 0;
                        ip_tunnel_update(itn, t, dev, p, true);
-               } else
-                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               } else {
+                       err = -ENOENT;
+               }
                break;
 
        case SIOCDELTUNNEL:
@@ -993,19 +1002,13 @@ int ip_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
-       int i, err;
+       int err;
 
        dev->destructor = ip_tunnel_dev_free;
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *ipt_stats;
-               ipt_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&ipt_stats->syncp);
-       }
-
        tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
        if (!tunnel->dst_cache) {
                free_percpu(dev->tstats);
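
netdev_alloc_pcpu_stats() replaces the removed open-coded loop; it is roughly equivalent to the following (a sketch of the helper's behaviour, not the exact macro):

/* Allocate per-CPU pcpu_sw_netstats and seed each CPU's u64_stats
 * seqcount, as the deleted loop above used to do by hand.
 */
static struct pcpu_sw_netstats __percpu *alloc_tunnel_stats_sketch(void)
{
        struct pcpu_sw_netstats __percpu *stats;
        int cpu;

        stats = alloc_percpu(struct pcpu_sw_netstats);
        if (!stats)
                return NULL;

        for_each_possible_cpu(cpu) {
                struct pcpu_sw_netstats *s = per_cpu_ptr(stats, cpu);

                u64_stats_init(&s->syncp);
        }

        return stats;
}
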
index 8d69626f2206900dfbb336063034875743a3936e..e0c2b1d2ea4eb825aa76c15199b366bc600f841a 100644 (file)
@@ -162,12 +162,12 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                unsigned int start;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       start = u64_stats_fetch_begin_irq(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 
                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
index 48eafae5176946841b53b95e7da8a46650fa548d..687ddef4e5747274fc9c248b73d3869ed57fe21f 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/if_ether.h>
+#include <linux/icmpv6.h>
 
 #include <net/sock.h>
 #include <net/ip.h>
@@ -49,8 +50,8 @@ static struct rtnl_link_ops vti_link_ops __read_mostly;
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
 
-/* We dont digest the packet therefore let the packet pass */
-static int vti_rcv(struct sk_buff *skb)
+static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
+                    int encap_type)
 {
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
@@ -60,79 +61,120 @@ static int vti_rcv(struct sk_buff *skb)
        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
        if (tunnel != NULL) {
-               struct pcpu_sw_netstats *tstats;
-               u32 oldmark = skb->mark;
-               int ret;
-
-
-               /* temporarily mark the skb with the tunnel o_key, to
-                * only match policies with this mark.
-                */
-               skb->mark = be32_to_cpu(tunnel->parms.o_key);
-               ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
-               skb->mark = oldmark;
-               if (!ret)
-                       return -1;
-
-               tstats = this_cpu_ptr(tunnel->dev->tstats);
-               u64_stats_update_begin(&tstats->syncp);
-               tstats->rx_packets++;
-               tstats->rx_bytes += skb->len;
-               u64_stats_update_end(&tstats->syncp);
-
-               secpath_reset(skb);
-               skb->dev = tunnel->dev;
+               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+                       goto drop;
+
+               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+               skb->mark = be32_to_cpu(tunnel->parms.i_key);
+
+               return xfrm_input(skb, nexthdr, spi, encap_type);
+       }
+
+       return -EINVAL;
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+static int vti_rcv(struct sk_buff *skb)
+{
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+       return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
+}
+
+static int vti_rcv_cb(struct sk_buff *skb, int err)
+{
+       unsigned short family;
+       struct net_device *dev;
+       struct pcpu_sw_netstats *tstats;
+       struct xfrm_state *x;
+       struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+
+       if (!tunnel)
                return 1;
+
+       dev = tunnel->dev;
+
+       if (err) {
+               dev->stats.rx_errors++;
+               dev->stats.rx_dropped++;
+
+               return 0;
        }
 
-       return -1;
+       x = xfrm_input_state(skb);
+       family = x->inner_mode->afinfo->family;
+
+       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+               return -EPERM;
+
+       skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
+       skb->dev = dev;
+
+       tstats = this_cpu_ptr(dev->tstats);
+
+       u64_stats_update_begin(&tstats->syncp);
+       tstats->rx_packets++;
+       tstats->rx_bytes += skb->len;
+       u64_stats_update_end(&tstats->syncp);
+
+       return 0;
 }
 
-/* This function assumes it is being called from dev_queue_xmit()
- * and that skb is filled properly by that function.
- */
+static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
+{
+       xfrm_address_t *daddr = (xfrm_address_t *)&dst;
+       xfrm_address_t *saddr = (xfrm_address_t *)&src;
 
-static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+       /* The tunnel is not functional if there is no transform, or if
+        * the xfrm state is not an IPv4 tunnel-mode state.
+        */
+       if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
+           x->props.family != AF_INET)
+               return false;
+
+       if (!dst)
+               return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);
+
+       if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
+               return false;
+
+       return true;
+}
+
+static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+                           struct flowi *fl)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
-       struct iphdr  *tiph = &tunnel->parms.iph;
-       u8     tos;
-       struct rtable *rt;              /* Route to the other host */
+       struct ip_tunnel_parm *parms = &tunnel->parms;
+       struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;        /* Device to other host */
-       struct iphdr  *old_iph = ip_hdr(skb);
-       __be32 dst = tiph->daddr;
-       struct flowi4 fl4;
        int err;
 
-       if (skb->protocol != htons(ETH_P_IP))
-               goto tx_error;
-
-       tos = old_iph->tos;
+       if (!dst) {
+               dev->stats.tx_carrier_errors++;
+               goto tx_error_icmp;
+       }
 
-       memset(&fl4, 0, sizeof(fl4));
-       flowi4_init_output(&fl4, tunnel->parms.link,
-                          be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
-                          RT_SCOPE_UNIVERSE,
-                          IPPROTO_IPIP, 0,
-                          dst, tiph->saddr, 0, 0);
-       rt = ip_route_output_key(dev_net(dev), &fl4);
-       if (IS_ERR(rt)) {
+       dst_hold(dst);
+       dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
+       if (IS_ERR(dst)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error_icmp;
        }
-       /* if there is no transform then this tunnel is not functional.
-        * Or if the xfrm is not mode tunnel.
-        */
-       if (!rt->dst.xfrm ||
-           rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
+
+       if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
-               ip_rt_put(rt);
+               dst_release(dst);
                goto tx_error_icmp;
        }
-       tdev = rt->dst.dev;
+
+       tdev = dst->dev;
 
        if (tdev == dev) {
-               ip_rt_put(rt);
+               dst_release(dst);
                dev->stats.collisions++;
                goto tx_error;
        }
@@ -146,10 +188,8 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                        tunnel->err_count = 0;
        }
 
-       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-       skb_dst_drop(skb);
-       skb_dst_set(skb, &rt->dst);
-       nf_reset(skb);
+       skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+       skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
 
        err = dst_output(skb);
@@ -166,6 +206,95 @@ tx_error:
        return NETDEV_TX_OK;
 }
 
+/* This function assumes it is being called from dev_queue_xmit()
+ * and that skb is filled properly by that function.
+ */
+static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       struct flowi fl;
+
+       memset(&fl, 0, sizeof(fl));
+
+       skb->mark = be32_to_cpu(tunnel->parms.o_key);
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               xfrm_decode_session(skb, &fl, AF_INET);
+               memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+               break;
+       case htons(ETH_P_IPV6):
+               xfrm_decode_session(skb, &fl, AF_INET6);
+               memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+               break;
+       default:
+               dev->stats.tx_errors++;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       return vti_xmit(skb, dev, &fl);
+}
+
+static int vti4_err(struct sk_buff *skb, u32 info)
+{
+       __be32 spi;
+       struct xfrm_state *x;
+       struct ip_tunnel *tunnel;
+       struct ip_esp_hdr *esph;
+       struct ip_auth_hdr *ah;
+       struct ip_comp_hdr *ipch;
+       struct net *net = dev_net(skb->dev);
+       const struct iphdr *iph = (const struct iphdr *)skb->data;
+       int protocol = iph->protocol;
+       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+       tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+                                 iph->daddr, iph->saddr, 0);
+       if (!tunnel)
+               return -1;
+
+       switch (protocol) {
+       case IPPROTO_ESP:
+               esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
+               spi = esph->spi;
+               break;
+       case IPPROTO_AH:
+               ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
+               spi = ah->spi;
+               break;
+       case IPPROTO_COMP:
+               ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
+               spi = htonl(ntohs(ipch->cpi));
+               break;
+       default:
+               return 0;
+       }
+
+       switch (icmp_hdr(skb)->type) {
+       case ICMP_DEST_UNREACH:
+               if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+                       return 0;
+       case ICMP_REDIRECT:
+               break;
+       default:
+               return 0;
+       }
+
+       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+                             spi, protocol, AF_INET);
+       if (!x)
+               return 0;
+
+       if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+               ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
+       else
+               ipv4_redirect(skb, net, 0, 0, protocol, 0);
+       xfrm_state_put(x);
+
+       return 0;
+}
+
 static int
 vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -181,12 +310,13 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EINVAL;
        }
 
+       p.i_flags |= VTI_ISVTI;
        err = ip_tunnel_ioctl(dev, &p, cmd);
        if (err)
                return err;
 
        if (cmd != SIOCDELTUNNEL) {
-               p.i_flags |= GRE_KEY | VTI_ISVTI;
+               p.i_flags |= GRE_KEY;
                p.o_flags |= GRE_KEY;
        }
 
@@ -224,7 +354,6 @@ static int vti_tunnel_init(struct net_device *dev)
        dev->flags              = IFF_NOARP;
        dev->iflink             = 0;
        dev->addr_len           = 4;
-       dev->features           |= NETIF_F_NETNS_LOCAL;
        dev->features           |= NETIF_F_LLTX;
        dev->priv_flags         &= ~IFF_XMIT_DST_RELEASE;
 
@@ -241,9 +370,28 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
        iph->ihl                = 5;
 }
 
-static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
+static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
        .handler        =       vti_rcv,
-       .priority       =       1,
+       .input_handler  =       vti_input,
+       .cb_handler     =       vti_rcv_cb,
+       .err_handler    =       vti4_err,
+       .priority       =       100,
+};
+
+static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
+       .handler        =       vti_rcv,
+       .input_handler  =       vti_input,
+       .cb_handler     =       vti_rcv_cb,
+       .err_handler    =       vti4_err,
+       .priority       =       100,
+};
+
+static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
+       .handler        =       vti_rcv,
+       .input_handler  =       vti_input,
+       .cb_handler     =       vti_rcv_cb,
+       .err_handler    =       vti4_err,
+       .priority       =       100,
 };
 
 static int __net_init vti_init_net(struct net *net)
@@ -287,6 +435,8 @@ static void vti_netlink_parms(struct nlattr *data[],
        if (!data)
                return;
 
+       parms->i_flags = VTI_ISVTI;
+
        if (data[IFLA_VTI_LINK])
                parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
 
@@ -382,10 +532,31 @@ static int __init vti_init(void)
        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
                return err;
-       err = xfrm4_mode_tunnel_input_register(&vti_handler);
+       err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
+       if (err < 0) {
+               unregister_pernet_device(&vti_net_ops);
+               pr_info("vti init: can't register tunnel\n");
+
+               return err;
+       }
+
+       err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
+       if (err < 0) {
+               xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+               unregister_pernet_device(&vti_net_ops);
+               pr_info("vti init: can't register tunnel\n");
+
+               return err;
+       }
+
+       err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
        if (err < 0) {
+               xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+               xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
                unregister_pernet_device(&vti_net_ops);
                pr_info("vti init: can't register tunnel\n");
+
+               return err;
        }
 
        err = rtnl_link_register(&vti_link_ops);
@@ -395,7 +566,9 @@ static int __init vti_init(void)
        return err;
 
 rtnl_link_failed:
-       xfrm4_mode_tunnel_input_deregister(&vti_handler);
+       xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+       xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+       xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
        unregister_pernet_device(&vti_net_ops);
        return err;
 }
@@ -403,8 +576,13 @@ rtnl_link_failed:
 static void __exit vti_fini(void)
 {
        rtnl_link_unregister(&vti_link_ops);
-       if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
+       if (xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP))
                pr_info("vti close: can't deregister tunnel\n");
+       if (xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH))
+               pr_info("vti close: can't deregister tunnel\n");
+       if (xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP))
+               pr_info("vti close: can't deregister tunnel\n");
+
 
        unregister_pernet_device(&vti_net_ops);
 }
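
The vti_init() hunk above registers three xfrm4_protocol handlers (ESP, AH, IPComp) and rolls back the earlier registrations by hand in every error branch. As a rough, self-contained sketch of the same register/rollback idiom (all functions below are illustrative stand-ins, not kernel APIs), the goto-based form keeps the unwind order in one place:

#include <stdio.h>

/* Stand-ins for xfrm4_protocol_register()/deregister(); purely illustrative. */
static int reg_esp(void)    { return 0; }
static int reg_ah(void)     { return 0; }
static int reg_comp(void)   { return -1; }   /* simulate a registration failure */
static void unreg_esp(void) { puts("rolled back esp"); }
static void unreg_ah(void)  { puts("rolled back ah"); }

static int register_all(void)
{
	int err;

	err = reg_esp();
	if (err)
		return err;

	err = reg_ah();
	if (err)
		goto err_esp;

	err = reg_comp();
	if (err)
		goto err_ah;

	return 0;

err_ah:
	unreg_ah();
err_esp:
	unreg_esp();
	return err;
}

int main(void)
{
	return register_all() ? 1 : 0;
}

Either shape gives the same rollback order; the patch simply repeats the deregister calls inline in each branch instead of jumping to labels.
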
index 826be4cb482a29b401f2314da6581e1127a7a731..c0855d50a3fa775831ada20254b74d94f5d410ae 100644 (file)
@@ -23,7 +23,7 @@
 #include <net/protocol.h>
 #include <net/sock.h>
 
-static void ipcomp4_err(struct sk_buff *skb, u32 info)
+static int ipcomp4_err(struct sk_buff *skb, u32 info)
 {
        struct net *net = dev_net(skb->dev);
        __be32 spi;
@@ -34,24 +34,26 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
-                       return;
+                       return 0;
        case ICMP_REDIRECT:
                break;
        default:
-               return;
+               return 0;
        }
 
        spi = htonl(ntohs(ipch->cpi));
        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              spi, IPPROTO_COMP, AF_INET);
        if (!x)
-               return;
+               return 0;
 
        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
        else
                ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
        xfrm_state_put(x);
+
+       return 0;
 }
 
 /* We always hold one tunnel user reference to indicate a tunnel */
@@ -147,6 +149,11 @@ out:
        return err;
 }
 
+static int ipcomp4_rcv_cb(struct sk_buff *skb, int err)
+{
+       return 0;
+}
+
 static const struct xfrm_type ipcomp_type = {
        .description    = "IPCOMP4",
        .owner          = THIS_MODULE,
@@ -157,11 +164,12 @@ static const struct xfrm_type ipcomp_type = {
        .output         = ipcomp_output
 };
 
-static const struct net_protocol ipcomp4_protocol = {
+static struct xfrm4_protocol ipcomp4_protocol = {
        .handler        =       xfrm4_rcv,
+       .input_handler  =       xfrm_input,
+       .cb_handler     =       ipcomp4_rcv_cb,
        .err_handler    =       ipcomp4_err,
-       .no_policy      =       1,
-       .netns_ok       =       1,
+       .priority       =       0,
 };
 
 static int __init ipcomp4_init(void)
@@ -170,7 +178,7 @@ static int __init ipcomp4_init(void)
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
-       if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
+       if (xfrm4_protocol_register(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ipcomp_type, AF_INET);
                return -EAGAIN;
@@ -180,7 +188,7 @@ static int __init ipcomp4_init(void)
 
 static void __exit ipcomp4_fini(void)
 {
-       if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0)
+       if (xfrm4_protocol_deregister(&ipcomp4_protocol, IPPROTO_COMP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
index c3e0adea9c277585f01dff2f1740787bb3d19a66..7ebd6e37875cc95b08d294ff64306925d05e550e 100644 (file)
@@ -61,7 +61,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
                skb_dst_set(skb, NULL);
                dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
                if (IS_ERR(dst))
-                       return PTR_ERR(dst);;
+                       return PTR_ERR(dst);
                skb_dst_set(skb, dst);
        }
 #endif
index 2d11c094296e77ceb36b4121df01d7e5318de591..f4b19e5dde54c4b2971610db527202a640b40f09 100644 (file)
@@ -727,7 +727,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
        sock_tx_timestamp(sk, &ipc.tx_flags);
 
        if (msg->msg_controllen) {
-               err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+               err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
                if (err)
                        return err;
                if (ipc.opt)
index a6c8a80ec9d67299e39008f18a3adb0dcfae1389..ad737fad6d8b82dec74fab1260015e539647271d 100644 (file)
@@ -273,6 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
        SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
        SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
+       SNMP_MIB_ITEM("TCPFastOpenActiveFail", LINUX_MIB_TCPFASTOPENACTIVEFAIL),
        SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
        SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
        SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
@@ -280,6 +281,11 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
        SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
        SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING),
+       SNMP_MIB_ITEM("TCPFromZeroWindowAdv", LINUX_MIB_TCPFROMZEROWINDOWADV),
+       SNMP_MIB_ITEM("TCPToZeroWindowAdv", LINUX_MIB_TCPTOZEROWINDOWADV),
+       SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV),
+       SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS),
+       SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT),
        SNMP_MIB_SENTINEL
 };
 
index c04518f4850a4c0a3d1de182d75cb2a963c7f583..a9dbe58bdfe767e62642db954b89532dab928a96 100644 (file)
@@ -524,7 +524,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.oif = sk->sk_bound_dev_if;
 
        if (msg->msg_controllen) {
-               err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+               err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
                if (err)
                        goto out;
                if (ipc.opt)
index 4c011ec69ed43efacdf692592a849d691ca93d2d..1be9e990514da98dbc651f847f5b429d72889da6 100644 (file)
@@ -139,11 +139,6 @@ static void                 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 static void            ipv4_dst_destroy(struct dst_entry *dst);
 
-static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-                           int how)
-{
-}
-
 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 {
        WARN_ON(1);
@@ -162,7 +157,6 @@ static struct dst_ops ipv4_dst_ops = {
        .mtu =                  ipv4_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
-       .ifdown =               ipv4_dst_ifdown,
        .negative_advice =      ipv4_negative_advice,
        .link_failure =         ipv4_link_failure,
        .update_pmtu =          ip_rt_update_pmtu,
@@ -697,7 +691,6 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
 
 out_unlock:
        spin_unlock_bh(&fnhe_lock);
-       return;
 }
 
 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
@@ -2475,11 +2468,6 @@ errout_free:
        goto errout;
 }
 
-int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
-{
-       return skb->len;
-}
-
 void ip_rt_multicast_event(struct in_device *in_dev)
 {
        rt_cache_flush(dev_net(in_dev->dev));
index 97c8f5620c430930c0b9c7958db079a968bd807b..4bd6d52eeffb6c7ddc3e98054a7070826a4c4dcd 100644 (file)
@@ -387,7 +387,7 @@ void tcp_init_sock(struct sock *sk)
        INIT_LIST_HEAD(&tp->tsq_node);
 
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
-       tp->mdev = TCP_TIMEOUT_INIT;
+       tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 
        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
@@ -2341,7 +2341,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
-       tp->srtt = 0;
+       tp->srtt_us = 0;
        if ((tp->write_seq += tp->max_window + 2) == 0)
                tp->write_seq = 1;
        icsk->icsk_backoff = 0;
@@ -2785,8 +2785,8 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 
        info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
        info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
-       info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
-       info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
+       info->tcpi_rtt = tp->srtt_us >> 3;
+       info->tcpi_rttvar = tp->mdev_us >> 2;
        info->tcpi_snd_ssthresh = tp->snd_ssthresh;
        info->tcpi_snd_cwnd = tp->snd_cwnd;
        info->tcpi_advmss = tp->advmss;
@@ -2796,6 +2796,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
        info->tcpi_rcv_space = tp->rcvq_space.space;
 
        info->tcpi_total_retrans = tp->total_retrans;
+
+       info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
+                                       sk->sk_pacing_rate : ~0ULL;
+       info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
+                                       sk->sk_max_pacing_rate : ~0ULL;
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
index 2388275adb9bd0fcfb8ea7a97a22ac661b4df745..2b9464c93b8859fcbef0f900f70a4bed2dc6e617 100644 (file)
@@ -361,21 +361,12 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
-/* Lower bound on congestion window with halving. */
-u32 tcp_reno_min_cwnd(const struct sock *sk)
-{
-       const struct tcp_sock *tp = tcp_sk(sk);
-       return tp->snd_ssthresh/2;
-}
-EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
-
 struct tcp_congestion_ops tcp_reno = {
        .flags          = TCP_CONG_NON_RESTRICTED,
        .name           = "reno",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
-       .min_cwnd       = tcp_reno_min_cwnd,
 };
 
 /* Initial congestion control used (until SYN)
@@ -387,6 +378,5 @@ struct tcp_congestion_ops tcp_init_congestion_ops  = {
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
-       .min_cwnd       = tcp_reno_min_cwnd,
 };
 EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
index 828e4c3ffbaf2d724086a0d408781dd99056a5bf..8bf224516ba2a26a661d16f89aaee32301d09397 100644 (file)
@@ -476,10 +476,6 @@ static int __init cubictcp_register(void)
        /* divide by bic_scale and by constant Srtt (100ms) */
        do_div(cube_factor, bic_scale * 10);
 
-       /* hystart needs ms clock resolution */
-       if (hystart && HZ < 1000)
-               cubictcp.flags |= TCP_CONG_RTT_STAMP;
-
        return tcp_register_congestion_control(&cubictcp);
 }
 
index 8ed9305dfdf4f63dc06e951be32a9c5e0b023d1e..8b9e7bad77c09a0c07706b955c29b81d4e71a542 100644 (file)
@@ -162,7 +162,6 @@ static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
        .init           = hstcp_init,
        .ssthresh       = hstcp_ssthresh,
        .cong_avoid     = hstcp_cong_avoid,
-       .min_cwnd       = tcp_reno_min_cwnd,
 
        .owner          = THIS_MODULE,
        .name           = "highspeed"
index 478fe82611bff24459349cb14fd1ce6d8a9f26ac..a15a799bf76888f3633b27cf57e8e05f30339601 100644 (file)
@@ -21,7 +21,7 @@ struct hybla {
        u32   rho2;           /* Rho * Rho, integer part */
        u32   rho_3ls;        /* Rho parameter, <<3 */
        u32   rho2_7ls;       /* Rho^2, <<7     */
-       u32   minrtt;         /* Minimum smoothed round trip time value seen */
+       u32   minrtt_us;      /* Minimum smoothed round trip time value seen */
 };
 
 /* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
@@ -35,7 +35,9 @@ static inline void hybla_recalc_param (struct sock *sk)
 {
        struct hybla *ca = inet_csk_ca(sk);
 
-       ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
+       ca->rho_3ls = max_t(u32,
+                           tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
+                           8U);
        ca->rho = ca->rho_3ls >> 3;
        ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
        ca->rho2 = ca->rho2_7ls >> 7;
@@ -59,7 +61,7 @@ static void hybla_init(struct sock *sk)
        hybla_recalc_param(sk);
 
        /* set minimum rtt as this is the 1st ever seen */
-       ca->minrtt = tp->srtt;
+       ca->minrtt_us = tp->srtt_us;
        tp->snd_cwnd = ca->rho;
 }
 
@@ -94,9 +96,9 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
        int is_slowstart = 0;
 
        /*  Recalculate rho only if this srtt is the lowest */
-       if (tp->srtt < ca->minrtt){
+       if (tp->srtt_us < ca->minrtt_us) {
                hybla_recalc_param(sk);
-               ca->minrtt = tp->srtt;
+               ca->minrtt_us = tp->srtt_us;
        }
 
        if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -166,7 +168,6 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
 static struct tcp_congestion_ops tcp_hybla __read_mostly = {
        .init           = hybla_init,
        .ssthresh       = tcp_reno_ssthresh,
-       .min_cwnd       = tcp_reno_min_cwnd,
        .cong_avoid     = hybla_cong_avoid,
        .set_state      = hybla_state,
 
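
The Hybla hunk is typical of the rest of the series: the smoothed RTT moves from jiffies (tp->srtt) to microseconds (tp->srtt_us), still stored left-shifted by 3, so the ratio against the 25 ms reference RTT is now taken over rtt0 * USEC_PER_MSEC. A standalone arithmetic check of the converted expression, with made-up values only:

#include <stdio.h>

#define USEC_PER_MSEC 1000U

int main(void)
{
	unsigned int rtt0 = 25;                 /* Hybla reference RTT, in ms */
	unsigned int srtt_us = 200000U << 3;    /* 200 ms smoothed RTT, <<3 as in tp->srtt_us */
	unsigned int rho_3ls = srtt_us / (rtt0 * USEC_PER_MSEC);

	if (rho_3ls < 8)
		rho_3ls = 8;                    /* the max_t(u32, ..., 8U) clamp */

	printf("rho = %u\n", rho_3ls >> 3);     /* 200 ms / 25 ms = 8 */
	return 0;
}
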
index e498a62b8f972d29126f24569c2dbc85037b80a7..863d105e30150391e9ac3ce8545c0411e4d083ab 100644 (file)
@@ -325,10 +325,8 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
 }
 
 static struct tcp_congestion_ops tcp_illinois __read_mostly = {
-       .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_illinois_init,
        .ssthresh       = tcp_illinois_ssthresh,
-       .min_cwnd       = tcp_reno_min_cwnd,
        .cong_avoid     = tcp_illinois_cong_avoid,
        .set_state      = tcp_illinois_state,
        .get_info       = tcp_illinois_info,
index eeaac399420de043bb466603fcb03ff83076438a..e1661f46fd19fb588555e9f769b9eed2c02fd27d 100644 (file)
@@ -667,11 +667,11 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
  * To save cycles in the RFC 1323 implementation it was better to break
  * it up into three procedures. -- erics
  */
-static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
+static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       long m = mrtt; /* RTT */
-       u32 srtt = tp->srtt;
+       long m = mrtt_us; /* RTT */
+       u32 srtt = tp->srtt_us;
 
        /*      The following amusing code comes from Jacobson's
         *      article in SIGCOMM '88.  Note that rtt and mdev
@@ -694,7 +694,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                srtt += m;              /* rtt = 7/8 rtt + 1/8 new */
                if (m < 0) {
                        m = -m;         /* m is now abs(error) */
-                       m -= (tp->mdev >> 2);   /* similar update on mdev */
+                       m -= (tp->mdev_us >> 2);   /* similar update on mdev */
                        /* This is similar to one of Eifel findings.
                         * Eifel blocks mdev updates when rtt decreases.
                         * This solution is a bit different: we use finer gain
@@ -706,28 +706,29 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
                        if (m > 0)
                                m >>= 3;
                } else {
-                       m -= (tp->mdev >> 2);   /* similar update on mdev */
+                       m -= (tp->mdev_us >> 2);   /* similar update on mdev */
                }
-               tp->mdev += m;          /* mdev = 3/4 mdev + 1/4 new */
-               if (tp->mdev > tp->mdev_max) {
-                       tp->mdev_max = tp->mdev;
-                       if (tp->mdev_max > tp->rttvar)
-                               tp->rttvar = tp->mdev_max;
+               tp->mdev_us += m;               /* mdev = 3/4 mdev + 1/4 new */
+               if (tp->mdev_us > tp->mdev_max_us) {
+                       tp->mdev_max_us = tp->mdev_us;
+                       if (tp->mdev_max_us > tp->rttvar_us)
+                               tp->rttvar_us = tp->mdev_max_us;
                }
                if (after(tp->snd_una, tp->rtt_seq)) {
-                       if (tp->mdev_max < tp->rttvar)
-                               tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
+                       if (tp->mdev_max_us < tp->rttvar_us)
+                               tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
                        tp->rtt_seq = tp->snd_nxt;
-                       tp->mdev_max = tcp_rto_min(sk);
+                       tp->mdev_max_us = tcp_rto_min_us(sk);
                }
        } else {
                /* no previous measure. */
                srtt = m << 3;          /* take the measured time to be rtt */
-               tp->mdev = m << 1;      /* make sure rto = 3*rtt */
-               tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+               tp->mdev_us = m << 1;   /* make sure rto = 3*rtt */
+               tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
+               tp->mdev_max_us = tp->rttvar_us;
                tp->rtt_seq = tp->snd_nxt;
        }
-       tp->srtt = max(1U, srtt);
+       tp->srtt_us = max(1U, srtt);
 }
 
 /* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -742,20 +743,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
        u64 rate;
 
        /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
-       rate = (u64)tp->mss_cache * 2 * (HZ << 3);
+       rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);
 
        rate *= max(tp->snd_cwnd, tp->packets_out);
 
-       /* Correction for small srtt and scheduling constraints.
-        * For small rtt, consider noise is too high, and use
-        * the minimal value (srtt = 1 -> 125 us for HZ=1000)
-        *
-        * We probably need usec resolution in the future.
-        * Note: This also takes care of possible srtt=0 case,
-        * when tcp_rtt_estimator() was not yet called.
-        */
-       if (tp->srtt > 8 + 2)
-               do_div(rate, tp->srtt);
+       if (likely(tp->srtt_us))
+               do_div(rate, tp->srtt_us);
 
        /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
         * without any lock. We want to make sure compiler wont store
@@ -1122,10 +1115,10 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 }
 
 struct tcp_sacktag_state {
-       int reord;
-       int fack_count;
-       int flag;
-       s32 rtt; /* RTT measured by SACKing never-retransmitted data */
+       int     reord;
+       int     fack_count;
+       long    rtt_us; /* RTT measured by SACKing never-retransmitted data */
+       int     flag;
 };
 
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1186,7 +1179,8 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
 static u8 tcp_sacktag_one(struct sock *sk,
                          struct tcp_sacktag_state *state, u8 sacked,
                          u32 start_seq, u32 end_seq,
-                         int dup_sack, int pcount, u32 xmit_time)
+                         int dup_sack, int pcount,
+                         const struct skb_mstamp *xmit_time)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int fack_count = state->fack_count;
@@ -1227,8 +1221,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
                                if (!after(end_seq, tp->high_seq))
                                        state->flag |= FLAG_ORIG_SACK_ACKED;
                                /* Pick the earliest sequence sacked for RTT */
-                               if (state->rtt < 0)
-                                       state->rtt = tcp_time_stamp - xmit_time;
+                               if (state->rtt_us < 0) {
+                                       struct skb_mstamp now;
+
+                                       skb_mstamp_get(&now);
+                                       state->rtt_us = skb_mstamp_us_delta(&now,
+                                                               xmit_time);
+                               }
                        }
 
                        if (sacked & TCPCB_LOST) {
@@ -1287,7 +1286,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
         */
        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
                        start_seq, end_seq, dup_sack, pcount,
-                       TCP_SKB_CB(skb)->when);
+                       &skb->skb_mstamp);
 
        if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
@@ -1565,7 +1564,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                                TCP_SKB_CB(skb)->end_seq,
                                                dup_sack,
                                                tcp_skb_pcount(skb),
-                                               TCP_SKB_CB(skb)->when);
+                                               &skb->skb_mstamp);
 
                        if (!before(TCP_SKB_CB(skb)->seq,
                                    tcp_highest_sack_seq(tp)))
@@ -1622,7 +1621,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-                       u32 prior_snd_una, s32 *sack_rtt)
+                       u32 prior_snd_una, long *sack_rtt_us)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1640,7 +1639,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
        state.flag = 0;
        state.reord = tp->packets_out;
-       state.rtt = -1;
+       state.rtt_us = -1L;
 
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
@@ -1824,7 +1823,7 @@ out:
        WARN_ON((int)tp->retrans_out < 0);
        WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-       *sack_rtt = state.rtt;
+       *sack_rtt_us = state.rtt_us;
        return state.flag;
 }
 
@@ -2035,10 +2034,12 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
         * available, or RTO is scheduled to fire first.
         */
        if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
-           (flag & FLAG_ECE) || !tp->srtt)
+           (flag & FLAG_ECE) || !tp->srtt_us)
                return false;
 
-       delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
+       delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
+                   msecs_to_jiffies(2));
+
        if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
                return false;
 
@@ -2885,7 +2886,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 }
 
 static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
-                                     s32 seq_rtt, s32 sack_rtt)
+                                     long seq_rtt_us, long sack_rtt_us)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2895,10 +2896,10 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
         * is acked (RFC6298).
         */
        if (flag & FLAG_RETRANS_DATA_ACKED)
-               seq_rtt = -1;
+               seq_rtt_us = -1L;
 
-       if (seq_rtt < 0)
-               seq_rtt = sack_rtt;
+       if (seq_rtt_us < 0)
+               seq_rtt_us = sack_rtt_us;
 
        /* RTTM Rule: A TSecr value received in a segment is used to
         * update the averaged RTT measurement only if the segment
@@ -2906,14 +2907,14 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
         * left edge of the send window.
         * See draft-ietf-tcplw-high-performance-00, section 3.3.
         */
-       if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+       if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
            flag & FLAG_ACKED)
-               seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+               seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
 
-       if (seq_rtt < 0)
+       if (seq_rtt_us < 0)
                return false;
 
-       tcp_rtt_estimator(sk, seq_rtt);
+       tcp_rtt_estimator(sk, seq_rtt_us);
        tcp_set_rto(sk);
 
        /* RFC6298: only reset backoff on valid RTT measurement. */
@@ -2925,16 +2926,16 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       s32 seq_rtt = -1;
+       long seq_rtt_us = -1L;
 
        if (synack_stamp && !tp->total_retrans)
-               seq_rtt = tcp_time_stamp - synack_stamp;
+               seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - synack_stamp);
 
        /* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
         * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
         */
-       if (!tp->srtt)
-               tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+       if (!tp->srtt_us)
+               tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
@@ -3023,26 +3024,27 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                              u32 prior_snd_una, s32 sack_rtt)
+                              u32 prior_snd_una, long sack_rtt_us)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
-       struct sk_buff *skb;
-       u32 now = tcp_time_stamp;
+       struct skb_mstamp first_ackt, last_ackt, now;
+       struct tcp_sock *tp = tcp_sk(sk);
+       u32 prior_sacked = tp->sacked_out;
+       u32 reord = tp->packets_out;
        bool fully_acked = true;
-       int flag = 0;
+       long ca_seq_rtt_us = -1L;
+       long seq_rtt_us = -1L;
+       struct sk_buff *skb;
        u32 pkts_acked = 0;
-       u32 reord = tp->packets_out;
-       u32 prior_sacked = tp->sacked_out;
-       s32 seq_rtt = -1;
-       s32 ca_seq_rtt = -1;
-       ktime_t last_ackt = net_invalid_timestamp();
        bool rtt_update;
+       int flag = 0;
+
+       first_ackt.v64 = 0;
 
        while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
-               u32 acked_pcount;
                u8 sacked = scb->sacked;
+               u32 acked_pcount;
 
                /* Determine how many packets and what bytes were acked, tso and else */
                if (after(scb->end_seq, tp->snd_una)) {
@@ -3064,11 +3066,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                tp->retrans_out -= acked_pcount;
                        flag |= FLAG_RETRANS_DATA_ACKED;
                } else {
-                       ca_seq_rtt = now - scb->when;
-                       last_ackt = skb->tstamp;
-                       if (seq_rtt < 0) {
-                               seq_rtt = ca_seq_rtt;
-                       }
+                       last_ackt = skb->skb_mstamp;
+                       WARN_ON_ONCE(last_ackt.v64 == 0);
+                       if (!first_ackt.v64)
+                               first_ackt = last_ackt;
+
                        if (!(sacked & TCPCB_SACKED_ACKED))
                                reord = min(pkts_acked, reord);
                        if (!after(scb->end_seq, tp->high_seq))
@@ -3114,7 +3116,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
                flag |= FLAG_SACK_RENEGING;
 
-       rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
+       skb_mstamp_get(&now);
+       if (first_ackt.v64) {
+               seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
+               ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+       }
+
+       rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
 
        if (flag & FLAG_ACKED) {
                const struct tcp_congestion_ops *ca_ops
@@ -3142,25 +3150,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
-               if (ca_ops->pkts_acked) {
-                       s32 rtt_us = -1;
-
-                       /* Is the ACK triggering packet unambiguous? */
-                       if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
-                               /* High resolution needed and available? */
-                               if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
-                                   !ktime_equal(last_ackt,
-                                                net_invalid_timestamp()))
-                                       rtt_us = ktime_us_delta(ktime_get_real(),
-                                                               last_ackt);
-                               else if (ca_seq_rtt >= 0)
-                                       rtt_us = jiffies_to_usecs(ca_seq_rtt);
-                       }
+               if (ca_ops->pkts_acked)
+                       ca_ops->pkts_acked(sk, pkts_acked, ca_seq_rtt_us);
 
-                       ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
-               }
-       } else if (skb && rtt_update && sack_rtt >= 0 &&
-                  sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+       } else if (skb && rtt_update && sack_rtt_us >= 0 &&
+                  sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
                /* Do not re-arm RTO if the sack RTT is measured from data sent
                 * after when the head was last (re)transmitted. Otherwise the
                 * timeout may continue to extend in loss recovery.
@@ -3370,12 +3364,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
        bool is_dupack = false;
-       u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
+       u32 prior_in_flight;
        u32 prior_fackets;
        int prior_packets = tp->packets_out;
        const int prior_unsacked = tp->packets_out - tp->sacked_out;
        int acked = 0; /* Number of packets newly acked */
-       s32 sack_rtt = -1;
+       long sack_rtt_us = -1L;
 
        /* If the ack is older than previous acks
         * then we can probably ignore it.
@@ -3433,7 +3427,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                                       &sack_rtt);
+                                                       &sack_rtt_us);
 
                if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                        flag |= FLAG_ECE;
@@ -3452,7 +3446,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        /* See if we can take anything off of the retransmit queue. */
        acked = tp->packets_out;
-       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
+       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
+                                   sack_rtt_us);
        acked -= tp->packets_out;
 
        /* Advance cwnd if state allows */
@@ -3475,8 +3470,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS)
                tcp_schedule_loss_probe(sk);
-       if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
-               tcp_update_pacing_rate(sk);
+       tcp_update_pacing_rate(sk);
        return 1;
 
 no_queue:
@@ -3505,7 +3499,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                               &sack_rtt);
+                                               &sack_rtt_us);
                tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        }
@@ -5401,9 +5395,12 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
                                break;
                }
                tcp_rearm_rto(sk);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                return true;
        }
        tp->syn_data_acked = tp->syn_data;
+       if (tp->syn_data_acked)
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
        return false;
 }
 
index 1e4eac779f51c81bf5472d13ed446fefb0827753..6379894ec210c0f3077366a539417385671c5faa 100644 (file)
@@ -435,7 +435,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                        break;
 
                icsk->icsk_backoff--;
-               inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
+               inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
                        TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
                tcp_bound_rto(sk);
 
@@ -854,8 +854,10 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
 {
        int res = tcp_v4_send_synack(sk, NULL, req, 0);
 
-       if (!res)
+       if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+       }
        return res;
 }
 
@@ -878,8 +880,6 @@ bool tcp_syn_flood_action(struct sock *sk,
        bool want_cookie = false;
        struct listen_sock *lopt;
 
-
-
 #ifdef CONFIG_SYN_COOKIES
        if (sysctl_tcp_syncookies) {
                msg = "Sending cookies";
index 991d62a2f9bb3abb1d155d075c202c1ea558ca7f..c9aecae313276d134ef56d1385ac44266c200a51 100644 (file)
@@ -315,11 +315,9 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
 }
 
 static struct tcp_congestion_ops tcp_lp __read_mostly = {
-       .flags = TCP_CONG_RTT_STAMP,
        .init = tcp_lp_init,
        .ssthresh = tcp_reno_ssthresh,
        .cong_avoid = tcp_lp_cong_avoid,
-       .min_cwnd = tcp_reno_min_cwnd,
        .pkts_acked = tcp_lp_pkts_acked,
 
        .owner = THIS_MODULE,
index d547075d830014d5932fbc98741f8c77b4c1e590..dcaf72f10216c22f00ef918963d260d4ce472b99 100644 (file)
@@ -33,6 +33,11 @@ struct tcp_fastopen_metrics {
        struct  tcp_fastopen_cookie     cookie;
 };
 
+/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
+ * The kernel itself only stores RTT and RTTVAR, in usec resolution.
+ */
+#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
+
 struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_saddr;
@@ -41,7 +46,7 @@ struct tcp_metrics_block {
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
-       u32                             tcpm_vals[TCP_METRIC_MAX + 1];
+       u32                             tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;
 
        struct rcu_head                 rcu_head;
@@ -59,12 +64,6 @@ static u32 tcp_metric_get(struct tcp_metrics_block *tm,
        return tm->tcpm_vals[idx];
 }
 
-static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
-                                 enum tcp_metric_index idx)
-{
-       return msecs_to_jiffies(tm->tcpm_vals[idx]);
-}
-
 static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
@@ -72,13 +71,6 @@ static void tcp_metric_set(struct tcp_metrics_block *tm,
        tm->tcpm_vals[idx] = val;
 }
 
-static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
-                                enum tcp_metric_index idx,
-                                u32 val)
-{
-       tm->tcpm_vals[idx] = jiffies_to_msecs(val);
-}
-
 static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
 {
@@ -101,9 +93,11 @@ struct tcpm_hash_bucket {
 
 static DEFINE_SPINLOCK(tcp_metrics_lock);
 
-static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
+static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+                         const struct dst_entry *dst,
                          bool fastopen_clear)
 {
+       u32 msval;
        u32 val;
 
        tm->tcpm_stamp = jiffies;
@@ -121,8 +115,11 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;
 
-       tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
-       tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
+       msval = dst_metric_raw(dst, RTAX_RTT);
+       tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
+
+       msval = dst_metric_raw(dst, RTAX_RTTVAR);
+       tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
@@ -384,7 +381,7 @@ void tcp_update_metrics(struct sock *sk)
                dst_confirm(dst);
 
        rcu_read_lock();
-       if (icsk->icsk_backoff || !tp->srtt) {
+       if (icsk->icsk_backoff || !tp->srtt_us) {
                /* This session failed to estimate rtt. Why?
                 * Probably, no packets returned in time.  Reset our
                 * results.
@@ -399,8 +396,8 @@ void tcp_update_metrics(struct sock *sk)
        if (!tm)
                goto out_unlock;
 
-       rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
-       m = rtt - tp->srtt;
+       rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
+       m = rtt - tp->srtt_us;
 
        /* If newly calculated rtt larger than stored one, store new
         * one. Otherwise, use EWMA. Remember, rtt overestimation is
@@ -408,10 +405,10 @@ void tcp_update_metrics(struct sock *sk)
         */
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
-                       rtt = tp->srtt;
+                       rtt = tp->srtt_us;
                else
                        rtt -= (m >> 3);
-               tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
+               tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
        }
 
        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
@@ -422,16 +419,16 @@ void tcp_update_metrics(struct sock *sk)
 
                /* Scale deviation to rttvar fixed point */
                m >>= 1;
-               if (m < tp->mdev)
-                       m = tp->mdev;
+               if (m < tp->mdev_us)
+                       m = tp->mdev_us;
 
-               var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
+               var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;
 
-               tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
+               tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
        }
 
        if (tcp_in_initial_slowstart(tp)) {
@@ -528,7 +525,7 @@ void tcp_init_metrics(struct sock *sk)
                tp->reordering = val;
        }
 
-       crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
+       crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
 reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
@@ -551,18 +548,20 @@ reset:
         * to low value, and then abruptly stops to do it and starts to delay
         * ACKs, wait for troubles.
         */
-       if (crtt > tp->srtt) {
+       if (crtt > tp->srtt_us) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
-               crtt >>= 3;
+               crtt /= 8 * USEC_PER_MSEC;
                inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
-       } else if (tp->srtt == 0) {
+       } else if (tp->srtt_us == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * 3WHS. This is most likely due to retransmission,
                 * including spurious one. Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
-               tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+               tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
+               tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
+
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
@@ -809,10 +808,26 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
-               for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
-                       if (!tm->tcpm_vals[i])
+               for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
+                       u32 val = tm->tcpm_vals[i];
+
+                       if (!val)
                                continue;
-                       if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
+                       if (i == TCP_METRIC_RTT) {
+                               if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
+                                               val) < 0)
+                                       goto nla_put_failure;
+                               n++;
+                               val = max(val / 1000, 1U);
+                       }
+                       if (i == TCP_METRIC_RTTVAR) {
+                               if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
+                                               val) < 0)
+                                       goto nla_put_failure;
+                               n++;
+                               val = max(val / 1000, 1U);
+                       }
+                       if (nla_put_u32(msg, i + 1, val) < 0)
                                goto nla_put_failure;
                        n++;
                }
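
tcp_metrics_fill_info() above now emits each RTT metric twice: the new *_US attribute carries the raw microsecond value, and the legacy attribute keeps millisecond units (rounded up to at least 1 ms) for existing userspace. A hedged, standalone sketch of that dual export; put_u32() and the attribute ids below are stand-ins, not the real netlink helpers:

#include <stdio.h>

/* Illustrative attribute ids, in the same order as enum tcp_metric_index. */
enum { METRIC_RTT, METRIC_RTTVAR, METRIC_SSTHRESH, METRIC_CWND,
       METRIC_REORDERING, METRIC_RTT_US, METRIC_RTTVAR_US };

static int put_u32(int attr, unsigned int val)
{
	printf("attr %d = %u\n", attr, val);
	return 0;
}

static int export_rtt(unsigned int rtt_us)
{
	unsigned int rtt_ms = rtt_us / 1000;

	if (rtt_ms == 0)
		rtt_ms = 1;	/* max(val / 1000, 1U): never report 0 ms */

	if (put_u32(METRIC_RTT_US, rtt_us) < 0)		/* new attribute, usec */
		return -1;
	return put_u32(METRIC_RTT, rtt_ms);		/* legacy attribute, msec */
}

int main(void)
{
	return export_rtt(250);		/* 250 us becomes 1 ms in the legacy field */
}
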
index 7a436c517e443f5c201251dbb90552f703f7a9b5..ca788ada5bd3619f2db2b3865e6469eb7c60f015 100644 (file)
@@ -398,8 +398,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 
                tcp_init_wl(newtp, treq->rcv_isn);
 
-               newtp->srtt = 0;
-               newtp->mdev = TCP_TIMEOUT_INIT;
+               newtp->srtt_us = 0;
+               newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
 
                newtp->packets_out = 0;
index 17a11e65e57fea3fa3728ce905df6130c580c48a..699fb102e9710694f342951cf194facd153f7d37 100644 (file)
@@ -86,6 +86,9 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                tcp_rearm_rto(sk);
        }
+
+       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
+                     tcp_skb_pcount(skb));
 }
 
 /* SND.NXT, if window was not shrunk.
@@ -269,6 +272,7 @@ EXPORT_SYMBOL(tcp_select_initial_window);
 static u16 tcp_select_window(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 old_win = tp->rcv_wnd;
        u32 cur_win = tcp_receive_window(tp);
        u32 new_win = __tcp_select_window(sk);
 
@@ -281,6 +285,9 @@ static u16 tcp_select_window(struct sock *sk)
                 *
                 * Relax Will Robinson.
                 */
+               if (new_win == 0)
+                       NET_INC_STATS(sock_net(sk),
+                                     LINUX_MIB_TCPWANTZEROWINDOWADV);
                new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
        }
        tp->rcv_wnd = new_win;
@@ -298,8 +305,14 @@ static u16 tcp_select_window(struct sock *sk)
        new_win >>= tp->rx_opt.rcv_wscale;
 
        /* If we advertise zero window, disable fast path. */
-       if (new_win == 0)
+       if (new_win == 0) {
                tp->pred_flags = 0;
+               if (old_win)
+                       NET_INC_STATS(sock_net(sk),
+                                     LINUX_MIB_TCPTOZEROWINDOWADV);
+       } else if (old_win == 0) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
+       }
 
        return new_win;
 }
@@ -867,11 +880,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        if (clone_it) {
                const struct sk_buff *fclone = skb + 1;
 
-               /* If congestion control is doing timestamping, we must
-                * take such a timestamp before we potentially clone/copy.
-                */
-               if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
-                       __net_timestamp(skb);
+               skb_mstamp_get(&skb->skb_mstamp);
 
                if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
                             fclone->fclone == SKB_FCLONE_CLONE))
@@ -884,6 +893,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                        skb = skb_clone(skb, gfp_mask);
                if (unlikely(!skb))
                        return -ENOBUFS;
+               /* Our usage of tstamp should remain private */
+               skb->tstamp.tv64 = 0;
        }
 
        inet = inet_sk(sk);
@@ -1426,7 +1437,7 @@ static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
  *    With Minshall's modification: all sent small packets are ACKed.
  */
 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
-                           unsigned int mss_now, int nonagle)
+                           int nonagle)
 {
        return partial &&
                ((nonagle & TCP_NAGLE_CORK) ||
@@ -1458,7 +1469,7 @@ static unsigned int tcp_mss_split_point(const struct sock *sk,
         * to include this last segment in this skb.
         * Otherwise, we'll split the skb at last MSS boundary
         */
-       if (tcp_nagle_check(partial != 0, tp, mss_now, nonagle))
+       if (tcp_nagle_check(partial != 0, tp, nonagle))
                return needed - partial;
 
        return needed;
@@ -1521,7 +1532,7 @@ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buf
        if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
                return true;
 
-       if (!tcp_nagle_check(skb->len < cur_mss, tp, cur_mss, nonagle))
+       if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
                return true;
 
        return false;
@@ -1975,7 +1986,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, tlp_time_stamp, rto_time_stamp;
-       u32 rtt = tp->srtt >> 3;
+       u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
 
        if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
                return false;
@@ -1997,7 +2008,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        /* Schedule a loss probe in 2*RTT for SACK capable connections
         * in Open state, that are either limited by cwnd or application.
         */
-       if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
+       if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out ||
            !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
                return false;
 
@@ -2082,7 +2093,6 @@ rearm_timer:
        if (likely(!err))
                NET_INC_STATS_BH(sock_net(sk),
                                 LINUX_MIB_TCPLOSSPROBES);
-       return;
 }
 
 /* Push out any pending frames which were held back due to
@@ -2180,7 +2190,8 @@ u32 __tcp_select_window(struct sock *sk)
         */
        int mss = icsk->icsk_ack.rcv_mss;
        int free_space = tcp_space(sk);
-       int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
+       int allowed_space = tcp_full_space(sk);
+       int full_space = min_t(int, tp->window_clamp, allowed_space);
        int window;
 
        if (mss > full_space)
@@ -2193,7 +2204,19 @@ u32 __tcp_select_window(struct sock *sk)
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);
 
-               if (free_space < mss)
+               /* free_space might become our new window, make sure we don't
+                * increase it due to wscale.
+                */
+               free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
+
+               /* If free space is less than the mss estimate, or below 1/16th
+                * of the maximum allowed space, try to move to a zero window;
+                * otherwise tcp_clamp_window() will grow the receive buffer up
+                * to tcp_rmem[2] and new incoming data is dropped due to memory
+                * limits.  With a large window, the mss test alone triggers far
+                * too late to announce a zero window before the rmem limit kicks in.
+                */
+               if (free_space < (allowed_space >> 4) || free_space < mss)
                        return 0;
        }
 
@@ -2431,7 +2454,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        if (err == 0) {
                /* Update global TCP statistics. */
                TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-
+               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
                tp->total_retrans++;
 
 #if FASTRETRANS_DEBUG > 0
@@ -2717,7 +2741,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        int tcp_header_size;
        int mss;
 
-       skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
+       skb = sock_wmalloc(sk, MAX_TCP_HEADER, 1, GFP_ATOMIC);
        if (unlikely(!skb)) {
                dst_release(dst);
                return NULL;
@@ -2787,7 +2811,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        th->window = htons(min(req->rcv_wnd, 65535U));
        tcp_options_write((__be32 *)(th + 1), tp, &opts);
        th->doff = (tcp_header_size >> 2);
-       TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
+       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Okay, we have all we need - do the md5 hash if needed */
@@ -2959,9 +2983,15 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
        tcp_connect_queue_skb(sk, data);
        fo->copied = data->len;
 
+       /* syn_data is about to be sent; take current timestamps for the
+        * packets already in the write queue: the SYN packet and the DATA.
+        */
+       skb_mstamp_get(&syn->skb_mstamp);
+       data->skb_mstamp = syn->skb_mstamp;
+
        if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
                tp->syn_data = (fo->copied > 0);
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
                goto done;
        }
        syn_data = NULL;
@@ -3049,8 +3079,9 @@ void tcp_send_delayed_ack(struct sock *sk)
                 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
                 * directly.
                 */
-               if (tp->srtt) {
-                       int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
+               if (tp->srtt_us) {
+                       int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
+                                       TCP_DELACK_MIN);
 
                        if (rtt < max_ato)
                                max_ato = rtt;
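
Here tp->srtt stored a jiffies-scaled value, while tp->srtt_us stores eight times the smoothed RTT in microseconds, so the delayed-ACK ceiling is now derived via usecs_to_jiffies() and clamped below by TCP_DELACK_MIN. A small standalone sketch of the same clamping is shown below; it assumes HZ = 1000 and takes TCP_DELACK_MIN as HZ/25 (40 ms), with purely illustrative inputs.

#include <stdio.h>

#define HZ		1000		/* assumed tick rate for this sketch */
#define TCP_DELACK_MIN	(HZ / 25)	/* 40 ms at HZ = 1000 (assumed)      */

/* srtt_us holds 8 * smoothed RTT in microseconds, so >> 3 yields the RTT. */
static int delack_max_ato(unsigned int srtt_us, int max_ato)
{
	int rtt = (int)((srtt_us >> 3) / (1000000 / HZ));	/* usecs -> ticks */

	if (rtt < TCP_DELACK_MIN)
		rtt = TCP_DELACK_MIN;
	return rtt < max_ato ? rtt : max_ato;
}

int main(void)
{
	/* Hypothetical: 25 ms smoothed RTT (stored as 8 * 25000 us), 200 ms cap. */
	printf("max ato = %d ticks\n", delack_max_ato(8 * 25000, HZ / 5));
	return 0;
}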
index 1f2d37613c9e068058171abd53efa8709d63657e..3b66610d41562035c541304924fc27a4eb416a6e 100644 (file)
@@ -154,7 +154,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        p->snd_wnd = tp->snd_wnd;
                        p->rcv_wnd = tp->rcv_wnd;
                        p->ssthresh = tcp_current_ssthresh(sk);
-                       p->srtt = tp->srtt >> 3;
+                       p->srtt = tp->srtt_us >> 3;
 
                        tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
                }
index 19ea6c2951f3b35a29a9ddc04f8578f7abe3f87c..0ac50836da4d42832f3aa35c9a4cebbf79f69981 100644 (file)
@@ -39,7 +39,6 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
        .ssthresh       = tcp_scalable_ssthresh,
        .cong_avoid     = tcp_scalable_cong_avoid,
-       .min_cwnd       = tcp_reno_min_cwnd,
 
        .owner          = THIS_MODULE,
        .name           = "scalable",
index 64f0354c84c7a8956230f2794b4dcb56331d98fc..286227abed10656a5efeb7026a9741ad1b6c6207 100644 (file)
@@ -165,6 +165,9 @@ static int tcp_write_timeout(struct sock *sk)
                        dst_negative_advice(sk);
                        if (tp->syn_fastopen || tp->syn_data)
                                tcp_fastopen_cache_set(sk, 0, NULL, true);
+                       if (tp->syn_data)
+                               NET_INC_STATS_BH(sock_net(sk),
+                                                LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
                syn_set = true;
index 06cae62bf20845fe67b6eff1c7919ec4f342569f..48539fff6357a4e778c537b99bb9a7fd49eb43b3 100644 (file)
@@ -306,11 +306,9 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
 EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 
 static struct tcp_congestion_ops tcp_vegas __read_mostly = {
-       .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_vegas_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_vegas_cong_avoid,
-       .min_cwnd       = tcp_reno_min_cwnd,
        .pkts_acked     = tcp_vegas_pkts_acked,
        .set_state      = tcp_vegas_state,
        .cwnd_event     = tcp_vegas_cwnd_event,
index 326475a94865c2fd0cbcf7bffe97c21d37159683..1b8e28fcd7e1cab3edd586db1b716742a4402fd7 100644 (file)
@@ -203,7 +203,6 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 }
 
 static struct tcp_congestion_ops tcp_veno __read_mostly = {
-       .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_veno_init,
        .ssthresh       = tcp_veno_ssthresh,
        .cong_avoid     = tcp_veno_cong_avoid,
index 76a1e23259e1fa713bb447486e4e94848e419d41..b94a04ae2ed5672eca79a172c5c3467a677186a1 100644 (file)
@@ -276,7 +276,6 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
-       .min_cwnd       = tcp_westwood_bw_rttmin,
        .cwnd_event     = tcp_westwood_event,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,
index 1a8d271f994da4f5e5ad765b342c82e1efe198fa..5ede0e727945add71904a2d3c57d334e77e94baf 100644 (file)
@@ -227,11 +227,9 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
 }
 
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
-       .flags          = TCP_CONG_RTT_STAMP,
        .init           = tcp_yeah_init,
        .ssthresh       = tcp_yeah_ssthresh,
        .cong_avoid     = tcp_yeah_cong_avoid,
-       .min_cwnd       = tcp_reno_min_cwnd,
        .set_state      = tcp_vegas_state,
        .cwnd_event     = tcp_vegas_cwnd_event,
        .get_info       = tcp_vegas_get_info,
index 77bd16fa9f34381a79e40f3b9c4b0adb4631f186..4468e1adc094a1f6f12eb20cf7c79a9b9187826d 100644 (file)
@@ -931,7 +931,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        sock_tx_timestamp(sk, &ipc.tx_flags);
 
        if (msg->msg_controllen) {
-               err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+               err = ip_cmsg_send(sock_net(sk), msg, &ipc,
+                                  sk->sk_family == AF_INET6);
                if (err)
                        return err;
                if (ipc.opt)
index 1f12c8b4586497931831515e06a8005140863ff5..aac6197b7a7132f31af9a80d960d94d4a9f92290 100644 (file)
@@ -37,15 +37,6 @@ drop:
        return NET_RX_DROP;
 }
 
-int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
-                   int encap_type)
-{
-       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
-       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-       return xfrm_input(skb, nexthdr, spi, encap_type);
-}
-EXPORT_SYMBOL(xfrm4_rcv_encap);
-
 int xfrm4_transport_finish(struct sk_buff *skb, int async)
 {
        struct iphdr *iph = ip_hdr(skb);
index 31b18152528fe4dbf9e500ae0c9a2a1a5a3a2adf..05f2b484954feda957d04ff2f0300eedf9c97263 100644 (file)
 #include <net/ip.h>
 #include <net/xfrm.h>
 
-/* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
-static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
-
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
-{
-       struct xfrm_tunnel_notifier __rcu **pprev;
-       struct xfrm_tunnel_notifier *t;
-       int ret = -EEXIST;
-       int priority = handler->priority;
-
-       mutex_lock(&xfrm4_mode_tunnel_input_mutex);
-
-       for (pprev = &rcv_notify_handlers;
-            (t = rcu_dereference_protected(*pprev,
-            lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
-            pprev = &t->next) {
-               if (t->priority > priority)
-                       break;
-               if (t->priority == priority)
-                       goto err;
-
-       }
-
-       handler->next = *pprev;
-       rcu_assign_pointer(*pprev, handler);
-
-       ret = 0;
-
-err:
-       mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
-
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
-{
-       struct xfrm_tunnel_notifier __rcu **pprev;
-       struct xfrm_tunnel_notifier *t;
-       int ret = -ENOENT;
-
-       mutex_lock(&xfrm4_mode_tunnel_input_mutex);
-       for (pprev = &rcv_notify_handlers;
-            (t = rcu_dereference_protected(*pprev,
-            lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
-            pprev = &t->next) {
-               if (t == handler) {
-                       *pprev = handler->next;
-                       ret = 0;
-                       break;
-               }
-       }
-       mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
-       synchronize_net();
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_deregister);
-
 static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
 {
        struct iphdr *inner_iph = ipip_hdr(skb);
@@ -127,14 +68,8 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
        return 0;
 }
 
-#define for_each_input_rcu(head, handler)      \
-       for (handler = rcu_dereference(head);   \
-            handler != NULL;                   \
-            handler = rcu_dereference(handler->next))
-
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -143,9 +78,6 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
 
-       for_each_input_rcu(rcv_notify_handlers, handler)
-               handler->handler(skb);
-
        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;
index e1a63930a96789b7df67a8b9914cd3bb69fb1cd9..6156f68a1e90b53f7504a1e6f729b60c29d52b3a 100644 (file)
@@ -325,6 +325,7 @@ void __init xfrm4_init(void)
 
        xfrm4_state_init();
        xfrm4_policy_init();
+       xfrm4_protocol_init();
 #ifdef CONFIG_SYSCTL
        register_pernet_subsys(&xfrm4_net_ops);
 #endif
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
new file mode 100644 (file)
index 0000000..7f7b243
--- /dev/null
@@ -0,0 +1,286 @@
+/* xfrm4_protocol.c - Generic xfrm protocol multiplexer.
+ *
+ * Copyright (C) 2013 secunet Security Networks AG
+ *
+ * Author:
+ * Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * Based on:
+ * net/ipv4/tunnel4.c
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+static struct xfrm4_protocol __rcu *esp4_handlers __read_mostly;
+static struct xfrm4_protocol __rcu *ah4_handlers __read_mostly;
+static struct xfrm4_protocol __rcu *ipcomp4_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm4_protocol_mutex);
+
+static inline struct xfrm4_protocol __rcu **proto_handlers(u8 protocol)
+{
+       switch (protocol) {
+       case IPPROTO_ESP:
+               return &esp4_handlers;
+       case IPPROTO_AH:
+               return &ah4_handlers;
+       case IPPROTO_COMP:
+               return &ipcomp4_handlers;
+       }
+
+       return NULL;
+}
+
+#define for_each_protocol_rcu(head, handler)           \
+       for (handler = rcu_dereference(head);           \
+            handler != NULL;                           \
+            handler = rcu_dereference(handler->next))  \
+
+int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
+{
+       int ret;
+       struct xfrm4_protocol *handler;
+
+       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+               if ((ret = handler->cb_handler(skb, err)) <= 0)
+                       return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL(xfrm4_rcv_cb);
+
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+                   int encap_type)
+{
+       int ret;
+       struct xfrm4_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+       for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+               if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
+                       return ret;
+
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+EXPORT_SYMBOL(xfrm4_rcv_encap);
+
+static int xfrm4_esp_rcv(struct sk_buff *skb)
+{
+       int ret;
+       struct xfrm4_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+
+       for_each_protocol_rcu(esp4_handlers, handler)
+               if ((ret = handler->handler(skb)) != -EINVAL)
+                       return ret;
+
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static void xfrm4_esp_err(struct sk_buff *skb, u32 info)
+{
+       struct xfrm4_protocol *handler;
+
+       for_each_protocol_rcu(esp4_handlers, handler)
+               if (!handler->err_handler(skb, info))
+                       break;
+}
+
+static int xfrm4_ah_rcv(struct sk_buff *skb)
+{
+       int ret;
+       struct xfrm4_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+
+       for_each_protocol_rcu(ah4_handlers, handler)
+               if ((ret = handler->handler(skb)) != -EINVAL)
+                       return ret;
+
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static void xfrm4_ah_err(struct sk_buff *skb, u32 info)
+{
+       struct xfrm4_protocol *handler;
+
+       for_each_protocol_rcu(ah4_handlers, handler)
+               if (!handler->err_handler(skb, info))
+                       break;
+}
+
+static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
+{
+       int ret;
+       struct xfrm4_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+
+       for_each_protocol_rcu(ipcomp4_handlers, handler)
+               if ((ret = handler->handler(skb)) != -EINVAL)
+                       return ret;
+
+       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static void xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
+{
+       struct xfrm4_protocol *handler;
+
+       for_each_protocol_rcu(ipcomp4_handlers, handler)
+               if (!handler->err_handler(skb, info))
+                       break;
+}
+
+static const struct net_protocol esp4_protocol = {
+       .handler        =       xfrm4_esp_rcv,
+       .err_handler    =       xfrm4_esp_err,
+       .no_policy      =       1,
+       .netns_ok       =       1,
+};
+
+static const struct net_protocol ah4_protocol = {
+       .handler        =       xfrm4_ah_rcv,
+       .err_handler    =       xfrm4_ah_err,
+       .no_policy      =       1,
+       .netns_ok       =       1,
+};
+
+static const struct net_protocol ipcomp4_protocol = {
+       .handler        =       xfrm4_ipcomp_rcv,
+       .err_handler    =       xfrm4_ipcomp_err,
+       .no_policy      =       1,
+       .netns_ok       =       1,
+};
+
+static struct xfrm_input_afinfo xfrm4_input_afinfo = {
+       .family         =       AF_INET,
+       .owner          =       THIS_MODULE,
+       .callback       =       xfrm4_rcv_cb,
+};
+
+static inline const struct net_protocol *netproto(unsigned char protocol)
+{
+       switch (protocol) {
+       case IPPROTO_ESP:
+               return &esp4_protocol;
+       case IPPROTO_AH:
+               return &ah4_protocol;
+       case IPPROTO_COMP:
+               return &ipcomp4_protocol;
+       }
+
+       return NULL;
+}
+
+int xfrm4_protocol_register(struct xfrm4_protocol *handler,
+                           unsigned char protocol)
+{
+       struct xfrm4_protocol __rcu **pprev;
+       struct xfrm4_protocol *t;
+       bool add_netproto = false;
+       int ret = -EEXIST;
+       int priority = handler->priority;
+
+       mutex_lock(&xfrm4_protocol_mutex);
+
+       if (!rcu_dereference_protected(*proto_handlers(protocol),
+                                      lockdep_is_held(&xfrm4_protocol_mutex)))
+               add_netproto = true;
+
+       for (pprev = proto_handlers(protocol);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority < priority)
+                       break;
+               if (t->priority == priority)
+                       goto err;
+       }
+
+       handler->next = *pprev;
+       rcu_assign_pointer(*pprev, handler);
+
+       ret = 0;
+
+err:
+       mutex_unlock(&xfrm4_protocol_mutex);
+
+       if (add_netproto) {
+               if (inet_add_protocol(netproto(protocol), protocol)) {
+                       pr_err("%s: can't add protocol\n", __func__);
+                       ret = -EAGAIN;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(xfrm4_protocol_register);
+
+int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
+                             unsigned char protocol)
+{
+       struct xfrm4_protocol __rcu **pprev;
+       struct xfrm4_protocol *t;
+       int ret = -ENOENT;
+
+       mutex_lock(&xfrm4_protocol_mutex);
+
+       for (pprev = proto_handlers(protocol);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
+                       *pprev = handler->next;
+                       ret = 0;
+                       break;
+               }
+       }
+
+       if (!rcu_dereference_protected(*proto_handlers(protocol),
+                                      lockdep_is_held(&xfrm4_protocol_mutex))) {
+               if (inet_del_protocol(netproto(protocol), protocol) < 0) {
+                       pr_err("%s: can't remove protocol\n", __func__);
+                       ret = -EAGAIN;
+               }
+       }
+
+       mutex_unlock(&xfrm4_protocol_mutex);
+
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL(xfrm4_protocol_deregister);
+
+void __init xfrm4_protocol_init(void)
+{
+       xfrm_input_register_afinfo(&xfrm4_input_afinfo);
+}
+EXPORT_SYMBOL(xfrm4_protocol_init);
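
The new file above keeps one RCU-protected, priority-ordered handler list per IPsec protocol (ESP, AH, IPcomp) and walks it until some handler accepts the packet, falling back to an ICMP port-unreachable when none does. The registration step is just an ordered singly-linked-list insert; the sketch below reproduces that insert in plain userspace C. The names proto_handler and handler_register are invented for the example, and the RCU primitives, the mutex, and the inet_add_protocol() bookkeeping of xfrm4_protocol_register() are deliberately left out.

#include <errno.h>
#include <stdio.h>

struct proto_handler {
	struct proto_handler *next;
	int priority;		/* higher priority sits earlier in the list */
	const char *name;
};

/* Insert by descending priority; reject a second handler with the same priority. */
static int handler_register(struct proto_handler **head, struct proto_handler *h)
{
	struct proto_handler **pprev;
	struct proto_handler *t;

	for (pprev = head; (t = *pprev) != NULL; pprev = &t->next) {
		if (t->priority < h->priority)
			break;
		if (t->priority == h->priority)
			return -EEXIST;
	}
	h->next = *pprev;
	*pprev = h;
	return 0;
}

int main(void)
{
	struct proto_handler *head = NULL;
	struct proto_handler base = { .priority = 0,  .name = "base xfrm input" };
	struct proto_handler vti  = { .priority = 10, .name = "vti-style hook"  };

	handler_register(&head, &base);
	handler_register(&head, &vti);

	for (struct proto_handler *t = head; t; t = t->next)
		printf("%s (prio %d)\n", t->name, t->priority);
	return 0;
}

Receive-side dispatch then mirrors for_each_protocol_rcu(): walk the list in order and stop at the first handler whose return value is not -EINVAL.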
index 17bb830872db21e07080ed9f6a0ef93cbccee9b7..2fe68364bb20610c39f20a21b4d32498ff5741d1 100644 (file)
@@ -16,7 +16,7 @@ ipv6-$(CONFIG_SYSCTL) = sysctl_net_ipv6.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
 
 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
-       xfrm6_output.o
+       xfrm6_output.o xfrm6_protocol.o
 ipv6-$(CONFIG_NETFILTER) += netfilter.o
 ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o
 ipv6-$(CONFIG_PROC_FS) += proc.o
index b30ad3741b46732ee2c38bc851fa759ac02ff698..731e1e1722d9b4322f3907e57e3a1fd1994230b5 100644 (file)
@@ -6,7 +6,7 @@
  */
 /*
  * Author:
- *     YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
+ *     YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
  */
 
 #include <linux/kernel.h>
 #if 0
 #define ADDRLABEL(x...) printk(x)
 #else
-#define ADDRLABEL(x...) do { ; } while(0)
+#define ADDRLABEL(x...) do { ; } while (0)
 #endif
 
 /*
  * Policy Table
  */
-struct ip6addrlbl_entry
-{
+struct ip6addrlbl_entry {
 #ifdef CONFIG_NET_NS
        struct net *lbl_net;
 #endif
@@ -88,39 +87,39 @@ static const __net_initconst struct ip6addrlbl_init_table
        {       /* ::/0 */
                .prefix = &in6addr_any,
                .label = 1,
-       },{     /* fc00::/7 */
-               .prefix = &(struct in6_addr){{{ 0xfc }}},
+       }, {    /* fc00::/7 */
+               .prefix = &(struct in6_addr){ { { 0xfc } } } ,
                .prefixlen = 7,
                .label = 5,
-       },{     /* fec0::/10 */
-               .prefix = &(struct in6_addr){{{ 0xfe, 0xc0 }}},
+       }, {    /* fec0::/10 */
+               .prefix = &(struct in6_addr){ { { 0xfe, 0xc0 } } },
                .prefixlen = 10,
                .label = 11,
-       },{     /* 2002::/16 */
-               .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}},
+       }, {    /* 2002::/16 */
+               .prefix = &(struct in6_addr){ { { 0x20, 0x02 } } },
                .prefixlen = 16,
                .label = 2,
-       },{     /* 3ffe::/16 */
-               .prefix = &(struct in6_addr){{{ 0x3f, 0xfe }}},
+       }, {    /* 3ffe::/16 */
+               .prefix = &(struct in6_addr){ { { 0x3f, 0xfe } } },
                .prefixlen = 16,
                .label = 12,
-       },{     /* 2001::/32 */
-               .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
+       }, {    /* 2001::/32 */
+               .prefix = &(struct in6_addr){ { { 0x20, 0x01 } } },
                .prefixlen = 32,
                .label = 6,
-       },{     /* 2001:10::/28 */
-               .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}},
+       }, {    /* 2001:10::/28 */
+               .prefix = &(struct in6_addr){ { { 0x20, 0x01, 0x00, 0x10 } } },
                .prefixlen = 28,
                .label = 7,
-       },{     /* ::ffff:0:0 */
-               .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}},
+       }, {    /* ::ffff:0:0 */
+               .prefix = &(struct in6_addr){ { { [10] = 0xff, [11] = 0xff } } },
                .prefixlen = 96,
                .label = 4,
-       },{     /* ::/96 */
+       }, {    /* ::/96 */
                .prefix = &in6addr_any,
                .prefixlen = 96,
                .label = 3,
-       },{     /* ::1/128 */
+       }, {    /* ::1/128 */
                .prefix = &in6addr_loopback,
                .prefixlen = 128,
                .label = 0,
@@ -441,7 +440,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (label == IPV6_ADDR_LABEL_DEFAULT)
                return -EINVAL;
 
-       switch(nlh->nlmsg_type) {
+       switch (nlh->nlmsg_type) {
        case RTM_NEWADDRLABEL:
                if (ifal->ifal_index &&
                    !__dev_get_by_index(net, ifal->ifal_index))
@@ -505,12 +504,13 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
        hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
                if (idx >= s_idx &&
                    net_eq(ip6addrlbl_net(p), net)) {
-                       if ((err = ip6addrlbl_fill(skb, p,
-                                                  ip6addrlbl_table.seq,
-                                                  NETLINK_CB(cb->skb).portid,
-                                                  cb->nlh->nlmsg_seq,
-                                                  RTM_NEWADDRLABEL,
-                                                  NLM_F_MULTI)) <= 0)
+                       err = ip6addrlbl_fill(skb, p,
+                                             ip6addrlbl_table.seq,
+                                             NETLINK_CB(cb->skb).portid,
+                                             cb->nlh->nlmsg_seq,
+                                             RTM_NEWADDRLABEL,
+                                             NLM_F_MULTI);
+                       if (err <= 0)
                                break;
                }
                idx++;
@@ -527,7 +527,7 @@ static inline int ip6addrlbl_msgsize(void)
                + nla_total_size(4);    /* IFAL_LABEL */
 }
 
-static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
+static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(in_skb->sk);
        struct ifaddrlblmsg *ifal;
@@ -568,7 +568,8 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
                goto out;
        }
 
-       if (!(skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL))) {
+       skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL);
+       if (!skb) {
                ip6addrlbl_put(p);
                return -ENOBUFS;
        }
index 81e496a2e0083c42fe94729a486647aa23c8aed0..72a4930bdc0a0e0d43e1a6ad8670e6a1df1608f4 100644 (file)
@@ -346,6 +346,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        struct tmp_ext *iph_ext;
+       int seqhi_len = 0;
+       __be32 *seqhi;
+       int sglists = 0;
+       struct scatterlist *seqhisg;
 
        ahp = x->data;
        ahash = ahp->ahash;
@@ -359,15 +363,22 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        if (extlen)
                extlen += sizeof(*iph_ext);
 
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists = 1;
+               seqhi_len = sizeof(*seqhi);
+       }
        err = -ENOMEM;
-       iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen);
+       iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
+                               extlen + seqhi_len);
        if (!iph_base)
                goto out;
 
        iph_ext = ah_tmp_ext(iph_base);
-       icv = ah_tmp_icv(ahash, iph_ext, extlen);
+       seqhi = (__be32 *)((char *)iph_ext + extlen);
+       icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
+       seqhisg = sg + nfrags;
 
        ah = ip_auth_hdr(skb);
        memset(ah->auth_data, 0, ahp->icv_trunc_len);
@@ -411,10 +422,15 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        ah->spi = x->id.spi;
        ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
 
-       sg_init_table(sg, nfrags);
-       skb_to_sgvec(skb, sg, 0, skb->len);
+       sg_init_table(sg, nfrags + sglists);
+       skb_to_sgvec_nomark(skb, sg, 0, skb->len);
 
-       ahash_request_set_crypt(req, sg, icv, skb->len);
+       if (x->props.flags & XFRM_STATE_ESN) {
+               /* Attach seqhi sg right after packet payload */
+               *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+               sg_set_buf(seqhisg, seqhi, seqhi_len);
+       }
+       ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah6_output_done, skb);
 
        AH_SKB_CB(skb)->tmp = iph_base;
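
With ESN enabled, the output path reserves one extra scatterlist slot and enlarges the temporary buffer so the high 32 bits of the sequence number sit between the saved IPv6 header/extension data and the ICV, and the hash then covers skb->len + seqhi_len bytes. The sketch below only illustrates that buffer layout arithmetic; the sizes are hypothetical stand-ins for the ahash-derived ones, and the alignment applied by the kernel's ah_tmp_*() helpers is ignored.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical sizes; the kernel derives these from the ahash transform. */
	size_t base_len  = 40;	/* saved IPv6 base header          */
	size_t extlen    = 24;	/* mutable extension headers       */
	size_t seqhi_len = 4;	/* high 32 bits of the ESN counter */
	size_t icv_len   = 12;	/* truncated ICV                   */

	uint8_t *buf = malloc(base_len + extlen + seqhi_len + icv_len);
	if (!buf)
		return 1;

	uint8_t *iph_ext = buf + base_len;
	uint8_t *seqhi   = iph_ext + extlen;	/* seqhi follows the extensions */
	uint8_t *icv     = seqhi + seqhi_len;	/* ICV follows seqhi            */

	printf("iph_ext at +%td, seqhi at +%td, icv at +%td\n",
	       iph_ext - buf, seqhi - buf, icv - buf);
	free(buf);
	return 0;
}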
@@ -514,6 +530,10 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
        int nexthdr;
        int nfrags;
        int err = -ENOMEM;
+       int seqhi_len = 0;
+       __be32 *seqhi;
+       int sglists = 0;
+       struct scatterlist *seqhisg;
 
        if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
                goto out;
@@ -550,14 +570,22 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 
        skb_push(skb, hdr_len);
 
-       work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
+       if (x->props.flags & XFRM_STATE_ESN) {
+               sglists = 1;
+               seqhi_len = sizeof(*seqhi);
+       }
+
+       work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
+                               ahp->icv_trunc_len + seqhi_len);
        if (!work_iph)
                goto out;
 
-       auth_data = ah_tmp_auth(work_iph, hdr_len);
-       icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
+       auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
+       seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
+       icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
+       seqhisg = sg + nfrags;
 
        memcpy(work_iph, ip6h, hdr_len);
        memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
@@ -572,10 +600,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
        ip6h->flow_lbl[2] = 0;
        ip6h->hop_limit   = 0;
 
-       sg_init_table(sg, nfrags);
-       skb_to_sgvec(skb, sg, 0, skb->len);
+       sg_init_table(sg, nfrags + sglists);
+       skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+
+       if (x->props.flags & XFRM_STATE_ESN) {
+               /* Attach seqhi sg right after packet payload */
+               *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+               sg_set_buf(seqhisg, seqhi, seqhi_len);
+       }
 
-       ahash_request_set_crypt(req, sg, icv, skb->len);
+       ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah6_input_done, skb);
 
        AH_SKB_CB(skb)->tmp = work_iph;
@@ -609,8 +643,8 @@ out:
        return err;
 }
 
-static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-                   u8 type, u8 code, int offset, __be32 info)
+static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                  u8 type, u8 code, int offset, __be32 info)
 {
        struct net *net = dev_net(skb->dev);
        struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
@@ -619,17 +653,19 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
-               return;
+               return 0;
 
        x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
        if (!x)
-               return;
+               return 0;
 
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
                ip6_update_pmtu(skb, net, info, 0, 0);
        xfrm_state_put(x);
+
+       return 0;
 }
 
 static int ah6_init_state(struct xfrm_state *x)
@@ -714,6 +750,11 @@ static void ah6_destroy(struct xfrm_state *x)
        kfree(ahp);
 }
 
+static int ah6_rcv_cb(struct sk_buff *skb, int err)
+{
+       return 0;
+}
+
 static const struct xfrm_type ah6_type =
 {
        .description    = "AH6",
@@ -727,10 +768,11 @@ static const struct xfrm_type ah6_type =
        .hdr_offset     = xfrm6_find_1stfragopt,
 };
 
-static const struct inet6_protocol ah6_protocol = {
+static struct xfrm6_protocol ah6_protocol = {
        .handler        =       xfrm6_rcv,
+       .cb_handler     =       ah6_rcv_cb,
        .err_handler    =       ah6_err,
-       .flags          =       INET6_PROTO_NOPOLICY,
+       .priority       =       0,
 };
 
 static int __init ah6_init(void)
@@ -740,7 +782,7 @@ static int __init ah6_init(void)
                return -EAGAIN;
        }
 
-       if (inet6_add_protocol(&ah6_protocol, IPPROTO_AH) < 0) {
+       if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ah6_type, AF_INET6);
                return -EAGAIN;
@@ -751,7 +793,7 @@ static int __init ah6_init(void)
 
 static void __exit ah6_fini(void)
 {
-       if (inet6_del_protocol(&ah6_protocol, IPPROTO_AH) < 0)
+       if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
 
        if (xfrm_unregister_type(&ah6_type, AF_INET6) < 0)
index 6eef8a7e35f2c54514e6bbb8871d18ff18ae691e..d15da1377149d3a0fdf846a7a07364e6d1251845 100644 (file)
@@ -421,8 +421,8 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
 }
 
-static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-                    u8 type, u8 code, int offset, __be32 info)
+static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                   u8 type, u8 code, int offset, __be32 info)
 {
        struct net *net = dev_net(skb->dev);
        const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
@@ -431,18 +431,20 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
-               return;
+               return 0;
 
        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
-               return;
+               return 0;
 
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
                ip6_update_pmtu(skb, net, info, 0, 0);
        xfrm_state_put(x);
+
+       return 0;
 }
 
 static void esp6_destroy(struct xfrm_state *x)
@@ -614,6 +616,11 @@ error:
        return err;
 }
 
+static int esp6_rcv_cb(struct sk_buff *skb, int err)
+{
+       return 0;
+}
+
 static const struct xfrm_type esp6_type =
 {
        .description    = "ESP6",
@@ -628,10 +635,11 @@ static const struct xfrm_type esp6_type =
        .hdr_offset     = xfrm6_find_1stfragopt,
 };
 
-static const struct inet6_protocol esp6_protocol = {
-       .handler        =       xfrm6_rcv,
+static struct xfrm6_protocol esp6_protocol = {
+       .handler        =       xfrm6_rcv,
+       .cb_handler     =       esp6_rcv_cb,
        .err_handler    =       esp6_err,
-       .flags          =       INET6_PROTO_NOPOLICY,
+       .priority       =       0,
 };
 
 static int __init esp6_init(void)
@@ -640,7 +648,7 @@ static int __init esp6_init(void)
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
-       if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
+       if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp6_type, AF_INET6);
                return -EAGAIN;
@@ -651,7 +659,7 @@ static int __init esp6_init(void)
 
 static void __exit esp6_fini(void)
 {
-       if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
+       if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
index f2610e15766027ce3a7408862d03f4c427c555ea..7b326529e6a2cba57695697cfff436357c347b2c 100644 (file)
@@ -520,7 +520,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
                              np->tclass, NULL, &fl6, (struct rt6_info *)dst,
                              MSG_DONTWAIT, np->dontfrag);
        if (err) {
-               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
+               ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
        } else {
                err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
index 72d198b8e4d2966d81fc5c183b12333e49e56d3f..ee7a97f510cbd9f94fa24eafa43ddba201c75f3e 100644 (file)
@@ -79,7 +79,9 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
                /* RFC 2460 section 8.1 says that we SHOULD log
                   this error. Well, it is reasonable.
                 */
-               LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
+               LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
+                              &ipv6_hdr(skb)->saddr, ntohs(uh->source),
+                              &ipv6_hdr(skb)->daddr, ntohs(uh->dest));
                return 1;
        }
        if (skb->ip_summed == CHECKSUM_COMPLETE &&
index 075602fc6b6a915f15a7039cf1daaa640e02ce82..34e0ded5c14b028ebbb1bb03c1b30e8f25f98811 100644 (file)
@@ -9,14 +9,12 @@
  *      modify it under the terms of the GNU General Public License
  *      as published by the Free Software Foundation; either version
  *      2 of the License, or (at your option) any later version.
- */
-
-/*
- *     Changes:
- *     Yuji SEKIYA @USAGI:     Support default route on router node;
- *                             remove ip6_null_entry from the top of
- *                             routing table.
- *     Ville Nuorvala:         Fixed routing subtrees.
+ *
+ *     Changes:
+ *     Yuji SEKIYA @USAGI:     Support default route on router node;
+ *                             remove ip6_null_entry from the top of
+ *                             routing table.
+ *     Ville Nuorvala:         Fixed routing subtrees.
  */
 
 #define pr_fmt(fmt) "IPv6: " fmt
 #define RT6_TRACE(x...) do { ; } while (0)
 #endif
 
-static struct kmem_cache * fib6_node_kmem __read_mostly;
+static struct kmem_cache *fib6_node_kmem __read_mostly;
 
-enum fib_walk_state_t
-{
+enum fib_walk_state_t {
 #ifdef CONFIG_IPV6_SUBTREES
        FWS_S,
 #endif
@@ -59,8 +56,7 @@ enum fib_walk_state_t
        FWS_U
 };
 
-struct fib6_cleaner_t
-{
+struct fib6_cleaner_t {
        struct fib6_walker_t w;
        struct net *net;
        int (*func)(struct rt6_info *, void *arg);
@@ -138,7 +134,7 @@ static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
        const __be32 *addr = token;
        /*
         * Here,
-        *      1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
+        *      1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
         * is optimized version of
         *      htonl(1 << ((~fn_bit)&0x1F))
         * See include/asm-generic/bitops/le.h.
@@ -147,7 +143,7 @@ static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
               addr[fn_bit >> 5];
 }
 
-static __inline__ struct fib6_node * node_alloc(void)
+static __inline__ struct fib6_node *node_alloc(void)
 {
        struct fib6_node *fn;
 
@@ -156,7 +152,7 @@ static __inline__ struct fib6_node * node_alloc(void)
        return fn;
 }
 
-static __inline__ void node_free(struct fib6_node * fn)
+static __inline__ void node_free(struct fib6_node *fn)
 {
        kmem_cache_free(fib6_node_kmem, fn);
 }
@@ -292,7 +288,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
 
 static void fib6_dump_end(struct netlink_callback *cb)
 {
-       struct fib6_walker_t *w = (void*)cb->args[2];
+       struct fib6_walker_t *w = (void *)cb->args[2];
 
        if (w) {
                if (cb->args[4]) {
@@ -302,7 +298,7 @@ static void fib6_dump_end(struct netlink_callback *cb)
                cb->args[2] = 0;
                kfree(w);
        }
-       cb->done = (void*)cb->args[3];
+       cb->done = (void *)cb->args[3];
        cb->args[1] = 3;
 }
 
@@ -485,7 +481,7 @@ static struct fib6_node *fib6_add_1(struct fib6_node *root,
                fn->fn_sernum = sernum;
                dir = addr_bit_set(addr, fn->fn_bit);
                pn = fn;
-               fn = dir ? fn->right: fn->left;
+               fn = dir ? fn->right : fn->left;
        } while (fn);
 
        if (!allow_create) {
@@ -638,12 +634,41 @@ static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
               RTF_GATEWAY;
 }
 
+static int fib6_commit_metrics(struct dst_entry *dst,
+                              struct nlattr *mx, int mx_len)
+{
+       struct nlattr *nla;
+       int remaining;
+       u32 *mp;
+
+       if (dst->flags & DST_HOST) {
+               mp = dst_metrics_write_ptr(dst);
+       } else {
+               mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+               if (!mp)
+                       return -ENOMEM;
+               dst_init_metrics(dst, mp, 0);
+       }
+
+       nla_for_each_attr(nla, mx, mx_len, remaining) {
+               int type = nla_type(nla);
+
+               if (type) {
+                       if (type > RTAX_MAX)
+                               return -EINVAL;
+
+                       mp[type - 1] = nla_get_u32(nla);
+               }
+       }
+       return 0;
+}
+
 /*
  *     Insert routing information in a node.
  */
 
 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
-                           struct nl_info *info)
+                           struct nl_info *info, struct nlattr *mx, int mx_len)
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
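
fib6_commit_metrics() above copies the per-route metrics supplied over netlink into the route's metrics array: attribute type 0 is skipped, anything above RTAX_MAX is rejected, and a valid type N lands at slot N - 1. A self-contained sketch of that loop is given below; it replaces the netlink attribute walk (nla_for_each_attr/nla_type/nla_get_u32) with a plain array, and the RTAX_MAX value and attribute contents are illustrative only.

#include <stdio.h>

#define RTAX_MAX 16	/* illustrative bound standing in for the kernel constant */

struct metric_attr {
	int type;		/* 0 means "not set" and is skipped */
	unsigned int value;
};

/* Mirror of the fib6_commit_metrics() loop over a plain attribute array. */
static int commit_metrics(unsigned int *metrics,
			  const struct metric_attr *attrs, int count)
{
	for (int i = 0; i < count; i++) {
		int type = attrs[i].type;

		if (!type)
			continue;
		if (type > RTAX_MAX)
			return -1;
		metrics[type - 1] = attrs[i].value;
	}
	return 0;
}

int main(void)
{
	unsigned int metrics[RTAX_MAX] = { 0 };
	const struct metric_attr attrs[] = {
		{ .type = 2, .value = 1400 },	/* hypothetical metric of type 2 */
		{ .type = 4, .value = 10 },	/* hypothetical metric of type 4 */
	};

	if (commit_metrics(metrics, attrs, 2) == 0)
		printf("metrics[1] = %u, metrics[3] = %u\n", metrics[1], metrics[3]);
	return 0;
}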
@@ -653,6 +678,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                   (info->nlh->nlmsg_flags & NLM_F_CREATE));
        int found = 0;
        bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
+       int err;
 
        ins = &fn->leaf;
 
@@ -751,6 +777,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                        pr_warn("NLM_F_CREATE should be set when creating new route\n");
 
 add:
+               if (mx) {
+                       err = fib6_commit_metrics(&rt->dst, mx, mx_len);
+                       if (err)
+                               return err;
+               }
                rt->dst.rt6_next = iter;
                *ins = rt;
                rt->rt6i_node = fn;
@@ -770,6 +801,11 @@ add:
                        pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
                        return -ENOENT;
                }
+               if (mx) {
+                       err = fib6_commit_metrics(&rt->dst, mx, mx_len);
+                       if (err)
+                               return err;
+               }
                *ins = rt;
                rt->rt6i_node = fn;
                rt->dst.rt6_next = iter->dst.rt6_next;
@@ -806,7 +842,8 @@ void fib6_force_start_gc(struct net *net)
  *     with source addr info in sub-trees
  */
 
-int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
+            struct nlattr *mx, int mx_len)
 {
        struct fib6_node *fn, *pn = NULL;
        int err = -ENOMEM;
@@ -900,7 +937,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
        }
 #endif
 
-       err = fib6_add_rt2node(fn, rt, info);
+       err = fib6_add_rt2node(fn, rt, info, mx, mx_len);
        if (!err) {
                fib6_start_gc(info->nl_net, rt);
                if (!(rt->rt6i_flags & RTF_CACHE))
@@ -955,8 +992,8 @@ struct lookup_args {
        const struct in6_addr   *addr;          /* search key                   */
 };
 
-static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
-                                       struct lookup_args *args)
+static struct fib6_node *fib6_lookup_1(struct fib6_node *root,
+                                      struct lookup_args *args)
 {
        struct fib6_node *fn;
        __be32 dir;
@@ -1018,8 +1055,8 @@ backtrack:
        return NULL;
 }
 
-struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
-                              const struct in6_addr *saddr)
+struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
+                             const struct in6_addr *saddr)
 {
        struct fib6_node *fn;
        struct lookup_args args[] = {
@@ -1051,9 +1088,9 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *da
  */
 
 
-static struct fib6_node * fib6_locate_1(struct fib6_node *root,
-                                       const struct in6_addr *addr,
-                                       int plen, int offset)
+static struct fib6_node *fib6_locate_1(struct fib6_node *root,
+                                      const struct in6_addr *addr,
+                                      int plen, int offset)
 {
        struct fib6_node *fn;
 
@@ -1081,9 +1118,9 @@ static struct fib6_node * fib6_locate_1(struct fib6_node *root,
        return NULL;
 }
 
-struct fib6_node * fib6_locate(struct fib6_node *root,
-                              const struct in6_addr *daddr, int dst_len,
-                              const struct in6_addr *saddr, int src_len)
+struct fib6_node *fib6_locate(struct fib6_node *root,
+                             const struct in6_addr *daddr, int dst_len,
+                             const struct in6_addr *saddr, int src_len)
 {
        struct fib6_node *fn;
 
@@ -1151,8 +1188,10 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
 
                children = 0;
                child = NULL;
-               if (fn->right) child = fn->right, children |= 1;
-               if (fn->left) child = fn->left, children |= 2;
+               if (fn->right)
+                       child = fn->right, children |= 1;
+               if (fn->left)
+                       child = fn->left, children |= 2;
 
                if (children == 3 || FIB6_SUBTREE(fn)
 #ifdef CONFIG_IPV6_SUBTREES
@@ -1180,8 +1219,10 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
                } else {
                        WARN_ON(fn->fn_flags & RTN_ROOT);
 #endif
-                       if (pn->right == fn) pn->right = child;
-                       else if (pn->left == fn) pn->left = child;
+                       if (pn->right == fn)
+                               pn->right = child;
+                       else if (pn->left == fn)
+                               pn->left = child;
 #if RT6_DEBUG >= 2
                        else
                                WARN_ON(1);
@@ -1213,10 +1254,10 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
                                        w->node = child;
                                        if (children&2) {
                                                RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
-                                               w->state = w->state>=FWS_R ? FWS_U : FWS_INIT;
+                                               w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
                                        } else {
                                                RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
-                                               w->state = w->state>=FWS_C ? FWS_U : FWS_INIT;
+                                               w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
                                        }
                                }
                        }
@@ -1314,7 +1355,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
        struct rt6_info **rtp;
 
 #if RT6_DEBUG >= 2
-       if (rt->dst.obsolete>0) {
+       if (rt->dst.obsolete > 0) {
                WARN_ON(fn != NULL);
                return -ENOENT;
        }
@@ -1707,7 +1748,7 @@ out_rt6_stats:
        kfree(net->ipv6.rt6_stats);
 out_timer:
        return -ENOMEM;
- }
+}
 
 static void fib6_net_exit(struct net *net)
 {
index dfa41bb4e0dc0a97a9de933716ef9dbbabe5475d..0961b5ef866d04803cf91243aec32bb9e2ea8cf7 100644 (file)
@@ -15,9 +15,7 @@
 #include <linux/socket.h>
 #include <linux/net.h>
 #include <linux/netdevice.h>
-#include <linux/if_arp.h>
 #include <linux/in6.h>
-#include <linux/route.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <net/sock.h>
 
 #include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/protocol.h>
-#include <net/ip6_route.h>
 #include <net/addrconf.h>
 #include <net/rawv6.h>
-#include <net/icmp.h>
 #include <net/transp_v6.h>
 
 #include <asm/uaccess.h>
index f3ffb43f59c08634187f939db338413dd9a5514f..c98338b81d30779f9410ea413eb359d72a7dd76e 100644 (file)
@@ -1454,7 +1454,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 static int ip6gre_tap_init(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
-       int i;
 
        tunnel = netdev_priv(dev);
 
@@ -1464,16 +1463,10 @@ static int ip6gre_tap_init(struct net_device *dev)
 
        ip6gre_tnl_link_config(tunnel, 1);
 
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *ip6gre_tap_stats;
-               ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&ip6gre_tap_stats->syncp);
-       }
-
        return 0;
 }
 
index 64d6073731d368e54073f9ca97ac6ad0cecdd415..3284d61577c0f06e9231585fc7a3e93117e40b45 100644 (file)
@@ -367,6 +367,9 @@ int ip6_forward(struct sk_buff *skb)
        if (net->ipv6.devconf_all->forwarding == 0)
                goto error;
 
+       if (skb->pkt_type != PACKET_HOST)
+               goto drop;
+
        if (skb_warn_if_lro(skb))
                goto drop;
 
@@ -376,9 +379,6 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
        }
 
-       if (skb->pkt_type != PACKET_HOST)
-               goto drop;
-
        skb_forward_csum(skb);
 
        /*
@@ -1230,8 +1230,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
                              sizeof(struct frag_hdr) : 0) +
                             rt->rt6i_nfheader_len;
 
-               maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
-                                mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+               if (ip6_sk_local_df(sk))
+                       maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+               else
+                       maxnonfragsize = mtu;
 
                /* dontfrag active */
                if ((cork->length + length > mtu - headersize) && dontfrag &&
@@ -1538,8 +1540,7 @@ int ip6_push_pending_frames(struct sock *sk)
        }
 
        /* Allow local fragmentation. */
-       if (np->pmtudisc < IPV6_PMTUDISC_DO)
-               skb->local_df = 1;
+       skb->local_df = ip6_sk_local_df(sk);
 
        *final_dst = fl6->daddr;
        __skb_pull(skb, skb_network_header_len(skb));
@@ -1566,8 +1567,8 @@ int ip6_push_pending_frames(struct sock *sk)
        if (proto == IPPROTO_ICMPV6) {
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
-               ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
-               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+               ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
+               ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
        }
 
        err = ip6_local_out(skb);
index 5db8d310f9c07adc656dff9e3e909f611dbc1b1b..e1df691d78befcb2bb041244ed3370b44667b40d 100644 (file)
@@ -108,12 +108,12 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
                                                   per_cpu_ptr(dev->tstats, i);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&tstats->syncp);
+                       start = u64_stats_fetch_begin_irq(&tstats->syncp);
                        tmp.rx_packets = tstats->rx_packets;
                        tmp.rx_bytes = tstats->rx_bytes;
                        tmp.tx_packets = tstats->tx_packets;
                        tmp.tx_bytes =  tstats->tx_bytes;
-               } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
 
                sum.rx_packets += tmp.rx_packets;
                sum.rx_bytes   += tmp.rx_bytes;
@@ -1502,19 +1502,12 @@ static inline int
 ip6_tnl_dev_init_gen(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       int i;
 
        t->dev = dev;
        t->net = dev_net(dev);
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *ip6_tnl_stats;
-               ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&ip6_tnl_stats->syncp);
-       }
        return 0;
 }
 
index 2d19272b8ceea6ade3b935904a7e7903d20a2a2a..b7c0f827140b402685cc29049cb56646471c2cf2 100644 (file)
@@ -278,7 +278,6 @@ static void vti6_dev_uninit(struct net_device *dev)
                RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
        else
                vti6_tnl_unlink(ip6n, t);
-       ip6_tnl_dst_reset(t);
        dev_put(dev);
 }
 
@@ -288,11 +287,8 @@ static int vti6_rcv(struct sk_buff *skb)
        const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
        rcu_read_lock();
-
        if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
                                 &ipv6h->daddr)) != NULL) {
-               struct pcpu_sw_netstats *tstats;
-
                if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
                        rcu_read_unlock();
                        goto discard;
@@ -309,27 +305,58 @@ static int vti6_rcv(struct sk_buff *skb)
                        goto discard;
                }
 
-               tstats = this_cpu_ptr(t->dev->tstats);
-               u64_stats_update_begin(&tstats->syncp);
-               tstats->rx_packets++;
-               tstats->rx_bytes += skb->len;
-               u64_stats_update_end(&tstats->syncp);
-
-               skb->mark = 0;
-               secpath_reset(skb);
-               skb->dev = t->dev;
+               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
+               skb->mark = be32_to_cpu(t->parms.i_key);
 
                rcu_read_unlock();
-               return 0;
+
+               return xfrm6_rcv(skb);
        }
        rcu_read_unlock();
-       return 1;
-
+       return -EINVAL;
 discard:
        kfree_skb(skb);
        return 0;
 }
 
+static int vti6_rcv_cb(struct sk_buff *skb, int err)
+{
+       unsigned short family;
+       struct net_device *dev;
+       struct pcpu_sw_netstats *tstats;
+       struct xfrm_state *x;
+       struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+
+       if (!t)
+               return 1;
+
+       dev = t->dev;
+
+       if (err) {
+               dev->stats.rx_errors++;
+               dev->stats.rx_dropped++;
+
+               return 0;
+       }
+
+       x = xfrm_input_state(skb);
+       family = x->inner_mode->afinfo->family;
+
+       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+               return -EPERM;
+
+       skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
+       skb->dev = dev;
+
+       tstats = this_cpu_ptr(dev->tstats);
+       u64_stats_update_begin(&tstats->syncp);
+       tstats->rx_packets++;
+       tstats->rx_bytes += skb->len;
+       u64_stats_update_end(&tstats->syncp);
+
+       return 0;
+}
+
 /**
  * vti6_addr_conflict - compare packet addresses to tunnel's own
  *   @t: the outgoing tunnel device
@@ -349,44 +376,56 @@ vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
 
+static bool vti6_state_check(const struct xfrm_state *x,
+                            const struct in6_addr *dst,
+                            const struct in6_addr *src)
+{
+       xfrm_address_t *daddr = (xfrm_address_t *)dst;
+       xfrm_address_t *saddr = (xfrm_address_t *)src;
+
+       /* if there is no transform then this tunnel is not functional.
+        * Or if the xfrm is not mode tunnel.
+        */
+       if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
+           x->props.family != AF_INET6)
+               return false;
+
+       if (ipv6_addr_any(dst))
+               return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET6);
+
+       if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET6))
+               return false;
+
+       return true;
+}
+
 /**
  * vti6_xmit - send a packet
  *   @skb: the outgoing socket buffer
  *   @dev: the outgoing tunnel device
+ *   @fl: the flow information for the xfrm_lookup
  **/
-static int vti6_xmit(struct sk_buff *skb, struct net_device *dev)
+static int
+vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 {
-       struct net *net = dev_net(dev);
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->dev->stats;
-       struct dst_entry *dst = NULL, *ndst = NULL;
-       struct flowi6 fl6;
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;
        int err = -1;
 
-       if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
-           !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
-               return err;
-
-       dst = ip6_tnl_dst_check(t);
-       if (!dst) {
-               memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-
-               ndst = ip6_route_output(net, NULL, &fl6);
+       if (!dst)
+               goto tx_err_link_failure;
 
-               if (ndst->error)
-                       goto tx_err_link_failure;
-               ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(&fl6), NULL, 0);
-               if (IS_ERR(ndst)) {
-                       err = PTR_ERR(ndst);
-                       ndst = NULL;
-                       goto tx_err_link_failure;
-               }
-               dst = ndst;
+       dst_hold(dst);
+       dst = xfrm_lookup(t->net, dst, fl, NULL, 0);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               dst = NULL;
+               goto tx_err_link_failure;
        }
 
-       if (!dst->xfrm || dst->xfrm->props.mode != XFRM_MODE_TUNNEL)
+       if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr))
                goto tx_err_link_failure;
 
        tdev = dst->dev;
@@ -398,14 +437,21 @@ static int vti6_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_err_dst_release;
        }
 
+       skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+       skb_dst_set(skb, dst);
+       skb->dev = skb_dst(skb)->dev;
 
-       skb_dst_drop(skb);
-       skb_dst_set_noref(skb, dst);
+       err = dst_output(skb);
+       if (net_xmit_eval(err) == 0) {
+               struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
-       ip6tunnel_xmit(skb, dev);
-       if (ndst) {
-               dev->mtu = dst_mtu(ndst);
-               ip6_tnl_dst_store(t, ndst);
+               u64_stats_update_begin(&tstats->syncp);
+               tstats->tx_bytes += skb->len;
+               tstats->tx_packets++;
+               u64_stats_update_end(&tstats->syncp);
+       } else {
+               stats->tx_errors++;
+               stats->tx_aborted_errors++;
        }
 
        return 0;
@@ -413,7 +459,7 @@ tx_err_link_failure:
        stats->tx_carrier_errors++;
        dst_link_failure(skb);
 tx_err_dst_release:
-       dst_release(ndst);
+       dst_release(dst);
        return err;
 }
 
@@ -422,16 +468,33 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->dev->stats;
+       struct ipv6hdr *ipv6h;
+       struct flowi fl;
        int ret;
 
+       memset(&fl, 0, sizeof(fl));
+       skb->mark = be32_to_cpu(t->parms.o_key);
+
        switch (skb->protocol) {
        case htons(ETH_P_IPV6):
-               ret = vti6_xmit(skb, dev);
+               ipv6h = ipv6_hdr(skb);
+
+               if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+                   !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+                       goto tx_err;
+
+               xfrm_decode_session(skb, &fl, AF_INET6);
+               memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+               break;
+       case htons(ETH_P_IP):
+               xfrm_decode_session(skb, &fl, AF_INET);
+               memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                break;
        default:
                goto tx_err;
        }
 
+       ret = vti6_xmit(skb, dev, &fl);
        if (ret < 0)
                goto tx_err;
 
@@ -444,24 +507,66 @@ tx_err:
        return NETDEV_TX_OK;
 }
 
+static int vti6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                   u8 type, u8 code, int offset, __be32 info)
+{
+       __be32 spi;
+       struct xfrm_state *x;
+       struct ip6_tnl *t;
+       struct ip_esp_hdr *esph;
+       struct ip_auth_hdr *ah;
+       struct ip_comp_hdr *ipch;
+       struct net *net = dev_net(skb->dev);
+       const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
+       int protocol = iph->nexthdr;
+
+       t = vti6_tnl_lookup(dev_net(skb->dev), &iph->daddr, &iph->saddr);
+       if (!t)
+               return -1;
+
+       switch (protocol) {
+       case IPPROTO_ESP:
+               esph = (struct ip_esp_hdr *)(skb->data + offset);
+               spi = esph->spi;
+               break;
+       case IPPROTO_AH:
+               ah = (struct ip_auth_hdr *)(skb->data + offset);
+               spi = ah->spi;
+               break;
+       case IPPROTO_COMP:
+               ipch = (struct ip_comp_hdr *)(skb->data + offset);
+               spi = htonl(ntohs(ipch->cpi));
+               break;
+       default:
+               return 0;
+       }
+
+       if (type != ICMPV6_PKT_TOOBIG &&
+           type != NDISC_REDIRECT)
+               return 0;
+
+       x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+                             spi, protocol, AF_INET6);
+       if (!x)
+               return 0;
+
+       if (type == NDISC_REDIRECT)
+               ip6_redirect(skb, net, skb->dev->ifindex, 0);
+       else
+               ip6_update_pmtu(skb, net, info, 0, 0);
+       xfrm_state_put(x);
+
+       return 0;
+}
+
 static void vti6_link_config(struct ip6_tnl *t)
 {
-       struct dst_entry *dst;
        struct net_device *dev = t->dev;
        struct __ip6_tnl_parm *p = &t->parms;
-       struct flowi6 *fl6 = &t->fl.u.ip6;
 
        memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
        memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
 
-       /* Set up flowi template */
-       fl6->saddr = p->laddr;
-       fl6->daddr = p->raddr;
-       fl6->flowi6_oif = p->link;
-       fl6->flowi6_mark = be32_to_cpu(p->i_key);
-       fl6->flowi6_proto = p->proto;
-       fl6->flowlabel = 0;
-
        p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
                      IP6_TNL_F_CAP_PER_PACKET);
        p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
@@ -472,28 +577,6 @@ static void vti6_link_config(struct ip6_tnl *t)
                dev->flags &= ~IFF_POINTOPOINT;
 
        dev->iflink = p->link;
-
-       if (p->flags & IP6_TNL_F_CAP_XMIT) {
-
-               dst = ip6_route_output(dev_net(dev), NULL, fl6);
-               if (dst->error)
-                       return;
-
-               dst = xfrm_lookup(dev_net(dev), dst, flowi6_to_flowi(fl6),
-                                 NULL, 0);
-               if (IS_ERR(dst))
-                       return;
-
-               if (dst->dev) {
-                       dev->hard_header_len = dst->dev->hard_header_len;
-
-                       dev->mtu = dst_mtu(dst);
-
-                       if (dev->mtu < IPV6_MIN_MTU)
-                               dev->mtu = IPV6_MIN_MTU;
-               }
-               dst_release(dst);
-       }
 }
 
 /**
@@ -720,7 +803,6 @@ static void vti6_dev_setup(struct net_device *dev)
        t = netdev_priv(dev);
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
-       dev->features |= NETIF_F_NETNS_LOCAL;
        dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
 }
 
@@ -731,18 +813,12 @@ static void vti6_dev_setup(struct net_device *dev)
 static inline int vti6_dev_init_gen(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       int i;
 
        t->dev = dev;
        t->net = dev_net(dev);
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *stats;
-               stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&stats->syncp);
-       }
        return 0;
 }
 
@@ -914,11 +990,6 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
        .fill_info      = vti6_fill_info,
 };
 
-static struct xfrm_tunnel_notifier vti6_handler __read_mostly = {
-       .handler        = vti6_rcv,
-       .priority       =       1,
-};
-
 static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
 {
        int h;
@@ -990,6 +1061,27 @@ static struct pernet_operations vti6_net_ops = {
        .size = sizeof(struct vti6_net),
 };
 
+static struct xfrm6_protocol vti_esp6_protocol __read_mostly = {
+       .handler        =       vti6_rcv,
+       .cb_handler     =       vti6_rcv_cb,
+       .err_handler    =       vti6_err,
+       .priority       =       100,
+};
+
+static struct xfrm6_protocol vti_ah6_protocol __read_mostly = {
+       .handler        =       vti6_rcv,
+       .cb_handler     =       vti6_rcv_cb,
+       .err_handler    =       vti6_err,
+       .priority       =       100,
+};
+
+static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
+       .handler        =       vti6_rcv,
+       .cb_handler     =       vti6_rcv_cb,
+       .err_handler    =       vti6_err,
+       .priority       =       100,
+};
+
 /**
  * vti6_tunnel_init - register protocol and reserve needed resources
  *
@@ -1003,11 +1095,33 @@ static int __init vti6_tunnel_init(void)
        if (err < 0)
                goto out_pernet;
 
-       err = xfrm6_mode_tunnel_input_register(&vti6_handler);
+       err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
        if (err < 0) {
-               pr_err("%s: can't register vti6\n", __func__);
+               unregister_pernet_device(&vti6_net_ops);
+               pr_err("%s: can't register vti6 protocol\n", __func__);
+
                goto out;
        }
+
+       err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
+       if (err < 0) {
+               xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
+               unregister_pernet_device(&vti6_net_ops);
+               pr_err("%s: can't register vti6 protocol\n", __func__);
+
+               goto out;
+       }
+
+       err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP);
+       if (err < 0) {
+               xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
+               xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
+               unregister_pernet_device(&vti6_net_ops);
+               pr_err("%s: can't register vti6 protocol\n", __func__);
+
+               goto out;
+       }
+
        err = rtnl_link_register(&vti6_link_ops);
        if (err < 0)
                goto rtnl_link_failed;
@@ -1015,7 +1129,9 @@ static int __init vti6_tunnel_init(void)
        return 0;
 
 rtnl_link_failed:
-       xfrm6_mode_tunnel_input_deregister(&vti6_handler);
+       xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
+       xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
+       xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
 out:
        unregister_pernet_device(&vti6_net_ops);
 out_pernet:
@@ -1028,8 +1144,12 @@ out_pernet:
 static void __exit vti6_tunnel_cleanup(void)
 {
        rtnl_link_unregister(&vti6_link_ops);
-       if (xfrm6_mode_tunnel_input_deregister(&vti6_handler))
-               pr_info("%s: can't deregister vti6\n", __func__);
+       if (xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP))
+               pr_info("%s: can't deregister protocol\n", __func__);
+       if (xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH))
+               pr_info("%s: can't deregister protocol\n", __func__);
+       if (xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP))
+               pr_info("%s: can't deregister protocol\n", __func__);
 
        unregister_pernet_device(&vti6_net_ops);
 }
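
The vti6_tunnel_init() hunk above registers the ESP, AH and IPcomp handlers one after another and, when a later registration fails, deregisters the earlier ones in reverse order before unregistering the pernet device. Below is a minimal userspace sketch of that register/rollback ordering; the register_*/deregister_* stubs are invented stand-ins for the xfrm6_protocol_register()/xfrm6_protocol_deregister() calls, not kernel APIs.

#include <stdio.h>

/* Stand-ins for the three protocol registrations; each returns 0 on success. */
static int register_esp(void)    { puts("esp registered");    return 0; }
static int register_ah(void)     { puts("ah registered");     return 0; }
static int register_ipcomp(void) { puts("ipcomp registered"); return -1; /* simulate failure */ }

static void deregister_esp(void) { puts("esp deregistered"); }
static void deregister_ah(void)  { puts("ah deregistered");  }

/* Mirror of the init ordering: on failure, undo the earlier steps in reverse. */
static int tunnel_init(void)
{
        int err;

        err = register_esp();
        if (err)
                return err;

        err = register_ah();
        if (err) {
                deregister_esp();
                return err;
        }

        err = register_ipcomp();
        if (err) {
                deregister_ah();
                deregister_esp();
                return err;
        }

        return 0;
}

int main(void)
{
        return tunnel_init() ? 1 : 0;
}
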
index da9becb42e8127283f59c70731dc2890701f769e..d1c793cffcb5f44aba0f922f2ebdd3105d51b49f 100644 (file)
@@ -53,7 +53,7 @@
 #include <linux/icmpv6.h>
 #include <linux/mutex.h>
 
-static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                                u8 type, u8 code, int offset, __be32 info)
 {
        struct net *net = dev_net(skb->dev);
@@ -65,19 +65,21 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
-               return;
+               return 0;
 
        spi = htonl(ntohs(ipcomph->cpi));
        x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
                              spi, IPPROTO_COMP, AF_INET6);
        if (!x)
-               return;
+               return 0;
 
        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0);
        else
                ip6_update_pmtu(skb, net, info, 0, 0);
        xfrm_state_put(x);
+
+       return 0;
 }
 
 static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
@@ -174,6 +176,11 @@ out:
        return err;
 }
 
+static int ipcomp6_rcv_cb(struct sk_buff *skb, int err)
+{
+       return 0;
+}
+
 static const struct xfrm_type ipcomp6_type =
 {
        .description    = "IPCOMP6",
@@ -186,11 +193,12 @@ static const struct xfrm_type ipcomp6_type =
        .hdr_offset     = xfrm6_find_1stfragopt,
 };
 
-static const struct inet6_protocol ipcomp6_protocol =
+static struct xfrm6_protocol ipcomp6_protocol =
 {
        .handler        = xfrm6_rcv,
+       .cb_handler     = ipcomp6_rcv_cb,
        .err_handler    = ipcomp6_err,
-       .flags          = INET6_PROTO_NOPOLICY,
+       .priority       = 0,
 };
 
 static int __init ipcomp6_init(void)
@@ -199,7 +207,7 @@ static int __init ipcomp6_init(void)
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
-       if (inet6_add_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0) {
+       if (xfrm6_protocol_register(&ipcomp6_protocol, IPPROTO_COMP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ipcomp6_type, AF_INET6);
                return -EAGAIN;
@@ -209,7 +217,7 @@ static int __init ipcomp6_init(void)
 
 static void __exit ipcomp6_fini(void)
 {
-       if (inet6_del_protocol(&ipcomp6_protocol, IPPROTO_COMP) < 0)
+       if (xfrm6_protocol_deregister(&ipcomp6_protocol, IPPROTO_COMP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&ipcomp6_type, AF_INET6) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
index 0a00f449de5e0484686f008e792b27efd67d2356..edb58aff4ae70ac864f3f2b559815064f4db87f6 100644 (file)
@@ -722,7 +722,7 @@ done:
        case IPV6_MTU_DISCOVER:
                if (optlen < sizeof(int))
                        goto e_inval;
-               if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_INTERFACE)
+               if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT)
                        goto e_inval;
                np->pmtudisc = val;
                retv = 0;
index e1e47350784bad893c905eecca300388f9b80faa..08b367c6b9cfe2cb268cf7ec603ecc8f5c588a86 100644 (file)
@@ -1620,11 +1620,12 @@ static void mld_sendpack(struct sk_buff *skb)
                      dst_output);
 out:
        if (!err) {
-               ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT);
-               ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
-               IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
-       } else
-               IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
+               ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
+               ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+               IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+       } else {
+               IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+       }
 
        rcu_read_unlock();
        return;
index 827f795209cf9d607c5a63c188a369bb6c1ad684..6313abd53c9d059717113ab1d4763791a6a72312 100644 (file)
@@ -6,24 +6,24 @@
 #include <net/ipv6.h>
 #include <net/ip6_fib.h>
 #include <net/addrconf.h>
+#include <net/secure_seq.h>
 
 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 {
        static atomic_t ipv6_fragmentation_id;
+       struct in6_addr addr;
        int old, new;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       if (rt && !(rt->dst.flags & DST_NOPEER)) {
-               struct inet_peer *peer;
-               struct net *net;
-
-               net = dev_net(rt->dst.dev);
-               peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
-               if (peer) {
-                       fhdr->identification = htonl(inet_getid(peer, 0));
-                       inet_putpeer(peer);
-                       return;
-               }
+       struct inet_peer *peer;
+       struct net *net;
+
+       net = dev_net(rt->dst.dev);
+       peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+       if (peer) {
+               fhdr->identification = htonl(inet_getid(peer, 0));
+               inet_putpeer(peer);
+               return;
        }
 #endif
        do {
@@ -32,7 +32,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
                if (!new)
                        new = 1;
        } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
-       fhdr->identification = htonl(new);
+
+       addr = rt->rt6i_dst.addr;
+       addr.s6_addr32[0] ^= (__force __be32)new;
+       fhdr->identification = htonl(secure_ipv6_id(addr.s6_addr32));
 }
 EXPORT_SYMBOL(ipv6_select_ident);
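
The fallback path of ipv6_select_ident() keeps a global counter that is advanced with an atomic compare-and-swap and never hands out zero; the reworked code then XORs the counter into the destination address and passes that to secure_ipv6_id() instead of using the counter directly. A reduced userspace sketch of just the cmpxchg loop, using C11 atomics and leaving the secure_ipv6_id() step out:

#include <stdio.h>
#include <stdatomic.h>

static _Atomic unsigned int frag_id;

/* Lock-free increment that skips the value 0, mirroring the cmpxchg loop in
 * ipv6_select_ident(); the real code mixes the result into the destination
 * address and hashes it with secure_ipv6_id(), which is omitted here. */
static unsigned int next_frag_id(void)
{
        unsigned int old, new;

        do {
                old = atomic_load(&frag_id);
                new = old + 1;
                if (!new)
                        new = 1;
        } while (!atomic_compare_exchange_weak(&frag_id, &old, new));

        return new;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("id=%u\n", next_frag_id());
        return 0;
}
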
 
index 587bbdcb22b4c04c0186932d463bce29ca32b38e..bda74291c3e0d09c94ff5961cf393cb7695ee518 100644 (file)
@@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                              MSG_DONTWAIT, np->dontfrag);
 
        if (err) {
-               ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev,
-                                  ICMP6_MIB_OUTERRORS);
+               ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
+                               ICMP6_MIB_OUTERRORS);
                ip6_flush_pending_frames(sk);
        } else {
                err = icmpv6_push_pending_frames(sk, &fl6,
index fba54a407bb2b7c2aae62ac2d03df806bc1a794a..5015c50a5ba7db02b9a8cc842bd0e28b7f3e8782 100644 (file)
@@ -149,7 +149,8 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
                unsigned long prev, new;
 
                p = peer->metrics;
-               if (inet_metrics_new(peer))
+               if (inet_metrics_new(peer) ||
+                   (old & DST_METRICS_FORCE_OVERWRITE))
                        memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
 
                new = (unsigned long) p;
@@ -373,12 +374,6 @@ static bool rt6_check_expired(const struct rt6_info *rt)
        return false;
 }
 
-static bool rt6_need_strict(const struct in6_addr *daddr)
-{
-       return ipv6_addr_type(daddr) &
-               (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
-}
-
 /* Multipath route selection:
  *   Hash based function using packet header and flowlabel.
  * Adapted from fib_info_hashfn()
@@ -857,14 +852,15 @@ EXPORT_SYMBOL(rt6_lookup);
    be destroyed.
  */
 
-static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
+static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
+                       struct nlattr *mx, int mx_len)
 {
        int err;
        struct fib6_table *table;
 
        table = rt->rt6i_table;
        write_lock_bh(&table->tb6_lock);
-       err = fib6_add(&table->tb6_root, rt, info);
+       err = fib6_add(&table->tb6_root, rt, info, mx, mx_len);
        write_unlock_bh(&table->tb6_lock);
 
        return err;
@@ -875,7 +871,7 @@ int ip6_ins_rt(struct rt6_info *rt)
        struct nl_info info = {
                .nl_net = dev_net(rt->dst.dev),
        };
-       return __ip6_ins_rt(rt, &info);
+       return __ip6_ins_rt(rt, &info, NULL, 0);
 }
 
 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
@@ -1543,17 +1539,11 @@ int ip6_route_add(struct fib6_config *cfg)
 
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
        rt->rt6i_dst.plen = cfg->fc_dst_len;
-       if (rt->rt6i_dst.plen == 128)
-              rt->dst.flags |= DST_HOST;
-
-       if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
-               u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
-               if (!metrics) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               dst_init_metrics(&rt->dst, metrics, 0);
+       if (rt->rt6i_dst.plen == 128) {
+               rt->dst.flags |= DST_HOST;
+               dst_metrics_set_force_overwrite(&rt->dst);
        }
+
 #ifdef CONFIG_IPV6_SUBTREES
        ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
        rt->rt6i_src.plen = cfg->fc_src_len;
@@ -1672,31 +1662,13 @@ int ip6_route_add(struct fib6_config *cfg)
        rt->rt6i_flags = cfg->fc_flags;
 
 install_route:
-       if (cfg->fc_mx) {
-               struct nlattr *nla;
-               int remaining;
-
-               nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
-                       int type = nla_type(nla);
-
-                       if (type) {
-                               if (type > RTAX_MAX) {
-                                       err = -EINVAL;
-                                       goto out;
-                               }
-
-                               dst_metric_set(&rt->dst, type, nla_get_u32(nla));
-                       }
-               }
-       }
-
        rt->dst.dev = dev;
        rt->rt6i_idev = idev;
        rt->rt6i_table = table;
 
        cfg->fc_nlinfo.nl_net = dev_net(dev);
 
-       return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
+       return __ip6_ins_rt(rt, &cfg->fc_nlinfo, cfg->fc_mx, cfg->fc_mx_len);
 
 out:
        if (dev)
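
The metrics loop removed from ip6_route_add() above (a bounds-checked walk over the route's metric attributes that rejects any type above RTAX_MAX) now runs inside fib6_add(), which receives cfg->fc_mx/fc_mx_len through __ip6_ins_rt(). The following is a simplified userspace analog of that walk; the toy_attr layout is invented for illustration and is not the real netlink attribute format.

#include <stdio.h>
#include <stdint.h>

#define TOY_RTAX_MAX 16   /* stand-in for RTAX_MAX */

/* Toy attribute: a type plus a single u32 value (real netlink attributes
 * carry a length and an arbitrary payload). */
struct toy_attr {
        uint16_t type;
        uint32_t value;
};

/* Walk the attributes, skipping padding and rejecting any type outside the
 * metric table, the same way the relocated metrics loop does. */
static int apply_metrics(const struct toy_attr *attrs, int n, uint32_t *metrics)
{
        for (int i = 0; i < n; i++) {
                if (!attrs[i].type)
                        continue;               /* type 0 is padding, skip */
                if (attrs[i].type > TOY_RTAX_MAX)
                        return -1;              /* -EINVAL in the kernel */
                metrics[attrs[i].type - 1] = attrs[i].value;
        }
        return 0;
}

int main(void)
{
        struct toy_attr attrs[] = { { 2, 1400 }, { 4, 10 } };
        uint32_t metrics[TOY_RTAX_MAX] = { 0 };

        if (apply_metrics(attrs, 2, metrics))
                return 1;
        printf("metric[2]=%u metric[4]=%u\n", metrics[1], metrics[3]);
        return 0;
}
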
index b4d74c86586cd1a0256afb59870e32ebbebd56e8..1693c8d885f081e153e115bec78cb2f29a79a6ff 100644 (file)
@@ -1363,7 +1363,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 static int ipip6_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
-       int i;
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
@@ -1372,16 +1371,10 @@ static int ipip6_tunnel_init(struct net_device *dev)
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
 
        ipip6_tunnel_bind_dev(dev);
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *ipip6_tunnel_stats;
-               ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&ipip6_tunnel_stats->syncp);
-       }
-
        tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
        if (!tunnel->dst_cache) {
                free_percpu(dev->tstats);
@@ -1397,7 +1390,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        struct iphdr *iph = &tunnel->parms.iph;
        struct net *net = dev_net(dev);
        struct sit_net *sitn = net_generic(net, sit_net_id);
-       int i;
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
@@ -1408,16 +1400,10 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        iph->ihl                = 5;
        iph->ttl                = 64;
 
-       dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *ipip6_fb_stats;
-               ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
-               u64_stats_init(&ipip6_fb_stats->syncp);
-       }
-
        tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
        if (!tunnel->dst_cache) {
                free_percpu(dev->tstats);
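
Both tunnel init paths in this file now call netdev_alloc_pcpu_stats(), which folds the per-CPU allocation and the per-CPU u64_stats_init() loop into one helper. A rough userspace analog of what the removed for_each_possible_cpu() boilerplate did, with a plain array standing in for per-CPU storage:

#include <stdio.h>
#include <stdlib.h>

#define NR_FAKE_CPUS 4

/* Userspace stand-in for struct pcpu_sw_netstats: counters plus a sequence
 * counter used for consistent 64-bit reads on 32-bit hosts. */
struct fake_pcpu_stats {
        unsigned long long rx_bytes, tx_bytes;
        unsigned int syncp;     /* stand-in for u64_stats_sync */
};

/* Rough analog of netdev_alloc_pcpu_stats(): allocate one slot per CPU and
 * initialize each sync primitive, which is the boilerplate the removed
 * for_each_possible_cpu() loop used to open-code. */
static struct fake_pcpu_stats *alloc_pcpu_stats(void)
{
        struct fake_pcpu_stats *stats = calloc(NR_FAKE_CPUS, sizeof(*stats));

        if (!stats)
                return NULL;
        for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
                stats[cpu].syncp = 0;   /* u64_stats_init() equivalent */
        return stats;
}

int main(void)
{
        struct fake_pcpu_stats *stats = alloc_pcpu_stats();

        if (!stats)
                return 1;
        printf("allocated %d per-cpu stat slots\n", NR_FAKE_CPUS);
        free(stats);
        return 0;
}
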
index 889079b2ea852f237dea495cc66a63c968037b6d..5ca56cee2dae06830683927658397f6b7a23ecd4 100644 (file)
@@ -39,7 +39,7 @@
 #include <linux/ipsec.h>
 #include <linux/times.h>
 #include <linux/slab.h>
-
+#include <linux/uaccess.h>
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>
 #include <linux/random.h>
@@ -65,8 +65,6 @@
 #include <net/tcp_memcontrol.h>
 #include <net/busy_poll.h>
 
-#include <asm/uaccess.h>
-
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
@@ -501,8 +499,10 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
        int res;
 
        res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
-       if (!res)
+       if (!res) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+       }
        return res;
 }
 
@@ -530,8 +530,8 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
        return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 }
 
-static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
-                                 int optlen)
+static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
+                                int optlen)
 {
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
@@ -715,7 +715,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset,
-       .syn_ack_timeout =      tcp_syn_ack_timeout,
+       .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -726,7 +726,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 #endif
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-                                u32 tsval, u32 tsecr,
+                                u32 tsval, u32 tsecr, int oif,
                                 struct tcp_md5sig_key *key, int rst, u8 tclass,
                                 u32 label)
 {
@@ -798,8 +798,10 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 
        fl6.flowi6_proto = IPPROTO_TCP;
-       if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+       if (rt6_need_strict(&fl6.daddr) || !oif)
                fl6.flowi6_oif = inet6_iif(skb);
+       else
+               fl6.flowi6_oif = oif;
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
@@ -833,6 +835,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
        int genhash;
        struct sock *sk1 = NULL;
 #endif
+       int oif;
 
        if (th->rst)
                return;
@@ -876,7 +879,8 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);
 
-       tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, key, 1, 0, 0);
+       oif = sk ? sk->sk_bound_dev_if : 0;
+       tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
 
 #ifdef CONFIG_TCP_MD5SIG
 release_sk1:
@@ -888,11 +892,11 @@ release_sk1:
 }
 
 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
-                           u32 win, u32 tsval, u32 tsecr,
+                           u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            u32 label)
 {
-       tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, key, 0, tclass,
+       tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
                             label);
 }
 
@@ -904,7 +908,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
        tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
-                       tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
+                       tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass, (tw->tw_flowlabel << 12));
 
        inet_twsk_put(tw);
@@ -914,7 +918,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
 {
        tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
-                       req->rcv_wnd, tcp_time_stamp, req->ts_recent,
+                       req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
                        0, 0);
 }
@@ -1259,7 +1263,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
-       if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
+       key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
+       if (key != NULL) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
@@ -1303,9 +1308,8 @@ static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
                                              &ipv6_hdr(skb)->saddr,
                                              &ipv6_hdr(skb)->daddr, 0));
 
-       if (skb->len <= 76) {
+       if (skb->len <= 76)
                return __skb_checksum_complete(skb);
-       }
        return 0;
 }
 
@@ -1335,7 +1339,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                return tcp_v4_do_rcv(sk, skb);
 
 #ifdef CONFIG_TCP_MD5SIG
-       if (tcp_v6_inbound_md5_hash (sk, skb))
+       if (tcp_v6_inbound_md5_hash(sk, skb))
                goto discard;
 #endif
 
@@ -1602,7 +1606,8 @@ do_time_wait:
                break;
        case TCP_TW_RST:
                goto no_tcp_socket;
-       case TCP_TW_SUCCESS:;
+       case TCP_TW_SUCCESS:
+               ;
        }
        goto discard_it;
 }
@@ -1647,7 +1652,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb)
 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
        .twsk_unique    = tcp_twsk_unique,
-       .twsk_destructor= tcp_twsk_destructor,
+       .twsk_destructor = tcp_twsk_destructor,
 };
 
 static const struct inet_connection_sock_af_ops ipv6_specific = {
@@ -1681,7 +1686,6 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
 /*
  *     TCP over IPv4 via INET6 API
  */
-
 static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .queue_xmit        = ip_queue_xmit,
        .send_check        = tcp_v4_send_check,
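
tcp_v6_send_response() now takes an output interface and only falls back to the incoming interface when none was supplied or when the destination needs strict scoping; the rt6_need_strict() definition removed from route.c above shows that this covers multicast, link-local and loopback destinations. A minimal userspace classifier for the same three address classes, using the libc IN6_IS_ADDR_* macros rather than the kernel's ipv6_addr_type():

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Userspace approximation of rt6_need_strict(): these destinations are only
 * meaningful together with an explicit output interface. */
static int need_strict(const struct in6_addr *a)
{
        return IN6_IS_ADDR_MULTICAST(a) ||
               IN6_IS_ADDR_LINKLOCAL(a) ||
               IN6_IS_ADDR_LOOPBACK(a);
}

int main(void)
{
        const char *samples[] = { "ff02::1", "fe80::1", "::1", "2001:db8::1" };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                struct in6_addr a;

                if (inet_pton(AF_INET6, samples[i], &a) != 1)
                        return 1;
                printf("%-12s strict=%d\n", samples[i], need_strict(&a));
        }
        return 0;
}
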
index cb04f7a16b5e102f2944be051d61511d5645d3f7..901ef6f8addc0cf730909d2656513372cd6cf80f 100644 (file)
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
-/* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
-static DEFINE_MUTEX(xfrm6_mode_tunnel_input_mutex);
-
-int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
-{
-       struct xfrm_tunnel_notifier __rcu **pprev;
-       struct xfrm_tunnel_notifier *t;
-       int ret = -EEXIST;
-       int priority = handler->priority;
-
-       mutex_lock(&xfrm6_mode_tunnel_input_mutex);
-
-       for (pprev = &rcv_notify_handlers;
-            (t = rcu_dereference_protected(*pprev,
-            lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
-            pprev = &t->next) {
-               if (t->priority > priority)
-                       break;
-               if (t->priority == priority)
-                       goto err;
-
-       }
-
-       handler->next = *pprev;
-       rcu_assign_pointer(*pprev, handler);
-
-       ret = 0;
-
-err:
-       mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_register);
-
-int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
-{
-       struct xfrm_tunnel_notifier __rcu **pprev;
-       struct xfrm_tunnel_notifier *t;
-       int ret = -ENOENT;
-
-       mutex_lock(&xfrm6_mode_tunnel_input_mutex);
-       for (pprev = &rcv_notify_handlers;
-            (t = rcu_dereference_protected(*pprev,
-            lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
-            pprev = &t->next) {
-               if (t == handler) {
-                       *pprev = handler->next;
-                       ret = 0;
-                       break;
-               }
-       }
-       mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
-       synchronize_net();
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_deregister);
-
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
@@ -130,7 +71,6 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
@@ -138,9 +78,6 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
 
-       for_each_input_rcu(rcv_notify_handlers, handler)
-               handler->handler(skb);
-
        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;
index 5f8e128c512d664080251bf6958c89021a1110c1..2a0bbda2c76a99dfa687313d230595c251103b45 100644 (file)
@@ -389,11 +389,17 @@ int __init xfrm6_init(void)
        if (ret)
                goto out_policy;
 
+       ret = xfrm6_protocol_init();
+       if (ret)
+               goto out_state;
+
 #ifdef CONFIG_SYSCTL
        register_pernet_subsys(&xfrm6_net_ops);
 #endif
 out:
        return ret;
+out_state:
+       xfrm6_state_fini();
 out_policy:
        xfrm6_policy_fini();
        goto out;
@@ -404,6 +410,7 @@ void xfrm6_fini(void)
 #ifdef CONFIG_SYSCTL
        unregister_pernet_subsys(&xfrm6_net_ops);
 #endif
+       xfrm6_protocol_fini();
        xfrm6_policy_fini();
        xfrm6_state_fini();
        dst_entries_destroy(&xfrm6_dst_ops);
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
new file mode 100644 (file)
index 0000000..6ab989c
--- /dev/null
@@ -0,0 +1,270 @@
+/* xfrm6_protocol.c - Generic xfrm protocol multiplexer for ipv6.
+ *
+ * Copyright (C) 2013 secunet Security Networks AG
+ *
+ * Author:
+ * Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * Based on:
+ * net/ipv4/xfrm4_protocol.c
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/icmpv6.h>
+#include <net/ipv6.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+static struct xfrm6_protocol __rcu *esp6_handlers __read_mostly;
+static struct xfrm6_protocol __rcu *ah6_handlers __read_mostly;
+static struct xfrm6_protocol __rcu *ipcomp6_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm6_protocol_mutex);
+
+static inline struct xfrm6_protocol __rcu **proto_handlers(u8 protocol)
+{
+       switch (protocol) {
+       case IPPROTO_ESP:
+               return &esp6_handlers;
+       case IPPROTO_AH:
+               return &ah6_handlers;
+       case IPPROTO_COMP:
+               return &ipcomp6_handlers;
+       }
+
+       return NULL;
+}
+
+#define for_each_protocol_rcu(head, handler)           \
+       for (handler = rcu_dereference(head);           \
+            handler != NULL;                           \
+            handler = rcu_dereference(handler->next))  \
+
+int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
+{
+       int ret;
+       struct xfrm6_protocol *handler;
+
+       for_each_protocol_rcu(*proto_handlers(protocol), handler)
+               if ((ret = handler->cb_handler(skb, err)) <= 0)
+                       return ret;
+
+       return 0;
+}
+EXPORT_SYMBOL(xfrm6_rcv_cb);
+
+static int xfrm6_esp_rcv(struct sk_buff *skb)
+{
+       int ret;
+       struct xfrm6_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+
+       for_each_protocol_rcu(esp6_handlers, handler)
+               if ((ret = handler->handler(skb)) != -EINVAL)
+                       return ret;
+
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static void xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                         u8 type, u8 code, int offset, __be32 info)
+{
+       struct xfrm6_protocol *handler;
+
+       for_each_protocol_rcu(esp6_handlers, handler)
+               if (!handler->err_handler(skb, opt, type, code, offset, info))
+                       break;
+}
+
+static int xfrm6_ah_rcv(struct sk_buff *skb)
+{
+       int ret;
+       struct xfrm6_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+
+       for_each_protocol_rcu(ah6_handlers, handler)
+               if ((ret = handler->handler(skb)) != -EINVAL)
+                       return ret;
+
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static void xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                        u8 type, u8 code, int offset, __be32 info)
+{
+       struct xfrm6_protocol *handler;
+
+       for_each_protocol_rcu(ah6_handlers, handler)
+               if (!handler->err_handler(skb, opt, type, code, offset, info))
+                       break;
+}
+
+static int xfrm6_ipcomp_rcv(struct sk_buff *skb)
+{
+       int ret;
+       struct xfrm6_protocol *handler;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+
+       for_each_protocol_rcu(ipcomp6_handlers, handler)
+               if ((ret = handler->handler(skb)) != -EINVAL)
+                       return ret;
+
+       icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static void xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+                            u8 type, u8 code, int offset, __be32 info)
+{
+       struct xfrm6_protocol *handler;
+
+       for_each_protocol_rcu(ipcomp6_handlers, handler)
+               if (!handler->err_handler(skb, opt, type, code, offset, info))
+                       break;
+}
+
+static const struct inet6_protocol esp6_protocol = {
+       .handler        =       xfrm6_esp_rcv,
+       .err_handler    =       xfrm6_esp_err,
+       .flags          =       INET6_PROTO_NOPOLICY,
+};
+
+static const struct inet6_protocol ah6_protocol = {
+       .handler        =       xfrm6_ah_rcv,
+       .err_handler    =       xfrm6_ah_err,
+       .flags          =       INET6_PROTO_NOPOLICY,
+};
+
+static const struct inet6_protocol ipcomp6_protocol = {
+       .handler        =       xfrm6_ipcomp_rcv,
+       .err_handler    =       xfrm6_ipcomp_err,
+       .flags          =       INET6_PROTO_NOPOLICY,
+};
+
+static struct xfrm_input_afinfo xfrm6_input_afinfo = {
+       .family         =       AF_INET6,
+       .owner          =       THIS_MODULE,
+       .callback       =       xfrm6_rcv_cb,
+};
+
+static inline const struct inet6_protocol *netproto(unsigned char protocol)
+{
+       switch (protocol) {
+       case IPPROTO_ESP:
+               return &esp6_protocol;
+       case IPPROTO_AH:
+               return &ah6_protocol;
+       case IPPROTO_COMP:
+               return &ipcomp6_protocol;
+       }
+
+       return NULL;
+}
+
+int xfrm6_protocol_register(struct xfrm6_protocol *handler,
+                           unsigned char protocol)
+{
+       struct xfrm6_protocol __rcu **pprev;
+       struct xfrm6_protocol *t;
+       bool add_netproto = false;
+
+       int ret = -EEXIST;
+       int priority = handler->priority;
+
+       mutex_lock(&xfrm6_protocol_mutex);
+
+       if (!rcu_dereference_protected(*proto_handlers(protocol),
+                                      lockdep_is_held(&xfrm6_protocol_mutex)))
+               add_netproto = true;
+
+       for (pprev = proto_handlers(protocol);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&xfrm6_protocol_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority < priority)
+                       break;
+               if (t->priority == priority)
+                       goto err;
+       }
+
+       handler->next = *pprev;
+       rcu_assign_pointer(*pprev, handler);
+
+       ret = 0;
+
+err:
+       mutex_unlock(&xfrm6_protocol_mutex);
+
+       if (add_netproto) {
+               if (inet6_add_protocol(netproto(protocol), protocol)) {
+                       pr_err("%s: can't add protocol\n", __func__);
+                       ret = -EAGAIN;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL(xfrm6_protocol_register);
+
+int xfrm6_protocol_deregister(struct xfrm6_protocol *handler,
+                             unsigned char protocol)
+{
+       struct xfrm6_protocol __rcu **pprev;
+       struct xfrm6_protocol *t;
+       int ret = -ENOENT;
+
+       mutex_lock(&xfrm6_protocol_mutex);
+
+       for (pprev = proto_handlers(protocol);
+            (t = rcu_dereference_protected(*pprev,
+                       lockdep_is_held(&xfrm6_protocol_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
+                       *pprev = handler->next;
+                       ret = 0;
+                       break;
+               }
+       }
+
+       if (!rcu_dereference_protected(*proto_handlers(protocol),
+                                      lockdep_is_held(&xfrm6_protocol_mutex))) {
+               if (inet6_del_protocol(netproto(protocol), protocol) < 0) {
+                       pr_err("%s: can't remove protocol\n", __func__);
+                       ret = -EAGAIN;
+               }
+       }
+
+       mutex_unlock(&xfrm6_protocol_mutex);
+
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL(xfrm6_protocol_deregister);
+
+int __init xfrm6_protocol_init(void)
+{
+       return xfrm_input_register_afinfo(&xfrm6_input_afinfo);
+}
+
+void xfrm6_protocol_fini(void)
+{
+       xfrm_input_unregister_afinfo(&xfrm6_input_afinfo);
+}
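
xfrm6_protocol_register() above keeps each per-protocol handler chain sorted by descending priority and refuses a second handler with the same priority; that is how the vti handlers (priority 100) end up running before the plain ipcomp6 handler (priority 0) in the xfrm6_*_rcv() loops. A compact userspace sketch of just that insertion rule, with the RCU and mutex machinery left out:

#include <stdio.h>

/* Simplified handler node: only the ordering rule of
 * xfrm6_protocol_register() is kept. */
struct handler {
        int priority;
        struct handler *next;
};

/* Insert in descending priority order; a second handler with the same
 * priority is rejected, matching the -EEXIST path above. */
static int register_handler(struct handler **head, struct handler *h)
{
        struct handler **pprev, *t;

        for (pprev = head; (t = *pprev) != NULL; pprev = &t->next) {
                if (t->priority < h->priority)
                        break;
                if (t->priority == h->priority)
                        return -1;      /* -EEXIST */
        }
        h->next = *pprev;
        *pprev = h;
        return 0;
}

int main(void)
{
        struct handler *head = NULL;
        struct handler a = { 100, NULL }, b = { 0, NULL }, c = { 100, NULL };

        register_handler(&head, &a);
        register_handler(&head, &b);
        printf("duplicate priority rejected: %d\n", register_handler(&head, &c));
        for (struct handler *t = head; t; t = t->next)
                printf("priority %d\n", t->priority);
        return 0;
}
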
index 00b2a6d1c0092a7d0ec668b0b657f520637f31cd..41e4e93cb3aae37df41ff419ef34be9c2255b5cc 100644 (file)
@@ -1368,6 +1368,7 @@ static int ipx_release(struct socket *sock)
                goto out;
 
        lock_sock(sk);
+       sk->sk_shutdown = SHUTDOWN_MASK;
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
 
@@ -1791,8 +1792,11 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
                                flags & MSG_DONTWAIT, &rc);
-       if (!skb)
+       if (!skb) {
+               if (rc == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN))
+                       rc = 0;
                goto out;
+       }
 
        ipx     = ipx_hdr(skb);
        copied  = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
@@ -1922,6 +1926,26 @@ static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 }
 #endif
 
+static int ipx_shutdown(struct socket *sock, int mode)
+{
+       struct sock *sk = sock->sk;
+
+       if (mode < SHUT_RD || mode > SHUT_RDWR)
+               return -EINVAL;
+       /* This maps:
+        * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
+        * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
+        * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+        */
+       ++mode;
+
+       lock_sock(sk);
+       sk->sk_shutdown |= mode;
+       release_sock(sk);
+       sk->sk_state_change(sk);
+
+       return 0;
+}
 
 /*
  * Socket family declarations
@@ -1948,7 +1972,7 @@ static const struct proto_ops ipx_dgram_ops = {
        .compat_ioctl   = ipx_compat_ioctl,
 #endif
        .listen         = sock_no_listen,
-       .shutdown       = sock_no_shutdown, /* FIXME: support shutdown */
+       .shutdown       = ipx_shutdown,
        .setsockopt     = ipx_setsockopt,
        .getsockopt     = ipx_getsockopt,
        .sendmsg        = ipx_sendmsg,
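
The mapping comment in ipx_shutdown() above is the whole trick: the socket API's SHUT_RD/SHUT_WR/SHUT_RDWR values are one less than the kernel's RCV_SHUTDOWN/SEND_SHUTDOWN/SHUTDOWN_MASK bits, so adding one and OR-ing the result into sk->sk_shutdown accumulates the right flags. A tiny userspace demonstration; the *_SHUTDOWN macros are redefined locally with the values the kernel uses:

#include <stdio.h>
#include <sys/socket.h>   /* SHUT_RD, SHUT_WR, SHUT_RDWR */

#define RCV_SHUTDOWN   1
#define SEND_SHUTDOWN  2
#define SHUTDOWN_MASK  3

int main(void)
{
        int sk_shutdown = 0;

        /* ipx_shutdown() maps the socket API values onto the flag bits by
         * adding one, then ORs them into sk->sk_shutdown. */
        int mode = SHUT_RD;             /* 0 */
        sk_shutdown |= mode + 1;        /* RCV_SHUTDOWN */
        printf("after SHUT_RD: 0x%x\n", sk_shutdown);

        mode = SHUT_WR;                 /* 1 */
        sk_shutdown |= mode + 1;        /* SEND_SHUTDOWN */
        printf("after SHUT_WR: 0x%x\n", sk_shutdown);

        printf("fully shut down: %d\n", sk_shutdown == SHUTDOWN_MASK);
        return 0;
}
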
index c4b7218058b648856066bb24dacf38d54d16defc..a5e03119107a5563be4a4307fa56885d8e3952ff 100644 (file)
@@ -1382,6 +1382,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                if (sk->sk_type == SOCK_STREAM) {
                        if (copied < rlen) {
                                IUCV_SKB_CB(skb)->offset = offset + copied;
+                               skb_queue_head(&sk->sk_receive_queue, skb);
                                goto done;
                        }
                }
index 79326978517a6842b8538b6a45b95309b8e37d4f..e72589a8400dbf7e35a68468149bc7ce601b48be 100644 (file)
@@ -365,6 +365,7 @@ static const u8 sadb_ext_min_len[] = {
        [SADB_X_EXT_NAT_T_OA]           = (u8) sizeof(struct sadb_address),
        [SADB_X_EXT_SEC_CTX]            = (u8) sizeof(struct sadb_x_sec_ctx),
        [SADB_X_EXT_KMADDRESS]          = (u8) sizeof(struct sadb_x_kmaddress),
+       [SADB_X_EXT_FILTER]             = (u8) sizeof(struct sadb_x_filter),
 };
 
 /* Verify sadb_address_{len,prefixlen} against sa_family.  */
@@ -1799,6 +1800,7 @@ static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
 static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
 {
        u8 proto;
+       struct xfrm_address_filter *filter = NULL;
        struct pfkey_sock *pfk = pfkey_sk(sk);
 
        if (pfk->dump.dump != NULL)
@@ -1808,11 +1810,27 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
        if (proto == 0)
                return -EINVAL;
 
+       if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+               struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+
+               filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+               if (filter == NULL)
+                       return -ENOMEM;
+
+               memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
+                      sizeof(xfrm_address_t));
+               memcpy(&filter->daddr, &xfilter->sadb_x_filter_daddr,
+                      sizeof(xfrm_address_t));
+               filter->family = xfilter->sadb_x_filter_family;
+               filter->splen = xfilter->sadb_x_filter_splen;
+               filter->dplen = xfilter->sadb_x_filter_dplen;
+       }
+
        pfk->dump.msg_version = hdr->sadb_msg_version;
        pfk->dump.msg_portid = hdr->sadb_msg_pid;
        pfk->dump.dump = pfkey_dump_sa;
        pfk->dump.done = pfkey_dump_sa_done;
-       xfrm_state_walk_init(&pfk->dump.u.state, proto);
+       xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
 
        return pfkey_do_dump(pfk);
 }
@@ -3060,6 +3078,24 @@ static u32 get_acqseq(void)
        return res;
 }
 
+static bool pfkey_is_alive(const struct km_event *c)
+{
+       struct netns_pfkey *net_pfkey = net_generic(c->net, pfkey_net_id);
+       struct sock *sk;
+       bool is_alive = false;
+
+       rcu_read_lock();
+       sk_for_each_rcu(sk, &net_pfkey->table) {
+               if (pfkey_sk(sk)->registered) {
+                       is_alive = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return is_alive;
+}
+
 static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
 {
        struct sk_buff *skb;
@@ -3785,6 +3821,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
        .new_mapping    = pfkey_send_new_mapping,
        .notify_policy  = pfkey_send_policy_notify,
        .migrate        = pfkey_send_migrate,
+       .is_alive       = pfkey_is_alive,
 };
 
 static int __net_init pfkey_net_init(struct net *net)
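
When a SADB_X_EXT_FILTER extension is present, pfkey_dump() above allocates an xfrm_address_filter, copies the source/destination addresses, the family and the prefix lengths across, and passes it to xfrm_state_walk_init(). A stripped-down userspace sketch of that translate-and-hand-off step; the toy_* structs are simplified stand-ins for the real sadb_x_filter and xfrm_address_filter layouts.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>   /* AF_INET6 */

/* Trimmed-down stand-ins; addresses are reduced to 16 raw bytes. */
struct toy_sadb_filter {
        unsigned char saddr[16], daddr[16];
        unsigned short family;
        unsigned char splen, dplen;
};

struct toy_addr_filter {
        unsigned char saddr[16], daddr[16];
        unsigned short family;
        unsigned char splen, dplen;
};

/* Mirror of the pfkey_dump() hunk: allocate the walker's filter and copy
 * the fields over; the caller frees it in this sketch. */
static struct toy_addr_filter *build_filter(const struct toy_sadb_filter *x)
{
        struct toy_addr_filter *f = malloc(sizeof(*f));

        if (!f)
                return NULL;
        memcpy(f->saddr, x->saddr, sizeof(f->saddr));
        memcpy(f->daddr, x->daddr, sizeof(f->daddr));
        f->family = x->family;
        f->splen = x->splen;
        f->dplen = x->dplen;
        return f;
}

int main(void)
{
        struct toy_sadb_filter in = { .family = AF_INET6, .splen = 64, .dplen = 0 };
        struct toy_addr_filter *out = build_filter(&in);

        if (!out)
                return 1;
        printf("family=%u splen=%u dplen=%u\n",
               (unsigned)out->family, (unsigned)out->splen, (unsigned)out->dplen);
        free(out);
        return 0;
}
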
index 85d9d94c0a3c57706540ce9b5efc78309dd69ca0..ab48d4192edd37270c8e666e1bd9be781d249fd1 100644 (file)
@@ -1130,7 +1130,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        /* Queue the packet to IP for output */
        skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-       if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
+       if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
                error = inet6_csk_xmit(skb, NULL);
        else
 #endif
@@ -1150,23 +1150,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        return 0;
 }
 
-/* Automatically called when the skb is freed.
- */
-static void l2tp_sock_wfree(struct sk_buff *skb)
-{
-       sock_put(skb->sk);
-}
-
-/* For data skbs that we transmit, we associate with the tunnel socket
- * but don't do accounting.
- */
-static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
-       sock_hold(sk);
-       skb->sk = sk;
-       skb->destructor = l2tp_sock_wfree;
-}
-
 #if IS_ENABLED(CONFIG_IPV6)
 static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
                                int udp_len)
@@ -1220,7 +1203,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                return NET_XMIT_DROP;
        }
 
-       skb_orphan(skb);
        /* Setup L2TP header */
        session->build_header(session, __skb_push(skb, hdr_len));
 
@@ -1286,8 +1268,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                break;
        }
 
-       l2tp_skb_set_owner_w(skb, sk);
-
        l2tp_xmit_core(session, skb, fl, data_len);
 out_unlock:
        bh_unlock_sock(sk);
@@ -1808,8 +1788,6 @@ void l2tp_session_free(struct l2tp_session *session)
        }
 
        kfree(session);
-
-       return;
 }
 EXPORT_SYMBOL_GPL(l2tp_session_free);
 
index 5990919356a5d7c7573d8cbd0f6c899f28eb7f67..d276e2d4a5894c38c6489eb2f814542a181f6d75 100644 (file)
@@ -456,13 +456,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 
        BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
-
        if (sock) {
                inet_shutdown(sock, 2);
                /* Don't let the session go away before our socket does */
                l2tp_session_inc_refcount(session);
        }
-       return;
 }
 
 /* Really kill the session socket. (Called from sock_put() if
@@ -476,7 +474,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
                BUG_ON(session->magic != L2TP_SESSION_MAGIC);
                l2tp_session_dec_refcount(session);
        }
-       return;
 }
 
 /* Called when the PPPoX socket (session) is closed.
index 13b7683de5a455fe222c4148dc807471fe3b74bd..ce9633a3cfb0c54abe7aa87746f2b843cf33e65c 100644 (file)
@@ -107,7 +107,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.addba_req.start_seq_num =
                                        cpu_to_le16(start_seq_num << 4);
 
-       ieee80211_tx_skb_tid(sdata, skb, tid);
+       ieee80211_tx_skb(sdata, skb);
 }
 
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
index 453e974287d19b52971116bca5e0e401746f3ffb..aaa59d719592c0b7dc6ef3ddb4df8aaa578bc45c 100644 (file)
@@ -451,11 +451,11 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
                rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
        if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
                rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
-       if (sta->last_rx_rate_flag & RX_FLAG_80MHZ)
+       if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
                rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
-       if (sta->last_rx_rate_flag & RX_FLAG_80P80MHZ)
+       if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80P80MHZ)
                rinfo->flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
-       if (sta->last_rx_rate_flag & RX_FLAG_160MHZ)
+       if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
                rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
 }
 
@@ -970,9 +970,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
        /* TODO: make hostapd tell us what it wants */
        sdata->smps_mode = IEEE80211_SMPS_OFF;
        sdata->needed_rx_chains = sdata->local->rx_chains;
-       sdata->radar_required = params->radar_required;
 
        mutex_lock(&local->mtx);
+       sdata->radar_required = params->radar_required;
        err = ieee80211_vif_use_channel(sdata, &params->chandef,
                                        IEEE80211_CHANCTX_SHARED);
        mutex_unlock(&local->mtx);
@@ -1056,6 +1056,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
        int err;
 
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       sdata_assert_lock(sdata);
 
        /* don't allow changing the beacon while CSA is in place - offset
         * of channel switch counter may change
@@ -1083,6 +1084,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        struct probe_resp *old_probe_resp;
        struct cfg80211_chan_def chandef;
 
+       sdata_assert_lock(sdata);
+
        old_beacon = sdata_dereference(sdata->u.ap.beacon, sdata);
        if (!old_beacon)
                return -ENOENT;
@@ -1343,6 +1346,15 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                    params->vht_capa, sta);
 
+       if (params->opmode_notif_used) {
+               /* returned value is only needed for rc update, but the
+                * rc isn't initialized here yet, so ignore it
+                */
+               __ieee80211_vht_handle_opmode(sdata, sta,
+                                             params->opmode_notif,
+                                             band, false);
+       }
+
        if (ieee80211_vif_is_mesh(&sdata->vif)) {
 #ifdef CONFIG_MAC80211_MESH
                u32 changed = 0;
@@ -2630,6 +2642,18 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
        if (!roc)
                return -ENOMEM;
 
+       /*
+        * If the duration is zero, then the driver
+        * wouldn't actually do anything. Set it to
+        * 10 for now.
+        *
+        * TODO: cancel the off-channel operation
+        *       when we get the SKB's TX status and
+        *       the wait time was zero before.
+        */
+       if (!duration)
+               duration = 10;
+
        roc->chan = channel;
        roc->duration = duration;
        roc->req_duration = duration;
@@ -2671,18 +2695,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
 
        /* otherwise actually kick it off here (for error handling) */
 
-       /*
-        * If the duration is zero, then the driver
-        * wouldn't actually do anything. Set it to
-        * 10 for now.
-        *
-        * TODO: cancel the off-channel operation
-        *       when we get the SKB's TX status and
-        *       the wait time was zero before.
-        */
-       if (!duration)
-               duration = 10;
-
        ret = drv_remain_on_channel(local, sdata, channel, duration, type);
        if (ret) {
                kfree(roc);
@@ -2902,11 +2914,11 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
 
 static int ieee80211_start_radar_detection(struct wiphy *wiphy,
                                           struct net_device *dev,
-                                          struct cfg80211_chan_def *chandef)
+                                          struct cfg80211_chan_def *chandef,
+                                          u32 cac_time_ms)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
-       unsigned long timeout;
        int err;
 
        mutex_lock(&local->mtx);
@@ -2925,9 +2937,9 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
        if (err)
                goto out_unlock;
 
-       timeout = msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
        ieee80211_queue_delayed_work(&sdata->local->hw,
-                                    &sdata->dfs_cac_timer_work, timeout);
+                                    &sdata->dfs_cac_timer_work,
+                                    msecs_to_jiffies(cac_time_ms));
 
  out_unlock:
        mutex_unlock(&local->mtx);
@@ -2990,136 +3002,135 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
        return new_beacon;
 }
 
-void ieee80211_csa_finalize_work(struct work_struct *work)
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+       ieee80211_queue_work(&sdata->local->hw,
+                            &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
 {
-       struct ieee80211_sub_if_data *sdata =
-               container_of(work, struct ieee80211_sub_if_data,
-                            csa_finalize_work);
        struct ieee80211_local *local = sdata->local;
        int err, changed = 0;
 
-       sdata_lock(sdata);
-       /* AP might have been stopped while waiting for the lock. */
-       if (!sdata->vif.csa_active)
-               goto unlock;
-
-       if (!ieee80211_sdata_running(sdata))
-               goto unlock;
+       sdata_assert_lock(sdata);
 
-       sdata->radar_required = sdata->csa_radar_required;
        mutex_lock(&local->mtx);
+       sdata->radar_required = sdata->csa_radar_required;
        err = ieee80211_vif_change_channel(sdata, &changed);
        mutex_unlock(&local->mtx);
        if (WARN_ON(err < 0))
-               goto unlock;
+               return;
 
        if (!local->use_chanctx) {
                local->_oper_chandef = sdata->csa_chandef;
                ieee80211_hw_config(local, 0);
        }
 
-       ieee80211_bss_info_change_notify(sdata, changed);
-
        sdata->vif.csa_active = false;
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
                err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
-               if (err < 0)
-                       goto unlock;
-
-               changed |= err;
                kfree(sdata->u.ap.next_beacon);
                sdata->u.ap.next_beacon = NULL;
 
-               ieee80211_bss_info_change_notify(sdata, err);
+               if (err < 0)
+                       return;
+               changed |= err;
                break;
        case NL80211_IFTYPE_ADHOC:
-               ieee80211_ibss_finish_csa(sdata);
+               err = ieee80211_ibss_finish_csa(sdata);
+               if (err < 0)
+                       return;
+               changed |= err;
                break;
 #ifdef CONFIG_MAC80211_MESH
        case NL80211_IFTYPE_MESH_POINT:
                err = ieee80211_mesh_finish_csa(sdata);
                if (err < 0)
-                       goto unlock;
+                       return;
+               changed |= err;
                break;
 #endif
        default:
                WARN_ON(1);
-               goto unlock;
+               return;
        }
 
+       ieee80211_bss_info_change_notify(sdata, changed);
+
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
 
        cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
-
-unlock:
-       sdata_unlock(sdata);
 }
 
-int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
-                            struct cfg80211_csa_settings *params)
+void ieee80211_csa_finalize_work(struct work_struct *work)
 {
-       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       struct ieee80211_chanctx *chanctx;
-       struct ieee80211_if_mesh __maybe_unused *ifmsh;
-       int err, num_chanctx;
-
-       lockdep_assert_held(&sdata->wdev.mtx);
-
-       if (!list_empty(&local->roc_list) || local->scanning)
-               return -EBUSY;
+       struct ieee80211_sub_if_data *sdata =
+               container_of(work, struct ieee80211_sub_if_data,
+                            csa_finalize_work);
 
-       if (sdata->wdev.cac_started)
-               return -EBUSY;
+       sdata_lock(sdata);
+       /* AP might have been stopped while waiting for the lock. */
+       if (!sdata->vif.csa_active)
+               goto unlock;
 
-       if (cfg80211_chandef_identical(&params->chandef,
-                                      &sdata->vif.bss_conf.chandef))
-               return -EINVAL;
+       if (!ieee80211_sdata_running(sdata))
+               goto unlock;
 
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-       if (!chanctx_conf) {
-               rcu_read_unlock();
-               return -EBUSY;
-       }
+       ieee80211_csa_finalize(sdata);
 
-       /* don't handle for multi-VIF cases */
-       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
-       if (chanctx->refcount > 1) {
-               rcu_read_unlock();
-               return -EBUSY;
-       }
-       num_chanctx = 0;
-       list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
-               num_chanctx++;
-       rcu_read_unlock();
-
-       if (num_chanctx > 1)
-               return -EBUSY;
+unlock:
+       sdata_unlock(sdata);
+}
 
-       /* don't allow another channel switch if one is already active. */
-       if (sdata->vif.csa_active)
-               return -EBUSY;
+static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
+                                   struct cfg80211_csa_settings *params,
+                                   u32 *changed)
+{
+       int err;
 
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
-               sdata->csa_counter_offset_beacon =
-                       params->counter_offset_beacon;
-               sdata->csa_counter_offset_presp = params->counter_offset_presp;
                sdata->u.ap.next_beacon =
                        cfg80211_beacon_dup(&params->beacon_after);
                if (!sdata->u.ap.next_beacon)
                        return -ENOMEM;
 
+               /*
+                * With a count of 0, we don't have to wait for any
+                * TBTT before switching, so complete the CSA
+                * immediately.  In theory, with a count == 1 we
+                * should delay the switch until just before the next
+                * TBTT, but that would complicate things so we switch
+                * immediately too.  If we would delay the switch
+                * until the next TBTT, we would have to set the probe
+                * response here.
+                *
+                * TODO: A channel switch with count <= 1 without
+                * sending a CSA action frame is kind of useless,
+                * because the clients won't know we're changing
+                * channels.  The action frame must be implemented
+                * either here or in the userspace.
+                */
+               if (params->count <= 1)
+                       break;
+
+               sdata->csa_counter_offset_beacon =
+                       params->counter_offset_beacon;
+               sdata->csa_counter_offset_presp = params->counter_offset_presp;
                err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
                if (err < 0) {
                        kfree(sdata->u.ap.next_beacon);
                        return err;
                }
+               *changed |= err;
+
                break;
        case NL80211_IFTYPE_ADHOC:
                if (!sdata->vif.bss_conf.ibss_joined)
@@ -3147,16 +3158,20 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                    params->chandef.chan->band)
                        return -EINVAL;
 
-               err = ieee80211_ibss_csa_beacon(sdata, params);
-               if (err < 0)
-                       return err;
+               /* see comments in the NL80211_IFTYPE_AP block */
+               if (params->count > 1) {
+                       err = ieee80211_ibss_csa_beacon(sdata, params);
+                       if (err < 0)
+                               return err;
+                       *changed |= err;
+               }
+
+               ieee80211_send_action_csa(sdata, params);
+
                break;
 #ifdef CONFIG_MAC80211_MESH
-       case NL80211_IFTYPE_MESH_POINT:
-               ifmsh = &sdata->u.mesh;
-
-               if (!ifmsh->mesh_id)
-                       return -EINVAL;
+       case NL80211_IFTYPE_MESH_POINT: {
+               struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 
                if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
                        return -EINVAL;
@@ -3166,23 +3181,87 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                    params->chandef.chan->band)
                        return -EINVAL;
 
-               ifmsh->chsw_init = true;
-               if (!ifmsh->pre_value)
-                       ifmsh->pre_value = 1;
-               else
-                       ifmsh->pre_value++;
+               if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_NONE) {
+                       ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_INIT;
+                       if (!ifmsh->pre_value)
+                               ifmsh->pre_value = 1;
+                       else
+                               ifmsh->pre_value++;
+               }
 
-               err = ieee80211_mesh_csa_beacon(sdata, params, true);
-               if (err < 0) {
-                       ifmsh->chsw_init = false;
-                       return err;
+               /* see comments in the NL80211_IFTYPE_AP block */
+               if (params->count > 1) {
+                       err = ieee80211_mesh_csa_beacon(sdata, params);
+                       if (err < 0) {
+                               ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+                               return err;
+                       }
+                       *changed |= err;
                }
+
+               if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT)
+                       ieee80211_send_action_csa(sdata, params);
+
                break;
+               }
 #endif
        default:
                return -EOPNOTSUPP;
        }
 
+       return 0;
+}
+
+int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+                            struct cfg80211_csa_settings *params)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_chanctx *chanctx;
+       int err, num_chanctx, changed = 0;
+
+       sdata_assert_lock(sdata);
+
+       if (!list_empty(&local->roc_list) || local->scanning)
+               return -EBUSY;
+
+       if (sdata->wdev.cac_started)
+               return -EBUSY;
+
+       if (cfg80211_chandef_identical(&params->chandef,
+                                      &sdata->vif.bss_conf.chandef))
+               return -EINVAL;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (!chanctx_conf) {
+               rcu_read_unlock();
+               return -EBUSY;
+       }
+
+       /* don't handle for multi-VIF cases */
+       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+       if (chanctx->refcount > 1) {
+               rcu_read_unlock();
+               return -EBUSY;
+       }
+       num_chanctx = 0;
+       list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
+               num_chanctx++;
+       rcu_read_unlock();
+
+       if (num_chanctx > 1)
+               return -EBUSY;
+
+       /* don't allow another channel switch if one is already active. */
+       if (sdata->vif.csa_active)
+               return -EBUSY;
+
+       err = ieee80211_set_csa_beacon(sdata, params, &changed);
+       if (err)
+               return err;
+
        sdata->csa_radar_required = params->radar_required;
 
        if (params->block_tx)
@@ -3193,8 +3272,13 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        sdata->csa_chandef = params->chandef;
        sdata->vif.csa_active = true;
 
-       ieee80211_bss_info_change_notify(sdata, err);
-       drv_channel_switch_beacon(sdata, &params->chandef);
+       if (changed) {
+               ieee80211_bss_info_change_notify(sdata, changed);
+               drv_channel_switch_beacon(sdata, &params->chandef);
+       } else {
+               /* if the beacon didn't change, we can finalize immediately */
+               ieee80211_csa_finalize(sdata);
+       }
 
        return 0;
 }
@@ -3573,8 +3657,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
 
 static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
                               u8 *peer, u8 action_code, u8 dialog_token,
-                              u16 status_code, const u8 *extra_ies,
-                              size_t extra_ies_len)
+                              u16 status_code, u32 peer_capability,
+                              const u8 *extra_ies, size_t extra_ies_len)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
@@ -3865,7 +3949,7 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
        return 0;
 }
 
-struct cfg80211_ops mac80211_config_ops = {
+const struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
        .change_virtual_intf = ieee80211_change_iface,
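
The ieee80211_csa_finish() export introduced in the cfg.c hunks above lets a driver tell mac80211 that the CSA countdown it transmits has completed, which queues csa_finalize_work for the vif. A minimal sketch of the driver side, assuming a hypothetical mydrv_beacon_done() hook and csa_count argument; only ieee80211_csa_finish() and vif->csa_active come from the patch:

#include <net/mac80211.h>

/* Hypothetical driver callback, invoked after a beacon has been sent.
 * Once the CSA countdown carried in that beacon has reached zero, ask
 * mac80211 to finalize the channel switch.
 */
static void mydrv_beacon_done(struct ieee80211_vif *vif, u8 csa_count)
{
	if (vif->csa_active && csa_count == 0)
		ieee80211_csa_finish(vif);
}
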
index 7d7879f5b00b9d9fb7aa42ca8695d04e330dadb0..2d51f62dc76cd4978f1249c4a8ad1f95535a3e21 100644 (file)
@@ -4,6 +4,6 @@
 #ifndef __CFG_H
 #define __CFG_H
 
-extern struct cfg80211_ops mac80211_config_ops;
+extern const struct cfg80211_ops mac80211_config_ops;
 
 #endif /* __CFG_H */
index 0c1ecfdf9a128b05f76e545d3e0b95de50123176..bd1fd8ea5105fead274e09b7bcd093ba97081a97 100644 (file)
@@ -202,6 +202,8 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
 
+       lockdep_assert_held(&local->mtx);
+
        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                if (sdata->radar_required) {
index ebf80f3abd83fe1af18ce94863a0fe1dd090ea19..40a648938985db178b6997f1541bb47a3247b5a8 100644 (file)
@@ -358,6 +358,18 @@ static ssize_t ieee80211_if_parse_tkip_mic_test(
 }
 IEEE80211_IF_FILE_W(tkip_mic_test);
 
+static ssize_t ieee80211_if_parse_beacon_loss(
+       struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+       if (!ieee80211_sdata_running(sdata) || !sdata->vif.bss_conf.assoc)
+               return -ENOTCONN;
+
+       ieee80211_beacon_loss(&sdata->vif);
+
+       return buflen;
+}
+IEEE80211_IF_FILE_W(beacon_loss);
+
 static ssize_t ieee80211_if_fmt_uapsd_queues(
        const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
@@ -569,6 +581,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(beacon_timeout);
        DEBUGFS_ADD_MODE(smps, 0600);
        DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
+       DEBUGFS_ADD_MODE(beacon_loss, 0200);
        DEBUGFS_ADD_MODE(uapsd_queues, 0600);
        DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600);
 }
index 80194b557a0cff8f3d2aba52f06cfc348212abf8..2ecb4deddb5df0ca74eb9630de15bbc9c789f274 100644 (file)
@@ -195,7 +195,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
 static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
                                    size_t count, loff_t *ppos)
 {
-       char _buf[12], *buf = _buf;
+       char _buf[12] = {}, *buf = _buf;
        struct sta_info *sta = file->private_data;
        bool start, tx;
        unsigned long tid;
index ef8b385eff04e4c7a279a92722fedc3f84163f6c..fc689f5d971e259381f0a26e13079f1a727fe704 100644 (file)
@@ -354,16 +354,20 @@ drv_sched_scan_start(struct ieee80211_local *local,
        return ret;
 }
 
-static inline void drv_sched_scan_stop(struct ieee80211_local *local,
-                                      struct ieee80211_sub_if_data *sdata)
+static inline int drv_sched_scan_stop(struct ieee80211_local *local,
+                                     struct ieee80211_sub_if_data *sdata)
 {
+       int ret;
+
        might_sleep();
 
        check_sdata_in_driver(sdata);
 
        trace_drv_sched_scan_stop(local, sdata);
-       local->ops->sched_scan_stop(&local->hw, &sdata->vif);
-       trace_drv_return_void(local);
+       ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif);
+       trace_drv_return_int(local, ret);
+
+       return ret;
 }
 
 static inline void drv_sw_scan_start(struct ieee80211_local *local)
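
Since drv_sched_scan_stop() now propagates the driver's return value, the corresponding ieee80211_ops callback is expected to report success or failure instead of returning void. A rough driver-side sketch; mydrv_fw_abort_sched_scan() and struct mydrv are invented for illustration, the two-argument callback shape is inferred from the call above:

static int mydrv_sched_scan_stop(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif)
{
	struct mydrv *priv = hw->priv;

	/* ask the firmware to abort; mac80211 now sees this status
	 * via drv_sched_scan_stop() */
	return mydrv_fw_abort_sched_scan(priv, vif);
}
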
index 70dd013de8361e39c95264eeae967e193fe240e5..c150b68436d78ada5bfbb0825d128d8e89f916e3 100644 (file)
@@ -375,7 +375,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
        mgmt->u.action.u.delba.params = cpu_to_le16(params);
        mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
 
-       ieee80211_tx_skb_tid(sdata, skb, tid);
+       ieee80211_tx_skb(sdata, skb);
 }
 
 void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
@@ -482,8 +482,6 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
                return;
 
        if (vif->type == NL80211_IFTYPE_STATION) {
-               if (WARN_ON(smps_mode == IEEE80211_SMPS_OFF))
-                       smps_mode = IEEE80211_SMPS_AUTOMATIC;
                if (sdata->u.mgd.driver_smps_mode == smps_mode)
                        return;
                sdata->u.mgd.driver_smps_mode = smps_mode;
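
The ht.c hunk above drops the WARN_ON that coerced IEEE80211_SMPS_OFF into IEEE80211_SMPS_AUTOMATIC, so a driver may now legitimately request SMPS to be switched off on a station interface. A hedged sketch; mydrv_exit_powersave() is invented, ieee80211_request_smps() and the enum value are existing mac80211 API:

static void mydrv_exit_powersave(struct ieee80211_vif *vif)
{
	/* no longer warned about or overridden after the change above */
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_request_smps(vif, IEEE80211_SMPS_OFF);
}
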
index 2796a198728fd12bab4625ae1b112123988794f0..06d28787945b513e6672457a1e6990da0fd644d8 100644 (file)
@@ -220,7 +220,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_supported_band *sband;
        struct ieee80211_mgmt *mgmt;
        struct cfg80211_bss *bss;
        u32 bss_change;
@@ -284,6 +283,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
                                            &chandef);
+       if (err < 0) {
+               sdata_info(sdata,
+                          "Failed to join IBSS, invalid chandef\n");
+               return;
+       }
        if (err > 0) {
                if (!ifibss->userspace_handles_dfs) {
                        sdata_info(sdata,
@@ -294,7 +298,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        }
 
        mutex_lock(&local->mtx);
-       ieee80211_vif_release_channel(sdata);
        if (ieee80211_vif_use_channel(sdata, &chandef,
                                      ifibss->fixed_channel ?
                                        IEEE80211_CHANCTX_SHARED :
@@ -303,12 +306,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                mutex_unlock(&local->mtx);
                return;
        }
+       sdata->radar_required = radar_required;
        mutex_unlock(&local->mtx);
 
        memcpy(ifibss->bssid, bssid, ETH_ALEN);
 
-       sband = local->hw.wiphy->bands[chan->band];
-
        presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
                                           capability, tsf, &chandef,
                                           &have_higher_than_11mbit, NULL);
@@ -318,7 +320,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        rcu_assign_pointer(ifibss->presp, presp);
        mgmt = (void *)presp->head;
 
-       sdata->radar_required = radar_required;
        sdata->vif.bss_conf.enable_beacon = true;
        sdata->vif.bss_conf.beacon_int = beacon_int;
        sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -386,7 +387,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                                              presp->head_len, 0, GFP_KERNEL);
        cfg80211_put_bss(local->hw.wiphy, bss);
        netif_carrier_on(sdata->dev);
-       cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
+       cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
 }
 
 static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
@@ -521,12 +522,6 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
        if (old_presp)
                kfree_rcu(old_presp, rcu_head);
 
-       /* it might not send the beacon for a while. send an action frame
-        * immediately to announce the channel switch.
-        */
-       if (csa_settings)
-               ieee80211_send_action_csa(sdata, csa_settings);
-
        return BSS_CHANGED_BEACON;
  out:
        return ret;
@@ -536,7 +531,7 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct cfg80211_bss *cbss;
-       int err;
+       int err, changed = 0;
        u16 capability;
 
        sdata_assert_lock(sdata);
@@ -568,10 +563,9 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
        if (err < 0)
                return err;
 
-       if (err)
-               ieee80211_bss_info_change_notify(sdata, err);
+       changed |= err;
 
-       return 0;
+       return changed;
 }
 
 void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
@@ -799,6 +793,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        int err;
        u32 sta_flags;
 
+       sdata_assert_lock(sdata);
+
        sta_flags = IEEE80211_STA_DISABLE_VHT;
        switch (ifibss->chandef.width) {
        case NL80211_CHAN_WIDTH_5:
@@ -995,7 +991,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                  struct ieee802_11_elems *elems)
 {
        struct ieee80211_local *local = sdata->local;
-       int freq;
        struct cfg80211_bss *cbss;
        struct ieee80211_bss *bss;
        struct sta_info *sta;
@@ -1007,15 +1002,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
        bool rates_updated = false;
 
-       if (elems->ds_params)
-               freq = ieee80211_channel_to_frequency(elems->ds_params[0],
-                                                     band);
-       else
-               freq = rx_status->freq;
-
-       channel = ieee80211_get_channel(local->hw.wiphy, freq);
-
-       if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+       channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
+       if (!channel)
                return;
 
        if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -1468,6 +1456,11 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
        memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN);
        ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa);
        IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+       /* avoid excessive retries for probe request to wildcard SSIDs */
+       if (pos[1] == 0)
+               IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK;
+
        ieee80211_tx_skb(sdata, skb);
 }
 
index 5e44e3179e02aabaea7b6d455ec5ed656e3691b0..222c28b75315f1ab43226e08566a5f911c6bacc7 100644 (file)
@@ -616,7 +616,11 @@ struct ieee80211_if_mesh {
        struct ps_data ps;
        /* Channel Switching Support */
        struct mesh_csa_settings __rcu *csa;
-       bool chsw_init;
+       enum {
+               IEEE80211_MESH_CSA_ROLE_NONE,
+               IEEE80211_MESH_CSA_ROLE_INIT,
+               IEEE80211_MESH_CSA_ROLE_REPEATER,
+       } csa_role;
        u8 chsw_ttl;
        u16 pre_value;
 
@@ -1238,6 +1242,8 @@ struct ieee80211_local {
 
        struct ieee80211_sub_if_data __rcu *p2p_sdata;
 
+       struct napi_struct *napi;
+
        /* virtual monitor interface */
        struct ieee80211_sub_if_data __rcu *monitor_sdata;
        struct cfg80211_chan_def monitor_chandef;
@@ -1385,6 +1391,7 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
                                  __le16 fc, bool acked);
+void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
 
 /* IBSS code */
@@ -1408,8 +1415,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                                   struct sk_buff *skb);
 int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
-                             struct cfg80211_csa_settings *csa_settings,
-                             bool csa_action);
+                             struct cfg80211_csa_settings *csa_settings);
 int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
 
 /* scan/BSS handling */
@@ -1553,6 +1559,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
                                    struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+                                  struct sta_info *sta, u8 opmode,
+                                  enum ieee80211_band band, bool nss_only);
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta, u8 opmode,
                                 enum ieee80211_band band, bool nss_only);
@@ -1605,7 +1614,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 }
 
 /* utility functions/constants */
-extern void *mac80211_wiphy_privid; /* for wiphy privid */
+extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
 u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
                        enum nl80211_iftype type);
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
index ce1c4437061049a8ebea8d5d08e77995217b2a0d..b8d331e7d883d50fd4fc3adf12c2869ac3beedb7 100644 (file)
@@ -101,9 +101,8 @@ static u32 __ieee80211_idle_on(struct ieee80211_local *local)
 static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
                                   bool force_active)
 {
-       bool working = false, scanning, active;
+       bool working, scanning, active;
        unsigned int led_trig_start = 0, led_trig_stop = 0;
-       struct ieee80211_roc_work *roc;
 
        lockdep_assert_held(&local->mtx);
 
@@ -111,12 +110,8 @@ static u32 __ieee80211_recalc_idle(struct ieee80211_local *local,
                 !list_empty(&local->chanctx_list) ||
                 local->monitors;
 
-       if (!local->ops->remain_on_channel) {
-               list_for_each_entry(roc, &local->roc_list, list) {
-                       working = true;
-                       break;
-               }
-       }
+       working = !local->ops->remain_on_channel &&
+                 !list_empty(&local->roc_list);
 
        scanning = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
                   test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
@@ -833,7 +828,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        cancel_work_sync(&local->dynamic_ps_enable_work);
 
        cancel_work_sync(&sdata->recalc_smps);
+       sdata_lock(sdata);
        sdata->vif.csa_active = false;
+       sdata_unlock(sdata);
        cancel_work_sync(&sdata->csa_finalize_work);
 
        cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
index d767cfb9b45f092606cd37288113e82714b75fe4..b055f6a55c68e231c5bc73393a7817309a0ed10d 100644 (file)
@@ -893,10 +893,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        /* mac80211 supports control port protocol changing */
        local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
 
-       if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+       if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
                local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-       else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
+       } else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
                local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+               if (hw->max_signal <= 0) {
+                       result = -EINVAL;
+                       goto fail_wiphy_register;
+               }
+       }
 
        WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
             && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
@@ -1071,6 +1076,18 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 }
 EXPORT_SYMBOL(ieee80211_register_hw);
 
+void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
+                       struct net_device *napi_dev,
+                       int (*poll)(struct napi_struct *, int),
+                       int weight)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+
+       netif_napi_add(napi_dev, napi, poll, weight);
+       local->napi = napi;
+}
+EXPORT_SYMBOL_GPL(ieee80211_napi_add);
+
 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
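
ieee80211_napi_add(), added above, wraps netif_napi_add() and records the napi_struct in ieee80211_local. A sketch of how a driver might wire it up during setup; mydrv_poll(), priv->napi and priv->napi_dev are invented, only the helper and its signature come from the hunk:

static int mydrv_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... pull up to 'budget' received frames from the device and
	 *     hand them to mac80211, counting them in 'done' ... */

	if (done < budget)
		napi_complete(napi);
	return done;
}

static void mydrv_setup_napi(struct ieee80211_hw *hw, struct mydrv *priv)
{
	/* priv->napi_dev is a dummy net_device owned by the driver,
	 * used purely as the NAPI anchor */
	ieee80211_napi_add(hw, &priv->napi, priv->napi_dev, mydrv_poll, 64);
}
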
index 5b919cab1de0015cd833d04b840c9d7cdc69e96b..f70e9cd10552dac6729d703edd2e9ae12750a3a6 100644 (file)
@@ -688,7 +688,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
                *pos++ = csa->settings.count;
                *pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
                *pos++ = 6;
-               if (ifmsh->chsw_init) {
+               if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) {
                        *pos++ = ifmsh->mshcfg.dot11MeshTTL;
                        *pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
                } else {
@@ -859,18 +859,12 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
 {
        struct cfg80211_csa_settings params;
        struct ieee80211_csa_ie csa_ie;
-       struct ieee80211_chanctx_conf *chanctx_conf;
-       struct ieee80211_chanctx *chanctx;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
-       int err, num_chanctx;
+       int err;
        u32 sta_flags;
 
-       if (sdata->vif.csa_active)
-               return true;
-
-       if (!ifmsh->mesh_id)
-               return false;
+       sdata_assert_lock(sdata);
 
        sta_flags = IEEE80211_STA_DISABLE_VHT;
        switch (sdata->vif.bss_conf.chandef.width) {
@@ -896,10 +890,6 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
        params.chandef = csa_ie.chandef;
        params.count = csa_ie.count;
 
-       if (sdata->vif.bss_conf.chandef.chan->band !=
-           params.chandef.chan->band)
-               return false;
-
        if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
                                     IEEE80211_CHAN_DISABLED)) {
                sdata_info(sdata,
@@ -922,24 +912,12 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
                return false;
        }
 
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-       if (!chanctx_conf)
-               goto failed_chswitch;
-
-       /* don't handle for multi-VIF cases */
-       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
-       if (chanctx->refcount > 1)
-               goto failed_chswitch;
-
-       num_chanctx = 0;
-       list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
-               num_chanctx++;
-
-       if (num_chanctx > 1)
-               goto failed_chswitch;
-
-       rcu_read_unlock();
+       if (cfg80211_chandef_identical(&params.chandef,
+                                      &sdata->vif.bss_conf.chandef)) {
+               mcsa_dbg(sdata,
+                        "received csa with an identical chandef, ignoring\n");
+               return true;
+       }
 
        mcsa_dbg(sdata,
                 "received channel switch announcement to go to channel %d MHz\n",
@@ -953,30 +931,16 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
                ifmsh->pre_value = csa_ie.pre_value;
        }
 
-       if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) {
-               if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
-                       return false;
-       } else {
+       if (ifmsh->chsw_ttl >= ifmsh->mshcfg.dot11MeshTTL)
                return false;
-       }
 
-       sdata->csa_radar_required = params.radar_required;
+       ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_REPEATER;
 
-       if (params.block_tx)
-               ieee80211_stop_queues_by_reason(&sdata->local->hw,
-                               IEEE80211_MAX_QUEUE_MAP,
-                               IEEE80211_QUEUE_STOP_REASON_CSA);
-
-       sdata->csa_chandef = params.chandef;
-       sdata->vif.csa_active = true;
-
-       ieee80211_bss_info_change_notify(sdata, err);
-       drv_channel_switch_beacon(sdata, &params.chandef);
+       if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev,
+                                    &params) < 0)
+               return false;
 
        return true;
-failed_chswitch:
-       rcu_read_unlock();
-       return false;
 }
 
 static void
@@ -1086,7 +1050,8 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
                ifmsh->sync_ops->rx_bcn_presp(sdata,
                        stype, mgmt, &elems, rx_status);
 
-       if (!ifmsh->chsw_init)
+       if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
+           !sdata->vif.csa_active)
                ieee80211_mesh_process_chnswitch(sdata, &elems, true);
 }
 
@@ -1095,29 +1060,30 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_csa_settings *tmp_csa_settings;
        int ret = 0;
+       int changed = 0;
 
        /* Reset the TTL value and Initiator flag */
-       ifmsh->chsw_init = false;
+       ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
        ifmsh->chsw_ttl = 0;
 
        /* Remove the CSA and MCSP elements from the beacon */
        tmp_csa_settings = rcu_dereference(ifmsh->csa);
        rcu_assign_pointer(ifmsh->csa, NULL);
-       kfree_rcu(tmp_csa_settings, rcu_head);
+       if (tmp_csa_settings)
+               kfree_rcu(tmp_csa_settings, rcu_head);
        ret = ieee80211_mesh_rebuild_beacon(sdata);
        if (ret)
                return -EINVAL;
 
-       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+       changed |= BSS_CHANGED_BEACON;
 
        mcsa_dbg(sdata, "complete switching to center freq %d MHz",
                 sdata->vif.bss_conf.chandef.chan->center_freq);
-       return 0;
+       return changed;
 }
 
 int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
-                             struct cfg80211_csa_settings *csa_settings,
-                             bool csa_action)
+                             struct cfg80211_csa_settings *csa_settings)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct mesh_csa_settings *tmp_csa_settings;
@@ -1141,12 +1107,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
                return ret;
        }
 
-       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
-
-       if (csa_action)
-               ieee80211_send_action_csa(sdata, csa_settings);
-
-       return 0;
+       return BSS_CHANGED_BEACON;
 }
 
 static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
@@ -1210,7 +1171,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
 
        ifmsh->pre_value = pre_value;
 
-       if (!ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+       if (!sdata->vif.csa_active &&
+           !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
                mcsa_dbg(sdata, "Failed to process CSA action frame");
                return;
        }
@@ -1257,7 +1219,7 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        sdata_lock(sdata);
 
        /* mesh already went down */
-       if (!sdata->wdev.mesh_id_len)
+       if (!sdata->u.mesh.mesh_id_len)
                goto out;
 
        rx_status = IEEE80211_SKB_RXCB(skb);
@@ -1310,7 +1272,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
        sdata_lock(sdata);
 
        /* mesh already went down */
-       if (!sdata->wdev.mesh_id_len)
+       if (!sdata->u.mesh.mesh_id_len)
                goto out;
 
        if (ifmsh->preq_queue_len &&
@@ -1365,7 +1327,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        mesh_rmc_init(sdata);
        ifmsh->last_preq = jiffies;
        ifmsh->next_perr = jiffies;
-       ifmsh->chsw_init = false;
+       ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
        /* Allocate all mesh structures when creating the first mesh interface. */
        if (!mesh_allocated)
                ieee80211s_init();
index 245dce969b31165078c04fca9a5fc963457e9d68..dee50aefd6e868e247ba869e9e9883d4640330e3 100644 (file)
@@ -131,13 +131,13 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
        if (unlikely(!sdata->u.mgd.associated))
                return;
 
+       ifmgd->probe_send_count = 0;
+
        if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
                return;
 
        mod_timer(&sdata->u.mgd.conn_mon_timer,
                  round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
-
-       ifmgd->probe_send_count = 0;
 }
 
 static int ecw2cw(int ecw)
@@ -531,6 +531,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
        u8 *pos;
        u32 cap;
        struct ieee80211_sta_vht_cap vht_cap;
+       u32 mask, ap_bf_sts, our_bf_sts;
 
        BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
 
@@ -558,6 +559,16 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
                        cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
                cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
 
+       mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+       ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask;
+       our_bf_sts = cap & mask;
+
+       if (ap_bf_sts < our_bf_sts) {
+               cap &= ~mask;
+               cap |= ap_bf_sts;
+       }
+
        /* reserve and fill IE */
        pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
        ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
@@ -768,6 +779,34 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
                ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
                                    sband, chan, sdata->smps_mode);
 
+       /* if present, add any custom IEs that go before VHT */
+       if (assoc_data->ie_len) {
+               static const u8 before_vht[] = {
+                       WLAN_EID_SSID,
+                       WLAN_EID_SUPP_RATES,
+                       WLAN_EID_EXT_SUPP_RATES,
+                       WLAN_EID_PWR_CAPABILITY,
+                       WLAN_EID_SUPPORTED_CHANNELS,
+                       WLAN_EID_RSN,
+                       WLAN_EID_QOS_CAPA,
+                       WLAN_EID_RRM_ENABLED_CAPABILITIES,
+                       WLAN_EID_MOBILITY_DOMAIN,
+                       WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+                       WLAN_EID_HT_CAPABILITY,
+                       WLAN_EID_BSS_COEX_2040,
+                       WLAN_EID_EXT_CAPABILITY,
+                       WLAN_EID_QOS_TRAFFIC_CAPA,
+                       WLAN_EID_TIM_BCAST_REQ,
+                       WLAN_EID_INTERWORKING,
+               };
+               noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
+                                            before_vht, ARRAY_SIZE(before_vht),
+                                            offset);
+               pos = skb_put(skb, noffset - offset);
+               memcpy(pos, assoc_data->ie + offset, noffset - offset);
+               offset = noffset;
+       }
+
        if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                ieee80211_add_vht_ie(sdata, skb, sband,
                                     &assoc_data->ap_vht_cap);
@@ -1024,7 +1063,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        }
 
        ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
-       sdata->vif.csa_active = true;
 
        mutex_lock(&local->chanctx_mtx);
        if (local->use_chanctx) {
@@ -1062,6 +1100,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        mutex_unlock(&local->chanctx_mtx);
 
        sdata->csa_chandef = csa_ie.chandef;
+       sdata->vif.csa_active = true;
 
        if (csa_ie.mode)
                ieee80211_stop_queues_by_reason(&local->hw,
@@ -2233,6 +2272,62 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
        /* ignore frame -- wait for timeout */
 }
 
+#define case_WLAN(type) \
+       case WLAN_REASON_##type: return #type
+
+static const char *ieee80211_get_reason_code_string(u16 reason_code)
+{
+       switch (reason_code) {
+       case_WLAN(UNSPECIFIED);
+       case_WLAN(PREV_AUTH_NOT_VALID);
+       case_WLAN(DEAUTH_LEAVING);
+       case_WLAN(DISASSOC_DUE_TO_INACTIVITY);
+       case_WLAN(DISASSOC_AP_BUSY);
+       case_WLAN(CLASS2_FRAME_FROM_NONAUTH_STA);
+       case_WLAN(CLASS3_FRAME_FROM_NONASSOC_STA);
+       case_WLAN(DISASSOC_STA_HAS_LEFT);
+       case_WLAN(STA_REQ_ASSOC_WITHOUT_AUTH);
+       case_WLAN(DISASSOC_BAD_POWER);
+       case_WLAN(DISASSOC_BAD_SUPP_CHAN);
+       case_WLAN(INVALID_IE);
+       case_WLAN(MIC_FAILURE);
+       case_WLAN(4WAY_HANDSHAKE_TIMEOUT);
+       case_WLAN(GROUP_KEY_HANDSHAKE_TIMEOUT);
+       case_WLAN(IE_DIFFERENT);
+       case_WLAN(INVALID_GROUP_CIPHER);
+       case_WLAN(INVALID_PAIRWISE_CIPHER);
+       case_WLAN(INVALID_AKMP);
+       case_WLAN(UNSUPP_RSN_VERSION);
+       case_WLAN(INVALID_RSN_IE_CAP);
+       case_WLAN(IEEE8021X_FAILED);
+       case_WLAN(CIPHER_SUITE_REJECTED);
+       case_WLAN(DISASSOC_UNSPECIFIED_QOS);
+       case_WLAN(DISASSOC_QAP_NO_BANDWIDTH);
+       case_WLAN(DISASSOC_LOW_ACK);
+       case_WLAN(DISASSOC_QAP_EXCEED_TXOP);
+       case_WLAN(QSTA_LEAVE_QBSS);
+       case_WLAN(QSTA_NOT_USE);
+       case_WLAN(QSTA_REQUIRE_SETUP);
+       case_WLAN(QSTA_TIMEOUT);
+       case_WLAN(QSTA_CIPHER_NOT_SUPP);
+       case_WLAN(MESH_PEER_CANCELED);
+       case_WLAN(MESH_MAX_PEERS);
+       case_WLAN(MESH_CONFIG);
+       case_WLAN(MESH_CLOSE);
+       case_WLAN(MESH_MAX_RETRIES);
+       case_WLAN(MESH_CONFIRM_TIMEOUT);
+       case_WLAN(MESH_INVALID_GTK);
+       case_WLAN(MESH_INCONSISTENT_PARAM);
+       case_WLAN(MESH_INVALID_SECURITY);
+       case_WLAN(MESH_PATH_ERROR);
+       case_WLAN(MESH_PATH_NOFORWARD);
+       case_WLAN(MESH_PATH_DEST_UNREACHABLE);
+       case_WLAN(MAC_EXISTS_IN_MBSS);
+       case_WLAN(MESH_CHAN_REGULATORY);
+       case_WLAN(MESH_CHAN);
+       default: return "<unknown>";
+       }
+}
 
 static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_mgmt *mgmt, size_t len)
@@ -2254,8 +2349,8 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 
        reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
 
-       sdata_info(sdata, "deauthenticated from %pM (Reason: %u)\n",
-                  bssid, reason_code);
+       sdata_info(sdata, "deauthenticated from %pM (Reason: %u=%s)\n",
+                  bssid, reason_code, ieee80211_get_reason_code_string(reason_code));
 
        ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
 
@@ -2688,28 +2783,20 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                  struct ieee802_11_elems *elems)
 {
        struct ieee80211_local *local = sdata->local;
-       int freq;
        struct ieee80211_bss *bss;
        struct ieee80211_channel *channel;
 
        sdata_assert_lock(sdata);
 
-       if (elems->ds_params)
-               freq = ieee80211_channel_to_frequency(elems->ds_params[0],
-                                                     rx_status->band);
-       else
-               freq = rx_status->freq;
-
-       channel = ieee80211_get_channel(local->hw.wiphy, freq);
-
-       if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
+       channel = ieee80211_get_channel(local->hw.wiphy, rx_status->freq);
+       if (!channel)
                return;
 
        bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
                                        channel);
        if (bss) {
-               ieee80211_rx_bss_put(local, bss);
                sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
+               ieee80211_rx_bss_put(local, bss);
        }
 }
 
@@ -3504,6 +3591,32 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
 }
 
 #ifdef CONFIG_PM
+void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+
+       sdata_lock(sdata);
+
+       if (ifmgd->auth_data) {
+               /*
+                * If we are trying to authenticate while suspending, cfg80211
+                * won't know and won't actually abort those attempts, thus we
+                * need to do that ourselves.
+                */
+               ieee80211_send_deauth_disassoc(sdata,
+                                              ifmgd->auth_data->bss->bssid,
+                                              IEEE80211_STYPE_DEAUTH,
+                                              WLAN_REASON_DEAUTH_LEAVING,
+                                              false, frame_buf);
+               ieee80211_destroy_auth_data(sdata, false);
+               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                                     IEEE80211_DEAUTH_FRAME_LEN);
+       }
+
+       sdata_unlock(sdata);
+}
+
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -4322,37 +4435,41 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
        bool tx = !req->local_state_change;
-       bool report_frame = false;
 
-       sdata_info(sdata,
-                  "deauthenticating from %pM by local choice (reason=%d)\n",
-                  req->bssid, req->reason_code);
+       if (ifmgd->auth_data &&
+           ether_addr_equal(ifmgd->auth_data->bss->bssid, req->bssid)) {
+               sdata_info(sdata,
+                          "aborting authentication with %pM by local choice (Reason: %u=%s)\n",
+                          req->bssid, req->reason_code,
+                          ieee80211_get_reason_code_string(req->reason_code));
 
-       if (ifmgd->auth_data) {
                drv_mgd_prepare_tx(sdata->local, sdata);
                ieee80211_send_deauth_disassoc(sdata, req->bssid,
                                               IEEE80211_STYPE_DEAUTH,
                                               req->reason_code, tx,
                                               frame_buf);
                ieee80211_destroy_auth_data(sdata, false);
+               cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
+                                     IEEE80211_DEAUTH_FRAME_LEN);
 
-               report_frame = true;
-               goto out;
+               return 0;
        }
 
        if (ifmgd->associated &&
            ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
+               sdata_info(sdata,
+                          "deauthenticating from %pM by local choice (Reason: %u=%s)\n",
+                          req->bssid, req->reason_code,
+                          ieee80211_get_reason_code_string(req->reason_code));
+
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       req->reason_code, tx, frame_buf);
-               report_frame = true;
-       }
-
- out:
-       if (report_frame)
                cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
                                      IEEE80211_DEAUTH_FRAME_LEN);
+               return 0;
+       }
 
-       return 0;
+       return -ENOTCONN;
 }
 
 int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
@@ -4372,8 +4489,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
                return -ENOLINK;
 
        sdata_info(sdata,
-                  "disassociating from %pM by local choice (reason=%d)\n",
-                  req->bss->bssid, req->reason_code);
+                  "disassociating from %pM by local choice (Reason: %u=%s)\n",
+                  req->bss->bssid, req->reason_code, ieee80211_get_reason_code_string(req->reason_code));
 
        memcpy(bssid, req->bss->bssid, ETH_ALEN);
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
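
The case_WLAN() helper above is ordinary token pasting plus stringification, so each case returns the textual name of its reason code. A reduced, self-contained illustration of the same trick outside the kernel; all EXAMPLE_* names are made up:

#include <stdio.h>

#define EXAMPLE_REASON_TIMEOUT  1
#define EXAMPLE_REASON_REFUSED  2

#define case_EXAMPLE(type) \
	case EXAMPLE_REASON_##type: return #type

static const char *example_reason_string(int reason)
{
	switch (reason) {
	case_EXAMPLE(TIMEOUT);
	case_EXAMPLE(REFUSED);
	default: return "<unknown>";
	}
}

int main(void)
{
	/* prints "TIMEOUT" */
	printf("%s\n", example_reason_string(EXAMPLE_REASON_TIMEOUT));
	return 0;
}
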
index af64fb8e8addb58e5933cc3a45a8043f55659f81..d478b880a0afd676dae699d5b5541c150a7e819f 100644 (file)
@@ -100,10 +100,18 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 
        /* remove all interfaces that were created in the driver */
        list_for_each_entry(sdata, &local->interfaces, list) {
-               if (!ieee80211_sdata_running(sdata) ||
-                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
-                   sdata->vif.type == NL80211_IFTYPE_MONITOR)
+               if (!ieee80211_sdata_running(sdata))
                        continue;
+               switch (sdata->vif.type) {
+               case NL80211_IFTYPE_AP_VLAN:
+               case NL80211_IFTYPE_MONITOR:
+                       continue;
+               case NL80211_IFTYPE_STATION:
+                       ieee80211_mgd_quiesce(sdata);
+                       break;
+               default:
+                       break;
+               }
 
                drv_remove_interface(local, sdata);
        }
index 22b223f13c9fa22994eba6f68769a27b2cfe4eb3..8fdadfd94ba8576ae8bc0ee2e99e8c656c2f6a5b 100644 (file)
 
 #include <linux/kernel.h>
 #include <linux/rtnetlink.h>
-#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include "rate.h"
 #include "ieee80211_i.h"
 #include "debugfs.h"
 
 struct rate_control_alg {
        struct list_head list;
-       struct rate_control_ops *ops;
+       const struct rate_control_ops *ops;
 };
 
 static LIST_HEAD(rate_ctrl_algs);
@@ -29,7 +29,7 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
 MODULE_PARM_DESC(ieee80211_default_rc_algo,
                 "Default rate control algorithm for mac80211 to use");
 
-int ieee80211_rate_control_register(struct rate_control_ops *ops)
+int ieee80211_rate_control_register(const struct rate_control_ops *ops)
 {
        struct rate_control_alg *alg;
 
@@ -60,7 +60,7 @@ int ieee80211_rate_control_register(struct rate_control_ops *ops)
 }
 EXPORT_SYMBOL(ieee80211_rate_control_register);
 
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops)
 {
        struct rate_control_alg *alg;
 
@@ -76,32 +76,31 @@ void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
 }
 EXPORT_SYMBOL(ieee80211_rate_control_unregister);
 
-static struct rate_control_ops *
+static const struct rate_control_ops *
 ieee80211_try_rate_control_ops_get(const char *name)
 {
        struct rate_control_alg *alg;
-       struct rate_control_ops *ops = NULL;
+       const struct rate_control_ops *ops = NULL;
 
        if (!name)
                return NULL;
 
        mutex_lock(&rate_ctrl_mutex);
        list_for_each_entry(alg, &rate_ctrl_algs, list) {
-               if (!strcmp(alg->ops->name, name))
-                       if (try_module_get(alg->ops->module)) {
-                               ops = alg->ops;
-                               break;
-                       }
+               if (!strcmp(alg->ops->name, name)) {
+                       ops = alg->ops;
+                       break;
+               }
        }
        mutex_unlock(&rate_ctrl_mutex);
        return ops;
 }
 
 /* Get the rate control algorithm. */
-static struct rate_control_ops *
+static const struct rate_control_ops *
 ieee80211_rate_control_ops_get(const char *name)
 {
-       struct rate_control_ops *ops;
+       const struct rate_control_ops *ops;
        const char *alg_name;
 
        kparam_block_sysfs_write(ieee80211_default_rc_algo);
@@ -111,10 +110,6 @@ ieee80211_rate_control_ops_get(const char *name)
                alg_name = name;
 
        ops = ieee80211_try_rate_control_ops_get(alg_name);
-       if (!ops) {
-               request_module("rc80211_%s", alg_name);
-               ops = ieee80211_try_rate_control_ops_get(alg_name);
-       }
        if (!ops && name)
                /* try default if specific alg requested but not found */
                ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
@@ -127,11 +122,6 @@ ieee80211_rate_control_ops_get(const char *name)
        return ops;
 }
 
-static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops)
-{
-       module_put(ops->module);
-}
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 static ssize_t rcname_read(struct file *file, char __user *userbuf,
                           size_t count, loff_t *ppos)
@@ -158,11 +148,11 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
 
        ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
        if (!ref)
-               goto fail_ref;
+               return NULL;
        ref->local = local;
        ref->ops = ieee80211_rate_control_ops_get(name);
        if (!ref->ops)
-               goto fail_ops;
+               goto free;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
        debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
@@ -172,14 +162,11 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
 
        ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
        if (!ref->priv)
-               goto fail_priv;
+               goto free;
        return ref;
 
-fail_priv:
-       ieee80211_rate_control_ops_put(ref->ops);
-fail_ops:
+free:
        kfree(ref);
-fail_ref:
        return NULL;
 }
 
@@ -192,7 +179,6 @@ static void rate_control_free(struct rate_control_ref *ctrl_ref)
        ctrl_ref->local->debugfs.rcdir = NULL;
 #endif
 
-       ieee80211_rate_control_ops_put(ctrl_ref->ops);
        kfree(ctrl_ref);
 }
 
index b95e16c070813da22169679e8c5c1aa7e3ee0f1d..9aa2a1190a86353a25deca879018b2750bdefad9 100644 (file)
@@ -21,7 +21,7 @@
 
 struct rate_control_ref {
        struct ieee80211_local *local;
-       struct rate_control_ops *ops;
+       const struct rate_control_ops *ops;
        void *priv;
 };
 
index f3d88b0c054c219fde20b907195c3bdb5a481c97..26fd94fa0aedb86e43382781f791b1f36de44954 100644 (file)
@@ -657,7 +657,7 @@ minstrel_free(void *priv)
        kfree(priv);
 }
 
-struct rate_control_ops mac80211_minstrel = {
+const struct rate_control_ops mac80211_minstrel = {
        .name = "minstrel",
        .tx_status = minstrel_tx_status,
        .get_rate = minstrel_get_rate,
index f4301f4b2e418f1e325ccd78c4c2a6986303ca36..046d1bd598a86d114c5c4dd6a7c7edb75808af13 100644 (file)
@@ -123,7 +123,7 @@ struct minstrel_debugfs_info {
        char buf[];
 };
 
-extern struct rate_control_ops mac80211_minstrel;
+extern const struct rate_control_ops mac80211_minstrel;
 void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
 void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
 
index c1b5b73c5b91353597eb1dfcf7a0f70946c72562..bccaf854a309e9434fb9044d0e98082e3de287de 100644 (file)
@@ -124,7 +124,7 @@ const struct mcs_group minstrel_mcs_groups[] = {
 
 #define MINSTREL_CCK_GROUP     (ARRAY_SIZE(minstrel_mcs_groups) - 1)
 
-static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
+static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
 
 static void
 minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
@@ -1031,7 +1031,7 @@ minstrel_ht_free(void *priv)
        mac80211_minstrel.free(priv);
 }
 
-static struct rate_control_ops mac80211_minstrel_ht = {
+static const struct rate_control_ops mac80211_minstrel_ht = {
        .name = "minstrel_ht",
        .tx_status = minstrel_ht_tx_status,
        .get_rate = minstrel_ht_get_rate,
@@ -1048,8 +1048,7 @@ static struct rate_control_ops mac80211_minstrel_ht = {
 };
 
 
-static void
-init_sample_table(void)
+static void __init init_sample_table(void)
 {
        int col, i, new_idx;
        u8 rnd[MCS_GROUP_RATES];
index 958fad07b54cf64856e3600bd6299f4ca9abd72a..d0da2a70fe6899e7cf4f9733225d3846186f1780 100644 (file)
@@ -452,7 +452,7 @@ static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
        kfree(priv_sta);
 }
 
-static struct rate_control_ops mac80211_rcpid = {
+static const struct rate_control_ops mac80211_rcpid = {
        .name = "pid",
        .tx_status = rate_control_pid_tx_status,
        .get_rate = rate_control_pid_get_rate,
index 3e57f96c9666daf4b420cfd663864878bef34204..216c45b949e513382447050eb560098a5edaa4b3 100644 (file)
@@ -40,8 +40,6 @@
 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
                                           struct sk_buff *skb)
 {
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
        if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
                if (likely(skb->len > FCS_LEN))
                        __pskb_trim(skb, skb->len - FCS_LEN);
@@ -53,9 +51,6 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
                }
        }
 
-       if (status->vendor_radiotap_len)
-               __pskb_pull(skb, status->vendor_radiotap_len);
-
        return skb;
 }
 
@@ -64,14 +59,13 @@ static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr;
 
-       hdr = (void *)(skb->data + status->vendor_radiotap_len);
+       hdr = (void *)(skb->data);
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
                            RX_FLAG_FAILED_PLCP_CRC |
                            RX_FLAG_AMPDU_IS_ZEROLEN))
                return 1;
-       if (unlikely(skb->len < 16 + present_fcs_len +
-                               status->vendor_radiotap_len))
+       if (unlikely(skb->len < 16 + present_fcs_len))
                return 1;
        if (ieee80211_is_ctl(hdr->frame_control) &&
            !ieee80211_is_pspoll(hdr->frame_control) &&
@@ -90,8 +84,6 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
        len = sizeof(struct ieee80211_radiotap_header) + 8;
 
        /* allocate extra bitmaps */
-       if (status->vendor_radiotap_len)
-               len += 4;
        if (status->chains)
                len += 4 * hweight8(status->chains);
 
@@ -127,18 +119,6 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
                len += 2 * hweight8(status->chains);
        }
 
-       if (status->vendor_radiotap_len) {
-               if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
-                       status->vendor_radiotap_align = 1;
-               /* align standard part of vendor namespace */
-               len = ALIGN(len, 2);
-               /* allocate standard part of vendor namespace */
-               len += 6;
-               /* align vendor-defined part */
-               len = ALIGN(len, status->vendor_radiotap_align);
-               /* vendor-defined part is already in skb */
-       }
-
        return len;
 }
 
@@ -172,7 +152,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        it_present = &rthdr->it_present;
 
        /* radiotap header, set always present flags */
-       rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+       rthdr->it_len = cpu_to_le16(rtap_len);
        it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
                         BIT(IEEE80211_RADIOTAP_CHANNEL) |
                         BIT(IEEE80211_RADIOTAP_RX_FLAGS);
@@ -190,14 +170,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                                 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
        }
 
-       if (status->vendor_radiotap_len) {
-               it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
-                                 BIT(IEEE80211_RADIOTAP_EXT);
-               put_unaligned_le32(it_present_val, it_present);
-               it_present++;
-               it_present_val = status->vendor_radiotap_bitmap;
-       }
-
        put_unaligned_le32(it_present_val, it_present);
 
        pos = (void *)(it_present + 1);
@@ -307,6 +279,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                        *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
                if (status->flag & RX_FLAG_HT_GF)
                        *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+               if (status->flag & RX_FLAG_LDPC)
+                       *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
                stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
                *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
                pos++;
@@ -349,20 +323,25 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 
                rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
                /* known field - how to handle 80+80? */
-               if (status->flag & RX_FLAG_80P80MHZ)
+               if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
                        known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
                put_unaligned_le16(known, pos);
                pos += 2;
                /* flags */
                if (status->flag & RX_FLAG_SHORT_GI)
                        *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+               /* in VHT, STBC is binary */
+               if (status->flag & RX_FLAG_STBC_MASK)
+                       *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
+               if (status->vht_flag & RX_VHT_FLAG_BF)
+                       *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
                pos++;
                /* bandwidth */
-               if (status->flag & RX_FLAG_80MHZ)
+               if (status->vht_flag & RX_VHT_FLAG_80MHZ)
                        *pos++ = 4;
-               else if (status->flag & RX_FLAG_80P80MHZ)
+               else if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
                        *pos++ = 0; /* marked not known above */
-               else if (status->flag & RX_FLAG_160MHZ)
+               else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
                        *pos++ = 11;
                else if (status->flag & RX_FLAG_40MHZ)
                        *pos++ = 1;
@@ -372,6 +351,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                *pos = (status->rate_idx << 4) | status->vht_nss;
                pos += 4;
                /* coding field */
+               if (status->flag & RX_FLAG_LDPC)
+                       *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
                pos++;
                /* group ID */
                pos++;
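
In the VHT radiotap hunk above, bandwidth and STBC/beamformed reporting moves from status->flag to the new status->vht_flag bits, and LDPC coding is now reported for both HT and VHT. The bandwidth byte it emits follows the encoding visible in the hunk (1 for 40 MHz, 4 for 80 MHz, 11 for 160 MHz, 0 when 80+80 is marked unknown); a stand-alone sketch of just that mapping, with a local enum standing in for the kernel's rx-status flags:

    /* the enum is illustrative; it mirrors the cases handled in the hunk,
     * not the kernel's RX_VHT_FLAG_* definitions */
    enum demo_rx_bw { DEMO_BW_20, DEMO_BW_40, DEMO_BW_80, DEMO_BW_80P80, DEMO_BW_160 };

    static unsigned char demo_radiotap_vht_bw(enum demo_rx_bw bw)
    {
            switch (bw) {
            case DEMO_BW_40:    return 1;
            case DEMO_BW_80:    return 4;
            case DEMO_BW_160:   return 11;
            case DEMO_BW_80P80: return 0;   /* bandwidth marked "not known" above */
            default:            return 0;   /* 20 MHz */
            }
    }
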
@@ -383,21 +364,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
                *pos++ = status->chain_signal[chain];
                *pos++ = chain;
        }
-
-       if (status->vendor_radiotap_len) {
-               /* ensure 2 byte alignment for the vendor field as required */
-               if ((pos - (u8 *)rthdr) & 1)
-                       *pos++ = 0;
-               *pos++ = status->vendor_radiotap_oui[0];
-               *pos++ = status->vendor_radiotap_oui[1];
-               *pos++ = status->vendor_radiotap_oui[2];
-               *pos++ = status->vendor_radiotap_subns;
-               put_unaligned_le16(status->vendor_radiotap_len, pos);
-               pos += 2;
-               /* align the actual payload as requested */
-               while ((pos - (u8 *)rthdr) & (status->vendor_radiotap_align - 1))
-                       *pos++ = 0;
-       }
 }
 
 /*
@@ -428,8 +394,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
        if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
                present_fcs_len = FCS_LEN;
 
-       /* ensure hdr->frame_control and vendor radiotap data are in skb head */
-       if (!pskb_may_pull(origskb, 2 + status->vendor_radiotap_len)) {
+       /* ensure hdr->frame_control is in skb head */
+       if (!pskb_may_pull(origskb, 2)) {
                dev_kfree_skb(origskb);
                return NULL;
        }
@@ -599,10 +565,10 @@ static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
-       if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
+       if (is_multicast_ether_addr(hdr->addr1))
                return 0;
 
-       return ieee80211_is_robust_mgmt_frame(hdr);
+       return ieee80211_is_robust_mgmt_frame(skb);
 }
 
 
@@ -610,10 +576,10 @@ static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
-       if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
+       if (!is_multicast_ether_addr(hdr->addr1))
                return 0;
 
-       return ieee80211_is_robust_mgmt_frame(hdr);
+       return ieee80211_is_robust_mgmt_frame(skb);
 }
 
 
@@ -626,7 +592,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
        if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
                return -1;
 
-       if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
+       if (!ieee80211_is_robust_mgmt_frame(skb))
                return -1; /* not a robust management frame */
 
        mmie = (struct ieee80211_mmie *)
@@ -1268,6 +1234,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                        if (ieee80211_is_data(hdr->frame_control)) {
                                sta->last_rx_rate_idx = status->rate_idx;
                                sta->last_rx_rate_flag = status->flag;
+                               sta->last_rx_rate_vht_flag = status->vht_flag;
                                sta->last_rx_rate_vht_nss = status->vht_nss;
                        }
                }
@@ -1280,6 +1247,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                if (ieee80211_is_data(hdr->frame_control)) {
                        sta->last_rx_rate_idx = status->rate_idx;
                        sta->last_rx_rate_flag = status->flag;
+                       sta->last_rx_rate_vht_flag = status->vht_flag;
                        sta->last_rx_rate_vht_nss = status->vht_nss;
                }
        }
@@ -1318,18 +1286,15 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
            !ieee80211_has_morefrags(hdr->frame_control) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
            (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
-            rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
+            rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+           /* PM bit is only checked in frames where it isn't reserved,
+            * in AP mode it's reserved in non-bufferable management frames
+            * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+            */
+           (!ieee80211_is_mgmt(hdr->frame_control) ||
+            ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
                if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
-                       /*
-                        * Ignore doze->wake transitions that are
-                        * indicated by non-data frames, the standard
-                        * is unclear here, but for example going to
-                        * PS mode and then scanning would cause a
-                        * doze->wake transition for the probe request,
-                        * and that is clearly undesirable.
-                        */
-                       if (ieee80211_is_data(hdr->frame_control) &&
-                           !ieee80211_has_pm(hdr->frame_control))
+                       if (!ieee80211_has_pm(hdr->frame_control))
                                sta_ps_end(sta);
                } else {
                        if (ieee80211_has_pm(hdr->frame_control))
@@ -1852,8 +1817,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
                 * having configured keys.
                 */
                if (unlikely(ieee80211_is_action(fc) && !rx->key &&
-                            ieee80211_is_robust_mgmt_frame(
-                                    (struct ieee80211_hdr *) rx->skb->data)))
+                            ieee80211_is_robust_mgmt_frame(rx->skb)))
                        return -EACCES;
        }
 
@@ -2000,7 +1964,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               netif_receive_skb(skb);
+               if (rx->local->napi)
+                       napi_gro_receive(rx->local->napi, skb);
+               else
+                       netif_receive_skb(skb);
        }
 
        if (xmit_skb) {
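
The final rx.c hunk lets mac80211 feed decapsulated frames into GRO whenever the driver has provided a NAPI context (rx->local->napi), falling back to netif_receive_skb() otherwise. A kernel-style sketch of that dispatch, with the surrounding function invented for illustration:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void demo_deliver_to_stack(struct napi_struct *napi, struct sk_buff *skb)
    {
            /* GRO can coalesce consecutive segments before the stack sees
             * them, but only if we have the driver's NAPI context */
            if (napi)
                    napi_gro_receive(napi, skb);
            else
                    netif_receive_skb(skb);
    }
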
index 88c81616f8f758595e90cced1d503448ce6ba969..3ce7f2c8539a1f626f7488833ee966fb3af5d502 100644 (file)
@@ -472,9 +472,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
        if (local->ops->hw_scan) {
                u8 *ies;
 
-               local->hw_scan_ies_bufsize = 2 + IEEE80211_MAX_SSID_LEN +
-                                            local->scan_ies_len +
-                                            req->ie_len;
+               local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
                local->hw_scan_req = kmalloc(
                                sizeof(*local->hw_scan_req) +
                                req->n_channels * sizeof(req->channels[0]) +
@@ -979,8 +977,7 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        struct cfg80211_chan_def chandef;
        int ret, i, iebufsz;
 
-       iebufsz = 2 + IEEE80211_MAX_SSID_LEN +
-                 local->scan_ies_len + req->ie_len;
+       iebufsz = local->scan_ies_len + req->ie_len;
 
        lockdep_assert_held(&local->mtx);
 
@@ -1058,9 +1055,11 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
        /* We don't want to restart sched scan anymore. */
        local->sched_scan_req = NULL;
 
-       if (rcu_access_pointer(local->sched_scan_sdata))
-               drv_sched_scan_stop(local, sdata);
-
+       if (rcu_access_pointer(local->sched_scan_sdata)) {
+               ret = drv_sched_scan_stop(local, sdata);
+               if (!ret)
+                       rcu_assign_pointer(local->sched_scan_sdata, NULL);
+       }
 out:
        mutex_unlock(&local->mtx);
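
Two scan.c changes: the hw-scan IE buffer no longer reserves '2 + IEEE80211_MAX_SSID_LEN' by hand (presumably local->scan_ies_len now already accounts for that), and ieee80211_request_sched_scan_stop() clears local->sched_scan_sdata only when drv_sched_scan_stop() reports success, so a failed stop can be retried. A kernel-style sketch of that second pattern, clearing an RCU-managed pointer only on success (the types and the driver call are illustrative):

    #include <linux/rcupdate.h>

    struct demo_sdata;

    struct demo_local {
            struct demo_sdata __rcu *sched_scan_sdata;
    };

    /* stands in for the drv_sched_scan_stop() driver op */
    static int demo_drv_sched_scan_stop(struct demo_local *local,
                                        struct demo_sdata *sdata);

    static int demo_sched_scan_stop(struct demo_local *local, struct demo_sdata *sdata)
    {
            int ret = 0;

            if (rcu_access_pointer(local->sched_scan_sdata)) {
                    ret = demo_drv_sched_scan_stop(local, sdata);
                    if (!ret)
                            rcu_assign_pointer(local->sched_scan_sdata, NULL);
            }
            return ret;
    }
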
 
index d3a6d8208f2f85f7db331238f41c0da2938f0f8d..4acc5fc402fa30b11e8f26da93be91eeabcc303a 100644 (file)
@@ -261,6 +261,7 @@ struct ieee80211_tx_latency_stat {
  *     "the" transmit rate
  * @last_rx_rate_idx: rx status rate index of the last data packet
  * @last_rx_rate_flag: rx status flag of the last data packet
+ * @last_rx_rate_vht_flag: rx status vht flag of the last data packet
  * @last_rx_rate_vht_nss: rx status nss of last data packet
  * @lock: used for locking all fields that require locking, see comments
  *     in the header file.
@@ -396,6 +397,7 @@ struct sta_info {
        struct ieee80211_tx_rate last_tx_rate;
        int last_rx_rate_idx;
        u32 last_rx_rate_flag;
+       u32 last_rx_rate_vht_flag;
        u8 last_rx_rate_vht_nss;
        u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
 
index 1ee85c402439bfc886029f9ae6dfe6ec306e013f..e6e574a307c8f3fd552329379919aaf9d071245d 100644 (file)
@@ -479,7 +479,7 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
        u32 msrmnt;
        u16 tid;
        u8 *qc;
-       int i, bin_range_count, bin_count;
+       int i, bin_range_count;
        u32 *bin_ranges;
        __le16 fc;
        struct ieee80211_tx_latency_stat *tx_lat;
@@ -522,7 +522,6 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
        /* count how many Tx frames transmitted with the appropriate latency */
        bin_range_count = tx_latency->n_ranges;
        bin_ranges = tx_latency->ranges;
-       bin_count = tx_lat->bin_count;
 
        for (i = 0; i < bin_range_count; i++) {
                if (msrmnt <= bin_ranges[i]) {
index 4080c615636fabf3d430ecd898d4349ccd213464..19d36d4117e0da0b5524b7e3f8102a86810dceed 100644 (file)
@@ -452,8 +452,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
        if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
                return 0;
 
-       if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
-                                           skb->data))
+       if (!ieee80211_is_robust_mgmt_frame(skb))
                return 0;
 
        return 1;
@@ -538,11 +537,8 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
 
-       /* only deauth, disassoc and action are bufferable MMPDUs */
        if (ieee80211_is_mgmt(hdr->frame_control) &&
-           !ieee80211_is_deauth(hdr->frame_control) &&
-           !ieee80211_is_disassoc(hdr->frame_control) &&
-           !ieee80211_is_action(hdr->frame_control)) {
+           !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
                if (tx->flags & IEEE80211_TX_UNICAST)
                        info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
                return TX_CONTINUE;
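
Here (and in the rx.c power-save hunk earlier) the open-coded "only deauth, disassoc and action are bufferable MMPDUs" test is replaced by a new ieee80211_is_bufferable_mmpdu() helper. Based on the checks being removed above, the helper amounts to something like the following sketch (the real helper lives in linux/ieee80211.h and may be written differently):

    #include <linux/ieee80211.h>

    /* sketch of the predicate implied by the three checks removed above */
    static inline bool demo_is_bufferable_mmpdu(__le16 fc)
    {
            return ieee80211_is_deauth(fc) ||
                   ieee80211_is_disassoc(fc) ||
                   ieee80211_is_action(fc);
    }
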
@@ -582,7 +578,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                tx->key = key;
        else if (ieee80211_is_mgmt(hdr->frame_control) &&
                 is_multicast_ether_addr(hdr->addr1) &&
-                ieee80211_is_robust_mgmt_frame(hdr) &&
+                ieee80211_is_robust_mgmt_frame(tx->skb) &&
                 (key = rcu_dereference(tx->sdata->default_mgmt_key)))
                tx->key = key;
        else if (is_multicast_ether_addr(hdr->addr1) &&
@@ -597,12 +593,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                tx->key = NULL;
        else if (tx->skb->protocol == tx->sdata->control_port_protocol)
                tx->key = NULL;
-       else if (ieee80211_is_robust_mgmt_frame(hdr) &&
+       else if (ieee80211_is_robust_mgmt_frame(tx->skb) &&
                 !(ieee80211_is_action(hdr->frame_control) &&
                   tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
                tx->key = NULL;
        else if (ieee80211_is_mgmt(hdr->frame_control) &&
-                !ieee80211_is_robust_mgmt_frame(hdr))
+                !ieee80211_is_robust_mgmt_frame(tx->skb))
                tx->key = NULL;
        else {
                I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
@@ -2417,15 +2413,6 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
        return 0;
 }
 
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
-{
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-
-       ieee80211_queue_work(&sdata->local->hw,
-                            &sdata->csa_finalize_work);
-}
-EXPORT_SYMBOL(ieee80211_csa_finish);
-
 static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
                                 struct beacon_data *beacon)
 {
@@ -2454,8 +2441,12 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(counter_offset_beacon >= beacon_data_len))
                return;
 
-       /* warn if the driver did not check for/react to csa completeness */
-       if (WARN_ON(beacon_data[counter_offset_beacon] == 0))
+       /* Warn if the driver did not check for/react to csa
+        * completeness.  A beacon with CSA counter set to 0 should
+        * never occur, because a counter of 1 means switch just
+        * before the next beacon.
+        */
+       if (WARN_ON(beacon_data[counter_offset_beacon] == 1))
                return;
 
        beacon_data[counter_offset_beacon]--;
@@ -2521,7 +2512,7 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
        if (WARN_ON(counter_beacon > beacon_data_len))
                goto out;
 
-       if (beacon_data[counter_beacon] == 0)
+       if (beacon_data[counter_beacon] == 1)
                ret = true;
  out:
        rcu_read_unlock();
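
Both CSA hunks move the "switch is due" condition from a counter value of 0 to 1: the counter carried in the beacon is the number of beacon intervals remaining, so 1 means the switch happens just before the next beacon and 0 should never go out on the air (hence the updated WARN_ON). A toy stand-alone countdown illustrating that convention:

    #include <stdbool.h>
    #include <stdio.h>

    /* returns true once the value that would go into the next beacon is 1,
     * i.e. the switch is due before that beacon is sent */
    static bool demo_csa_beacon_tick(unsigned char *counter)
    {
            if (*counter > 1)
                    (*counter)--;
            return *counter == 1;
    }

    int main(void)
    {
            unsigned char counter = 4;

            while (!demo_csa_beacon_tick(&counter))
                    printf("beacon sent, %u intervals left\n", counter);
            printf("switching before the next beacon\n");
            return 0;
    }
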
@@ -2909,7 +2900,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
                                cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                }
 
-               if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               if (sdata->vif.type == NL80211_IFTYPE_AP)
                        sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
                if (!ieee80211_tx_prepare(sdata, &tx, skb))
                        break;
index b8700d417a9cf26735a581fe25eb2619cf4a37da..275c94f995f7c8401749cbbafb249bb52a418be7 100644 (file)
@@ -34,7 +34,7 @@
 #include "wep.h"
 
 /* privid for wiphys to determine whether they belong to us or not */
-void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
+const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid;
 
 struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
 {
@@ -1277,13 +1277,32 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
         * that calculates local->scan_ies_len.
         */
 
-       /* add any remaining custom IEs */
+       /* insert custom IEs that go before VHT */
        if (ie && ie_len) {
-               noffset = ie_len;
+               static const u8 before_vht[] = {
+                       WLAN_EID_SSID,
+                       WLAN_EID_SUPP_RATES,
+                       WLAN_EID_REQUEST,
+                       WLAN_EID_EXT_SUPP_RATES,
+                       WLAN_EID_DS_PARAMS,
+                       WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+                       WLAN_EID_HT_CAPABILITY,
+                       WLAN_EID_BSS_COEX_2040,
+                       WLAN_EID_EXT_CAPABILITY,
+                       WLAN_EID_SSID_LIST,
+                       WLAN_EID_CHANNEL_USAGE,
+                       WLAN_EID_INTERWORKING,
+                       /* mesh ID can't happen here */
+                       /* 60 GHz can't happen here right now */
+               };
+               noffset = ieee80211_ie_split(ie, ie_len,
+                                            before_vht, ARRAY_SIZE(before_vht),
+                                            offset);
                if (end - pos < noffset - offset)
                        goto out_err;
                memcpy(pos, ie + offset, noffset - offset);
                pos += noffset - offset;
+               offset = noffset;
        }
 
        if (sband->vht_cap.vht_supported) {
@@ -1293,6 +1312,15 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
                                                 sband->vht_cap.cap);
        }
 
+       /* add any remaining custom IEs */
+       if (ie && ie_len) {
+               noffset = ie_len;
+               if (end - pos < noffset - offset)
+                       goto out_err;
+               memcpy(pos, ie + offset, noffset - offset);
+               pos += noffset - offset;
+       }
+
        return pos - buffer;
  out_err:
        WARN_ONCE(1, "not enough space for preq IEs\n");
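
ieee80211_build_preq_ies() previously appended all user-supplied IEs at the end; now it uses ieee80211_ie_split() with a before_vht table so that user IEs which must precede the VHT capability element are copied first, the VHT capability is emitted, and the rest of the user buffer follows. The core operation is partitioning a TLV (ID, length, value) buffer at the first element whose ID is not in an allowed set; a stand-alone sketch of that split (this is the idea, not the kernel's ieee80211_ie_split()):

    #include <stddef.h>
    #include <string.h>

    /* return the offset of the first IE (starting at 'offset') whose ID is
     * not in ids[]; IEs are TLVs: 1 byte ID, 1 byte length, 'length' bytes */
    static size_t demo_ie_split(const unsigned char *ies, size_t len,
                                const unsigned char *ids, size_t n_ids,
                                size_t offset)
    {
            while (offset + 2 <= len && offset + 2 + ies[offset + 1] <= len) {
                    if (!memchr(ids, ies[offset], n_ids))
                            break;
                    offset += 2 + ies[offset + 1];
            }
            return offset;
    }
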
@@ -1370,7 +1398,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
                            enum ieee80211_band band, u32 *basic_rates)
 {
        struct ieee80211_supported_band *sband;
-       struct ieee80211_rate *bitrates;
        size_t num_rates;
        u32 supp_rates, rate_flags;
        int i, j, shift;
@@ -1382,7 +1409,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
        if (WARN_ON(!sband))
                return 1;
 
-       bitrates = sband->bitrates;
        num_rates = sband->n_bitrates;
        supp_rates = 0;
        for (i = 0; i < elems->supp_rates_len +
@@ -2268,11 +2294,11 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
                ri.nss = status->vht_nss;
                if (status->flag & RX_FLAG_40MHZ)
                        ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
-               if (status->flag & RX_FLAG_80MHZ)
+               if (status->vht_flag & RX_VHT_FLAG_80MHZ)
                        ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
-               if (status->flag & RX_FLAG_80P80MHZ)
+               if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
                        ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
-               if (status->flag & RX_FLAG_160MHZ)
+               if (status->vht_flag & RX_VHT_FLAG_160MHZ)
                        ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
                if (status->flag & RX_FLAG_SHORT_GI)
                        ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
index d75f35c6e1a0884eb9452030cb27bcafb33deeab..e9e36a256165842ac112e35e612ba79c8f86d305 100644 (file)
@@ -349,9 +349,9 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
        sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
 }
 
-void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
-                                struct sta_info *sta, u8 opmode,
-                                enum ieee80211_band band, bool nss_only)
+u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+                                 struct sta_info *sta, u8 opmode,
+                                 enum ieee80211_band band, bool nss_only)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
@@ -363,7 +363,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 
        /* ignore - no support for BF yet */
        if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
-               return;
+               return 0;
 
        nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
        nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
@@ -375,7 +375,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
        }
 
        if (nss_only)
-               goto change;
+               return changed;
 
        switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
        case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
@@ -398,7 +398,19 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                changed |= IEEE80211_RC_BW_CHANGED;
        }
 
- change:
-       if (changed)
+       return changed;
+}
+
+void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+                                struct sta_info *sta, u8 opmode,
+                                enum ieee80211_band band, bool nss_only)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+
+       u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode,
+                                                   band, nss_only);
+
+       if (changed > 0)
                rate_control_rate_update(local, sband, sta, changed);
 }
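
The opmode-notification handling is split in two: __ieee80211_vht_handle_opmode() updates the station's NSS/bandwidth state and returns an IEEE80211_RC_* "changed" bitmask, while ieee80211_vht_handle_opmode() becomes a thin wrapper that feeds that bitmask to rate_control_rate_update(). That lets other call sites reuse the computation and fold the result into their own changed mask. A stand-alone sketch of the compute/apply split with illustrative names:

    #include <stdio.h>

    #define DEMO_RC_NSS_CHANGED  0x1
    #define DEMO_RC_BW_CHANGED   0x2

    struct demo_sta { unsigned nss, bw; };

    /* pure part: update the station fields and report which ones changed */
    static unsigned demo_handle_opmode(struct demo_sta *sta, unsigned nss, unsigned bw)
    {
            unsigned changed = 0;

            if (sta->nss != nss) { sta->nss = nss; changed |= DEMO_RC_NSS_CHANGED; }
            if (sta->bw  != bw)  { sta->bw  = bw;  changed |= DEMO_RC_BW_CHANGED; }
            return changed;
    }

    /* wrapper: apply the side effect once, like the mac80211 wrapper that
     * calls rate_control_rate_update() */
    static void demo_handle_opmode_and_update(struct demo_sta *sta, unsigned nss, unsigned bw)
    {
            unsigned changed = demo_handle_opmode(sta, nss, bw);

            if (changed)
                    printf("rate control update, changed=0x%x\n", changed);
    }

    int main(void)
    {
            struct demo_sta sta = { .nss = 1, .bw = 20 };

            demo_handle_opmode_and_update(&sta, 2, 40);
            return 0;
    }
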
index 21448d629b152adebff514e5d5a8d37e11a676e6..b8600e3c29c828d918b3676397f73a4d0fe7892c 100644 (file)
@@ -301,8 +301,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
 }
 
 
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
-                               int encrypted)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
 {
        __le16 mask_fc;
        int a4_included, mgmt;
@@ -456,7 +455,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
                return 0;
 
        pos += IEEE80211_CCMP_HDR_LEN;
-       ccmp_special_blocks(skb, pn, b_0, aad, 0);
+       ccmp_special_blocks(skb, pn, b_0, aad);
        ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
                                  skb_put(skb, IEEE80211_CCMP_MIC_LEN));
 
@@ -495,7 +494,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
        if (!ieee80211_is_data(hdr->frame_control) &&
-           !ieee80211_is_robust_mgmt_frame(hdr))
+           !ieee80211_is_robust_mgmt_frame(skb))
                return RX_CONTINUE;
 
        data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
@@ -524,7 +523,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
                u8 aad[2 * AES_BLOCK_SIZE];
                u8 b_0[AES_BLOCK_SIZE];
                /* hardware didn't decrypt/verify MIC */
-               ccmp_special_blocks(skb, pn, b_0, aad, 1);
+               ccmp_special_blocks(skb, pn, b_0, aad);
 
                if (ieee80211_aes_ccm_decrypt(
                            key->u.ccmp.tfm, b_0, aad,
index 57cf5d1a2e4a4e3de3c6b839cacb5bdf8baf1367..15d62df521825c8581fc25ec25a9caebc8902a72 100644 (file)
@@ -1,2 +1,4 @@
 obj-$(CONFIG_MAC802154)        += mac802154.o
 mac802154-objs         := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
+
+ccflags-y += -D__CHECK_ENDIAN__
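
Adding -D__CHECK_ENDIAN__ to the mac802154 ccflags makes sparse enforce the __le16/__le64 annotations, which is what the following hunks rely on when they turn PAN IDs, short addresses and extended addresses into __le16/__le64 and route every access through cpu_to_le16()/le16_to_cpu(). A kernel-style sketch of the checked pattern (run the build with 'make C=1' for sparse to flag mismatches):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_if {
            __le16 pan_id;          /* stored in on-air (little-endian) order */
    };

    static void demo_set_pan_id(struct demo_if *p, u16 host_val)
    {
            p->pan_id = cpu_to_le16(host_val);      /* ok */
            /* p->pan_id = host_val; would now draw a sparse warning */
    }

    static u16 demo_get_pan_id(const struct demo_if *p)
    {
            return le16_to_cpu(p->pan_id);
    }
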
index 52ae6646a41140e065920ee80c4df626c6a58594..10cdb091b775602a1807816256a22f734c0dfb8e 100644 (file)
@@ -27,6 +27,7 @@
 #include <net/netlink.h>
 #include <linux/nl802154.h>
 #include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
 #include <net/route.h>
 #include <net/wpan-phy.h>
 
@@ -46,7 +47,9 @@ int mac802154_slave_open(struct net_device *dev)
        }
 
        if (ipriv->ops->ieee_addr) {
-               res = ipriv->ops->ieee_addr(&ipriv->hw, dev->dev_addr);
+               __le64 addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+
+               res = ipriv->ops->ieee_addr(&ipriv->hw, addr);
                WARN_ON(res);
                if (res)
                        goto err;
@@ -165,6 +168,67 @@ err:
        return ERR_PTR(err);
 }
 
+static int mac802154_set_txpower(struct wpan_phy *phy, int db)
+{
+       struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+       if (!priv->ops->set_txpower)
+               return -ENOTSUPP;
+
+       return priv->ops->set_txpower(&priv->hw, db);
+}
+
+static int mac802154_set_lbt(struct wpan_phy *phy, bool on)
+{
+       struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+       if (!priv->ops->set_lbt)
+               return -ENOTSUPP;
+
+       return priv->ops->set_lbt(&priv->hw, on);
+}
+
+static int mac802154_set_cca_mode(struct wpan_phy *phy, u8 mode)
+{
+       struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+       if (!priv->ops->set_cca_mode)
+               return -ENOTSUPP;
+
+       return priv->ops->set_cca_mode(&priv->hw, mode);
+}
+
+static int mac802154_set_cca_ed_level(struct wpan_phy *phy, s32 level)
+{
+       struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+       if (!priv->ops->set_cca_ed_level)
+               return -ENOTSUPP;
+
+       return priv->ops->set_cca_ed_level(&priv->hw, level);
+}
+
+static int mac802154_set_csma_params(struct wpan_phy *phy, u8 min_be,
+                                    u8 max_be, u8 retries)
+{
+       struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+       if (!priv->ops->set_csma_params)
+               return -ENOTSUPP;
+
+       return priv->ops->set_csma_params(&priv->hw, min_be, max_be, retries);
+}
+
+static int mac802154_set_frame_retries(struct wpan_phy *phy, s8 retries)
+{
+       struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+       if (!priv->ops->set_frame_retries)
+               return -ENOTSUPP;
+
+       return priv->ops->set_frame_retries(&priv->hw, retries);
+}
+
 struct ieee802154_dev *
 ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
 {
@@ -242,6 +306,12 @@ int ieee802154_register_device(struct ieee802154_dev *dev)
 
        priv->phy->add_iface = mac802154_add_iface;
        priv->phy->del_iface = mac802154_del_iface;
+       priv->phy->set_txpower = mac802154_set_txpower;
+       priv->phy->set_lbt = mac802154_set_lbt;
+       priv->phy->set_cca_mode = mac802154_set_cca_mode;
+       priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
+       priv->phy->set_csma_params = mac802154_set_csma_params;
+       priv->phy->set_frame_retries = mac802154_set_frame_retries;
 
        rc = wpan_phy_register(priv->phy);
        if (rc < 0)
index d48422e271109a47d2e7139cf33e2bc727c38f3d..4619486f1da21e728b609f94d785ce683426ccdf 100644 (file)
@@ -76,6 +76,7 @@ struct mac802154_sub_if_data {
 
        __le16 pan_id;
        __le16 short_addr;
+       __le64 extended_addr;
 
        u8 chan;
        u8 page;
@@ -106,11 +107,11 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
                         u8 page, u8 chan);
 
 /* MIB callbacks */
-void mac802154_dev_set_short_addr(struct net_device *dev, u16 val);
-u16 mac802154_dev_get_short_addr(const struct net_device *dev);
+void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val);
+__le16 mac802154_dev_get_short_addr(const struct net_device *dev);
 void mac802154_dev_set_ieee_addr(struct net_device *dev);
-u16 mac802154_dev_get_pan_id(const struct net_device *dev);
-void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
+__le16 mac802154_dev_get_pan_id(const struct net_device *dev);
+void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val);
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
 u8 mac802154_dev_get_dsn(const struct net_device *dev);
 
index a99910d4d52f8403a8c2141b473deba885b977ad..15bac3358889198e4b34800896bec9fc12163450 100644 (file)
@@ -40,7 +40,7 @@ static int mac802154_mlme_start_req(struct net_device *dev,
                                    u8 pan_coord, u8 blx,
                                    u8 coord_realign)
 {
-       BUG_ON(addr->addr_type != IEEE802154_ADDR_SHORT);
+       BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
        mac802154_dev_set_pan_id(dev, addr->pan_id);
        mac802154_dev_set_short_addr(dev, addr->short_addr);
index 8ded97cf1c33f4336b3a59761095eff575ba6287..153bd1ddbfbba782b4c2842d7559d8a8917bb474 100644 (file)
@@ -24,7 +24,9 @@
 #include <linux/if_arp.h>
 
 #include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
 #include <net/wpan-phy.h>
+#include <net/ieee802154_netdev.h>
 
 #include "mac802154.h"
 
@@ -62,8 +64,6 @@ static void hw_addr_notify(struct work_struct *work)
                pr_debug("failed changed mask %lx\n", nw->changed);
 
        kfree(nw);
-
-       return;
 }
 
 static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
@@ -79,11 +79,9 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
        work->dev = dev;
        work->changed = changed;
        queue_work(priv->hw->dev_workqueue, &work->work);
-
-       return;
 }
 
-void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
+void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val)
 {
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
 
@@ -100,10 +98,10 @@ void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
        }
 }
 
-u16 mac802154_dev_get_short_addr(const struct net_device *dev)
+__le16 mac802154_dev_get_short_addr(const struct net_device *dev)
 {
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
-       u16 ret;
+       __le16 ret;
 
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
@@ -119,19 +117,19 @@ void mac802154_dev_set_ieee_addr(struct net_device *dev)
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
        struct mac802154_priv *mac = priv->hw;
 
+       priv->extended_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+
        if (mac->ops->set_hw_addr_filt &&
-           memcmp(mac->hw.hw_filt.ieee_addr,
-                  dev->dev_addr, IEEE802154_ADDR_LEN)) {
-               memcpy(mac->hw.hw_filt.ieee_addr,
-                      dev->dev_addr, IEEE802154_ADDR_LEN);
+           mac->hw.hw_filt.ieee_addr != priv->extended_addr) {
+               mac->hw.hw_filt.ieee_addr = priv->extended_addr;
                set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
        }
 }
 
-u16 mac802154_dev_get_pan_id(const struct net_device *dev)
+__le16 mac802154_dev_get_pan_id(const struct net_device *dev)
 {
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
-       u16 ret;
+       __le16 ret;
 
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
@@ -142,7 +140,7 @@ u16 mac802154_dev_get_pan_id(const struct net_device *dev)
        return ret;
 }
 
-void mac802154_dev_set_pan_id(struct net_device *dev, u16 val)
+void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val)
 {
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
 
index 38548ec2098fb1350b4f4278a1a161f7495a2eb7..03855b0677ccf8efcb0819591bae63bd8609c693 100644 (file)
@@ -80,7 +80,6 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
        mac802154_wpans_rx(priv, skb);
 out:
        dev_kfree_skb(skb);
-       return;
 }
 
 static void mac802154_rx_worker(struct work_struct *work)
index 372d8a222b9184e2881d108a087cf9ebaad768e6..80cbee1a2f567aaf3521419bc13e9beab168bb59 100644 (file)
 
 #include "mac802154.h"
 
-static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
-{
-       if (unlikely(!pskb_may_pull(skb, 1)))
-               return -EINVAL;
-
-       *val = skb->data[0];
-       skb_pull(skb, 1);
-
-       return 0;
-}
-
-static inline int mac802154_fetch_skb_u16(struct sk_buff *skb, u16 *val)
-{
-       if (unlikely(!pskb_may_pull(skb, 2)))
-               return -EINVAL;
-
-       *val = skb->data[0] | (skb->data[1] << 8);
-       skb_pull(skb, 2);
-
-       return 0;
-}
-
-static inline void mac802154_haddr_copy_swap(u8 *dest, const u8 *src)
-{
-       int i;
-       for (i = 0; i < IEEE802154_ADDR_LEN; i++)
-               dest[IEEE802154_ADDR_LEN - i - 1] = src[i];
-}
-
 static int
 mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
@@ -76,19 +47,25 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
        switch (cmd) {
        case SIOCGIFADDR:
-               if (priv->pan_id == IEEE802154_PANID_BROADCAST ||
-                   priv->short_addr == IEEE802154_ADDR_BROADCAST) {
+       {
+               u16 pan_id, short_addr;
+
+               pan_id = le16_to_cpu(priv->pan_id);
+               short_addr = le16_to_cpu(priv->short_addr);
+               if (pan_id == IEEE802154_PANID_BROADCAST ||
+                   short_addr == IEEE802154_ADDR_BROADCAST) {
                        err = -EADDRNOTAVAIL;
                        break;
                }
 
                sa->family = AF_IEEE802154;
                sa->addr.addr_type = IEEE802154_ADDR_SHORT;
-               sa->addr.pan_id = priv->pan_id;
-               sa->addr.short_addr = priv->short_addr;
+               sa->addr.pan_id = pan_id;
+               sa->addr.short_addr = short_addr;
 
                err = 0;
                break;
+       }
        case SIOCSIFADDR:
                dev_warn(&dev->dev,
                         "Using DEBUGing ioctl SIOCSIFADDR isn't recommened!\n");
@@ -101,8 +78,8 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        break;
                }
 
-               priv->pan_id = sa->addr.pan_id;
-               priv->short_addr = sa->addr.short_addr;
+               priv->pan_id = cpu_to_le16(sa->addr.pan_id);
+               priv->short_addr = cpu_to_le16(sa->addr.short_addr);
 
                err = 0;
                break;
@@ -128,187 +105,70 @@ static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
 static int mac802154_header_create(struct sk_buff *skb,
                                   struct net_device *dev,
                                   unsigned short type,
-                                  const void *_daddr,
-                                  const void *_saddr,
+                                  const void *daddr,
+                                  const void *saddr,
                                   unsigned len)
 {
-       const struct ieee802154_addr *saddr = _saddr;
-       const struct ieee802154_addr *daddr = _daddr;
-       struct ieee802154_addr dev_addr;
+       struct ieee802154_hdr hdr;
        struct mac802154_sub_if_data *priv = netdev_priv(dev);
-       int pos = 2;
-       u8 head[MAC802154_FRAME_HARD_HEADER_LEN];
-       u16 fc;
+       int hlen;
 
        if (!daddr)
                return -EINVAL;
 
-       head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
-       fc = mac_cb_type(skb);
-       if (mac_cb_is_ackreq(skb))
-               fc |= IEEE802154_FC_ACK_REQ;
+       memset(&hdr.fc, 0, sizeof(hdr.fc));
+       hdr.fc.type = mac_cb_type(skb);
+       hdr.fc.security_enabled = mac_cb_is_secen(skb);
+       hdr.fc.ack_request = mac_cb_is_ackreq(skb);
 
        if (!saddr) {
                spin_lock_bh(&priv->mib_lock);
 
-               if (priv->short_addr == IEEE802154_ADDR_BROADCAST ||
-                   priv->short_addr == IEEE802154_ADDR_UNDEF ||
-                   priv->pan_id == IEEE802154_PANID_BROADCAST) {
-                       dev_addr.addr_type = IEEE802154_ADDR_LONG;
-                       memcpy(dev_addr.hwaddr, dev->dev_addr,
-                              IEEE802154_ADDR_LEN);
+               if (priv->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
+                   priv->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
+                   priv->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
+                       hdr.source.mode = IEEE802154_ADDR_LONG;
+                       hdr.source.extended_addr = priv->extended_addr;
                } else {
-                       dev_addr.addr_type = IEEE802154_ADDR_SHORT;
-                       dev_addr.short_addr = priv->short_addr;
+                       hdr.source.mode = IEEE802154_ADDR_SHORT;
+                       hdr.source.short_addr = priv->short_addr;
                }
 
-               dev_addr.pan_id = priv->pan_id;
-               saddr = &dev_addr;
+               hdr.source.pan_id = priv->pan_id;
 
                spin_unlock_bh(&priv->mib_lock);
+       } else {
+               hdr.source = *(const struct ieee802154_addr *)saddr;
        }
 
-       if (daddr->addr_type != IEEE802154_ADDR_NONE) {
-               fc |= (daddr->addr_type << IEEE802154_FC_DAMODE_SHIFT);
-
-               head[pos++] = daddr->pan_id & 0xff;
-               head[pos++] = daddr->pan_id >> 8;
-
-               if (daddr->addr_type == IEEE802154_ADDR_SHORT) {
-                       head[pos++] = daddr->short_addr & 0xff;
-                       head[pos++] = daddr->short_addr >> 8;
-               } else {
-                       mac802154_haddr_copy_swap(head + pos, daddr->hwaddr);
-                       pos += IEEE802154_ADDR_LEN;
-               }
-       }
-
-       if (saddr->addr_type != IEEE802154_ADDR_NONE) {
-               fc |= (saddr->addr_type << IEEE802154_FC_SAMODE_SHIFT);
-
-               if ((saddr->pan_id == daddr->pan_id) &&
-                   (saddr->pan_id != IEEE802154_PANID_BROADCAST)) {
-                       /* PANID compression/intra PAN */
-                       fc |= IEEE802154_FC_INTRA_PAN;
-               } else {
-                       head[pos++] = saddr->pan_id & 0xff;
-                       head[pos++] = saddr->pan_id >> 8;
-               }
-
-               if (saddr->addr_type == IEEE802154_ADDR_SHORT) {
-                       head[pos++] = saddr->short_addr & 0xff;
-                       head[pos++] = saddr->short_addr >> 8;
-               } else {
-                       mac802154_haddr_copy_swap(head + pos, saddr->hwaddr);
-                       pos += IEEE802154_ADDR_LEN;
-               }
-       }
+       hdr.dest = *(const struct ieee802154_addr *)daddr;
 
-       head[0] = fc;
-       head[1] = fc >> 8;
+       hlen = ieee802154_hdr_push(skb, &hdr);
+       if (hlen < 0)
+               return -EINVAL;
 
-       memcpy(skb_push(skb, pos), head, pos);
        skb_reset_mac_header(skb);
-       skb->mac_len = pos;
+       skb->mac_len = hlen;
+
+       if (hlen + len + 2 > dev->mtu)
+               return -EMSGSIZE;
 
-       return pos;
+       return hlen;
 }
 
 static int
 mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 {
-       const u8 *hdr = skb_mac_header(skb);
-       const u8 *tail = skb_tail_pointer(skb);
+       struct ieee802154_hdr hdr;
        struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
-       u16 fc;
-       int da_type;
-
-       if (hdr + 3 > tail)
-               goto malformed;
-
-       fc = hdr[0] | (hdr[1] << 8);
-
-       hdr += 3;
-
-       da_type = IEEE802154_FC_DAMODE(fc);
-       addr->addr_type = IEEE802154_FC_SAMODE(fc);
-
-       switch (da_type) {
-       case IEEE802154_ADDR_NONE:
-               if (fc & IEEE802154_FC_INTRA_PAN)
-                       goto malformed;
-               break;
-       case IEEE802154_ADDR_LONG:
-               if (fc & IEEE802154_FC_INTRA_PAN) {
-                       if (hdr + 2 > tail)
-                               goto malformed;
-                       addr->pan_id = hdr[0] | (hdr[1] << 8);
-                       hdr += 2;
-               }
-
-               if (hdr + IEEE802154_ADDR_LEN > tail)
-                       goto malformed;
-
-               hdr += IEEE802154_ADDR_LEN;
-               break;
-       case IEEE802154_ADDR_SHORT:
-               if (fc & IEEE802154_FC_INTRA_PAN) {
-                       if (hdr + 2 > tail)
-                               goto malformed;
-                       addr->pan_id = hdr[0] | (hdr[1] << 8);
-                       hdr += 2;
-               }
-
-               if (hdr + 2 > tail)
-                       goto malformed;
-
-               hdr += 2;
-               break;
-       default:
-               goto malformed;
 
+       if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) {
+               pr_debug("malformed packet\n");
+               return 0;
        }
 
-       switch (addr->addr_type) {
-       case IEEE802154_ADDR_NONE:
-               break;
-       case IEEE802154_ADDR_LONG:
-               if (!(fc & IEEE802154_FC_INTRA_PAN)) {
-                       if (hdr + 2 > tail)
-                               goto malformed;
-                       addr->pan_id = hdr[0] | (hdr[1] << 8);
-                       hdr += 2;
-               }
-
-               if (hdr + IEEE802154_ADDR_LEN > tail)
-                       goto malformed;
-
-               mac802154_haddr_copy_swap(addr->hwaddr, hdr);
-               hdr += IEEE802154_ADDR_LEN;
-               break;
-       case IEEE802154_ADDR_SHORT:
-               if (!(fc & IEEE802154_FC_INTRA_PAN)) {
-                       if (hdr + 2 > tail)
-                               goto malformed;
-                       addr->pan_id = hdr[0] | (hdr[1] << 8);
-                       hdr += 2;
-               }
-
-               if (hdr + 2 > tail)
-                       goto malformed;
-
-               addr->short_addr = hdr[0] | (hdr[1] << 8);
-               hdr += 2;
-               break;
-       default:
-               goto malformed;
-       }
-
-       return sizeof(struct ieee802154_addr);
-
-malformed:
-       pr_debug("malformed packet\n");
-       return 0;
+       *addr = hdr.source;
+       return sizeof(*addr);
 }
 
 static netdev_tx_t
@@ -382,8 +242,8 @@ void mac802154_wpan_setup(struct net_device *dev)
        get_random_bytes(&priv->bsn, 1);
        get_random_bytes(&priv->dsn, 1);
 
-       priv->pan_id = IEEE802154_PANID_BROADCAST;
-       priv->short_addr = IEEE802154_ADDR_BROADCAST;
+       priv->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
+       priv->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
 }
 
 static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
@@ -394,13 +254,18 @@ static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
 static int
 mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
 {
+       __le16 span, sshort;
+
        pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
        spin_lock_bh(&sdata->mib_lock);
 
-       switch (mac_cb(skb)->da.addr_type) {
+       span = sdata->pan_id;
+       sshort = sdata->short_addr;
+
+       switch (mac_cb(skb)->dest.mode) {
        case IEEE802154_ADDR_NONE:
-               if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE)
+               if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
                        /* FIXME: check if we are PAN coordinator */
                        skb->pkt_type = PACKET_OTHERHOST;
                else
@@ -408,23 +273,22 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
                        skb->pkt_type = PACKET_HOST;
                break;
        case IEEE802154_ADDR_LONG:
-               if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
-                   mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
+               if (mac_cb(skb)->dest.pan_id != span &&
+                   mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
                        skb->pkt_type = PACKET_OTHERHOST;
-               else if (!memcmp(mac_cb(skb)->da.hwaddr, sdata->dev->dev_addr,
-                                IEEE802154_ADDR_LEN))
+               else if (mac_cb(skb)->dest.extended_addr == sdata->extended_addr)
                        skb->pkt_type = PACKET_HOST;
                else
                        skb->pkt_type = PACKET_OTHERHOST;
                break;
        case IEEE802154_ADDR_SHORT:
-               if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
-                   mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
+               if (mac_cb(skb)->dest.pan_id != span &&
+                   mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))
                        skb->pkt_type = PACKET_OTHERHOST;
-               else if (mac_cb(skb)->da.short_addr == sdata->short_addr)
+               else if (mac_cb(skb)->dest.short_addr == sshort)
                        skb->pkt_type = PACKET_HOST;
-               else if (mac_cb(skb)->da.short_addr ==
-                                       IEEE802154_ADDR_BROADCAST)
+               else if (mac_cb(skb)->dest.short_addr ==
+                         cpu_to_le16(IEEE802154_ADDR_BROADCAST))
                        skb->pkt_type = PACKET_BROADCAST;
                else
                        skb->pkt_type = PACKET_OTHERHOST;
@@ -451,88 +315,82 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
        }
 }
 
-static int mac802154_parse_frame_start(struct sk_buff *skb)
+static void mac802154_print_addr(const char *name,
+                                const struct ieee802154_addr *addr)
 {
-       u8 *head = skb->data;
-       u16 fc;
+       if (addr->mode == IEEE802154_ADDR_NONE)
+               pr_debug("%s not present\n", name);
 
-       if (mac802154_fetch_skb_u16(skb, &fc) ||
-           mac802154_fetch_skb_u8(skb, &(mac_cb(skb)->seq)))
-               goto err;
+       pr_debug("%s PAN ID: %04x\n", name, le16_to_cpu(addr->pan_id));
+       if (addr->mode == IEEE802154_ADDR_SHORT) {
+               pr_debug("%s is short: %04x\n", name,
+                        le16_to_cpu(addr->short_addr));
+       } else {
+               u64 hw = swab64((__force u64) addr->extended_addr);
 
-       pr_debug("fc: %04x dsn: %02x\n", fc, head[2]);
-
-       mac_cb(skb)->flags = IEEE802154_FC_TYPE(fc);
-       mac_cb(skb)->sa.addr_type = IEEE802154_FC_SAMODE(fc);
-       mac_cb(skb)->da.addr_type = IEEE802154_FC_DAMODE(fc);
+               pr_debug("%s is hardware: %8phC\n", name, &hw);
+       }
+}
 
-       if (fc & IEEE802154_FC_INTRA_PAN)
-               mac_cb(skb)->flags |= MAC_CB_FLAG_INTRAPAN;
+static int mac802154_parse_frame_start(struct sk_buff *skb)
+{
+       int hlen;
+       struct ieee802154_hdr hdr;
 
-       if (mac_cb(skb)->da.addr_type != IEEE802154_ADDR_NONE) {
-               if (mac802154_fetch_skb_u16(skb, &(mac_cb(skb)->da.pan_id)))
-                       goto err;
+       hlen = ieee802154_hdr_pull(skb, &hdr);
+       if (hlen < 0)
+               return -EINVAL;
 
-               /* source PAN id compression */
-               if (mac_cb_is_intrapan(skb))
-                       mac_cb(skb)->sa.pan_id = mac_cb(skb)->da.pan_id;
+       skb->mac_len = hlen;
 
-               pr_debug("dest PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
+       pr_debug("fc: %04x dsn: %02x\n", le16_to_cpup((__le16 *)&hdr.fc),
+                hdr.seq);
 
-               if (mac_cb(skb)->da.addr_type == IEEE802154_ADDR_SHORT) {
-                       u16 *da = &(mac_cb(skb)->da.short_addr);
+       mac_cb(skb)->flags = hdr.fc.type;
 
-                       if (mac802154_fetch_skb_u16(skb, da))
-                               goto err;
+       if (hdr.fc.ack_request)
+               mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
+       if (hdr.fc.security_enabled)
+               mac_cb(skb)->flags |= MAC_CB_FLAG_SECEN;
 
-                       pr_debug("destination address is short: %04x\n",
-                                mac_cb(skb)->da.short_addr);
-               } else {
-                       if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
-                               goto err;
+       mac802154_print_addr("destination", &hdr.dest);
+       mac802154_print_addr("source", &hdr.source);
 
-                       mac802154_haddr_copy_swap(mac_cb(skb)->da.hwaddr,
-                                                 skb->data);
-                       skb_pull(skb, IEEE802154_ADDR_LEN);
+       mac_cb(skb)->source = hdr.source;
+       mac_cb(skb)->dest = hdr.dest;
 
-                       pr_debug("destination address is hardware\n");
-               }
-       }
+       if (hdr.fc.security_enabled) {
+               u64 key;
 
-       if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE) {
-               /* non PAN-compression, fetch source address id */
-               if (!(mac_cb_is_intrapan(skb))) {
-                       u16 *sa_pan = &(mac_cb(skb)->sa.pan_id);
+               pr_debug("seclevel %i\n", hdr.sec.level);
 
-                       if (mac802154_fetch_skb_u16(skb, sa_pan))
-                               goto err;
-               }
-
-               pr_debug("source PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
-
-               if (mac_cb(skb)->sa.addr_type == IEEE802154_ADDR_SHORT) {
-                       u16 *sa = &(mac_cb(skb)->sa.short_addr);
-
-                       if (mac802154_fetch_skb_u16(skb, sa))
-                               goto err;
+               switch (hdr.sec.key_id_mode) {
+               case IEEE802154_SCF_KEY_IMPLICIT:
+                       pr_debug("implicit key\n");
+                       break;
 
-                       pr_debug("source address is short: %04x\n",
-                                mac_cb(skb)->sa.short_addr);
-               } else {
-                       if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
-                               goto err;
+               case IEEE802154_SCF_KEY_INDEX:
+                       pr_debug("key %02x\n", hdr.sec.key_id);
+                       break;
 
-                       mac802154_haddr_copy_swap(mac_cb(skb)->sa.hwaddr,
-                                                 skb->data);
-                       skb_pull(skb, IEEE802154_ADDR_LEN);
+               case IEEE802154_SCF_KEY_SHORT_INDEX:
+                       pr_debug("key %04x:%04x %02x\n",
+                                le32_to_cpu(hdr.sec.short_src) >> 16,
+                                le32_to_cpu(hdr.sec.short_src) & 0xffff,
+                                hdr.sec.key_id);
+                       break;
 
-                       pr_debug("source address is hardware\n");
+               case IEEE802154_SCF_KEY_HW_INDEX:
+                       key = swab64((__force u64) hdr.sec.extended_src);
+                       pr_debug("key source %8phC %02x\n", &key,
+                                hdr.sec.key_id);
+                       break;
                }
+
+               return -EINVAL;
        }
 
        return 0;
-err:
-       return -EINVAL;
 }
 
 void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
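
The wpan.c rewrite above replaces the hand-rolled frame-control and address marshalling (mac802154_fetch_skb_u8/u16 and the byte-swapping address copy) with the shared ieee802154_hdr_push()/ieee802154_hdr_pull()/ieee802154_hdr_peek_addrs() helpers from <net/ieee802154_netdev.h>. Judging from the hunks, their contract is: fill a struct ieee802154_hdr, push writes it in front of the skb data and returns the header length (negative on error), pull consumes and decodes the header, and peek_addrs extracts only the addressing fields without consuming. A condensed kernel-style sketch of the transmit side, following the header_create hunk ('type' stands in for mac_cb_type(skb); MTU and security handling are omitted):

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>
    #include <net/ieee802154_netdev.h>

    static int demo_push_hdr(struct sk_buff *skb, u8 type,
                             const struct ieee802154_addr *dst,
                             const struct ieee802154_addr *src)
    {
            struct ieee802154_hdr hdr;
            int hlen;

            memset(&hdr, 0, sizeof(hdr));
            hdr.fc.type = type;
            hdr.source = *src;
            hdr.dest = *dst;

            hlen = ieee802154_hdr_push(skb, &hdr);
            if (hlen < 0)
                    return -EINVAL;

            skb_reset_mac_header(skb);
            skb->mac_len = hlen;
            return hlen;
    }
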
index 44cd4f58adf08b914b871d7399942fb58168a623..2f7f5c32c6f90a0eb376d7921aecf167564329ce 100644 (file)
@@ -61,6 +61,15 @@ config IP_SET_HASH_IP
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_IPMARK
+       tristate "hash:ip,mark set support"
+       depends on IP_SET
+       help
+         This option adds the hash:ip,mark set type support, by which one
+         can store IPv4/IPv6 address and mark pairs.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_IPPORT
        tristate "hash:ip,port set support"
        depends on IP_SET
index 44b2d38476faeb75a95c5ac8348330a99aa866fd..231f10196cb906fd4cbbe5f58263c5a3ba97886a 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
 
 # hash types
 obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPMARK) += ip_set_hash_ipmark.o
 obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
 obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
 obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
index de770ec39e5112e4cbf5b4b90629144bad1d957b..117208321f16997af9f24bee1f86519f8f8fa26e 100644 (file)
@@ -54,10 +54,10 @@ MODULE_DESCRIPTION("core IP set support");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 
 /* When the nfnl mutex is held: */
-#define nfnl_dereference(p)            \
+#define ip_set_dereference(p)          \
        rcu_dereference_protected(p, 1)
-#define nfnl_set(inst, id)                     \
-       nfnl_dereference((inst)->ip_set_list)[id]
+#define ip_set(inst, id)               \
+       ip_set_dereference((inst)->ip_set_list)[id]
 
 /*
  * The set types are implemented in modules and registered set types
@@ -368,6 +368,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
 
        if (tb[IPSET_ATTR_CADT_FLAGS])
                cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+       if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
+               set->flags |= IPSET_CREATE_FLAG_FORCEADD;
        for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
                if (!add_extension(id, cadt_flags, tb))
                        continue;
@@ -510,7 +512,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 
        if (opt->dim < set->type->dimension ||
            !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
-               return 0;
+               return -IPSET_ERR_TYPE_MISMATCH;
 
        write_lock_bh(&set->lock);
        ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
@@ -533,7 +535,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 
        if (opt->dim < set->type->dimension ||
            !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
-               return 0;
+               return -IPSET_ERR_TYPE_MISMATCH;
 
        write_lock_bh(&set->lock);
        ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
@@ -640,7 +642,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
                return IPSET_INVALID_ID;
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       set = nfnl_set(inst, index);
+       set = ip_set(inst, index);
        if (set)
                __ip_set_get(set);
        else
@@ -666,7 +668,7 @@ ip_set_nfnl_put(struct net *net, ip_set_id_t index)
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
        if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
-               set = nfnl_set(inst, index);
+               set = ip_set(inst, index);
                if (set != NULL)
                        __ip_set_put(set);
        }
@@ -734,7 +736,7 @@ find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
 
        *id = IPSET_INVALID_ID;
        for (i = 0; i < inst->ip_set_max; i++) {
-               set = nfnl_set(inst, i);
+               set = ip_set(inst, i);
                if (set != NULL && STREQ(set->name, name)) {
                        *id = i;
                        break;
@@ -760,7 +762,7 @@ find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
 
        *index = IPSET_INVALID_ID;
        for (i = 0;  i < inst->ip_set_max; i++) {
-               s = nfnl_set(inst, i);
+               s = ip_set(inst, i);
                if (s == NULL) {
                        if (*index == IPSET_INVALID_ID)
                                *index = i;
@@ -883,7 +885,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                if (!list)
                        goto cleanup;
                /* nfnl mutex is held, both lists are valid */
-               tmp = nfnl_dereference(inst->ip_set_list);
+               tmp = ip_set_dereference(inst->ip_set_list);
                memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
                rcu_assign_pointer(inst->ip_set_list, list);
                /* Make sure all current packets have passed through */
@@ -900,7 +902,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * Finally! Add our shiny new set to the list, and be done.
         */
        pr_debug("create: '%s' created with index %u!\n", set->name, index);
-       nfnl_set(inst, index) = set;
+       ip_set(inst, index) = set;
 
        return ret;
 
@@ -925,10 +927,10 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
 static void
 ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
 {
-       struct ip_set *set = nfnl_set(inst, index);
+       struct ip_set *set = ip_set(inst, index);
 
        pr_debug("set: %s\n",  set->name);
-       nfnl_set(inst, index) = NULL;
+       ip_set(inst, index) = NULL;
 
        /* Must call it without holding any lock */
        set->variant->destroy(set);
@@ -962,7 +964,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
        read_lock_bh(&ip_set_ref_lock);
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < inst->ip_set_max; i++) {
-                       s = nfnl_set(inst, i);
+                       s = ip_set(inst, i);
                        if (s != NULL && s->ref) {
                                ret = -IPSET_ERR_BUSY;
                                goto out;
@@ -970,7 +972,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
                }
                read_unlock_bh(&ip_set_ref_lock);
                for (i = 0; i < inst->ip_set_max; i++) {
-                       s = nfnl_set(inst, i);
+                       s = ip_set(inst, i);
                        if (s != NULL)
                                ip_set_destroy_set(inst, i);
                }
@@ -1020,7 +1022,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
 
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < inst->ip_set_max; i++) {
-                       s = nfnl_set(inst, i);
+                       s = ip_set(inst, i);
                        if (s != NULL)
                                ip_set_flush_set(s);
                }
@@ -1074,7 +1076,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 
        name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
        for (i = 0; i < inst->ip_set_max; i++) {
-               s = nfnl_set(inst, i);
+               s = ip_set(inst, i);
                if (s != NULL && STREQ(s->name, name2)) {
                        ret = -IPSET_ERR_EXIST_SETNAME2;
                        goto out;
@@ -1134,8 +1136,8 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 
        write_lock_bh(&ip_set_ref_lock);
        swap(from->ref, to->ref);
-       nfnl_set(inst, from_id) = to;
-       nfnl_set(inst, to_id) = from;
+       ip_set(inst, from_id) = to;
+       ip_set(inst, to_id) = from;
        write_unlock_bh(&ip_set_ref_lock);
 
        return 0;
@@ -1157,7 +1159,7 @@ ip_set_dump_done(struct netlink_callback *cb)
        struct ip_set_net *inst = (struct ip_set_net *)cb->args[IPSET_CB_NET];
        if (cb->args[IPSET_CB_ARG0]) {
                pr_debug("release set %s\n",
-                        nfnl_set(inst, cb->args[IPSET_CB_INDEX])->name);
+                        ip_set(inst, cb->args[IPSET_CB_INDEX])->name);
                __ip_set_put_byindex(inst,
                        (ip_set_id_t) cb->args[IPSET_CB_INDEX]);
        }
@@ -1254,7 +1256,7 @@ dump_last:
                 dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
        for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
                index = (ip_set_id_t) cb->args[IPSET_CB_INDEX];
-               set = nfnl_set(inst, index);
+               set = ip_set(inst, index);
                if (set == NULL) {
                        if (dump_type == DUMP_ONE) {
                                ret = -ENOENT;
@@ -1332,7 +1334,7 @@ next_set:
 release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[IPSET_CB_ARG0]) {
-               pr_debug("release set %s\n", nfnl_set(inst, index)->name);
+               pr_debug("release set %s\n", ip_set(inst, index)->name);
                __ip_set_put_byindex(inst, index);
                cb->args[IPSET_CB_ARG0] = 0;
        }
@@ -1887,7 +1889,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
                find_set_and_id(inst, req_get->set.name, &id);
                req_get->set.index = id;
                if (id != IPSET_INVALID_ID)
-                       req_get->family = nfnl_set(inst, id)->family;
+                       req_get->family = ip_set(inst, id)->family;
                nfnl_unlock(NFNL_SUBSYS_IPSET);
                goto copy;
        }
@@ -1901,7 +1903,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
                        goto done;
                }
                nfnl_lock(NFNL_SUBSYS_IPSET);
-               set = nfnl_set(inst, req_get->set.index);
+               set = ip_set(inst, req_get->set.index);
                strncpy(req_get->set.name, set ? set->name : "",
                        IPSET_MAXNAMELEN);
                nfnl_unlock(NFNL_SUBSYS_IPSET);
@@ -1945,7 +1947,6 @@ ip_set_net_init(struct net *net)
                return -ENOMEM;
        inst->is_deleted = 0;
        rcu_assign_pointer(inst->ip_set_list, list);
-       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
        return 0;
 }
 
@@ -1960,7 +1961,7 @@ ip_set_net_exit(struct net *net)
        inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
 
        for (i = 0; i < inst->ip_set_max; i++) {
-               set = nfnl_set(inst, i);
+               set = ip_set(inst, i);
                if (set != NULL)
                        ip_set_destroy_set(inst, i);
        }
@@ -1996,6 +1997,7 @@ ip_set_init(void)
                nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
                return ret;
        }
+       pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL);
        return 0;
 }
 
index be6932ad3a8626d6c06e99091b32f540b7afd338..61c7fb052802e7c0cb291289ee29c484b4f84179 100644 (file)
@@ -263,6 +263,9 @@ struct htype {
        u32 maxelem;            /* max elements in the hash */
        u32 elements;           /* current element (vs timeout) */
        u32 initval;            /* random jhash init value */
+#ifdef IP_SET_HASH_WITH_MARKMASK
+       u32 markmask;           /* mask applied to the packet mark before storing */
+#endif
        struct timer_list gc;   /* garbage collection when timeout enabled */
        struct mtype_elem next; /* temporary storage for uadd */
 #ifdef IP_SET_HASH_WITH_MULTI
@@ -453,6 +456,9 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
               a->timeout == b->timeout &&
 #ifdef IP_SET_HASH_WITH_NETMASK
               x->netmask == y->netmask &&
+#endif
+#ifdef IP_SET_HASH_WITH_MARKMASK
+              x->markmask == y->markmask &&
 #endif
               a->extensions == b->extensions;
 }
@@ -627,6 +633,18 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        bool flag_exist = flags & IPSET_FLAG_EXIST;
        u32 key, multi = 0;
 
+       if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set)) {
+               rcu_read_lock_bh();
+               t = rcu_dereference_bh(h->table);
+               key = HKEY(value, h->initval, t->htable_bits);
+               n = hbucket(t, key);
+               if (n->pos) {
+                       /* Choosing the first entry in the array to replace */
+                       j = 0;
+                       goto reuse_slot;
+               }
+               rcu_read_unlock_bh();
+       }
        if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
                /* FIXME: when set is full, we slow down here */
                mtype_expire(set, h, NLEN(set->family), set->dsize);
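
For context only, not part of the patch: a minimal user-space C sketch of the forceadd idea added above. When the set is already at maxelem, the add path reuses the first slot of the bucket the new element hashes to instead of rejecting it. Bucket size and values here are made up for illustration.

/* Illustrative only: forceadd-style insertion into a full fixed-size bucket. */
#include <stdio.h>
#include <string.h>

#define BUCKET_SLOTS 4

struct bucket {
	unsigned int pos;			/* number of occupied slots */
	unsigned int elem[BUCKET_SLOTS];
};

/* Insert 'value'; when the bucket is full, overwrite slot 0 (forceadd). */
static void bucket_add_forceadd(struct bucket *b, unsigned int value)
{
	unsigned int j;

	if (b->pos < BUCKET_SLOTS) {
		b->elem[b->pos++] = value;
		return;
	}
	/* Bucket full: reuse the first entry, as the patch above does. */
	j = 0;
	b->elem[j] = value;
}

int main(void)
{
	struct bucket b;
	unsigned int i;

	memset(&b, 0, sizeof(b));
	for (i = 1; i <= 6; i++)
		bucket_add_forceadd(&b, i);
	for (i = 0; i < BUCKET_SLOTS; i++)
		printf("slot %u: %u\n", i, b.elem[i]);
	return 0;
}
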
@@ -907,6 +925,10 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
        if (h->netmask != HOST_MASK &&
            nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
                goto nla_put_failure;
+#endif
+#ifdef IP_SET_HASH_WITH_MARKMASK
+       if (nla_put_u32(skb, IPSET_ATTR_MARKMASK, h->markmask))
+               goto nla_put_failure;
 #endif
        if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
@@ -1016,6 +1038,9 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
                            struct nlattr *tb[], u32 flags)
 {
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+#ifdef IP_SET_HASH_WITH_MARKMASK
+       u32 markmask;
+#endif
        u8 hbits;
 #ifdef IP_SET_HASH_WITH_NETMASK
        u8 netmask;
@@ -1026,6 +1051,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
 
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
+
+#ifdef IP_SET_HASH_WITH_MARKMASK
+       markmask = 0xffffffff;
+#endif
 #ifdef IP_SET_HASH_WITH_NETMASK
        netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
        pr_debug("Create set %s with family %s\n",
@@ -1034,6 +1063,9 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
 
        if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+#ifdef IP_SET_HASH_WITH_MARKMASK
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_MARKMASK) ||
+#endif
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
                return -IPSET_ERR_PROTOCOL;
@@ -1057,6 +1089,14 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
                        return -IPSET_ERR_INVALID_NETMASK;
        }
 #endif
+#ifdef IP_SET_HASH_WITH_MARKMASK
+       if (tb[IPSET_ATTR_MARKMASK]) {
+               markmask = ntohl(nla_get_u32(tb[IPSET_ATTR_MARKMASK]));
+
+               if (markmask == 0)
+                       return -IPSET_ERR_INVALID_MARKMASK;
+       }
+#endif
 
        hsize = sizeof(*h);
 #ifdef IP_SET_HASH_WITH_NETS
@@ -1070,6 +1110,9 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        h->maxelem = maxelem;
 #ifdef IP_SET_HASH_WITH_NETMASK
        h->netmask = netmask;
+#endif
+#ifdef IP_SET_HASH_WITH_MARKMASK
+       h->markmask = markmask;
 #endif
        get_random_bytes(&h->initval, sizeof(h->initval));
        set->timeout = IPSET_NO_TIMEOUT;
index e65fc2423d56dd2b21cee513786eec41ceabefc9..dd40607f878e28e8c5c411a24fb8e36ef7db5d46 100644 (file)
@@ -25,7 +25,8 @@
 
 #define IPSET_TYPE_REV_MIN     0
 /*                             1          Counters support */
-#define IPSET_TYPE_REV_MAX     2       /* Comments support */
+/*                             2          Comments support */
+#define IPSET_TYPE_REV_MAX     3       /* Forceadd support */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
new file mode 100644 (file)
index 0000000..4eff0a2
--- /dev/null
@@ -0,0 +1,321 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ * Copyright (C) 2013 Smoothwall Ltd. <vytas.dauksa@smoothwall.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,mark type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN     0
+#define IPSET_TYPE_REV_MAX     1       /* Forceadd support */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vytas Dauksa <vytas.dauksa@smoothwall.net>");
+IP_SET_MODULE_DESC("hash:ip,mark", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:ip,mark");
+
+/* Type specific function prefix */
+#define HTYPE          hash_ipmark
+#define IP_SET_HASH_WITH_MARKMASK
+
+/* IPv4 variant */
+
+/* Member elements */
+struct hash_ipmark4_elem {
+       __be32 ip;
+       __u32 mark;
+};
+
+/* Common functions */
+
+static inline bool
+hash_ipmark4_data_equal(const struct hash_ipmark4_elem *ip1,
+                       const struct hash_ipmark4_elem *ip2,
+                       u32 *multi)
+{
+       return ip1->ip == ip2->ip &&
+              ip1->mark == ip2->mark;
+}
+
+static bool
+hash_ipmark4_data_list(struct sk_buff *skb,
+                      const struct hash_ipmark4_elem *data)
+{
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
+           nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_ipmark4_data_next(struct hash_ipmark4_elem *next,
+                      const struct hash_ipmark4_elem *d)
+{
+       next->ip = d->ip;
+}
+
+#define MTYPE           hash_ipmark4
+#define PF              4
+#define HOST_MASK       32
+#define HKEY_DATALEN   sizeof(struct hash_ipmark4_elem)
+#include "ip_set_hash_gen.h"
+
+static int
+hash_ipmark4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 const struct xt_action_param *par,
+                 enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_ipmark *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipmark4_elem e = { };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       e.mark = skb->mark;
+       e.mark &= h->markmask;
+
+       ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_ipmark *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipmark4_elem e = { };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip, ip_to = 0;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+       e.mark &= h->markmask;
+
+       if (adt == IPSET_TEST ||
+           !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR])) {
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip_to = ip = ntohl(e.ip);
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+       } else if (tb[IPSET_ATTR_CIDR]) {
+               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+               if (!cidr || cidr > 32)
+                       return -IPSET_ERR_INVALID_CIDR;
+               ip_set_mask_from_to(ip, ip_to, cidr);
+       }
+
+       if (retried)
+               ip = ntohl(h->next.ip);
+       for (; !before(ip_to, ip); ip++) {
+               e.ip = htonl(ip);
+               ret = adtfn(set, &e, &ext, &ext, flags);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+/* IPv6 variant */
+
+struct hash_ipmark6_elem {
+       union nf_inet_addr ip;
+       __u32 mark;
+};
+
+/* Common functions */
+
+static inline bool
+hash_ipmark6_data_equal(const struct hash_ipmark6_elem *ip1,
+                       const struct hash_ipmark6_elem *ip2,
+                       u32 *multi)
+{
+       return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) &&
+              ip1->mark == ip2->mark;
+}
+
+static bool
+hash_ipmark6_data_list(struct sk_buff *skb,
+                      const struct hash_ipmark6_elem *data)
+{
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
+           nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_ipmark6_data_next(struct hash_ipmark4_elem *next,
+                      const struct hash_ipmark6_elem *d)
+{
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+#undef HKEY_DATALEN
+
+#define MTYPE          hash_ipmark6
+#define PF             6
+#define HOST_MASK      128
+#define HKEY_DATALEN   sizeof(struct hash_ipmark6_elem)
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+
+static int
+hash_ipmark6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                 const struct xt_action_param *par,
+                 enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_ipmark *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipmark6_elem e = { };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       e.mark = skb->mark;
+       e.mark &= h->markmask;
+
+       ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
+                 enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_ipmark *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_ipmark6_elem e = { };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+                    tb[IPSET_ATTR_IP_TO] ||
+                    tb[IPSET_ATTR_CIDR]))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+       e.mark &= h->markmask;
+
+       if (adt == IPSET_TEST) {
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ret = adtfn(set, &e, &ext, &ext, flags);
+       if (ret && !ip_set_eexist(ret, flags))
+               return ret;
+       else
+               ret = 0;
+
+       return ret;
+}
+
+static struct ip_set_type hash_ipmark_type __read_mostly = {
+       .name           = "hash:ip,mark",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_MARK,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = NFPROTO_UNSPEC,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
+       .create         = hash_ipmark_create,
+       .create_policy  = {
+               [IPSET_ATTR_MARKMASK]   = { .type = NLA_U32 },
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_MARK]       = { .type = NLA_U32 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+               [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
+               [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_ipmark_init(void)
+{
+       return ip_set_type_register(&hash_ipmark_type);
+}
+
+static void __exit
+hash_ipmark_fini(void)
+{
+       ip_set_type_unregister(&hash_ipmark_type);
+}
+
+module_init(hash_ipmark_init);
+module_exit(hash_ipmark_fini);
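
Not part of the patch: the new hash:ip,mark type keys each element on the pair (IP address, packet mark & markmask), so marks that differ only in masked-out bits map to the same entry. A hedged user-space sketch of that keying rule, with made-up mask and mark values:

/* Illustrative only: how (ip, mark & markmask) collapses marks into one key. */
#include <stdint.h>
#include <stdio.h>

struct ipmark_key {
	uint32_t ip;	/* IPv4 address, host byte order for simplicity */
	uint32_t mark;	/* packet mark after masking */
};

static struct ipmark_key make_key(uint32_t ip, uint32_t mark, uint32_t markmask)
{
	struct ipmark_key k = { .ip = ip, .mark = mark & markmask };
	return k;
}

int main(void)
{
	uint32_t markmask = 0x0000ff00;	/* hypothetical mask */
	struct ipmark_key a = make_key(0xc0a80101, 0x00001234, markmask);
	struct ipmark_key b = make_key(0xc0a80101, 0x0000125f, markmask);

	/* Both marks reduce to 0x1200, so they refer to the same set element. */
	printf("a=(%08x,%08x) b=(%08x,%08x) same=%d\n",
	       a.ip, a.mark, b.ip, b.mark,
	       a.ip == b.ip && a.mark == b.mark);
	return 0;
}
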
index 525a595dd1fe4bf0efe6db7ca9cc06d995c66430..7597b82a8b033bd37c8a5f64dae02e8aa9137a19 100644 (file)
@@ -27,7 +27,8 @@
 #define IPSET_TYPE_REV_MIN     0
 /*                             1    SCTP and UDPLITE support added */
 /*                             2    Counters support added */
-#define IPSET_TYPE_REV_MAX     3 /* Comments support added */
+/*                             3    Comments support added */
+#define IPSET_TYPE_REV_MAX     4 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index f5636631466eb3ee98509e8b832e46da4ad9a417..672655ffd573404b2f2f5bf097b7a8d01f0c4c14 100644 (file)
@@ -27,7 +27,8 @@
 #define IPSET_TYPE_REV_MIN     0
 /*                             1    SCTP and UDPLITE support added */
 /*                             2    Counters support added */
-#define IPSET_TYPE_REV_MAX     3 /* Comments support added */
+/*                             3    Comments support added */
+#define IPSET_TYPE_REV_MAX     4 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index 5d87fe8a41ffa4888a70b6d91897360e30c253bc..7308d84f9277813f16351b692f75218ff1a451b3 100644 (file)
@@ -29,7 +29,8 @@
 /*                             2    Range as input support for IPv4 added */
 /*                             3    nomatch flag support added */
 /*                             4    Counters support added */
-#define IPSET_TYPE_REV_MAX     5 /* Comments support added */
+/*                             5    Comments support added */
+#define IPSET_TYPE_REV_MAX     6 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index 8295cf4f9fdcfdb3d3da4b72792add9fc83156ad..4c7d495783a3aefa9447d4b37114bd3715928b0a 100644 (file)
@@ -26,7 +26,8 @@
 /*                             1    Range as input support for IPv4 added */
 /*                             2    nomatch flag support added */
 /*                             3    Counters support added */
-#define IPSET_TYPE_REV_MAX     4 /* Comments support added */
+/*                             4    Comments support added */
+#define IPSET_TYPE_REV_MAX     5 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index b827a0f1f3510bc0fa4e6b0a498738806ac62e81..db2606805b3575b91a87cded299222c571fbf6ba 100644 (file)
@@ -27,7 +27,8 @@
 /*                             1    nomatch flag support added */
 /*                             2    /0 support added */
 /*                             3    Counters support added */
-#define IPSET_TYPE_REV_MAX     4 /* Comments support added */
+/*                             4    Comments support added */
+#define IPSET_TYPE_REV_MAX     5 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index 6226803fc490ce33c53994c344a1c034b9b0cce9..3e99987e4bf248f6d6085118dba52a4c24ee108a 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
 #define IPSET_TYPE_REV_MIN     0
-#define IPSET_TYPE_REV_MAX     0
+#define IPSET_TYPE_REV_MAX     1       /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -112,10 +112,10 @@ hash_netnet4_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -334,10 +334,10 @@ hash_netnet6_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
index 7097fb0141bf6e1363ca0b0342451e66c34773b4..1c645fbd09c7d6bcb337b90977ae2536b2ec9ebd 100644 (file)
@@ -28,7 +28,8 @@
 /*                             2    Range as input support for IPv4 added */
 /*                             3    nomatch flag support added */
 /*                             4    Counters support added */
-#define IPSET_TYPE_REV_MAX     5 /* Comments support added */
+/*                             5    Comments support added */
+#define IPSET_TYPE_REV_MAX     6 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index 703d1192a6a225214f1ffd28c6ab926110942c32..c0d2ba73f8b2394995bba3737a833cc47138d581 100644 (file)
@@ -25,7 +25,8 @@
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
 #define IPSET_TYPE_REV_MIN     0
-#define IPSET_TYPE_REV_MAX     0 /* Comments support added */
+/*                             0    Comments support added */
+#define IPSET_TYPE_REV_MAX     1 /* Forceadd support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
index 4f29fa97044b18523967cdc8b8c6ab272376a2fb..04d15fdc99eec407545c2c598c7a158de0daade9 100644 (file)
@@ -7,8 +7,8 @@
 
 #define E(a, b, c, d) \
        {.ip6 = { \
-               __constant_htonl(a), __constant_htonl(b), \
-               __constant_htonl(c), __constant_htonl(d), \
+               htonl(a), htonl(b), \
+               htonl(c), htonl(d), \
        } }
 
 /*
index 35be035ee0cec79b5b797efb46c287661b8dbc93..c42e83d2751cdc2dd1324a3b9db3e36b615d2fb5 100644 (file)
@@ -2177,10 +2177,10 @@ static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
                __u64 inbytes, outbytes;
 
                do {
-                       start = u64_stats_fetch_begin_bh(&u->syncp);
+                       start = u64_stats_fetch_begin_irq(&u->syncp);
                        inbytes = u->ustats.inbytes;
                        outbytes = u->ustats.outbytes;
-               } while (u64_stats_fetch_retry_bh(&u->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&u->syncp, start));
 
                seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
                           i, u->ustats.conns, u->ustats.inpkts,
@@ -3580,7 +3580,7 @@ out:
 }
 
 
-static const struct genl_ops ip_vs_genl_ops[] __read_mostly = {
+static const struct genl_ops ip_vs_genl_ops[] = {
        {
                .cmd    = IPVS_CMD_NEW_SERVICE,
                .flags  = GENL_ADMIN_PERM,
index ca056a331e60b23f1b6e542fd640b2b996878335..547ff33c1efdb0cb92f3f8890640a77bba3a73b5 100644 (file)
@@ -238,7 +238,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
 
        spin_lock_bh(&svc->sched_lock);
        tbl->dead = 1;
-       for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
+       for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
                hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
                        ip_vs_lblc_del(en);
                        atomic_dec(&tbl->entries);
@@ -265,7 +265,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
        unsigned long now = jiffies;
        int i, j;
 
-       for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
+       for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
                spin_lock(&svc->sched_lock);
@@ -321,7 +321,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;
 
-       for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
+       for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;
 
                spin_lock(&svc->sched_lock);
@@ -340,7 +340,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
        tbl->rover = j;
 
   out:
-       mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
+       mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
 }
 
 
@@ -363,7 +363,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
        /*
         *    Initialize the hash buckets
         */
-       for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
+       for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_HLIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
@@ -536,8 +536,7 @@ out:
 /*
  *      IPVS LBLC Scheduler structure
  */
-static struct ip_vs_scheduler ip_vs_lblc_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_lblc_scheduler = {
        .name =                 "lblc",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
index 356bef519fe5b781f6225557f64186c0075852a1..6dba48efe01e83bebda511e56410b403a8d4b968 100644 (file)
@@ -60,8 +60,59 @@ int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      const struct nlattr *attr) __read_mostly;
 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
 
-DEFINE_SPINLOCK(nf_conntrack_lock);
-EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+EXPORT_SYMBOL_GPL(nf_conntrack_locks);
+
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
+
+static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
+{
+       h1 %= CONNTRACK_LOCKS;
+       h2 %= CONNTRACK_LOCKS;
+       spin_unlock(&nf_conntrack_locks[h1]);
+       if (h1 != h2)
+               spin_unlock(&nf_conntrack_locks[h2]);
+}
+
+/* return true if we need to recompute hashes (in case hash table was resized) */
+static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
+                                    unsigned int h2, unsigned int sequence)
+{
+       h1 %= CONNTRACK_LOCKS;
+       h2 %= CONNTRACK_LOCKS;
+       if (h1 <= h2) {
+               spin_lock(&nf_conntrack_locks[h1]);
+               if (h1 != h2)
+                       spin_lock_nested(&nf_conntrack_locks[h2],
+                                        SINGLE_DEPTH_NESTING);
+       } else {
+               spin_lock(&nf_conntrack_locks[h2]);
+               spin_lock_nested(&nf_conntrack_locks[h1],
+                                SINGLE_DEPTH_NESTING);
+       }
+       if (read_seqcount_retry(&net->ct.generation, sequence)) {
+               nf_conntrack_double_unlock(h1, h2);
+               return true;
+       }
+       return false;
+}
+
+static void nf_conntrack_all_lock(void)
+{
+       int i;
+
+       for (i = 0; i < CONNTRACK_LOCKS; i++)
+               spin_lock_nested(&nf_conntrack_locks[i], i);
+}
+
+static void nf_conntrack_all_unlock(void)
+{
+       int i;
+
+       for (i = 0; i < CONNTRACK_LOCKS; i++)
+               spin_unlock(&nf_conntrack_locks[i]);
+}
 
 unsigned int nf_conntrack_htable_size __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
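
Not part of the patch: nf_conntrack_double_lock() above always takes the lower-indexed lock first so that two CPUs locking the same pair of hash chains in opposite order cannot deadlock. A minimal user-space sketch of that ordering rule with pthread mutexes; the lock-array size and indices are invented for the example.

/* Illustrative only: always lock the lower bucket index first so threads
 * locking (h1, h2) and (h2, h1) cannot deadlock against each other.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_LOCKS 16

static pthread_mutex_t locks[NR_LOCKS];

static void double_lock(unsigned int h1, unsigned int h2)
{
	h1 %= NR_LOCKS;
	h2 %= NR_LOCKS;
	if (h1 > h2) {				/* order by index */
		unsigned int tmp = h1;
		h1 = h2;
		h2 = tmp;
	}
	pthread_mutex_lock(&locks[h1]);
	if (h1 != h2)
		pthread_mutex_lock(&locks[h2]);
}

static void double_unlock(unsigned int h1, unsigned int h2)
{
	h1 %= NR_LOCKS;
	h2 %= NR_LOCKS;
	pthread_mutex_unlock(&locks[h1]);
	if (h1 != h2)
		pthread_mutex_unlock(&locks[h2]);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NR_LOCKS; i++)
		pthread_mutex_init(&locks[i], NULL);
	double_lock(5, 21);	/* 21 % 16 == 5, so only one lock is taken */
	printf("both buckets held\n");
	double_unlock(5, 21);
	return 0;
}
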
@@ -192,6 +243,50 @@ clean_from_lists(struct nf_conn *ct)
        nf_ct_remove_expectations(ct);
 }
 
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_dying_list(struct nf_conn *ct)
+{
+       struct ct_pcpu *pcpu;
+
+       /* add this conntrack to the (per cpu) dying list */
+       ct->cpu = smp_processor_id();
+       pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+       spin_lock(&pcpu->lock);
+       hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+                            &pcpu->dying);
+       spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
+{
+       struct ct_pcpu *pcpu;
+
+       /* add this conntrack to the (per cpu) unconfirmed list */
+       ct->cpu = smp_processor_id();
+       pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+       spin_lock(&pcpu->lock);
+       hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+                            &pcpu->unconfirmed);
+       spin_unlock(&pcpu->lock);
+}
+
+/* must be called with local_bh_disable */
+static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+{
+       struct ct_pcpu *pcpu;
+
+       /* We overload the first tuple to link into the unconfirmed or dying list. */
+       pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
+
+       spin_lock(&pcpu->lock);
+       BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
+       hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+       spin_unlock(&pcpu->lock);
+}
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
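
Not part of the patch: a hedged sketch of the per-cpu list idea introduced above. Each CPU gets its own list head and spinlock, so queueing an unconfirmed or dying conntrack only contends with work on the same CPU instead of a single global lock. The sketch uses pthread mutexes and fakes the CPU id.

/* Illustrative only: one list head + lock per "CPU" instead of one global lock. */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct node {
	int id;
	struct node *next;
};

struct pcpu_list {
	pthread_mutex_t lock;
	struct node *head;
};

static struct pcpu_list lists[NR_CPUS];

static void add_to_pcpu_list(struct node *n, int cpu)
{
	struct pcpu_list *pcpu = &lists[cpu % NR_CPUS];

	pthread_mutex_lock(&pcpu->lock);
	n->next = pcpu->head;		/* push at the head, like hlist_add_head */
	pcpu->head = n;
	pthread_mutex_unlock(&pcpu->lock);
}

int main(void)
{
	static struct node a = { .id = 1 }, b = { .id = 2 };
	int i;

	for (i = 0; i < NR_CPUS; i++)
		pthread_mutex_init(&lists[i].lock, NULL);
	add_to_pcpu_list(&a, 0);	/* pretend we ran on CPU 0 */
	add_to_pcpu_list(&b, 3);	/* pretend we ran on CPU 3 */
	printf("cpu0 head id: %d, cpu3 head id: %d\n",
	       lists[0].head->id, lists[3].head->id);
	return 0;
}
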
@@ -203,9 +298,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
-       /* To make sure we don't get any weird locking issues here:
-        * destroy_conntrack() MUST NOT be called with a write lock
-        * to nf_conntrack_lock!!! -HW */
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
@@ -213,19 +305,18 @@ destroy_conntrack(struct nf_conntrack *nfct)
 
        rcu_read_unlock();
 
-       spin_lock_bh(&nf_conntrack_lock);
+       local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
-        * too. */
+        * too.
+        */
        nf_ct_remove_expectations(ct);
 
-       /* We overload first tuple to link into unconfirmed or dying list.*/
-       BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
-       hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
        NF_CT_STAT_INC(net, delete);
-       spin_unlock_bh(&nf_conntrack_lock);
+       local_bh_enable();
 
        if (ct->master)
                nf_ct_put(ct->master);
@@ -237,17 +328,28 @@ destroy_conntrack(struct nf_conntrack *nfct)
 static void nf_ct_delete_from_lists(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
+       unsigned int hash, reply_hash;
+       u16 zone = nf_ct_zone(ct);
+       unsigned int sequence;
 
        nf_ct_helper_destroy(ct);
-       spin_lock_bh(&nf_conntrack_lock);
-       /* Inside lock so preempt is disabled on module removal path.
-        * Otherwise we can get spurious warnings. */
-       NF_CT_STAT_INC(net, delete_list);
+
+       local_bh_disable();
+       do {
+               sequence = read_seqcount_begin(&net->ct.generation);
+               hash = hash_conntrack(net, zone,
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+               reply_hash = hash_conntrack(net, zone,
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+
        clean_from_lists(ct);
-       /* add this conntrack to the dying list */
-       hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-                            &net->ct.dying);
-       spin_unlock_bh(&nf_conntrack_lock);
+       nf_conntrack_double_unlock(hash, reply_hash);
+
+       nf_ct_add_to_dying_list(ct);
+
+       NF_CT_STAT_INC(net, delete_list);
+       local_bh_enable();
 }
 
 static void death_by_event(unsigned long ul_conntrack)
@@ -331,8 +433,6 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
  * Warning :
  * - Caller must take a reference on returned object
  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
- * OR
- * - Caller must lock nf_conntrack_lock before calling this function
  */
 static struct nf_conntrack_tuple_hash *
 ____nf_conntrack_find(struct net *net, u16 zone,
@@ -408,32 +508,36 @@ EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
-                                      unsigned int repl_hash)
+                                      unsigned int reply_hash)
 {
        struct net *net = nf_ct_net(ct);
 
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &net->ct.hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
-                          &net->ct.hash[repl_hash]);
+                          &net->ct.hash[reply_hash]);
 }
 
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
        struct net *net = nf_ct_net(ct);
-       unsigned int hash, repl_hash;
+       unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        u16 zone;
+       unsigned int sequence;
 
        zone = nf_ct_zone(ct);
-       hash = hash_conntrack(net, zone,
-                             &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-       repl_hash = hash_conntrack(net, zone,
-                                  &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
-       spin_lock_bh(&nf_conntrack_lock);
+       local_bh_disable();
+       do {
+               sequence = read_seqcount_begin(&net->ct.generation);
+               hash = hash_conntrack(net, zone,
+                                     &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+               reply_hash = hash_conntrack(net, zone,
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
@@ -441,7 +545,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+       hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
@@ -451,15 +555,16 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
-       __nf_conntrack_hash_insert(ct, hash, repl_hash);
+       __nf_conntrack_hash_insert(ct, hash, reply_hash);
+       nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
-       spin_unlock_bh(&nf_conntrack_lock);
-
+       local_bh_enable();
        return 0;
 
 out:
+       nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
-       spin_unlock_bh(&nf_conntrack_lock);
+       local_bh_enable();
        return -EEXIST;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
@@ -467,15 +572,22 @@ EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 /* deletion from this larval template list happens via nf_ct_put() */
 void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
 {
+       struct ct_pcpu *pcpu;
+
        __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
        __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
        nf_conntrack_get(&tmpl->ct_general);
 
-       spin_lock_bh(&nf_conntrack_lock);
+       /* add this conntrack to the (per cpu) tmpl list */
+       local_bh_disable();
+       tmpl->cpu = smp_processor_id();
+       pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
+
+       spin_lock(&pcpu->lock);
        /* Overload tuple linked list to put us in template list. */
        hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-                                &net->ct.tmpl);
-       spin_unlock_bh(&nf_conntrack_lock);
+                                &pcpu->tmpl);
+       spin_unlock_bh(&pcpu->lock);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
 
@@ -483,7 +595,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
-       unsigned int hash, repl_hash;
+       unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
@@ -492,6 +604,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        enum ip_conntrack_info ctinfo;
        struct net *net;
        u16 zone;
+       unsigned int sequence;
 
        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);
@@ -504,31 +617,37 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                return NF_ACCEPT;
 
        zone = nf_ct_zone(ct);
-       /* reuse the hash saved before */
-       hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
-       hash = hash_bucket(hash, net);
-       repl_hash = hash_conntrack(net, zone,
-                                  &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+       local_bh_disable();
+
+       do {
+               sequence = read_seqcount_begin(&net->ct.generation);
+               /* reuse the hash saved before */
+               hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
+               hash = hash_bucket(hash, net);
+               reply_hash = hash_conntrack(net, zone,
+                                          &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+       } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
        /* We're not in hash table, and we refuse to set up related
-          connections for unconfirmed conns.  But packet copies and
-          REJECT will give spurious warnings here. */
+        * connections for unconfirmed conns.  But packet copies and
+        * REJECT will give spurious warnings here.
+        */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
 
        /* No external references means no one else could have
-          confirmed us. */
+        * confirmed us.
+        */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
-
-       spin_lock_bh(&nf_conntrack_lock);
-
        /* We have to check the DYING flag inside the lock to prevent
           a race against nf_ct_get_next_corpse() possibly called from
           user context, else we insert an already 'dead' hash, blocking
           further use of that particular connection -JM */
 
        if (unlikely(nf_ct_is_dying(ct))) {
-               spin_unlock_bh(&nf_conntrack_lock);
+               nf_conntrack_double_unlock(hash, reply_hash);
+               local_bh_enable();
                return NF_ACCEPT;
        }
 
@@ -540,14 +659,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
-       hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+       hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
                        goto out;
 
-       /* Remove from unconfirmed list */
-       hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+       nf_ct_del_from_dying_or_unconfirmed_list(ct);
 
        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
@@ -570,9 +688,10 @@ __nf_conntrack_confirm(struct sk_buff *skb)
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
-       __nf_conntrack_hash_insert(ct, hash, repl_hash);
+       __nf_conntrack_hash_insert(ct, hash, reply_hash);
+       nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
-       spin_unlock_bh(&nf_conntrack_lock);
+       local_bh_enable();
 
        help = nfct_help(ct);
        if (help && help->helper)
@@ -583,8 +702,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
        return NF_ACCEPT;
 
 out:
+       nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
-       spin_unlock_bh(&nf_conntrack_lock);
+       local_bh_enable();
        return NF_DROP;
 }
 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
@@ -627,39 +747,48 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static noinline int early_drop(struct net *net, unsigned int hash)
+static noinline int early_drop(struct net *net, unsigned int _hash)
 {
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_nulls_node *n;
-       unsigned int i, cnt = 0;
+       unsigned int i = 0, cnt = 0;
        int dropped = 0;
+       unsigned int hash, sequence;
+       spinlock_t *lockp;
 
-       rcu_read_lock();
-       for (i = 0; i < net->ct.htable_size; i++) {
+       local_bh_disable();
+restart:
+       sequence = read_seqcount_begin(&net->ct.generation);
+       hash = hash_bucket(_hash, net);
+       for (; i < net->ct.htable_size; i++) {
+               lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
+               spin_lock(lockp);
+               if (read_seqcount_retry(&net->ct.generation, sequence)) {
+                       spin_unlock(lockp);
+                       goto restart;
+               }
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
-                       if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
+                       if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
+                           !nf_ct_is_dying(tmp) &&
+                           atomic_inc_not_zero(&tmp->ct_general.use)) {
                                ct = tmp;
+                               break;
+                       }
                        cnt++;
                }
 
-               if (ct != NULL) {
-                       if (likely(!nf_ct_is_dying(ct) &&
-                                  atomic_inc_not_zero(&ct->ct_general.use)))
-                               break;
-                       else
-                               ct = NULL;
-               }
+               hash = (hash + 1) % net->ct.htable_size;
+               spin_unlock(lockp);
 
-               if (cnt >= NF_CT_EVICTION_RANGE)
+               if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;
 
-               hash = (hash + 1) % net->ct.htable_size;
        }
-       rcu_read_unlock();
+       local_bh_enable();
 
        if (!ct)
                return dropped;
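
Not part of the patch: the reworked early_drop() above scans a bounded range of buckets starting from the hash of the new entry and evicts the first non-assured, non-dying conntrack it can grab a reference on. A simplified user-space sketch of that bounded scan; the table size, range, and field names are made up and the refcount/dying checks are omitted.

/* Illustrative only: bounded scan for an evictable entry, starting at the
 * bucket the new entry hashes to (the kernel bounds the walk with
 * NF_CT_EVICTION_RANGE; details simplified here).
 */
#include <stdbool.h>
#include <stdio.h>

#define TABLE_SIZE      16
#define EVICTION_RANGE  8

struct entry {
	bool used;
	bool assured;	/* assured entries are never evicted */
	int id;
};

static struct entry table[TABLE_SIZE];

static struct entry *find_victim(unsigned int start)
{
	unsigned int hash = start % TABLE_SIZE, cnt = 0;

	while (cnt < EVICTION_RANGE) {
		struct entry *e = &table[hash];

		if (e->used && !e->assured)
			return e;	/* roughly-LRU victim */
		hash = (hash + 1) % TABLE_SIZE;
		cnt++;
	}
	return NULL;
}

int main(void)
{
	table[3] = (struct entry){ .used = true, .assured = true, .id = 3 };
	table[5] = (struct entry){ .used = true, .assured = false, .id = 5 };

	struct entry *victim = find_victim(2);

	printf("victim id: %d\n", victim ? victim->id : -1);
	return 0;
}
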
@@ -708,7 +837,7 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
 
        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
-               if (!early_drop(net, hash_bucket(hash, net))) {
+               if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
@@ -805,7 +934,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
-       struct nf_conntrack_expect *exp;
+       struct nf_conntrack_expect *exp = NULL;
        u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
        struct nf_conn_timeout *timeout_ext;
        unsigned int *timeouts;
@@ -849,42 +978,44 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
                                 ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);
 
-       spin_lock_bh(&nf_conntrack_lock);
-       exp = nf_ct_find_expectation(net, zone, tuple);
-       if (exp) {
-               pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
-                        ct, exp);
-               /* Welcome, Mr. Bond.  We've been expecting you... */
-               __set_bit(IPS_EXPECTED_BIT, &ct->status);
-               ct->master = exp->master;
-               if (exp->helper) {
-                       help = nf_ct_helper_ext_add(ct, exp->helper,
-                                                   GFP_ATOMIC);
-                       if (help)
-                               rcu_assign_pointer(help->helper, exp->helper);
-               }
+       local_bh_disable();
+       if (net->ct.expect_count) {
+               spin_lock(&nf_conntrack_expect_lock);
+               exp = nf_ct_find_expectation(net, zone, tuple);
+               if (exp) {
+                       pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+                                ct, exp);
+                       /* Welcome, Mr. Bond.  We've been expecting you... */
+                       __set_bit(IPS_EXPECTED_BIT, &ct->status);
+                       /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
+                       ct->master = exp->master;
+                       if (exp->helper) {
+                               help = nf_ct_helper_ext_add(ct, exp->helper,
+                                                           GFP_ATOMIC);
+                               if (help)
+                                       rcu_assign_pointer(help->helper, exp->helper);
+                       }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-               ct->mark = exp->master->mark;
+                       ct->mark = exp->master->mark;
 #endif
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
-               ct->secmark = exp->master->secmark;
+                       ct->secmark = exp->master->secmark;
 #endif
-               nf_conntrack_get(&ct->master->ct_general);
-               NF_CT_STAT_INC(net, expect_new);
-       } else {
+                       NF_CT_STAT_INC(net, expect_new);
+               }
+               spin_unlock(&nf_conntrack_expect_lock);
+       }
+       if (!exp) {
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }
 
        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
+       nf_ct_add_to_unconfirmed_list(ct);
 
-       /* Overload tuple linked list to put us in unconfirmed list. */
-       hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-                      &net->ct.unconfirmed);
-
-       spin_unlock_bh(&nf_conntrack_lock);
+       local_bh_enable();
 
        if (exp) {
                if (exp->expectfn)
@@ -1254,27 +1385,42 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
+       int cpu;
+       spinlock_t *lockp;
 
-       spin_lock_bh(&nf_conntrack_lock);
        for (; *bucket < net->ct.htable_size; (*bucket)++) {
-               hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
-                       if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
-                               continue;
+               lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
+               local_bh_disable();
+               spin_lock(lockp);
+               if (*bucket < net->ct.htable_size) {
+                       hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+                               if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+                                       continue;
+                               ct = nf_ct_tuplehash_to_ctrack(h);
+                               if (iter(ct, data))
+                                       goto found;
+                       }
+               }
+               spin_unlock(lockp);
+               local_bh_enable();
+       }
+
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_bh(&pcpu->lock);
+               hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
-                               goto found;
+                               set_bit(IPS_DYING_BIT, &ct->status);
                }
+               spin_unlock_bh(&pcpu->lock);
        }
-       hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
-               ct = nf_ct_tuplehash_to_ctrack(h);
-               if (iter(ct, data))
-                       set_bit(IPS_DYING_BIT, &ct->status);
-       }
-       spin_unlock_bh(&nf_conntrack_lock);
        return NULL;
 found:
        atomic_inc(&ct->ct_general.use);
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock(lockp);
+       local_bh_enable();
        return ct;
 }
 
@@ -1323,14 +1469,19 @@ static void nf_ct_release_dying_list(struct net *net)
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
+       int cpu;
 
-       spin_lock_bh(&nf_conntrack_lock);
-       hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
-               ct = nf_ct_tuplehash_to_ctrack(h);
-               /* never fails to remove them, no listeners at this point */
-               nf_ct_kill(ct);
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_bh(&pcpu->lock);
+               hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+                       ct = nf_ct_tuplehash_to_ctrack(h);
+                       /* never fails to remove them, no listeners at this point */
+                       nf_ct_kill(ct);
+               }
+               spin_unlock_bh(&pcpu->lock);
        }
-       spin_unlock_bh(&nf_conntrack_lock);
 }
 
 static int untrack_refs(void)
@@ -1417,6 +1568,7 @@ i_see_dead_people:
                kmem_cache_destroy(net->ct.nf_conntrack_cachep);
                kfree(net->ct.slabname);
                free_percpu(net->ct.stat);
+               free_percpu(net->ct.pcpu_lists);
        }
 }
 
@@ -1469,12 +1621,16 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
        if (!hash)
                return -ENOMEM;
 
+       local_bh_disable();
+       nf_conntrack_all_lock();
+       write_seqcount_begin(&init_net.ct.generation);
+
        /* Lookups in the old hash might happen in parallel, which means we
         * might get false negatives during connection lookup. New connections
         * created because of a false negative won't make it into the hash
-        * though since that required taking the lock.
+        * though since that required taking the locks.
         */
-       spin_lock_bh(&nf_conntrack_lock);
+
        for (i = 0; i < init_net.ct.htable_size; i++) {
                while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
                        h = hlist_nulls_entry(init_net.ct.hash[i].first,
@@ -1491,7 +1647,10 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 
        init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
        init_net.ct.hash = hash;
-       spin_unlock_bh(&nf_conntrack_lock);
+
+       write_seqcount_end(&init_net.ct.generation);
+       nf_conntrack_all_unlock();
+       local_bh_enable();
 
        nf_ct_free_hashtable(old_hash, old_size);
        return 0;
@@ -1513,7 +1672,10 @@ EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
 int nf_conntrack_init_start(void)
 {
        int max_factor = 8;
-       int ret, cpu;
+       int i, ret, cpu;
+
+       for (i = 0; i < CONNTRACK_LOCKS; i++)
+               spin_lock_init(&nf_conntrack_locks[i]);
 
        /* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
         * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1629,37 +1791,43 @@ void nf_conntrack_init_end(void)
 
 int nf_conntrack_init_net(struct net *net)
 {
-       int ret;
+       int ret = -ENOMEM;
+       int cpu;
 
        atomic_set(&net->ct.count, 0);
-       INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
-       INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
-       INIT_HLIST_NULLS_HEAD(&net->ct.tmpl, TEMPLATE_NULLS_VAL);
-       net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
-       if (!net->ct.stat) {
-               ret = -ENOMEM;
+
+       net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
+       if (!net->ct.pcpu_lists)
                goto err_stat;
+
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_init(&pcpu->lock);
+               INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
+               INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
+               INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
        }
 
+       net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
+       if (!net->ct.stat)
+               goto err_pcpu_lists;
+
        net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
-       if (!net->ct.slabname) {
-               ret = -ENOMEM;
+       if (!net->ct.slabname)
                goto err_slabname;
-       }
 
        net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
                                                        sizeof(struct nf_conn), 0,
                                                        SLAB_DESTROY_BY_RCU, NULL);
        if (!net->ct.nf_conntrack_cachep) {
                printk(KERN_ERR "Unable to create nf_conn slab cache\n");
-               ret = -ENOMEM;
                goto err_cache;
        }
 
        net->ct.htable_size = nf_conntrack_htable_size;
        net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
        if (!net->ct.hash) {
-               ret = -ENOMEM;
                printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
                goto err_hash;
        }
@@ -1701,6 +1869,8 @@ err_cache:
        kfree(net->ct.slabname);
 err_slabname:
        free_percpu(net->ct.stat);
+err_pcpu_lists:
+       free_percpu(net->ct.pcpu_lists);
 err_stat:
        return ret;
 }
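
Aside (not part of the patch): the hunks above replace the single global conntrack lock with an array of per-bucket spinlocks plus per-CPU unconfirmed/dying lists. A minimal userspace analogue of the lock-stripe pattern, using pthreads; NLOCKS stands in for CONNTRACK_LOCKS and every name below is invented for the sketch:

    /* Stripe of locks: a bucket index maps onto locks[bucket % NLOCKS],
     * so operations on different stripes no longer serialize on one lock. */
    #include <pthread.h>
    #include <stddef.h>

    #define NLOCKS 1024                     /* stand-in for CONNTRACK_LOCKS */

    static pthread_mutex_t locks[NLOCKS];

    static void locks_init(void)
    {
            size_t i;

            for (i = 0; i < NLOCKS; i++)
                    pthread_mutex_init(&locks[i], NULL);
    }

    static pthread_mutex_t *bucket_lock(unsigned int bucket)
    {
            return &locks[bucket % NLOCKS];
    }

Resizing the table still needs a "take them all" path, which is what nf_conntrack_all_lock()/nf_conntrack_all_unlock() provide in the nf_conntrack_set_hashsize() hunk above.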
index 4fd1ca94fd4a140b374385ded878af60b503b529..f87e8f68ad453e9baeec017cc74534e8ce85dfab 100644 (file)
@@ -66,9 +66,9 @@ static void nf_ct_expectation_timed_out(unsigned long ul_expect)
 {
        struct nf_conntrack_expect *exp = (void *)ul_expect;
 
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
 }
 
@@ -155,6 +155,18 @@ nf_ct_find_expectation(struct net *net, u16 zone,
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;
 
+       /* Avoid a race with other CPUs that, for the exp->master ct, are
+        * about to invoke ->destroy(), or nf_ct_delete() via timeout
+        * or early_drop().
+        *
+        * The atomic_inc_not_zero() check tells us: if it fails, the ct
+        * is already being destroyed.  If it succeeds, the ct cannot
+        * disappear from under us.
+        */
+       if (unlikely(nf_ct_is_dying(exp->master) ||
+                    !atomic_inc_not_zero(&exp->master->ct_general.use)))
+               return NULL;
+
        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
@@ -162,6 +174,8 @@ nf_ct_find_expectation(struct net *net, u16 zone,
                nf_ct_unlink_expect(exp);
                return exp;
        }
+       /* Undo exp->master refcnt increase, if del_timer() failed */
+       nf_ct_put(exp->master);
 
        return NULL;
 }
@@ -177,12 +191,14 @@ void nf_ct_remove_expectations(struct nf_conn *ct)
        if (!help)
                return;
 
+       spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
 
@@ -217,12 +233,12 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
 /* Generally a bad idea to call this: could have matched already. */
 void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
 {
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
 
@@ -335,7 +351,7 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        helper = rcu_dereference_protected(master_help->helper,
-                                          lockdep_is_held(&nf_conntrack_lock));
+                                          lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
@@ -395,7 +411,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        }
        /* Will be over limit? */
        helper = rcu_dereference_protected(master_help->helper,
-                                          lockdep_is_held(&nf_conntrack_lock));
+                                          lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
@@ -417,12 +433,12 @@ out:
        return ret;
 }
 
-int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 
+int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
 {
        int ret;
 
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;
@@ -430,11 +446,11 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
        ret = nf_ct_expect_insert(expect);
        if (ret < 0)
                goto out;
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return ret;
 out:
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
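
Aside (not part of the patch): the nf_ct_find_expectation() change above pins exp->master with atomic_inc_not_zero() so the master conntrack cannot be freed while the expectation is being attached. A standalone sketch of that "take a reference only if still alive" pattern in plain C11 atomics (the kernel uses its own refcount helpers; the names below are invented):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
            atomic_uint refcnt;             /* 0 means destruction has begun */
    };

    static bool obj_get_unless_zero(struct obj *o)
    {
            unsigned int v = atomic_load(&o->refcnt);

            /* Bump the count only while it is non-zero; once it has hit
             * zero the object must not be resurrected. */
            while (v != 0) {
                    if (atomic_compare_exchange_weak(&o->refcnt, &v, v + 1))
                            return true;
            }
            return false;
    }

If the grab fails, the caller behaves as if no expectation matched, which is exactly what the hunk does by returning NULL.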
index 70866d192efc9351f2fcafe3ef23ce0c131fc0ba..3a3a60b126e0097f309badc40ebf8016b3773c98 100644 (file)
@@ -1476,7 +1476,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
                nf_ct_refresh(ct, skb, info->timeout * HZ);
 
                /* Set expect timeout */
-               spin_lock_bh(&nf_conntrack_lock);
+               spin_lock_bh(&nf_conntrack_expect_lock);
                exp = find_expect(ct, &ct->tuplehash[dir].tuple.dst.u3,
                                  info->sig_port[!dir]);
                if (exp) {
@@ -1486,7 +1486,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
                        nf_ct_dump_tuple(&exp->tuple);
                        set_expect_timeout(exp, info->timeout);
                }
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
        }
 
        return 0;
index 974a2a4adefa739c729692e0df3ad3142f83d0d4..5b3eae7d4c9a51dd1bdd559fa5943125035d8490 100644 (file)
@@ -250,16 +250,14 @@ out:
 }
 EXPORT_SYMBOL_GPL(__nf_ct_try_assign_helper);
 
+/* appropriate ct lock protection must be taken by caller */
 static inline int unhelp(struct nf_conntrack_tuple_hash *i,
                         const struct nf_conntrack_helper *me)
 {
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
        struct nf_conn_help *help = nfct_help(ct);
 
-       if (help && rcu_dereference_protected(
-                       help->helper,
-                       lockdep_is_held(&nf_conntrack_lock)
-                       ) == me) {
+       if (help && rcu_dereference_raw(help->helper) == me) {
                nf_conntrack_event(IPCT_HELPER, ct);
                RCU_INIT_POINTER(help->helper, NULL);
        }
@@ -284,17 +282,17 @@ static LIST_HEAD(nf_ct_helper_expectfn_list);
 
 void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
 {
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
 
 void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
 {
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        list_del_rcu(&n->head);
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
 
@@ -396,15 +394,17 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
        const struct hlist_node *next;
        const struct hlist_nulls_node *nn;
        unsigned int i;
+       int cpu;
 
        /* Get rid of expectations */
+       spin_lock_bh(&nf_conntrack_expect_lock);
        for (i = 0; i < nf_ct_expect_hsize; i++) {
                hlist_for_each_entry_safe(exp, next,
                                          &net->ct.expect_hash[i], hnode) {
                        struct nf_conn_help *help = nfct_help(exp->master);
                        if ((rcu_dereference_protected(
                                        help->helper,
-                                       lockdep_is_held(&nf_conntrack_lock)
+                                       lockdep_is_held(&nf_conntrack_expect_lock)
                                        ) == me || exp->helper == me) &&
                            del_timer(&exp->timeout)) {
                                nf_ct_unlink_expect(exp);
@@ -412,14 +412,27 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
                        }
                }
        }
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 
        /* Get rid of expecteds, set helpers to NULL. */
-       hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
-               unhelp(h, me);
-       for (i = 0; i < net->ct.htable_size; i++) {
-               hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+       for_each_possible_cpu(cpu) {
+               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+               spin_lock_bh(&pcpu->lock);
+               hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
                        unhelp(h, me);
+               spin_unlock_bh(&pcpu->lock);
+       }
+       local_bh_disable();
+       for (i = 0; i < net->ct.htable_size; i++) {
+               spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+               if (i < net->ct.htable_size) {
+                       hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+                               unhelp(h, me);
+               }
+               spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
        }
+       local_bh_enable();
 }
 
 void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
@@ -437,10 +450,8 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
        synchronize_rcu();
 
        rtnl_lock();
-       spin_lock_bh(&nf_conntrack_lock);
        for_each_net(net)
                __nf_conntrack_helper_unregister(me, net);
-       spin_unlock_bh(&nf_conntrack_lock);
        rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
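
Aside (not part of the patch): with the unconfirmed list split per CPU, __nf_conntrack_helper_unregister() now visits one list per possible CPU, each under its own lock, before walking the hash under the per-bucket locks. A userspace sketch of the per-CPU walk (types assumed, lock initialization omitted):

    #include <pthread.h>

    #define NR_CPUS 8                       /* illustrative only */

    struct node {
            struct node *next;
    };

    struct pcpu_list {
            pthread_mutex_t lock;           /* one lock per CPU-local list */
            struct node    *head;
    };

    static struct pcpu_list pcpu[NR_CPUS];  /* init omitted for brevity */

    static void for_each_entry_all_cpus(void (*fn)(struct node *))
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    struct node *n;

                    pthread_mutex_lock(&pcpu[cpu].lock);
                    for (n = pcpu[cpu].head; n != NULL; n = n->next)
                            fn(n);
                    pthread_mutex_unlock(&pcpu[cpu].lock);
            }
    }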
index b9f0e03743228ec852eee97eb6bb9ef1e0ab73e9..ccc46fa5edbce5e52710a22ae502e49a0f59e0a5 100644 (file)
@@ -764,14 +764,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
        u_int8_t l3proto = nfmsg->nfgen_family;
        int res;
+       spinlock_t *lockp;
+
 #ifdef CONFIG_NF_CONNTRACK_MARK
        const struct ctnetlink_dump_filter *filter = cb->data;
 #endif
 
-       spin_lock_bh(&nf_conntrack_lock);
        last = (struct nf_conn *)cb->args[1];
+
+       local_bh_disable();
        for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
+               lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
+               spin_lock(lockp);
+               if (cb->args[0] >= net->ct.htable_size) {
+                       spin_unlock(lockp);
+                       goto out;
+               }
                hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
                                         hnnode) {
                        if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
@@ -803,16 +812,18 @@ restart:
                        if (res < 0) {
                                nf_conntrack_get(&ct->ct_general);
                                cb->args[1] = (unsigned long)ct;
+                               spin_unlock(lockp);
                                goto out;
                        }
                }
+               spin_unlock(lockp);
                if (cb->args[1]) {
                        cb->args[1] = 0;
                        goto restart;
                }
        }
 out:
-       spin_unlock_bh(&nf_conntrack_lock);
+       local_bh_enable();
        if (last)
                nf_ct_put(last);
 
@@ -966,7 +977,6 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
        return 0;
 }
 
-#define __CTA_LABELS_MAX_LENGTH ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
 static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
        [CTA_TUPLE_ORIG]        = { .type = NLA_NESTED },
        [CTA_TUPLE_REPLY]       = { .type = NLA_NESTED },
@@ -984,9 +994,9 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
        [CTA_ZONE]              = { .type = NLA_U16 },
        [CTA_MARK_MASK]         = { .type = NLA_U32 },
        [CTA_LABELS]            = { .type = NLA_BINARY,
-                                   .len = __CTA_LABELS_MAX_LENGTH },
+                                   .len = NF_CT_LABELS_MAX_SIZE },
        [CTA_LABELS_MASK]       = { .type = NLA_BINARY,
-                                   .len = __CTA_LABELS_MAX_LENGTH },
+                                   .len = NF_CT_LABELS_MAX_SIZE },
 };
 
 static int
@@ -1138,50 +1148,65 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 }
 
 static int
-ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb,
-                   struct hlist_nulls_head *list)
+ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-       struct nf_conn *ct, *last;
+       struct nf_conn *ct, *last = NULL;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
        u_int8_t l3proto = nfmsg->nfgen_family;
        int res;
+       int cpu;
+       struct hlist_nulls_head *list;
+       struct net *net = sock_net(skb->sk);
 
        if (cb->args[2])
                return 0;
 
-       spin_lock_bh(&nf_conntrack_lock);
-       last = (struct nf_conn *)cb->args[1];
-restart:
-       hlist_nulls_for_each_entry(h, n, list, hnnode) {
-               ct = nf_ct_tuplehash_to_ctrack(h);
-               if (l3proto && nf_ct_l3num(ct) != l3proto)
+       if (cb->args[0] == nr_cpu_ids)
+               return 0;
+
+       for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+               struct ct_pcpu *pcpu;
+
+               if (!cpu_possible(cpu))
                        continue;
-               if (cb->args[1]) {
-                       if (ct != last)
+
+               pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+               spin_lock_bh(&pcpu->lock);
+               last = (struct nf_conn *)cb->args[1];
+               list = dying ? &pcpu->dying : &pcpu->unconfirmed;
+restart:
+               hlist_nulls_for_each_entry(h, n, list, hnnode) {
+                       ct = nf_ct_tuplehash_to_ctrack(h);
+                       if (l3proto && nf_ct_l3num(ct) != l3proto)
                                continue;
-                       cb->args[1] = 0;
-               }
-               rcu_read_lock();
-               res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
-                                         cb->nlh->nlmsg_seq,
-                                         NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
-                                         ct);
-               rcu_read_unlock();
-               if (res < 0) {
-                       nf_conntrack_get(&ct->ct_general);
-                       cb->args[1] = (unsigned long)ct;
-                       goto out;
+                       if (cb->args[1]) {
+                               if (ct != last)
+                                       continue;
+                               cb->args[1] = 0;
+                       }
+                       rcu_read_lock();
+                       res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
+                                                 cb->nlh->nlmsg_seq,
+                                                 NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+                                                 ct);
+                       rcu_read_unlock();
+                       if (res < 0) {
+                               nf_conntrack_get(&ct->ct_general);
+                               cb->args[1] = (unsigned long)ct;
+                               spin_unlock_bh(&pcpu->lock);
+                               goto out;
+                       }
                }
+               if (cb->args[1]) {
+                       cb->args[1] = 0;
+                       goto restart;
+               } else
+                       cb->args[2] = 1;
+               spin_unlock_bh(&pcpu->lock);
        }
-       if (cb->args[1]) {
-               cb->args[1] = 0;
-               goto restart;
-       } else
-               cb->args[2] = 1;
 out:
-       spin_unlock_bh(&nf_conntrack_lock);
        if (last)
                nf_ct_put(last);
 
@@ -1191,9 +1216,7 @@ out:
 static int
 ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct net *net = sock_net(skb->sk);
-
-       return ctnetlink_dump_list(skb, cb, &net->ct.dying);
+       return ctnetlink_dump_list(skb, cb, true);
 }
 
 static int
@@ -1215,9 +1238,7 @@ ctnetlink_get_ct_dying(struct sock *ctnl, struct sk_buff *skb,
 static int
 ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct net *net = sock_net(skb->sk);
-
-       return ctnetlink_dump_list(skb, cb, &net->ct.unconfirmed);
+       return ctnetlink_dump_list(skb, cb, false);
 }
 
 static int
@@ -1361,14 +1382,14 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
                                            nf_ct_protonum(ct));
        if (helper == NULL) {
 #ifdef CONFIG_MODULES
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
 
                if (request_module("nfct-helper-%s", helpname) < 0) {
-                       spin_lock_bh(&nf_conntrack_lock);
+                       spin_lock_bh(&nf_conntrack_expect_lock);
                        return -EOPNOTSUPP;
                }
 
-               spin_lock_bh(&nf_conntrack_lock);
+               spin_lock_bh(&nf_conntrack_expect_lock);
                helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
                                                    nf_ct_protonum(ct));
                if (helper)
@@ -1804,9 +1825,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
        err = -EEXIST;
        ct = nf_ct_tuplehash_to_ctrack(h);
        if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
-               spin_lock_bh(&nf_conntrack_lock);
+               spin_lock_bh(&nf_conntrack_expect_lock);
                err = ctnetlink_change_conntrack(ct, cda);
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
                if (err == 0) {
                        nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
                                                      (1 << IPCT_ASSURED) |
@@ -2135,9 +2156,9 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
        if (ret < 0)
                return ret;
 
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 
        return ret;
 }
@@ -2692,13 +2713,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                }
 
                /* after list removal, usage count == 1 */
-               spin_lock_bh(&nf_conntrack_lock);
+               spin_lock_bh(&nf_conntrack_expect_lock);
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
                                                   nlmsg_report(nlh));
                        nf_ct_expect_put(exp);
                }
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
                /* have to put what we 'get' above.
                 * after this line usage count == 0 */
                nf_ct_expect_put(exp);
@@ -2707,7 +2728,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                struct nf_conn_help *m_help;
 
                /* delete all expectations for this helper */
-               spin_lock_bh(&nf_conntrack_lock);
+               spin_lock_bh(&nf_conntrack_expect_lock);
                for (i = 0; i < nf_ct_expect_hsize; i++) {
                        hlist_for_each_entry_safe(exp, next,
                                                  &net->ct.expect_hash[i],
@@ -2722,10 +2743,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                                }
                        }
                }
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
        } else {
                /* This basically means we have to flush everything */
-               spin_lock_bh(&nf_conntrack_lock);
+               spin_lock_bh(&nf_conntrack_expect_lock);
                for (i = 0; i < nf_ct_expect_hsize; i++) {
                        hlist_for_each_entry_safe(exp, next,
                                                  &net->ct.expect_hash[i],
@@ -2738,7 +2759,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
                                }
                        }
                }
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
        }
 
        return 0;
@@ -2964,11 +2985,11 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        exp = __nf_ct_expect_find(net, zone, &tuple);
 
        if (!exp) {
-               spin_unlock_bh(&nf_conntrack_lock);
+               spin_unlock_bh(&nf_conntrack_expect_lock);
                err = -ENOENT;
                if (nlh->nlmsg_flags & NLM_F_CREATE) {
                        err = ctnetlink_create_expect(net, zone, cda,
@@ -2982,7 +3003,7 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
        err = -EEXIST;
        if (!(nlh->nlmsg_flags & NLM_F_EXCL))
                err = ctnetlink_change_expect(exp, cda);
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 
        return err;
 }
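
Aside (not part of the patch): ctnetlink dumps are resumable, so the bucket/CPU index and the entry that did not fit into the message go into cb->args[] and the next netlink call picks up there (the kernel additionally pins that entry with nf_conntrack_get(), which the sketch omits). An illustrative cursor in plain C, all names invented:

    #include <stdbool.h>
    #include <stddef.h>

    struct item {
            struct item *next;
            int id;
    };

    struct cursor {
            size_t       bucket;    /* next bucket to visit (cb->args[0]) */
            struct item *last;      /* entry that did not fit (cb->args[1]) */
    };

    /* emit() returns false when the output buffer is full; the cursor then
     * remembers the entry so the next call re-emits it and continues. */
    static void dump(struct item **table, size_t nbuckets, struct cursor *c,
                     bool (*emit)(const struct item *))
    {
            for (; c->bucket < nbuckets; c->bucket++) {
                    struct item *it;

                    for (it = table[c->bucket]; it != NULL; it = it->next) {
                            if (c->last != NULL) {
                                    if (it != c->last)
                                            continue;  /* skip until resume point */
                                    c->last = NULL;    /* re-emit the failed entry */
                            }
                            if (!emit(it)) {
                                    c->last = it;
                                    return;            /* resume here next time */
                            }
                    }
            }
    }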
index 466410eaa482c2a3940d3768351e92be17a644ef..4c3ba1c8d682d16abe0912f80525a84d184130db 100644 (file)
@@ -800,7 +800,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
        struct hlist_node *next;
        int found = 0;
 
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if (exp->class != SIP_EXPECT_SIGNALLING ||
                    !nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
@@ -815,7 +815,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
                found = 1;
                break;
        }
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
        return found;
 }
 
@@ -825,7 +825,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;
 
-       spin_lock_bh(&nf_conntrack_lock);
+       spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
                        continue;
@@ -836,7 +836,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
                if (!media)
                        break;
        }
-       spin_unlock_bh(&nf_conntrack_lock);
+       spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 
 static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
index adce01e8bb57e7fb9794eebceae274707d96899e..33045a56229769502532b38e7b4a1bc5d89c80d8 100644 (file)
@@ -794,9 +794,8 @@ nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
        stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
 
        if (chain->stats) {
-               /* nfnl_lock is held, add some nfnl function for this, later */
                struct nft_stats __percpu *oldstats =
-                       rcu_dereference_protected(chain->stats, 1);
+                               nft_dereference(chain->stats);
 
                rcu_assign_pointer(chain->stats, newstats);
                synchronize_rcu();
@@ -1254,10 +1253,11 @@ err1:
        return err;
 }
 
-static void nf_tables_expr_destroy(struct nft_expr *expr)
+static void nf_tables_expr_destroy(const struct nft_ctx *ctx,
+                                  struct nft_expr *expr)
 {
        if (expr->ops->destroy)
-               expr->ops->destroy(expr);
+               expr->ops->destroy(ctx, expr);
        module_put(expr->ops->type->owner);
 }
 
@@ -1296,6 +1296,8 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
        [NFTA_RULE_EXPRESSIONS] = { .type = NLA_NESTED },
        [NFTA_RULE_COMPAT]      = { .type = NLA_NESTED },
        [NFTA_RULE_POSITION]    = { .type = NLA_U64 },
+       [NFTA_RULE_USERDATA]    = { .type = NLA_BINARY,
+                                   .len = NFT_USERDATA_MAXLEN },
 };
 
 static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
@@ -1348,6 +1350,10 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
        }
        nla_nest_end(skb, list);
 
+       if (rule->ulen &&
+           nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule)))
+               goto nla_put_failure;
+
        return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -1531,7 +1537,8 @@ err:
        return err;
 }
 
-static void nf_tables_rule_destroy(struct nft_rule *rule)
+static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+                                  struct nft_rule *rule)
 {
        struct nft_expr *expr;
 
@@ -1541,7 +1548,7 @@ static void nf_tables_rule_destroy(struct nft_rule *rule)
         */
        expr = nft_expr_first(rule);
        while (expr->ops && expr != nft_expr_last(rule)) {
-               nf_tables_expr_destroy(expr);
+               nf_tables_expr_destroy(ctx, expr);
                expr = nft_expr_next(expr);
        }
        kfree(rule);
@@ -1552,7 +1559,7 @@ static void nf_tables_rule_destroy(struct nft_rule *rule)
 static struct nft_expr_info *info;
 
 static struct nft_rule_trans *
-nf_tables_trans_add(struct nft_rule *rule, const struct nft_ctx *ctx)
+nf_tables_trans_add(struct nft_ctx *ctx, struct nft_rule *rule)
 {
        struct nft_rule_trans *rupd;
 
@@ -1560,11 +1567,8 @@ nf_tables_trans_add(struct nft_rule *rule, const struct nft_ctx *ctx)
        if (rupd == NULL)
               return NULL;
 
-       rupd->chain = ctx->chain;
-       rupd->table = ctx->table;
+       rupd->ctx = *ctx;
        rupd->rule = rule;
-       rupd->family = ctx->afi->family;
-       rupd->nlh = ctx->nlh;
        list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
 
        return rupd;
@@ -1584,7 +1588,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
        struct nft_expr *expr;
        struct nft_ctx ctx;
        struct nlattr *tmp;
-       unsigned int size, i, n;
+       unsigned int size, i, n, ulen = 0;
        int err, rem;
        bool create;
        u64 handle, pos_handle;
@@ -1650,8 +1654,11 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                }
        }
 
+       if (nla[NFTA_RULE_USERDATA])
+               ulen = nla_len(nla[NFTA_RULE_USERDATA]);
+
        err = -ENOMEM;
-       rule = kzalloc(sizeof(*rule) + size, GFP_KERNEL);
+       rule = kzalloc(sizeof(*rule) + size + ulen, GFP_KERNEL);
        if (rule == NULL)
                goto err1;
 
@@ -1659,6 +1666,10 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        rule->handle = handle;
        rule->dlen   = size;
+       rule->ulen   = ulen;
+
+       if (ulen)
+               nla_memcpy(nft_userdata(rule), nla[NFTA_RULE_USERDATA], ulen);
 
        expr = nft_expr_first(rule);
        for (i = 0; i < n; i++) {
@@ -1671,7 +1682,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                if (nft_rule_is_active_next(net, old_rule)) {
-                       repl = nf_tables_trans_add(old_rule, &ctx);
+                       repl = nf_tables_trans_add(&ctx, old_rule);
                        if (repl == NULL) {
                                err = -ENOMEM;
                                goto err2;
@@ -1694,7 +1705,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                        list_add_rcu(&rule->list, &chain->rules);
        }
 
-       if (nf_tables_trans_add(rule, &ctx) == NULL) {
+       if (nf_tables_trans_add(&ctx, rule) == NULL) {
                err = -ENOMEM;
                goto err3;
        }
@@ -1709,7 +1720,7 @@ err3:
                kfree(repl);
        }
 err2:
-       nf_tables_rule_destroy(rule);
+       nf_tables_rule_destroy(&ctx, rule);
 err1:
        for (i = 0; i < n; i++) {
                if (info[i].ops != NULL)
@@ -1723,7 +1734,7 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
 {
        /* You cannot delete the same rule twice */
        if (nft_rule_is_active_next(ctx->net, rule)) {
-               if (nf_tables_trans_add(rule, ctx) == NULL)
+               if (nf_tables_trans_add(ctx, rule) == NULL)
                        return -ENOMEM;
                nft_rule_disactivate_next(ctx->net, rule);
                return 0;
@@ -1819,10 +1830,10 @@ static int nf_tables_commit(struct sk_buff *skb)
                 */
                if (nft_rule_is_active(net, rupd->rule)) {
                        nft_rule_clear(net, rupd->rule);
-                       nf_tables_rule_notify(skb, rupd->nlh, rupd->table,
-                                             rupd->chain, rupd->rule,
-                                             NFT_MSG_NEWRULE, 0,
-                                             rupd->family);
+                       nf_tables_rule_notify(skb, rupd->ctx.nlh,
+                                             rupd->ctx.table, rupd->ctx.chain,
+                                             rupd->rule, NFT_MSG_NEWRULE, 0,
+                                             rupd->ctx.afi->family);
                        list_del(&rupd->list);
                        kfree(rupd);
                        continue;
@@ -1830,9 +1841,10 @@ static int nf_tables_commit(struct sk_buff *skb)
 
                /* This rule is in the past, get rid of it */
                list_del_rcu(&rupd->rule->list);
-               nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
+               nf_tables_rule_notify(skb, rupd->ctx.nlh,
+                                     rupd->ctx.table, rupd->ctx.chain,
                                      rupd->rule, NFT_MSG_DELRULE, 0,
-                                     rupd->family);
+                                     rupd->ctx.afi->family);
        }
 
        /* Make sure we don't see any packet traversing old rules */
@@ -1840,7 +1852,7 @@ static int nf_tables_commit(struct sk_buff *skb)
 
        /* Now we can safely release unused old rules */
        list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               nf_tables_rule_destroy(rupd->rule);
+               nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
                list_del(&rupd->list);
                kfree(rupd);
        }
@@ -1869,7 +1881,7 @@ static int nf_tables_abort(struct sk_buff *skb)
        synchronize_rcu();
 
        list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
-               nf_tables_rule_destroy(rupd->rule);
+               nf_tables_rule_destroy(&rupd->ctx, rupd->rule);
                list_del(&rupd->list);
                kfree(rupd);
        }
@@ -2430,8 +2442,7 @@ err1:
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
        list_del(&set->list);
-       if (!(set->flags & NFT_SET_ANONYMOUS))
-               nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+       nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
 
        set->ops->destroy(set);
        module_put(set->ops->owner);
@@ -3175,9 +3186,16 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
        data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
 
        switch (data->verdict) {
-       case NF_ACCEPT:
-       case NF_DROP:
-       case NF_QUEUE:
+       default:
+               switch (data->verdict & NF_VERDICT_MASK) {
+               case NF_ACCEPT:
+               case NF_DROP:
+               case NF_QUEUE:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               /* fall through */
        case NFT_CONTINUE:
        case NFT_BREAK:
        case NFT_RETURN:
@@ -3198,8 +3216,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
                data->chain = chain;
                desc->len = sizeof(data);
                break;
-       default:
-               return -EINVAL;
        }
 
        desc->type = NFT_DATA_VERDICT;
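
Aside (not part of the patch): the nft_verdict_init() rework above accepts verdicts that carry extra bits (such as a queue number) by masking before comparing against the base codes. Restated as a standalone check; the constants mirror the netfilter uapi values but are redefined here so the sketch compiles on its own:

    #include <stdbool.h>
    #include <stdint.h>

    #define NF_DROP          0
    #define NF_ACCEPT        1
    #define NF_QUEUE         3
    #define NF_VERDICT_MASK  0x000000ff

    static bool base_verdict_ok(uint32_t verdict)
    {
            switch (verdict & NF_VERDICT_MASK) {
            case NF_ACCEPT:
            case NF_DROP:
            case NF_QUEUE:
                    return true;            /* valid base verdict */
            default:
                    return false;           /* anything else is rejected */
            }
    }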
index 046aa13b4fea6a570b990444b7ca4c6419824f8f..e8138da4c14f70f40449c72ec4dc4d31f2960b8e 100644 (file)
@@ -61,6 +61,14 @@ void nfnl_unlock(__u8 subsys_id)
 }
 EXPORT_SYMBOL_GPL(nfnl_unlock);
 
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_nfnl_is_held(u8 subsys_id)
+{
+       return lockdep_is_held(&table[subsys_id].mutex);
+}
+EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
+#endif
+
 int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
 {
        nfnl_lock(n->subsys_id);
index a155d19a225edcfb4b1a550ebc898f40d51c564d..d292c8d286ebeac22f688d47df8b793e9c90cc6b 100644 (file)
@@ -28,8 +28,6 @@
 #include <linux/proc_fs.h>
 #include <linux/security.h>
 #include <linux/list.h>
-#include <linux/jhash.h>
-#include <linux/random.h>
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <net/netfilter/nf_log.h>
@@ -75,7 +73,6 @@ struct nfulnl_instance {
 };
 
 #define INSTANCE_BUCKETS       16
-static unsigned int hash_init;
 
 static int nfnl_log_net_id __read_mostly;
 
@@ -1067,11 +1064,6 @@ static int __init nfnetlink_log_init(void)
 {
        int status = -ENOMEM;
 
-       /* it's not really all that important to have a random value, so
-        * we can do this from the init function, even if there hasn't
-        * been that much entropy yet */
-       get_random_bytes(&hash_init, sizeof(hash_init));
-
        netlink_register_notifier(&nfulnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfulnl_subsys);
        if (status < 0) {
index 82cb8236f8a101d260a9f3cc6eb1c218e34163f3..8a779be832fba2d8a86181f8f8f68a445299d626 100644 (file)
@@ -192,7 +192,7 @@ err:
 }
 
 static void
-nft_target_destroy(const struct nft_expr *expr)
+nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
        struct xt_target *target = expr->ops->data;
 
@@ -379,7 +379,7 @@ err:
 }
 
 static void
-nft_match_destroy(const struct nft_expr *expr)
+nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
        struct xt_match *match = expr->ops->data;
 
index 46e2754038387cf978679e2fd4f1032b30e40bd6..bd0d41e693416167b4f149f64117e440a5134496 100644 (file)
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_labels.h>
 
 struct nft_ct {
        enum nft_ct_keys        key:8;
        enum ip_conntrack_dir   dir:8;
-       union{
+       union {
                enum nft_registers      dreg:8;
                enum nft_registers      sreg:8;
        };
-       uint8_t                 family;
 };
 
 static void nft_ct_get_eval(const struct nft_expr *expr,
@@ -97,6 +97,26 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                        goto err;
                strncpy((char *)dest->data, helper->name, sizeof(dest->data));
                return;
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+       case NFT_CT_LABELS: {
+               struct nf_conn_labels *labels = nf_ct_labels_find(ct);
+               unsigned int size;
+
+               if (!labels) {
+                       memset(dest->data, 0, sizeof(dest->data));
+                       return;
+               }
+
+               BUILD_BUG_ON(NF_CT_LABELS_MAX_SIZE > sizeof(dest->data));
+               size = labels->words * sizeof(long);
+
+               memcpy(dest->data, labels->bits, size);
+               if (size < sizeof(dest->data))
+                       memset(((char *) dest->data) + size, 0,
+                              sizeof(dest->data) - size);
+               return;
+       }
+#endif
        }
 
        tuple = &ct->tuplehash[priv->dir].tuple;
@@ -220,6 +240,9 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
 #endif
 #ifdef CONFIG_NF_CONNTRACK_SECMARK
        case NFT_CT_SECMARK:
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+       case NFT_CT_LABELS:
 #endif
        case NFT_CT_EXPIRATION:
        case NFT_CT_HELPER:
@@ -292,16 +315,13 @@ static int nft_ct_init(const struct nft_ctx *ctx,
        if (err < 0)
                return err;
 
-       priv->family = ctx->afi->family;
-
        return 0;
 }
 
-static void nft_ct_destroy(const struct nft_expr *expr)
+static void nft_ct_destroy(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr)
 {
-       struct nft_ct *priv = nft_expr_priv(expr);
-
-       nft_ct_l3proto_module_put(priv->family);
+       nft_ct_l3proto_module_put(ctx->afi->family);
 }
 
 static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
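
Aside (not part of the patch): the NFT_CT_LABELS branch above copies however many label bytes the conntrack carries and zero-fills the rest of the fixed-size destination register. The same copy-then-pad step as a standalone helper, where DEST_SIZE stands in for sizeof(dest->data):

    #include <stddef.h>
    #include <string.h>

    #define DEST_SIZE 16                    /* stand-in for sizeof(dest->data) */

    static void copy_zero_padded(unsigned char *dest,
                                 const unsigned char *src, size_t src_len)
    {
            size_t n = src_len < DEST_SIZE ? src_len : DEST_SIZE;

            memcpy(dest, src, n);
            if (n < DEST_SIZE)
                    memset(dest + n, 0, DEST_SIZE - n);  /* clear the tail */
    }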
index 3d3f8fce10a5136391e53bd75b13498a45e82e96..3b1ad876d6b028f987ccf8e78ceb9639d767e6bf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/list.h>
 #include <linux/jhash.h>
 #include <linux/netlink.h>
+#include <linux/vmalloc.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
+#define NFT_HASH_MIN_SIZE      4
+
 struct nft_hash {
-       struct hlist_head       *hash;
-       unsigned int            hsize;
+       struct nft_hash_table __rcu     *tbl;
+};
+
+struct nft_hash_table {
+       unsigned int                    size;
+       unsigned int                    elements;
+       struct nft_hash_elem __rcu      *buckets[];
 };
 
 struct nft_hash_elem {
-       struct hlist_node       hnode;
-       struct nft_data         key;
-       struct nft_data         data[];
+       struct nft_hash_elem __rcu      *next;
+       struct nft_data                 key;
+       struct nft_data                 data[];
 };
 
+#define nft_hash_for_each_entry(i, head) \
+       for (i = nft_dereference(head); i != NULL; i = nft_dereference(i->next))
+#define nft_hash_for_each_entry_rcu(i, head) \
+       for (i = rcu_dereference(head); i != NULL; i = rcu_dereference(i->next))
+
 static u32 nft_hash_rnd __read_mostly;
 static bool nft_hash_rnd_initted __read_mostly;
 
@@ -38,7 +51,7 @@ static unsigned int nft_hash_data(const struct nft_data *data,
        unsigned int h;
 
        h = jhash(data->data, len, nft_hash_rnd);
-       return ((u64)h * hsize) >> 32;
+       return h & (hsize - 1);
 }
 
 static bool nft_hash_lookup(const struct nft_set *set,
@@ -46,11 +59,12 @@ static bool nft_hash_lookup(const struct nft_set *set,
                            struct nft_data *data)
 {
        const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_table *tbl = rcu_dereference(priv->tbl);
        const struct nft_hash_elem *he;
        unsigned int h;
 
-       h = nft_hash_data(key, priv->hsize, set->klen);
-       hlist_for_each_entry(he, &priv->hash[h], hnode) {
+       h = nft_hash_data(key, tbl->size, set->klen);
+       nft_hash_for_each_entry_rcu(he, tbl->buckets[h]) {
                if (nft_data_cmp(&he->key, key, set->klen))
                        continue;
                if (set->flags & NFT_SET_MAP)
@@ -60,19 +74,148 @@ static bool nft_hash_lookup(const struct nft_set *set,
        return false;
 }
 
-static void nft_hash_elem_destroy(const struct nft_set *set,
-                                 struct nft_hash_elem *he)
+static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
 {
-       nft_data_uninit(&he->key, NFT_DATA_VALUE);
-       if (set->flags & NFT_SET_MAP)
-               nft_data_uninit(he->data, set->dtype);
-       kfree(he);
+       if (is_vmalloc_addr(tbl))
+               vfree(tbl);
+       else
+               kfree(tbl);
+}
+
+static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
+{
+       struct nft_hash_table *tbl;
+       size_t size;
+
+       size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
+       tbl = kzalloc(size, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN);
+       if (tbl == NULL)
+               tbl = vzalloc(size);
+       if (tbl == NULL)
+               return NULL;
+       tbl->size = nbuckets;
+
+       return tbl;
+}
+
+static void nft_hash_chain_unzip(const struct nft_set *set,
+                                const struct nft_hash_table *ntbl,
+                                struct nft_hash_table *tbl, unsigned int n)
+{
+       struct nft_hash_elem *he, *last, *next;
+       unsigned int h;
+
+       he = nft_dereference(tbl->buckets[n]);
+       if (he == NULL)
+               return;
+       h = nft_hash_data(&he->key, ntbl->size, set->klen);
+
+       /* Find last element of first chain hashing to bucket h */
+       last = he;
+       nft_hash_for_each_entry(he, he->next) {
+               if (nft_hash_data(&he->key, ntbl->size, set->klen) != h)
+                       break;
+               last = he;
+       }
+
+       /* Unlink first chain from the old table */
+       RCU_INIT_POINTER(tbl->buckets[n], last->next);
+
+       /* If end of chain reached, done */
+       if (he == NULL)
+               return;
+
+       /* Find first element of second chain hashing to bucket h */
+       next = NULL;
+       nft_hash_for_each_entry(he, he->next) {
+               if (nft_hash_data(&he->key, ntbl->size, set->klen) != h)
+                       continue;
+               next = he;
+               break;
+       }
+
+       /* Link the two chains */
+       RCU_INIT_POINTER(last->next, next);
+}
+
+static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
+{
+       struct nft_hash_table *tbl = nft_dereference(priv->tbl), *ntbl;
+       struct nft_hash_elem *he;
+       unsigned int i, h;
+       bool complete;
+
+       ntbl = nft_hash_tbl_alloc(tbl->size * 2);
+       if (ntbl == NULL)
+               return -ENOMEM;
+
+       /* Link each bucket of the new table to the first element in the
+        * old table that hashes to that new bucket.
+        */
+       for (i = 0; i < ntbl->size; i++) {
+               h = i < tbl->size ? i : i - tbl->size;
+               nft_hash_for_each_entry(he, tbl->buckets[h]) {
+                       if (nft_hash_data(&he->key, ntbl->size, set->klen) != i)
+                               continue;
+                       RCU_INIT_POINTER(ntbl->buckets[i], he);
+                       break;
+               }
+       }
+       ntbl->elements = tbl->elements;
+
+       /* Publish new table */
+       rcu_assign_pointer(priv->tbl, ntbl);
+
+       /* Unzip interleaved hash chains */
+       do {
+               /* Wait for readers to use new table/unzipped chains */
+               synchronize_rcu();
+
+               complete = true;
+               for (i = 0; i < tbl->size; i++) {
+                       nft_hash_chain_unzip(set, ntbl, tbl, i);
+                       if (tbl->buckets[i] != NULL)
+                               complete = false;
+               }
+       } while (!complete);
+
+       nft_hash_tbl_free(tbl);
+       return 0;
+}
+
+static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
+{
+       struct nft_hash_table *tbl = nft_dereference(priv->tbl), *ntbl;
+       struct nft_hash_elem __rcu **pprev;
+       unsigned int i;
+
+       ntbl = nft_hash_tbl_alloc(tbl->size / 2);
+       if (ntbl == NULL)
+               return -ENOMEM;
+
+       for (i = 0; i < ntbl->size; i++) {
+               ntbl->buckets[i] = tbl->buckets[i];
+
+               for (pprev = &ntbl->buckets[i]; *pprev != NULL;
+                    pprev = &nft_dereference(*pprev)->next)
+                       ;
+               RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+       }
+       ntbl->elements = tbl->elements;
+
+       /* Publish new table */
+       rcu_assign_pointer(priv->tbl, ntbl);
+       synchronize_rcu();
+
+       nft_hash_tbl_free(tbl);
+       return 0;
 }
 
 static int nft_hash_insert(const struct nft_set *set,
                           const struct nft_set_elem *elem)
 {
        struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_table *tbl = nft_dereference(priv->tbl);
        struct nft_hash_elem *he;
        unsigned int size, h;
 
@@ -91,33 +234,66 @@ static int nft_hash_insert(const struct nft_set *set,
        if (set->flags & NFT_SET_MAP)
                nft_data_copy(he->data, &elem->data);
 
-       h = nft_hash_data(&he->key, priv->hsize, set->klen);
-       hlist_add_head_rcu(&he->hnode, &priv->hash[h]);
+       h = nft_hash_data(&he->key, tbl->size, set->klen);
+       RCU_INIT_POINTER(he->next, tbl->buckets[h]);
+       rcu_assign_pointer(tbl->buckets[h], he);
+       tbl->elements++;
+
+       /* Expand table when exceeding 75% load */
+       if (tbl->elements > tbl->size / 4 * 3)
+               nft_hash_tbl_expand(set, priv);
+
        return 0;
 }
 
+static void nft_hash_elem_destroy(const struct nft_set *set,
+                                 struct nft_hash_elem *he)
+{
+       nft_data_uninit(&he->key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(he->data, set->dtype);
+       kfree(he);
+}
+
 static void nft_hash_remove(const struct nft_set *set,
                            const struct nft_set_elem *elem)
 {
-       struct nft_hash_elem *he = elem->cookie;
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_table *tbl = nft_dereference(priv->tbl);
+       struct nft_hash_elem *he, __rcu **pprev;
 
-       hlist_del_rcu(&he->hnode);
+       pprev = elem->cookie;
+       he = nft_dereference((*pprev));
+
+       RCU_INIT_POINTER(*pprev, he->next);
+       synchronize_rcu();
        kfree(he);
+       tbl->elements--;
+
+       /* Shrink table beneath 30% load */
+       if (tbl->elements < tbl->size * 3 / 10 &&
+           tbl->size > NFT_HASH_MIN_SIZE)
+               nft_hash_tbl_shrink(set, priv);
 }
 
 static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 {
        const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
+       struct nft_hash_elem __rcu * const *pprev;
        struct nft_hash_elem *he;
        unsigned int h;
 
-       h = nft_hash_data(&elem->key, priv->hsize, set->klen);
-       hlist_for_each_entry(he, &priv->hash[h], hnode) {
-               if (nft_data_cmp(&he->key, &elem->key, set->klen))
+       h = nft_hash_data(&elem->key, tbl->size, set->klen);
+       pprev = &tbl->buckets[h];
+       nft_hash_for_each_entry(he, tbl->buckets[h]) {
+               if (nft_data_cmp(&he->key, &elem->key, set->klen)) {
+                       pprev = &he->next;
                        continue;
+               }
 
-               elem->cookie = he;
-               elem->flags  = 0;
+               elem->cookie = (void *)pprev;
+               elem->flags = 0;
                if (set->flags & NFT_SET_MAP)
                        nft_data_copy(&elem->data, he->data);
                return 0;
@@ -129,12 +305,13 @@ static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
 {
        const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
        const struct nft_hash_elem *he;
        struct nft_set_elem elem;
        unsigned int i;
 
-       for (i = 0; i < priv->hsize; i++) {
-               hlist_for_each_entry(he, &priv->hash[i], hnode) {
+       for (i = 0; i < tbl->size; i++) {
+               nft_hash_for_each_entry(he, tbl->buckets[i]) {
                        if (iter->count < iter->skip)
                                goto cont;
 
@@ -161,43 +338,35 @@ static int nft_hash_init(const struct nft_set *set,
                         const struct nlattr * const tb[])
 {
        struct nft_hash *priv = nft_set_priv(set);
-       unsigned int cnt, i;
+       struct nft_hash_table *tbl;
 
        if (unlikely(!nft_hash_rnd_initted)) {
                get_random_bytes(&nft_hash_rnd, 4);
                nft_hash_rnd_initted = true;
        }
 
-       /* Aim for a load factor of 0.75 */
-       // FIXME: temporarily broken until we have set descriptions
-       cnt = 100;
-       cnt = cnt * 4 / 3;
-
-       priv->hash = kcalloc(cnt, sizeof(struct hlist_head), GFP_KERNEL);
-       if (priv->hash == NULL)
+       tbl = nft_hash_tbl_alloc(NFT_HASH_MIN_SIZE);
+       if (tbl == NULL)
                return -ENOMEM;
-       priv->hsize = cnt;
-
-       for (i = 0; i < cnt; i++)
-               INIT_HLIST_HEAD(&priv->hash[i]);
-
+       RCU_INIT_POINTER(priv->tbl, tbl);
        return 0;
 }
 
 static void nft_hash_destroy(const struct nft_set *set)
 {
        const struct nft_hash *priv = nft_set_priv(set);
-       const struct hlist_node *next;
-       struct nft_hash_elem *elem;
+       const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
+       struct nft_hash_elem *he, *next;
        unsigned int i;
 
-       for (i = 0; i < priv->hsize; i++) {
-               hlist_for_each_entry_safe(elem, next, &priv->hash[i], hnode) {
-                       hlist_del(&elem->hnode);
-                       nft_hash_elem_destroy(set, elem);
+       for (i = 0; i < tbl->size; i++) {
+               for (he = nft_dereference(tbl->buckets[i]); he != NULL;
+                    he = next) {
+                       next = nft_dereference(he->next);
+                       nft_hash_elem_destroy(set, he);
                }
        }
-       kfree(priv->hash);
+       kfree(tbl);
 }
 
 static struct nft_set_ops nft_hash_ops __read_mostly = {
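
Aside (not part of the patch): the rewritten nft_hash keeps the bucket count a power of two, which lets nft_hash_data() mask instead of multiply-shift, and resizes based on load: grow past 75% occupancy, shrink below 30% (never under NFT_HASH_MIN_SIZE). The sizing policy as a standalone sketch, thresholds restated from the hunk, helper names invented:

    #include <stdbool.h>

    #define MIN_SIZE 4                      /* stand-in for NFT_HASH_MIN_SIZE */

    static unsigned int bucket_of(unsigned int hash, unsigned int size)
    {
            return hash & (size - 1);       /* size must be a power of two */
    }

    static bool should_expand(unsigned int elements, unsigned int size)
    {
            return elements > size / 4 * 3;                 /* > 75% load */
    }

    static bool should_shrink(unsigned int elements, unsigned int size)
    {
            return elements < size * 3 / 10 && size > MIN_SIZE;  /* < 30% */
    }

Resizing itself uses the chain-unzip technique from the hunk so RCU readers never observe a broken chain.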
index f169501f1ad4389970613d1e44fb2b60759406cb..810385eb7249c7be39bcbc0e6c34abc1a6b1708d 100644 (file)
@@ -70,7 +70,8 @@ err1:
        return err;
 }
 
-static void nft_immediate_destroy(const struct nft_expr *expr)
+static void nft_immediate_destroy(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
        return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
index 26c5154e05f3fc09aae2ebdea968ce564a3d5eb9..10cfb156cdf4449dee49e53667549e5c2fccdf82 100644 (file)
@@ -74,7 +74,8 @@ static int nft_log_init(const struct nft_ctx *ctx,
        return 0;
 }
 
-static void nft_log_destroy(const struct nft_expr *expr)
+static void nft_log_destroy(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr)
 {
        struct nft_log *priv = nft_expr_priv(expr);
 
index bb4ef4cccb6efcdf937bc3417eb8dbd6ccf0e22f..7fd2bea8aa239f347dc461c7bc45869dac405573 100644 (file)
@@ -89,11 +89,12 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
        return 0;
 }
 
-static void nft_lookup_destroy(const struct nft_expr *expr)
+static void nft_lookup_destroy(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr)
 {
        struct nft_lookup *priv = nft_expr_priv(expr);
 
-       nf_tables_unbind_set(NULL, priv->set, &priv->binding);
+       nf_tables_unbind_set(ctx, priv->set, &priv->binding);
 }
 
 static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
index d3b1ffe26181b22538ab29fefadd7a41d96b76f1..a0195d28bcfc2e5b90025ac9ceb14499d1747934 100644 (file)
@@ -31,8 +31,8 @@ struct nft_nat {
        enum nft_registers      sreg_addr_max:8;
        enum nft_registers      sreg_proto_min:8;
        enum nft_registers      sreg_proto_max:8;
-       int                     family;
-       enum nf_nat_manip_type  type;
+       enum nf_nat_manip_type  type:8;
+       u8                      family;
 };
 
 static void nft_nat_eval(const struct nft_expr *expr,
@@ -88,6 +88,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                        const struct nlattr * const tb[])
 {
        struct nft_nat *priv = nft_expr_priv(expr);
+       u32 family;
        int err;
 
        if (tb[NFTA_NAT_TYPE] == NULL)
@@ -107,9 +108,12 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        if (tb[NFTA_NAT_FAMILY] == NULL)
                return -EINVAL;
 
-       priv->family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
-       if (priv->family != AF_INET && priv->family != AF_INET6)
-               return -EINVAL;
+       family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
+       if (family != AF_INET && family != AF_INET6)
+               return -EAFNOSUPPORT;
+       if (family != ctx->afi->family)
+               return -EOPNOTSUPP;
+       priv->family = family;
 
        if (tb[NFTA_NAT_REG_ADDR_MIN]) {
                priv->sreg_addr_min = ntohl(nla_get_be32(
@@ -202,13 +206,7 @@ static struct nft_expr_type nft_nat_type __read_mostly = {
 
 static int __init nft_nat_module_init(void)
 {
-       int err;
-
-       err = nft_register_expr(&nft_nat_type);
-       if (err < 0)
-               return err;
-
-       return 0;
+       return nft_register_expr(&nft_nat_type);
 }
 
 static void __exit nft_nat_module_exit(void)
index 3228d7f24eb4107ff717af4323e834af3e680797..4973cbddc446bd50a377765bec23128716ff630e 100644 (file)
@@ -146,11 +146,11 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
 
                if (par->family == NFPROTO_BRIDGE) {
                        switch (eth_hdr(skb)->h_proto) {
-                       case __constant_htons(ETH_P_IP):
+                       case htons(ETH_P_IP):
                                audit_ip4(ab, skb);
                                break;
 
-                       case __constant_htons(ETH_P_IPV6):
+                       case htons(ETH_P_IPV6):
                                audit_ip6(ab, skb);
                                break;
                        }
index c40b2695633b15960ac65eb84ebc2405b4aa2c1c..458464e7bd7a841915aeebdd448cb17bb49c01e4 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/jhash.h>
 #include <linux/slab.h>
 #include <linux/list.h>
+#include <linux/rbtree.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/skbuff.h>
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_zones.h>
 
+#define CONNLIMIT_SLOTS                32
+#define CONNLIMIT_LOCK_SLOTS   32
+#define CONNLIMIT_GC_MAX_NODES 8
+
 /* we will save the tuples of all connections we care about */
 struct xt_connlimit_conn {
        struct hlist_node               node;
@@ -38,16 +43,26 @@ struct xt_connlimit_conn {
        union nf_inet_addr              addr;
 };
 
+struct xt_connlimit_rb {
+       struct rb_node node;
+       struct hlist_head hhead; /* connections/hosts in same subnet */
+       union nf_inet_addr addr; /* search key */
+};
+
 struct xt_connlimit_data {
-       struct hlist_head       iphash[256];
-       spinlock_t              lock;
+       struct rb_root climit_root4[CONNLIMIT_SLOTS];
+       struct rb_root climit_root6[CONNLIMIT_SLOTS];
+       spinlock_t              locks[CONNLIMIT_LOCK_SLOTS];
 };
 
 static u_int32_t connlimit_rnd __read_mostly;
+static struct kmem_cache *connlimit_rb_cachep __read_mostly;
+static struct kmem_cache *connlimit_conn_cachep __read_mostly;
 
 static inline unsigned int connlimit_iphash(__be32 addr)
 {
-       return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
+       return jhash_1word((__force __u32)addr,
+                           connlimit_rnd) % CONNLIMIT_SLOTS;
 }
 
 static inline unsigned int
@@ -60,7 +75,8 @@ connlimit_iphash6(const union nf_inet_addr *addr,
        for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
                res.ip6[i] = addr->ip6[i] & mask->ip6[i];
 
-       return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6), connlimit_rnd) & 0xFF;
+       return jhash2((u32 *)res.ip6, ARRAY_SIZE(res.ip6),
+                      connlimit_rnd) % CONNLIMIT_SLOTS;
 }
 
 static inline bool already_closed(const struct nf_conn *conn)
@@ -72,13 +88,14 @@ static inline bool already_closed(const struct nf_conn *conn)
                return 0;
 }
 
-static inline unsigned int
+static int
 same_source_net(const union nf_inet_addr *addr,
                const union nf_inet_addr *mask,
                const union nf_inet_addr *u3, u_int8_t family)
 {
        if (family == NFPROTO_IPV4) {
-               return (addr->ip & mask->ip) == (u3->ip & mask->ip);
+               return ntohl(addr->ip & mask->ip) -
+                      ntohl(u3->ip & mask->ip);
        } else {
                union nf_inet_addr lh, rh;
                unsigned int i;
@@ -88,89 +105,205 @@ same_source_net(const union nf_inet_addr *addr,
                        rh.ip6[i] = u3->ip6[i] & mask->ip6[i];
                }
 
-               return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6)) == 0;
+               return memcmp(&lh.ip6, &rh.ip6, sizeof(lh.ip6));
        }
 }
 
-static int count_them(struct net *net,
-                     struct xt_connlimit_data *data,
+static bool add_hlist(struct hlist_head *head,
                      const struct nf_conntrack_tuple *tuple,
-                     const union nf_inet_addr *addr,
-                     const union nf_inet_addr *mask,
-                     u_int8_t family)
+                     const union nf_inet_addr *addr)
+{
+       struct xt_connlimit_conn *conn;
+
+       conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
+       if (conn == NULL)
+               return false;
+       conn->tuple = *tuple;
+       conn->addr = *addr;
+       hlist_add_head(&conn->node, head);
+       return true;
+}
+
+static unsigned int check_hlist(struct net *net,
+                               struct hlist_head *head,
+                               const struct nf_conntrack_tuple *tuple,
+                               bool *addit)
 {
        const struct nf_conntrack_tuple_hash *found;
        struct xt_connlimit_conn *conn;
        struct hlist_node *n;
        struct nf_conn *found_ct;
-       struct hlist_head *hash;
-       bool addit = true;
-       int matches = 0;
-
-       if (family == NFPROTO_IPV6)
-               hash = &data->iphash[connlimit_iphash6(addr, mask)];
-       else
-               hash = &data->iphash[connlimit_iphash(addr->ip & mask->ip)];
+       unsigned int length = 0;
 
+       *addit = true;
        rcu_read_lock();
 
        /* check the saved connections */
-       hlist_for_each_entry_safe(conn, n, hash, node) {
+       hlist_for_each_entry_safe(conn, n, head, node) {
                found    = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
                                                 &conn->tuple);
-               found_ct = NULL;
+               if (found == NULL) {
+                       hlist_del(&conn->node);
+                       kmem_cache_free(connlimit_conn_cachep, conn);
+                       continue;
+               }
 
-               if (found != NULL)
-                       found_ct = nf_ct_tuplehash_to_ctrack(found);
+               found_ct = nf_ct_tuplehash_to_ctrack(found);
 
-               if (found_ct != NULL &&
-                   nf_ct_tuple_equal(&conn->tuple, tuple) &&
-                   !already_closed(found_ct))
+               if (nf_ct_tuple_equal(&conn->tuple, tuple)) {
                        /*
                         * Just to be sure we have it only once in the list.
                         * We should not see tuples twice unless someone hooks
                         * this into a table without "-p tcp --syn".
                         */
-                       addit = false;
-
-               if (found == NULL) {
-                       /* this one is gone */
-                       hlist_del(&conn->node);
-                       kfree(conn);
-                       continue;
-               }
-
-               if (already_closed(found_ct)) {
+                       *addit = false;
+               } else if (already_closed(found_ct)) {
                        /*
                         * we do not care about connections which are
                         * closed already -> ditch it
                         */
                        nf_ct_put(found_ct);
                        hlist_del(&conn->node);
-                       kfree(conn);
+                       kmem_cache_free(connlimit_conn_cachep, conn);
                        continue;
                }
 
-               if (same_source_net(addr, mask, &conn->addr, family))
-                       /* same source network -> be counted! */
-                       ++matches;
                nf_ct_put(found_ct);
+               length++;
        }
 
        rcu_read_unlock();
 
-       if (addit) {
-               /* save the new connection in our list */
-               conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
-               if (conn == NULL)
-                       return -ENOMEM;
-               conn->tuple = *tuple;
-               conn->addr = *addr;
-               hlist_add_head(&conn->node, hash);
-               ++matches;
+       return length;
+}
+
+static void tree_nodes_free(struct rb_root *root,
+                           struct xt_connlimit_rb *gc_nodes[],
+                           unsigned int gc_count)
+{
+       struct xt_connlimit_rb *rbconn;
+
+       while (gc_count) {
+               rbconn = gc_nodes[--gc_count];
+               rb_erase(&rbconn->node, root);
+               kmem_cache_free(connlimit_rb_cachep, rbconn);
+       }
+}
+
+static unsigned int
+count_tree(struct net *net, struct rb_root *root,
+          const struct nf_conntrack_tuple *tuple,
+          const union nf_inet_addr *addr, const union nf_inet_addr *mask,
+          u8 family)
+{
+       struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
+       struct rb_node **rbnode, *parent;
+       struct xt_connlimit_rb *rbconn;
+       struct xt_connlimit_conn *conn;
+       unsigned int gc_count;
+       bool no_gc = false;
+
+ restart:
+       gc_count = 0;
+       parent = NULL;
+       rbnode = &(root->rb_node);
+       while (*rbnode) {
+               int diff;
+               bool addit;
+
+               rbconn = container_of(*rbnode, struct xt_connlimit_rb, node);
+
+               parent = *rbnode;
+               diff = same_source_net(addr, mask, &rbconn->addr, family);
+               if (diff < 0) {
+                       rbnode = &((*rbnode)->rb_left);
+               } else if (diff > 0) {
+                       rbnode = &((*rbnode)->rb_right);
+               } else {
+                       /* same source network -> be counted! */
+                       unsigned int count;
+                       count = check_hlist(net, &rbconn->hhead, tuple, &addit);
+
+                       tree_nodes_free(root, gc_nodes, gc_count);
+                       if (!addit)
+                               return count;
+
+                       if (!add_hlist(&rbconn->hhead, tuple, addr))
+                               return 0; /* hotdrop */
+
+                       return count + 1;
+               }
+
+               if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes))
+                       continue;
+
+               /* only used for GC on hhead, retval and 'addit' ignored */
+               check_hlist(net, &rbconn->hhead, tuple, &addit);
+               if (hlist_empty(&rbconn->hhead))
+                       gc_nodes[gc_count++] = rbconn;
+       }
+
+       if (gc_count) {
+               no_gc = true;
+               tree_nodes_free(root, gc_nodes, gc_count);
+               /* tree_nodes_free() before new allocation permits
+                * allocator to re-use newly freed objects.
+                *
+                * This is a rare event; in most cases we will find
+                * an existing node to re-use. (or gc_count is 0).
+                */
+               goto restart;
+       }
+
+       /* no match, need to insert new node */
+       rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC);
+       if (rbconn == NULL)
+               return 0;
+
+       conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC);
+       if (conn == NULL) {
+               kmem_cache_free(connlimit_rb_cachep, rbconn);
+               return 0;
+       }
+
+       conn->tuple = *tuple;
+       conn->addr = *addr;
+       rbconn->addr = *addr;
+
+       INIT_HLIST_HEAD(&rbconn->hhead);
+       hlist_add_head(&conn->node, &rbconn->hhead);
+
+       rb_link_node(&rbconn->node, parent, rbnode);
+       rb_insert_color(&rbconn->node, root);
+       return 1;
+}
+
+static int count_them(struct net *net,
+                     struct xt_connlimit_data *data,
+                     const struct nf_conntrack_tuple *tuple,
+                     const union nf_inet_addr *addr,
+                     const union nf_inet_addr *mask,
+                     u_int8_t family)
+{
+       struct rb_root *root;
+       int count;
+       u32 hash;
+
+       if (family == NFPROTO_IPV6) {
+               hash = connlimit_iphash6(addr, mask);
+               root = &data->climit_root6[hash];
+       } else {
+               hash = connlimit_iphash(addr->ip & mask->ip);
+               root = &data->climit_root4[hash];
        }
 
-       return matches;
+       spin_lock_bh(&data->locks[hash % CONNLIMIT_LOCK_SLOTS]);
+
+       count = count_tree(net, root, tuple, addr, mask, family);
+
+       spin_unlock_bh(&data->locks[hash % CONNLIMIT_LOCK_SLOTS]);
+
+       return count;
 }
 
 static bool
@@ -183,7 +316,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
        const struct nf_conntrack_tuple *tuple_ptr = &tuple;
        enum ip_conntrack_info ctinfo;
        const struct nf_conn *ct;
-       int connections;
+       unsigned int connections;
 
        ct = nf_ct_get(skb, &ctinfo);
        if (ct != NULL)
@@ -202,12 +335,9 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
                          iph->daddr : iph->saddr;
        }
 
-       spin_lock_bh(&info->data->lock);
        connections = count_them(net, info->data, tuple_ptr, &addr,
                                 &info->mask, par->family);
-       spin_unlock_bh(&info->data->lock);
-
-       if (connections < 0)
+       if (connections == 0)
                /* kmalloc failed, drop it entirely */
                goto hotdrop;
 
@@ -247,29 +377,47 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par)
                return -ENOMEM;
        }
 
-       spin_lock_init(&info->data->lock);
-       for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i)
-               INIT_HLIST_HEAD(&info->data->iphash[i]);
+       for (i = 0; i < ARRAY_SIZE(info->data->locks); ++i)
+               spin_lock_init(&info->data->locks[i]);
+
+       for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
+               info->data->climit_root4[i] = RB_ROOT;
+       for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
+               info->data->climit_root6[i] = RB_ROOT;
 
        return 0;
 }
 
-static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
+static void destroy_tree(struct rb_root *r)
 {
-       const struct xt_connlimit_info *info = par->matchinfo;
        struct xt_connlimit_conn *conn;
+       struct xt_connlimit_rb *rbconn;
        struct hlist_node *n;
-       struct hlist_head *hash = info->data->iphash;
+       struct rb_node *node;
+
+       while ((node = rb_first(r)) != NULL) {
+               rbconn = container_of(node, struct xt_connlimit_rb, node);
+
+               rb_erase(node, r);
+
+               hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node)
+                       kmem_cache_free(connlimit_conn_cachep, conn);
+
+               kmem_cache_free(connlimit_rb_cachep, rbconn);
+       }
+}
+
+static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
+{
+       const struct xt_connlimit_info *info = par->matchinfo;
        unsigned int i;
 
        nf_ct_l3proto_module_put(par->family);
 
-       for (i = 0; i < ARRAY_SIZE(info->data->iphash); ++i) {
-               hlist_for_each_entry_safe(conn, n, &hash[i], node) {
-                       hlist_del(&conn->node);
-                       kfree(conn);
-               }
-       }
+       for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i)
+               destroy_tree(&info->data->climit_root4[i]);
+       for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i)
+               destroy_tree(&info->data->climit_root6[i]);
 
        kfree(info->data);
 }
@@ -287,12 +435,37 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
 
 static int __init connlimit_mt_init(void)
 {
-       return xt_register_match(&connlimit_mt_reg);
+       int ret;
+
+       BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS);
+       BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0);
+
+       connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn",
+                                          sizeof(struct xt_connlimit_conn),
+                                          0, 0, NULL);
+       if (!connlimit_conn_cachep)
+               return -ENOMEM;
+
+       connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb",
+                                          sizeof(struct xt_connlimit_rb),
+                                          0, 0, NULL);
+       if (!connlimit_rb_cachep) {
+               kmem_cache_destroy(connlimit_conn_cachep);
+               return -ENOMEM;
+       }
+       ret = xt_register_match(&connlimit_mt_reg);
+       if (ret != 0) {
+               kmem_cache_destroy(connlimit_conn_cachep);
+               kmem_cache_destroy(connlimit_rb_cachep);
+       }
+       return ret;
 }
 
 static void __exit connlimit_mt_exit(void)
 {
        xt_unregister_match(&connlimit_mt_reg);
+       kmem_cache_destroy(connlimit_conn_cachep);
+       kmem_cache_destroy(connlimit_rb_cachep);
 }
 
 module_init(connlimit_mt_init);
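
The xt_connlimit rework above replaces one global spinlock and a 256-bucket
hlist with CONNLIMIT_SLOTS per-family rb-trees whose hash slots share
CONNLIMIT_LOCK_SLOTS striped locks via hash % CONNLIMIT_LOCK_SLOTS; the
BUILD_BUG_ONs keep the two constants compatible.  A user-space sketch of just
the lock-striping arithmetic, using pthreads and illustrative names (SLOTS,
LOCK_SLOTS, slot_insert) that do not appear in the patch:

#include <pthread.h>
#include <stdio.h>

#define SLOTS           32      /* stand-in for CONNLIMIT_SLOTS */
#define LOCK_SLOTS      32      /* stand-in for CONNLIMIT_LOCK_SLOTS */

static int counters[SLOTS];     /* stand-in for the per-slot rb-trees */
static pthread_mutex_t locks[LOCK_SLOTS];

/* Toy hash; the patch uses jhash over the masked source address. */
static unsigned int hash_addr(unsigned int addr)
{
        return (addr * 2654435761u) % SLOTS;
}

static void slot_insert(unsigned int addr)
{
        unsigned int h = hash_addr(addr);

        /* Several hash slots may map onto one lock slot; SLOTS must be a
         * multiple of LOCK_SLOTS so each lock covers the same number of
         * slots (the BUILD_BUG_ONs enforce this in the patch). */
        pthread_mutex_lock(&locks[h % LOCK_SLOTS]);
        counters[h]++;
        pthread_mutex_unlock(&locks[h % LOCK_SLOTS]);
}

int main(void)
{
        unsigned int a;

        for (a = 0; a < LOCK_SLOTS; a++)
                pthread_mutex_init(&locks[a], NULL);
        for (a = 0; a < 1000; a++)
                slot_insert(a);
        printf("slot 0 holds %d entries\n", counters[0]);
        return 0;
}

Build with -lpthread; the point is that the number of locks stays fixed even
if the number of slots grows, bounding lock memory while spreading contention.
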
index a4c7561698c5c2fa5dd9b79717e0cc7116347e54..89d53104c6b365b12c76ff684064bc5d032656c3 100644 (file)
@@ -60,7 +60,7 @@ static bool comp_mt(const struct sk_buff *skb, struct xt_action_param *par)
        }
 
        return spi_match(compinfo->spis[0], compinfo->spis[1],
-                        ntohl(chdr->cpi << 16),
+                        ntohs(chdr->cpi),
                         !!(compinfo->invflags & XT_IPCOMP_INV_SPI));
 }
 
index 04748ab649c25bbb2d2d7cee6fbc192fb267dfbc..c2d585c4f7c5cb2c82ea2adb03723e90b61c91f1 100644 (file)
@@ -1460,7 +1460,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
        if (nlk->netlink_bind && nlk->groups[0]) {
                int i;
 
-               for (i=0; i<nlk->ngroups; i++) {
+               for (i = 0; i < nlk->ngroups; i++) {
                        if (test_bit(i, nlk->groups))
                                nlk->netlink_bind(i);
                }
@@ -2343,6 +2343,11 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
        }
 #endif
 
+       /* Record the max length of recvmsg() calls for future allocations */
+       nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+       nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+                                    16384);
+
        copied = data_skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
@@ -2549,7 +2554,7 @@ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int fla
        struct nlmsghdr *nlh;
        int size = nlmsg_msg_size(len);
 
-       nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
+       nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
        nlh->nlmsg_type = type;
        nlh->nlmsg_len = size;
        nlh->nlmsg_flags = flags;
@@ -2587,7 +2592,27 @@ static int netlink_dump(struct sock *sk)
        if (!netlink_rx_is_mmaped(sk) &&
            atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto errout_skb;
-       skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
+
+       /* NLMSG_GOODSIZE is small to avoid high order allocations being
+        * required, but it makes sense to _attempt_ a 16K allocation
+        * to reduce the number of system calls on dump operations, if the
+        * user ever provided a big enough buffer.
+        */
+       if (alloc_size < nlk->max_recvmsg_len) {
+               skb = netlink_alloc_skb(sk,
+                                       nlk->max_recvmsg_len,
+                                       nlk->portid,
+                                       GFP_KERNEL |
+                                       __GFP_NOWARN |
+                                       __GFP_NORETRY);
+               /* available room should be exact amount to avoid MSG_TRUNC */
+               if (skb)
+                       skb_reserve(skb, skb_tailroom(skb) -
+                                        nlk->max_recvmsg_len);
+       }
+       if (!skb)
+               skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+                                       GFP_KERNEL);
        if (!skb)
                goto errout_skb;
        netlink_skb_set_owner_r(skb, sk);
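
The netlink_dump() change above first attempts an allocation sized to the
largest buffer the receiver has ever passed to recvmsg() (recorded and capped
at 16384 in netlink_recvmsg()), using __GFP_NOWARN | __GFP_NORETRY so a failure
is silent and cheap, and only then falls back to the conservative alloc_size.
A user-space sketch of the same opportunistic-then-fallback pattern; try_alloc()
and pick_dump_buffer() are invented stand-ins, not kernel interfaces:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for an allocator that may refuse large requests; in the kernel
 * this is netlink_alloc_skb() with __GFP_NOWARN | __GFP_NORETRY. */
static void *try_alloc(size_t size)
{
        if (size > 8192)        /* pretend big allocations fail */
                return NULL;
        return malloc(size);
}

static void *pick_dump_buffer(size_t good_size, size_t max_recvmsg_len,
                              size_t *chosen)
{
        void *buf = NULL;

        /* Opportunistic attempt: only worthwhile if the receiver has ever
         * offered a buffer larger than the default. */
        if (good_size < max_recvmsg_len) {
                buf = try_alloc(max_recvmsg_len);
                if (buf)
                        *chosen = max_recvmsg_len;
        }
        if (!buf) {             /* fall back to the safe default size */
                buf = try_alloc(good_size);
                *chosen = good_size;
        }
        return buf;
}

int main(void)
{
        size_t chosen = 0;
        void *buf = pick_dump_buffer(4096, 16384, &chosen);

        printf("allocated %zu bytes\n", buf ? chosen : 0);
        free(buf);
        return 0;
}
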
index acbd774eeb7c5afd568d8eb9aa375ecc81949793..ed13a790b00e1684215e04c6a68f71f9491cb3c9 100644 (file)
@@ -31,6 +31,7 @@ struct netlink_sock {
        u32                     ngroups;
        unsigned long           *groups;
        unsigned long           state;
+       size_t                  max_recvmsg_len;
        wait_queue_head_t       wait;
        bool                    cb_running;
        struct netlink_callback cb;
index ca1e65f4b1331bfd164245996ed462ab61f46a4e..819b87702b7039ceed68085139d2820a2d5711c0 100644 (file)
@@ -280,9 +280,6 @@ static struct nfc_target *nfc_find_target(struct nfc_dev *dev, u32 target_idx)
 {
        int i;
 
-       if (dev->n_targets == 0)
-               return NULL;
-
        for (i = 0; i < dev->n_targets; i++) {
                if (dev->targets[i].idx == target_idx)
                        return &dev->targets[i];
@@ -546,9 +543,9 @@ error:
 
 struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx)
 {
-       struct nfc_se *se, *n;
+       struct nfc_se *se;
 
-       list_for_each_entry_safe(se, n, &dev->secure_elements, list)
+       list_for_each_entry(se, &dev->secure_elements, list)
                if (se->idx == se_idx)
                        return se;
 
@@ -655,9 +652,6 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
 {
        pr_debug("dev_name=%s gb_len=%d\n", dev_name(&dev->dev), gb_len);
 
-       if (gb_len > NFC_MAX_GT_LEN)
-               return -EINVAL;
-
        return nfc_llcp_set_remote_gb(dev, gb, gb_len);
 }
 EXPORT_SYMBOL(nfc_set_remote_general_bytes);
index 08b29b55ea63acdff30d2ba29c1cda51a52f7cb8..3759add68b1b8af4b4eae50af6eb4a3cd98fd345 100644 (file)
@@ -72,6 +72,12 @@ void digital_poll_next_tech(struct nfc_digital_dev *ddev);
 
 int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
 int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+int digital_in_iso_dep_pull_sod(struct nfc_digital_dev *ddev,
+                               struct sk_buff *skb);
+int digital_in_iso_dep_push_sod(struct nfc_digital_dev *ddev,
+                               struct sk_buff *skb);
 
 int digital_target_found(struct nfc_digital_dev *ddev,
                         struct nfc_target *target, u8 protocol);
index c129d1571ca6c55281807781aeb803d6f9f9415b..e01e15dbf1abe1541aeb2643cf5521e8e1419e75 100644 (file)
@@ -25,6 +25,8 @@
 #define DIGITAL_PROTO_NFCF_RF_TECH \
        (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
 
+#define DIGITAL_PROTO_ISO15693_RF_TECH NFC_PROTO_ISO15693_MASK
+
 struct digital_cmd {
        struct list_head queue;
 
@@ -331,6 +333,18 @@ int digital_target_found(struct nfc_digital_dev *ddev,
                }
                break;
 
+       case NFC_PROTO_ISO15693:
+               framing = NFC_DIGITAL_FRAMING_ISO15693_T5T;
+               check_crc = digital_skb_check_crc_b;
+               add_crc = digital_skb_add_crc_b;
+               break;
+
+       case NFC_PROTO_ISO14443:
+               framing = NFC_DIGITAL_FRAMING_NFCA_T4T;
+               check_crc = digital_skb_check_crc_a;
+               add_crc = digital_skb_add_crc_a;
+               break;
+
        default:
                pr_err("Invalid protocol %d\n", protocol);
                return -EINVAL;
@@ -461,7 +475,7 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
                digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
                                      digital_in_send_sens_req);
 
-       if (im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
+       if (matching_im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
                digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
                                      digital_in_send_sensf_req);
 
@@ -469,7 +483,11 @@ static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
                                      digital_in_send_sensf_req);
        }
 
-       if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+       if (matching_im_protocols & DIGITAL_PROTO_ISO15693_RF_TECH)
+               digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_ISO15693,
+                                     digital_in_send_iso15693_inv_req);
+
+       if (matching_tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
                if (ddev->ops->tg_listen_mdaa) {
                        digital_add_poll_tech(ddev, 0,
                                              digital_tg_listen_mdaa);
@@ -607,20 +625,30 @@ static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
 
        if (IS_ERR(resp)) {
                rc = PTR_ERR(resp);
+               resp = NULL;
                goto done;
        }
 
-       if (ddev->curr_protocol == NFC_PROTO_MIFARE)
+       if (ddev->curr_protocol == NFC_PROTO_MIFARE) {
                rc = digital_in_recv_mifare_res(resp);
-       else
-               rc = ddev->skb_check_crc(resp);
+               /* crc check is done in digital_in_recv_mifare_res() */
+               goto done;
+       }
 
+       if (ddev->curr_protocol == NFC_PROTO_ISO14443) {
+               rc = digital_in_iso_dep_pull_sod(ddev, resp);
+               if (rc)
+                       goto done;
+       }
+
+       rc = ddev->skb_check_crc(resp);
+
+done:
        if (rc) {
                kfree_skb(resp);
                resp = NULL;
        }
 
-done:
        data_exch->cb(data_exch->cb_context, resp, rc);
 
        kfree(data_exch);
@@ -632,6 +660,7 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
 {
        struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
        struct digital_data_exch *data_exch;
+       int rc;
 
        data_exch = kzalloc(sizeof(struct digital_data_exch), GFP_KERNEL);
        if (!data_exch) {
@@ -642,13 +671,27 @@ static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
        data_exch->cb = cb;
        data_exch->cb_context = cb_context;
 
-       if (ddev->curr_protocol == NFC_PROTO_NFC_DEP)
-               return digital_in_send_dep_req(ddev, target, skb, data_exch);
+       if (ddev->curr_protocol == NFC_PROTO_NFC_DEP) {
+               rc = digital_in_send_dep_req(ddev, target, skb, data_exch);
+               goto exit;
+       }
+
+       if (ddev->curr_protocol == NFC_PROTO_ISO14443) {
+               rc = digital_in_iso_dep_push_sod(ddev, skb);
+               if (rc)
+                       goto exit;
+       }
 
        ddev->skb_add_crc(skb);
 
-       return digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete,
-                                  data_exch);
+       rc = digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete,
+                                data_exch);
+
+exit:
+       if (rc)
+               kfree(data_exch);
+
+       return rc;
 }
 
 static struct nfc_ops digital_nfc_ops = {
@@ -700,6 +743,10 @@ struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
                ddev->protocols |= NFC_PROTO_FELICA_MASK;
        if (supported_protocols & NFC_PROTO_NFC_DEP_MASK)
                ddev->protocols |= NFC_PROTO_NFC_DEP_MASK;
+       if (supported_protocols & NFC_PROTO_ISO15693_MASK)
+               ddev->protocols |= NFC_PROTO_ISO15693_MASK;
+       if (supported_protocols & NFC_PROTO_ISO14443_MASK)
+               ddev->protocols |= NFC_PROTO_ISO14443_MASK;
 
        ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
        ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
index 251c8c753ebe0f48be67beadff61ec10b8a843b1..278c3fed27e01f255374713ab2c0a545752671ee 100644 (file)
@@ -30,6 +30,7 @@
 
 #define DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res) (!((sel_res) & 0x04))
 #define DIGITAL_SEL_RES_IS_T2T(sel_res) (!((sel_res) & 0x60))
+#define DIGITAL_SEL_RES_IS_T4T(sel_res) ((sel_res) & 0x20)
 #define DIGITAL_SEL_RES_IS_NFC_DEP(sel_res) ((sel_res) & 0x40)
 
 #define DIGITAL_SENS_RES_IS_T1T(sens_res) (((sens_res) & 0x0C00) == 0x0C00)
 #define DIGITAL_SENSF_REQ_RC_SC   1
 #define DIGITAL_SENSF_REQ_RC_AP   2
 
+#define DIGITAL_CMD_ISO15693_INVENTORY_REQ     0x01
+
+#define DIGITAL_ISO15693_REQ_FLAG_DATA_RATE    BIT(1)
+#define DIGITAL_ISO15693_REQ_FLAG_INVENTORY    BIT(2)
+#define DIGITAL_ISO15693_REQ_FLAG_NB_SLOTS     BIT(5)
+#define DIGITAL_ISO15693_RES_FLAG_ERROR                BIT(0)
+#define DIGITAL_ISO15693_RES_IS_VALID(flags) \
+       (!((flags) & DIGITAL_ISO15693_RES_FLAG_ERROR))
+
+#define DIGITAL_ISO_DEP_I_PCB   0x02
+#define DIGITAL_ISO_DEP_PNI(pni) ((pni) & 0x01)
+
+#define DIGITAL_ISO_DEP_PCB_TYPE(pcb) ((pcb) & 0xC0)
+
+#define DIGITAL_ISO_DEP_I_BLOCK 0x00
+
+#define DIGITAL_ISO_DEP_BLOCK_HAS_DID(pcb) ((pcb) & 0x08)
+
+static const u8 digital_ats_fsc[] = {
+        16,  24,  32,  40,  48,  64,  96, 128,
+};
+
+#define DIGITAL_ATS_FSCI(t0) ((t0) & 0x0F)
+#define DIGITAL_ATS_MAX_FSC  256
+
+#define DIGITAL_RATS_BYTE1 0xE0
+#define DIGITAL_RATS_PARAM 0x80
+
 struct digital_sdd_res {
        u8 nfcid1[4];
        u8 bcc;
@@ -82,9 +111,127 @@ struct digital_sensf_res {
        u8 rd[2];
 } __packed;
 
+struct digital_iso15693_inv_req {
+       u8 flags;
+       u8 cmd;
+       u8 mask_len;
+       u64 mask;
+} __packed;
+
+struct digital_iso15693_inv_res {
+       u8 flags;
+       u8 dsfid;
+       u64 uid;
+} __packed;
+
 static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
                                   struct nfc_target *target);
 
+int digital_in_iso_dep_pull_sod(struct nfc_digital_dev *ddev,
+                               struct sk_buff *skb)
+{
+       u8 pcb;
+       u8 block_type;
+
+       if (skb->len < 1)
+               return -EIO;
+
+       pcb = *skb->data;
+       block_type = DIGITAL_ISO_DEP_PCB_TYPE(pcb);
+
+       /* No support for R-blocks or S-blocks */
+       if (block_type != DIGITAL_ISO_DEP_I_BLOCK) {
+               pr_err("ISO_DEP R-block and S-block not supported\n");
+               return -EIO;
+       }
+
+       if (DIGITAL_ISO_DEP_BLOCK_HAS_DID(pcb)) {
+               pr_err("DID field in ISO_DEP PCB not supported\n");
+               return -EIO;
+       }
+
+       skb_pull(skb, 1);
+
+       return 0;
+}
+
+int digital_in_iso_dep_push_sod(struct nfc_digital_dev *ddev,
+                               struct sk_buff *skb)
+{
+       /*
+        * Chaining not supported so skb->len + 1 PCB byte + 2 CRC bytes must
+        * not be greater than remote FSC
+        */
+       if (skb->len + 3 > ddev->target_fsc)
+               return -EIO;
+
+       skb_push(skb, 1);
+
+       *skb->data = DIGITAL_ISO_DEP_I_PCB | ddev->curr_nfc_dep_pni;
+
+       ddev->curr_nfc_dep_pni =
+               DIGITAL_ISO_DEP_PNI(ddev->curr_nfc_dep_pni + 1);
+
+       return 0;
+}
+
+static void digital_in_recv_ats(struct nfc_digital_dev *ddev, void *arg,
+                               struct sk_buff *resp)
+{
+       struct nfc_target *target = arg;
+       u8 fsdi;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (resp->len < 2) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       fsdi = DIGITAL_ATS_FSCI(resp->data[1]);
+       if (fsdi >= 8)
+               ddev->target_fsc = DIGITAL_ATS_MAX_FSC;
+       else
+               ddev->target_fsc = digital_ats_fsc[fsdi];
+
+       ddev->curr_nfc_dep_pni = 0;
+
+       rc = digital_target_found(ddev, target, NFC_PROTO_ISO14443);
+
+exit:
+       dev_kfree_skb(resp);
+       kfree(target);
+
+       if (rc)
+               digital_poll_next_tech(ddev);
+}
+
+static int digital_in_send_rats(struct nfc_digital_dev *ddev,
+                               struct nfc_target *target)
+{
+       int rc;
+       struct sk_buff *skb;
+
+       skb = digital_skb_alloc(ddev, 2);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, 1) = DIGITAL_RATS_BYTE1;
+       *skb_put(skb, 1) = DIGITAL_RATS_PARAM;
+
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_ats,
+                                target);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
 static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
                                    struct sk_buff *resp)
 {
@@ -122,8 +269,19 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
                goto exit_free_skb;
        }
 
+       target->sel_res = sel_res;
+
        if (DIGITAL_SEL_RES_IS_T2T(sel_res)) {
                nfc_proto = NFC_PROTO_MIFARE;
+       } else if (DIGITAL_SEL_RES_IS_T4T(sel_res)) {
+               rc = digital_in_send_rats(ddev, target);
+               if (rc)
+                       goto exit;
+               /*
+                * Skip target_found and don't free it for now. This will be
+                * done when receiving the ATS
+                */
+               goto exit_free_skb;
        } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) {
                nfc_proto = NFC_PROTO_NFC_DEP;
        } else {
@@ -131,8 +289,6 @@ static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
                goto exit;
        }
 
-       target->sel_res = sel_res;
-
        rc = digital_target_found(ddev, target, nfc_proto);
 
 exit:
@@ -473,6 +629,93 @@ int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech)
        return rc;
 }
 
+static void digital_in_recv_iso15693_inv_res(struct nfc_digital_dev *ddev,
+               void *arg, struct sk_buff *resp)
+{
+       struct digital_iso15693_inv_res *res;
+       struct nfc_target *target = NULL;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto out_free_skb;
+       }
+
+       if (resp->len != sizeof(*res)) {
+               rc = -EIO;
+               goto out_free_skb;
+       }
+
+       res = (struct digital_iso15693_inv_res *)resp->data;
+
+       if (!DIGITAL_ISO15693_RES_IS_VALID(res->flags)) {
+               PROTOCOL_ERR("ISO15693 - 10.3.1");
+               rc = -EINVAL;
+               goto out_free_skb;
+       }
+
+       target = kzalloc(sizeof(*target), GFP_KERNEL);
+       if (!target) {
+               rc = -ENOMEM;
+               goto out_free_skb;
+       }
+
+       target->is_iso15693 = 1;
+       target->iso15693_dsfid = res->dsfid;
+       memcpy(target->iso15693_uid, &res->uid, sizeof(target->iso15693_uid));
+
+       rc = digital_target_found(ddev, target, NFC_PROTO_ISO15693);
+
+       kfree(target);
+
+out_free_skb:
+       dev_kfree_skb(resp);
+
+       if (rc)
+               digital_poll_next_tech(ddev);
+}
+
+int digital_in_send_iso15693_inv_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       struct digital_iso15693_inv_req *req;
+       struct sk_buff *skb;
+       int rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+                                    NFC_DIGITAL_RF_TECH_ISO15693);
+       if (rc)
+               return rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_ISO15693_INVENTORY);
+       if (rc)
+               return rc;
+
+       skb = digital_skb_alloc(ddev, sizeof(*req));
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(*req) - sizeof(req->mask)); /* No mask */
+       req = (struct digital_iso15693_inv_req *)skb->data;
+
+       /* Single sub-carrier, high data rate, no AFI, single slot
+        * Inventory command
+        */
+       req->flags = DIGITAL_ISO15693_REQ_FLAG_DATA_RATE |
+                    DIGITAL_ISO15693_REQ_FLAG_INVENTORY |
+                    DIGITAL_ISO15693_REQ_FLAG_NB_SLOTS;
+       req->cmd = DIGITAL_CMD_ISO15693_INVENTORY_REQ;
+       req->mask_len = 0;
+
+       rc = digital_in_send_cmd(ddev, skb, 30,
+                                digital_in_recv_iso15693_inv_res, NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
 static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev)
 {
        struct sk_buff *skb;
index a07d2b81848781af7ace0becc7fa233726f34e8f..1b90c05318521c76703db7ca86607decf006cbe8 100644 (file)
 
 #include "llc.h"
 
-static struct list_head llc_engines;
+static LIST_HEAD(llc_engines);
 
 int nfc_llc_init(void)
 {
        int r;
 
-       INIT_LIST_HEAD(&llc_engines);
-
        r = nfc_llc_nop_register();
        if (r)
                goto exit;
index 6184bd1fba3a05c92fcefb45782d07ffbabac8ff..b486f12ae2433f175cdcf62af7284c812c9ea29e 100644 (file)
@@ -27,7 +27,7 @@
 
 static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
 
-static struct list_head llcp_devices;
+static LIST_HEAD(llcp_devices);
 
 static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb);
 
@@ -293,9 +293,9 @@ static void nfc_llcp_sdreq_timer(unsigned long data)
 
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
 {
-       struct nfc_llcp_local *local, *n;
+       struct nfc_llcp_local *local;
 
-       list_for_each_entry_safe(local, n, &llcp_devices, list)
+       list_for_each_entry(local, &llcp_devices, list)
                if (local->dev == dev)
                        return local;
 
@@ -609,14 +609,16 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
 
 int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
 {
-       struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
+       struct nfc_llcp_local *local;
+
+       if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN)
+               return -EINVAL;
 
+       local = nfc_llcp_find_local(dev);
        if (local == NULL) {
                pr_err("No LLCP device\n");
                return -ENODEV;
        }
-       if (gb_len < 3)
-               return -EINVAL;
 
        memset(local->remote_gb, 0, NFC_MAX_GT_LEN);
        memcpy(local->remote_gb, gb, gb_len);
@@ -1622,8 +1624,6 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
 
 int __init nfc_llcp_init(void)
 {
-       INIT_LIST_HEAD(&llcp_devices);
-
        return nfc_llcp_sock_init();
 }
 
index 56db888b1cd56785e23f0e0272d4ae3af87e32e6..6c34ac978501705a0c743289b34b2f62750bc441 100644 (file)
@@ -74,7 +74,7 @@ static int __nci_request(struct nci_dev *ndev,
 
        ndev->req_status = NCI_REQ_PEND;
 
-       init_completion(&ndev->req_completion);
+       reinit_completion(&ndev->req_completion);
        req(ndev, opt);
        completion_rc =
                wait_for_completion_interruptible_timeout(&ndev->req_completion,
@@ -709,6 +709,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
        ndev->ops = ops;
        ndev->tx_headroom = tx_headroom;
        ndev->tx_tailroom = tx_tailroom;
+       init_completion(&ndev->req_completion);
 
        ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
                                            supported_protocols,
index f1d426f10cceac10b0befc9f1c98caa6ad5b7793..ec250e77763a1d1e23f0560c151972ce274f82bc 100644 (file)
@@ -105,7 +105,7 @@ int nci_spi_send(struct nci_spi *nspi,
        if (ret != 0 || nspi->acknowledge_mode == NCI_SPI_CRC_DISABLED)
                goto done;
 
-       init_completion(&nspi->req_completion);
+       reinit_completion(&nspi->req_completion);
        completion_rc = wait_for_completion_interruptible_timeout(
                                                        &nspi->req_completion,
                                                        NCI_SPI_SEND_TIMEOUT);
@@ -145,6 +145,7 @@ struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
 
        nspi->spi = spi;
        nspi->ndev = ndev;
+       init_completion(&nspi->req_completion);
 
        return nspi;
 }
index ebbf6fb88b3596708597331ae630be2a7dcade6d..43cb1c17e267d0f614029bad9d3eebcf7b6f5e4c 100644 (file)
@@ -94,6 +94,14 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
                    target->sensf_res))
                goto nla_put_failure;
 
+       if (target->is_iso15693) {
+               if (nla_put_u8(msg, NFC_ATTR_TARGET_ISO15693_DSFID,
+                              target->iso15693_dsfid) ||
+                   nla_put(msg, NFC_ATTR_TARGET_ISO15693_UID,
+                           sizeof(target->iso15693_uid), target->iso15693_uid))
+                       goto nla_put_failure;
+       }
+
        return genlmsg_end(msg, hdr);
 
 nla_put_failure:
index 270b77dfac304afea375d4a5999b88cca863da90..a3276e3c4feb065278b5195652378157619ff94c 100644 (file)
@@ -256,10 +256,10 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 
 out:
        /* Update datapath statistics. */
-       u64_stats_update_begin(&stats->sync);
+       u64_stats_update_begin(&stats->syncp);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
-       u64_stats_update_end(&stats->sync);
+       u64_stats_update_end(&stats->syncp);
 }
 
 static struct genl_family dp_packet_genl_family = {
@@ -295,9 +295,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 err:
        stats = this_cpu_ptr(dp->stats_percpu);
 
-       u64_stats_update_begin(&stats->sync);
+       u64_stats_update_begin(&stats->syncp);
        stats->n_lost++;
-       u64_stats_update_end(&stats->sync);
+       u64_stats_update_end(&stats->syncp);
 
        return err;
 }
@@ -610,9 +610,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
-               } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
 
                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
@@ -1219,18 +1219,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        if (err)
                goto err_free_dp;
 
-       dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+       dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }
 
-       for_each_possible_cpu(i) {
-               struct dp_stats_percpu *dpath_stats;
-               dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
-               u64_stats_init(&dpath_stats->sync);
-       }
-
        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
index 6be9fbb5e9cbd57e11cce9ebac259d896db5a79c..05317380fc03a03af708716f162738727cab5fff 100644 (file)
@@ -55,7 +55,7 @@ struct dp_stats_percpu {
        u64 n_missed;
        u64 n_lost;
        u64 n_mask_hit;
-       struct u64_stats_sync sync;
+       struct u64_stats_sync syncp;
 };
 
 /**
index 208dd9a26dd16d003a0f49d4288f8704cbe15c8e..42c0f4a0b78c4c3033ab77d12c376590c228459d 100644 (file)
@@ -121,7 +121,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 {
        struct vport *vport;
        size_t alloc_size;
-       int i;
 
        alloc_size = sizeof(struct vport);
        if (priv_size) {
@@ -139,19 +138,12 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
        vport->ops = ops;
        INIT_HLIST_NODE(&vport->dp_hash_node);
 
-       vport->percpu_stats = alloc_percpu(struct pcpu_sw_netstats);
+       vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!vport->percpu_stats) {
                kfree(vport);
                return ERR_PTR(-ENOMEM);
        }
 
-       for_each_possible_cpu(i) {
-               struct pcpu_sw_netstats *vport_stats;
-               vport_stats = per_cpu_ptr(vport->percpu_stats, i);
-               u64_stats_init(&vport_stats->syncp);
-       }
-
-
        spin_lock_init(&vport->stats_lock);
 
        return vport;
@@ -285,9 +277,9 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
                percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
+                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
-               } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
+               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
 
                stats->rx_bytes         += local_stats.rx_bytes;
                stats->rx_packets       += local_stats.rx_packets;
index 48a6a93db29602c032fe254d4a1d41cbc7b3f051..01039d2b16955ad459c4e14362b940ef794beed1 100644 (file)
@@ -243,40 +243,40 @@ static int packet_direct_xmit(struct sk_buff *skb)
        const struct net_device_ops *ops = dev->netdev_ops;
        netdev_features_t features;
        struct netdev_queue *txq;
+       int ret = NETDEV_TX_BUSY;
        u16 queue_map;
-       int ret;
 
        if (unlikely(!netif_running(dev) ||
-                    !netif_carrier_ok(dev))) {
-               kfree_skb(skb);
-               return NET_XMIT_DROP;
-       }
+                    !netif_carrier_ok(dev)))
+               goto drop;
 
        features = netif_skb_features(skb);
        if (skb_needs_linearize(skb, features) &&
-           __skb_linearize(skb)) {
-               kfree_skb(skb);
-               return NET_XMIT_DROP;
-       }
+           __skb_linearize(skb))
+               goto drop;
 
        queue_map = skb_get_queue_mapping(skb);
        txq = netdev_get_tx_queue(dev, queue_map);
 
-       __netif_tx_lock_bh(txq);
-       if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
-               ret = NETDEV_TX_BUSY;
-               kfree_skb(skb);
-               goto out;
+       local_bh_disable();
+
+       HARD_TX_LOCK(dev, txq, smp_processor_id());
+       if (!netif_xmit_frozen_or_stopped(txq)) {
+               ret = ops->ndo_start_xmit(skb, dev);
+               if (ret == NETDEV_TX_OK)
+                       txq_trans_update(txq);
        }
+       HARD_TX_UNLOCK(dev, txq);
 
-       ret = ops->ndo_start_xmit(skb, dev);
-       if (likely(dev_xmit_complete(ret)))
-               txq_trans_update(txq);
-       else
+       local_bh_enable();
+
+       if (!dev_xmit_complete(ret))
                kfree_skb(skb);
-out:
-       __netif_tx_unlock_bh(txq);
+
        return ret;
+drop:
+       kfree_skb(skb);
+       return NET_XMIT_DROP;
 }
 
 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
@@ -1277,7 +1277,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
 {
-       return reciprocal_scale(skb->rxhash, num);
+       return reciprocal_scale(skb_get_hash(skb), num);
 }
 
 static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1362,7 +1362,6 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
                        if (!skb)
                                return 0;
                }
-               skb_get_hash(skb);
                idx = fanout_demux_hash(f, skb, num);
                break;
        case PACKET_FANOUT_LB:
@@ -2257,8 +2256,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_put;
 
-       reserve = dev->hard_header_len;
-
+       reserve = dev->hard_header_len + VLAN_HLEN;
        size_max = po->tx_ring.frame_size
                - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
 
@@ -2285,8 +2283,19 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                        goto out_status;
 
                tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
-                               addr, hlen);
+                                         addr, hlen);
+               if (tp_len > dev->mtu + dev->hard_header_len) {
+                       struct ethhdr *ehdr;
+                       /* Earlier code assumed this would be a VLAN pkt,
+                        * double-check this now that we have the actual
+                        * packet in hand.
+                        */
 
+                       skb_reset_mac_header(skb);
+                       ehdr = eth_hdr(skb);
+                       if (ehdr->h_proto != htons(ETH_P_8021Q))
+                               tp_len = -EMSGSIZE;
+               }
                if (unlikely(tp_len < 0)) {
                        if (po->tp_loss) {
                                __packet_set_status(po, ph,
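
A distilled, user-space restatement of the length check added to tpacket_snd()
above: frames larger than mtu + hard_header_len are only tolerated when they
really carry an 802.1Q tag.  check_frame_len() is an invented name, and for
brevity it compares h_proto in host byte order, whereas the patch checks the
on-wire header against htons(ETH_P_8021Q):

#include <stdio.h>
#include <errno.h>

#define ETH_P_8021Q     0x8100

static int check_frame_len(int tp_len, int mtu, int hard_header_len,
                           unsigned int h_proto)
{
        /* Oversized and not VLAN-tagged -> reject, as the new hunk does. */
        if (tp_len > mtu + hard_header_len && h_proto != ETH_P_8021Q)
                return -EMSGSIZE;
        return tp_len;
}

int main(void)
{
        /* 1500-byte MTU, 14-byte Ethernet header, 1518-byte frame */
        printf("plain:  %d\n", check_frame_len(1518, 1500, 14, 0x0800));
        printf("tagged: %d\n", check_frame_len(1518, 1500, 14, ETH_P_8021Q));
        return 0;
}
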
index 7826d46baa7038366872ec412d97ecd94eb1d13f..589935661d667d81b2f6159eb69c237f95329a63 100644 (file)
@@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr)
        ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
        /* due to this, we will claim to support IB devices unless we
           check node_type. */
-       if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
+       if (ret || !cm_id->device ||
+           cm_id->device->node_type != RDMA_NODE_RNIC)
                ret = -EADDRNOTAVAIL;
 
        rdsdebug("addr %pI4 ret %d node type %d\n",
index ed7e0b4e7f90730b7530bbcf20106deaeb3d48bd..b3b16c070a7fae95c6bc5a5bdd087ce436476c5a 100644 (file)
@@ -789,7 +789,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
        if (!rfkill->ops->poll)
                return;
 
-       schedule_work(&rfkill->poll_work.work);
+       queue_delayed_work(system_power_efficient_wq,
+                          &rfkill->poll_work, 0);
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
@@ -894,7 +895,8 @@ static void rfkill_poll(struct work_struct *work)
         */
        rfkill->ops->poll(rfkill, rfkill->data);
 
-       schedule_delayed_work(&rfkill->poll_work,
+       queue_delayed_work(system_power_efficient_wq,
+               &rfkill->poll_work,
                round_jiffies_relative(POLL_INTERVAL));
 }
 
@@ -958,7 +960,8 @@ int __must_check rfkill_register(struct rfkill *rfkill)
        INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
 
        if (rfkill->ops->poll)
-               schedule_delayed_work(&rfkill->poll_work,
+               queue_delayed_work(system_power_efficient_wq,
+                       &rfkill->poll_work,
                        round_jiffies_relative(POLL_INTERVAL));
 
        if (!rfkill->persistent || rfkill_epo_lock_active) {
index d1c3429b69eddd49face1e8cc4967ad8afa07c7c..ec126f91276b349c7fe8e181ad4ce7aec24f0c67 100644 (file)
@@ -20,9 +20,8 @@ af-rxrpc-y := \
        ar-skbuff.o \
        ar-transport.o
 
-ifeq ($(CONFIG_PROC_FS),y)
-af-rxrpc-y += ar-proc.o
-endif
+af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
+af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
 
 obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
 
index e61aa6001c65fb5c7521b02cb693a409f62fe1fe..7b1670489638e565c7ccea71961f8532986c94cb 100644 (file)
@@ -838,6 +838,12 @@ static int __init af_rxrpc_init(void)
                goto error_key_type_s;
        }
 
+       ret = rxrpc_sysctl_init();
+       if (ret < 0) {
+               printk(KERN_CRIT "RxRPC: Cannot register sysctls\n");
+               goto error_sysctls;
+       }
+
 #ifdef CONFIG_PROC_FS
        proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
        proc_create("rxrpc_conns", 0, init_net.proc_net,
@@ -845,6 +851,8 @@ static int __init af_rxrpc_init(void)
 #endif
        return 0;
 
+error_sysctls:
+       unregister_key_type(&key_type_rxrpc_s);
 error_key_type_s:
        unregister_key_type(&key_type_rxrpc);
 error_key_type:
@@ -865,6 +873,7 @@ error_call_jar:
 static void __exit af_rxrpc_exit(void)
 {
        _enter("");
+       rxrpc_sysctl_exit();
        unregister_key_type(&key_type_rxrpc_s);
        unregister_key_type(&key_type_rxrpc);
        sock_unregister(PF_RXRPC);
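
The Makefile and af_rxrpc.c hunks above wire in a new sysctl.c through
rxrpc_sysctl_init()/rxrpc_sysctl_exit(), but that file is not part of this
excerpt.  A purely hypothetical sketch of what such a table could look like
for two of the tunables added in ar-ack.c below, assuming the stock
register_net_sysctl()/proc_dointvec interfaces; the real file may use
different procnames, jiffies/ms conversion handlers and range clamps:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

/* Defined in ar-ack.c by this series. */
extern unsigned rxrpc_requested_ack_delay;
extern unsigned rxrpc_rx_window_size;

static struct ctl_table_header *rxrpc_sysctl_reg_table;

static struct ctl_table rxrpc_sysctl_table[] = {
        {
                .procname       = "req_ack_delay",      /* name is a guess */
                .data           = &rxrpc_requested_ack_delay,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "rx_window_size",     /* name is a guess */
                .data           = &rxrpc_rx_window_size,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        { }
};

int __init rxrpc_sysctl_init(void)
{
        rxrpc_sysctl_reg_table = register_net_sysctl(&init_net, "net/rxrpc",
                                                     rxrpc_sysctl_table);
        return rxrpc_sysctl_reg_table ? 0 : -ENOMEM;
}

void rxrpc_sysctl_exit(void)
{
        if (rxrpc_sysctl_reg_table)
                unregister_net_sysctl_table(rxrpc_sysctl_reg_table);
}
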
index cd97a0ce48d80f73efbf2476e272f737c787bad9..c6be17a959a6e4981ecfff38af85805df6d8b26e 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-static unsigned int rxrpc_ack_defer = 1;
+/*
+ * How long to wait before scheduling ACK generation after seeing a
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ */
+unsigned rxrpc_requested_ack_delay = 1;
+
+/*
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ *
+ * We use this when we've received new data packets.  If those packets aren't
+ * all consumed within this time we will send a DELAY ACK if an ACK was not
+ * requested to let the sender know it doesn't need to resend.
+ */
+unsigned rxrpc_soft_ack_delay = 1 * HZ;
+
+/*
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ *
+ * We use this when we've consumed some previously soft-ACK'd packets and
+ * further packets aren't immediately received, to decide when to send an IDLE
+ * ACK to let the other end know that it can free up its Tx buffer space.
+ */
+unsigned rxrpc_idle_ack_delay = 0.5 * HZ;
+
+/*
+ * Receive window size in packets.  This indicates the maximum number of
+ * unconsumed received packets we're willing to retain in memory.  Once this
+ * limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
+ * packets.
+ */
+unsigned rxrpc_rx_window_size = 32;
+
+/*
+ * Maximum Rx MTU size.  This indicates to the sender the size of jumbo packets
+ * (made by gluing normal packets together) that we're willing to handle.
+ */
+unsigned rxrpc_rx_mtu = 5692;
+
+/*
+ * The maximum number of fragments in a received jumbo packet that we tell the
+ * sender that we're willing to handle.
+ */
+unsigned rxrpc_rx_jumbo_max = 4;
 
 static const char *rxrpc_acks(u8 reason)
 {
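The tunables introduced above are all denominated in jiffies and are consumed by adding them to the current jiffies count when arming a timer, in the same way the later hunks arm call->lifetimer with jiffies + rxrpc_max_call_lifetime.  A rough sketch of that pattern under the same assumption (example_arm_ack_timer is an illustrative helper, not part of the patch):

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Arm a timer a tunable number of jiffies from now, e.g. with
 * delay = rxrpc_soft_ack_delay or rxrpc_idle_ack_delay.  The timer
 * itself is assumed to have been set up with setup_timer() elsewhere.
 */
static void example_arm_ack_timer(struct timer_list *timer, unsigned long delay)
{
	mod_timer(timer, jiffies + delay);
}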
@@ -82,24 +124,23 @@ void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
        switch (ack_reason) {
        case RXRPC_ACK_DELAY:
                _debug("run delay timer");
-               call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
-               add_timer(&call->ack_timer);
-               return;
+               expiry = rxrpc_soft_ack_delay;
+               goto run_timer;
 
        case RXRPC_ACK_IDLE:
                if (!immediate) {
                        _debug("run defer timer");
-                       expiry = 1;
+                       expiry = rxrpc_idle_ack_delay;
                        goto run_timer;
                }
                goto cancel_timer;
 
        case RXRPC_ACK_REQUESTED:
-               if (!rxrpc_ack_defer)
+               expiry = rxrpc_requested_ack_delay;
+               if (!expiry)
                        goto cancel_timer;
                if (!immediate || serial == cpu_to_be32(1)) {
                        _debug("run defer timer");
-                       expiry = rxrpc_ack_defer;
                        goto run_timer;
                }
 
@@ -1174,11 +1215,11 @@ send_ACK:
        mtu = call->conn->trans->peer->if_mtu;
        mtu -= call->conn->trans->peer->hdrsize;
        ackinfo.maxMTU  = htonl(mtu);
-       ackinfo.rwind   = htonl(32);
+       ackinfo.rwind   = htonl(rxrpc_rx_window_size);
 
        /* permit the peer to send us jumbo packets if it wants to */
-       ackinfo.rxMTU   = htonl(5692);
-       ackinfo.jumbo_max = htonl(4);
+       ackinfo.rxMTU   = htonl(rxrpc_rx_mtu);
+       ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
 
        hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
        _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
index a3bbb360a3f96e0ee4aa6aac2e7eea830305b564..a9e05db0f5d5900e93f87a8567e7533a1745c82a 100644 (file)
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/circ_buf.h>
+#include <linux/hashtable.h>
+#include <linux/spinlock_types.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
+/*
+ * Maximum lifetime of a call (in jiffies).
+ */
+unsigned rxrpc_max_call_lifetime = 60 * HZ;
+
+/*
+ * Time till dead call expires after last use (in jiffies).
+ */
+unsigned rxrpc_dead_call_expiry = 2 * HZ;
+
 const char *const rxrpc_call_states[] = {
        [RXRPC_CALL_CLIENT_SEND_REQUEST]        = "ClSndReq",
        [RXRPC_CALL_CLIENT_AWAIT_REPLY]         = "ClAwtRpl",
@@ -38,8 +50,6 @@ const char *const rxrpc_call_states[] = {
 struct kmem_cache *rxrpc_call_jar;
 LIST_HEAD(rxrpc_calls);
 DEFINE_RWLOCK(rxrpc_call_lock);
-static unsigned int rxrpc_call_max_lifetime = 60;
-static unsigned int rxrpc_dead_call_timeout = 2;
 
 static void rxrpc_destroy_call(struct work_struct *work);
 static void rxrpc_call_life_expired(unsigned long _call);
@@ -47,6 +57,145 @@ static void rxrpc_dead_call_expired(unsigned long _call);
 static void rxrpc_ack_time_expired(unsigned long _call);
 static void rxrpc_resend_time_expired(unsigned long _call);
 
+static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
+static DEFINE_HASHTABLE(rxrpc_call_hash, 10);
+
+/*
+ * Hash function for rxrpc_call_hash
+ */
+static unsigned long rxrpc_call_hashfunc(
+       u8              clientflag,
+       __be32          cid,
+       __be32          call_id,
+       __be32          epoch,
+       __be16          service_id,
+       sa_family_t     proto,
+       void            *localptr,
+       unsigned int    addr_size,
+       const u8        *peer_addr)
+{
+       const u16 *p;
+       unsigned int i;
+       unsigned long key;
+       u32 hcid = ntohl(cid);
+
+       _enter("");
+
+       key = (unsigned long)localptr;
+       /* We just want to add up the __be32 values, so forcing the
+        * cast should be okay.
+        */
+       key += (__force u32)epoch;
+       key += (__force u16)service_id;
+       key += (__force u32)call_id;
+       key += (hcid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
+       key += hcid & RXRPC_CHANNELMASK;
+       key += clientflag;
+       key += proto;
+       /* Step through the peer address in 16-bit portions for speed */
+       for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
+               key += *p;
+       _leave(" key = 0x%lx", key);
+       return key;
+}
+
+/*
+ * Add a call to the hashtable
+ */
+static void rxrpc_call_hash_add(struct rxrpc_call *call)
+{
+       unsigned long key;
+       unsigned int addr_size = 0;
+
+       _enter("");
+       switch (call->proto) {
+       case AF_INET:
+               addr_size = sizeof(call->peer_ip.ipv4_addr);
+               break;
+       case AF_INET6:
+               addr_size = sizeof(call->peer_ip.ipv6_addr);
+               break;
+       default:
+               break;
+       }
+       key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
+                                 call->call_id, call->epoch,
+                                 call->service_id, call->proto,
+                                 call->conn->trans->local, addr_size,
+                                 call->peer_ip.ipv6_addr);
+       /* Store the full key in the call */
+       call->hash_key = key;
+       spin_lock(&rxrpc_call_hash_lock);
+       hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
+       spin_unlock(&rxrpc_call_hash_lock);
+       _leave("");
+}
+
+/*
+ * Remove a call from the hashtable
+ */
+static void rxrpc_call_hash_del(struct rxrpc_call *call)
+{
+       _enter("");
+       spin_lock(&rxrpc_call_hash_lock);
+       hash_del_rcu(&call->hash_node);
+       spin_unlock(&rxrpc_call_hash_lock);
+       _leave("");
+}
+
+/*
+ * Find a call in the hashtable and return it, or NULL if it
+ * isn't there.
+ */
+struct rxrpc_call *rxrpc_find_call_hash(
+       u8              clientflag,
+       __be32          cid,
+       __be32          call_id,
+       __be32          epoch,
+       __be16          service_id,
+       void            *localptr,
+       sa_family_t     proto,
+       const u8        *peer_addr)
+{
+       unsigned long key;
+       unsigned int addr_size = 0;
+       struct rxrpc_call *call = NULL;
+       struct rxrpc_call *ret = NULL;
+
+       _enter("");
+       switch (proto) {
+       case AF_INET:
+               addr_size = sizeof(call->peer_ip.ipv4_addr);
+               break;
+       case AF_INET6:
+               addr_size = sizeof(call->peer_ip.ipv6_addr);
+               break;
+       default:
+               break;
+       }
+
+       key = rxrpc_call_hashfunc(clientflag, cid, call_id, epoch,
+                                 service_id, proto, localptr, addr_size,
+                                 peer_addr);
+       hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
+               if (call->hash_key == key &&
+                   call->call_id == call_id &&
+                   call->cid == cid &&
+                   call->in_clientflag == clientflag &&
+                   call->service_id == service_id &&
+                   call->proto == proto &&
+                   call->local == localptr &&
+                   memcmp(call->peer_ip.ipv6_addr, peer_addr,
+                             addr_size) == 0 &&
+                   call->epoch == epoch) {
+                       ret = call;
+                       break;
+               }
+       }
+       _leave(" = %p", ret);
+       return ret;
+}
+
 /*
  * allocate a new call
  */
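The call hashtable added in this file is built on the generic <linux/hashtable.h> API.  As a self-contained illustration of the same add/lookup/remove pattern (struct example_item and its key are invented for the sketch; the real code matches on the full set of call identifiers, not just the hash key):

#include <linux/hashtable.h>
#include <linux/spinlock.h>

struct example_item {
	unsigned long		key;
	struct hlist_node	node;
};

static DEFINE_HASHTABLE(example_hash, 10);	/* 2^10 buckets, like rxrpc_call_hash */
static DEFINE_SPINLOCK(example_hash_lock);

static void example_add(struct example_item *item)
{
	spin_lock(&example_hash_lock);
	hash_add_rcu(example_hash, &item->node, item->key);
	spin_unlock(&example_hash_lock);
}

/* Callers would normally hold rcu_read_lock() (or the hash lock) here. */
static struct example_item *example_find(unsigned long key)
{
	struct example_item *item;

	hash_for_each_possible_rcu(example_hash, item, node, key) {
		if (item->key == key)		/* full identity check goes here */
			return item;
	}
	return NULL;
}

static void example_del(struct example_item *item)
{
	spin_lock(&example_hash_lock);
	hash_del_rcu(&item->node);
	spin_unlock(&example_hash_lock);
}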
@@ -91,7 +240,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
        call->rx_data_expect = 1;
        call->rx_data_eaten = 0;
        call->rx_first_oos = 0;
-       call->ackr_win_top = call->rx_data_eaten + 1 + RXRPC_MAXACKS;
+       call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
        call->creation_jif = jiffies;
        return call;
 }
@@ -128,11 +277,31 @@ static struct rxrpc_call *rxrpc_alloc_client_call(
                return ERR_PTR(ret);
        }
 
+       /* Record copies of information for hashtable lookup */
+       call->proto = rx->proto;
+       call->local = trans->local;
+       switch (call->proto) {
+       case AF_INET:
+               call->peer_ip.ipv4_addr =
+                       trans->peer->srx.transport.sin.sin_addr.s_addr;
+               break;
+       case AF_INET6:
+               memcpy(call->peer_ip.ipv6_addr,
+                      trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
+                      sizeof(call->peer_ip.ipv6_addr));
+               break;
+       }
+       call->epoch = call->conn->epoch;
+       call->service_id = call->conn->service_id;
+       call->in_clientflag = call->conn->in_clientflag;
+       /* Add the new call to the hashtable */
+       rxrpc_call_hash_add(call);
+
        spin_lock(&call->conn->trans->peer->lock);
        list_add(&call->error_link, &call->conn->trans->peer->error_targets);
        spin_unlock(&call->conn->trans->peer->lock);
 
-       call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
+       call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
 
        _leave(" = %p", call);
@@ -320,9 +489,12 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
                parent = *p;
                call = rb_entry(parent, struct rxrpc_call, conn_node);
 
-               if (call_id < call->call_id)
+               /* The tree is sorted in order of the __be32 value without
+                * turning it into host order.
+                */
+               if ((__force u32)call_id < (__force u32)call->call_id)
                        p = &(*p)->rb_left;
-               else if (call_id > call->call_id)
+               else if ((__force u32)call_id > (__force u32)call->call_id)
                        p = &(*p)->rb_right;
                else
                        goto old_call;
@@ -347,9 +519,31 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock_bh(&rxrpc_call_lock);
 
+       /* Record copies of information for hashtable lookup */
+       call->proto = rx->proto;
+       call->local = conn->trans->local;
+       switch (call->proto) {
+       case AF_INET:
+               call->peer_ip.ipv4_addr =
+                       conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
+               break;
+       case AF_INET6:
+               memcpy(call->peer_ip.ipv6_addr,
+                      conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
+                      sizeof(call->peer_ip.ipv6_addr));
+               break;
+       default:
+               break;
+       }
+       call->epoch = conn->epoch;
+       call->service_id = conn->service_id;
+       call->in_clientflag = conn->in_clientflag;
+       /* Add the new call to the hashtable */
+       rxrpc_call_hash_add(call);
+
        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
 
-       call->lifetimer.expires = jiffies + rxrpc_call_max_lifetime * HZ;
+       call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
        add_timer(&call->lifetimer);
        _leave(" = %p {%d} [new]", call, call->debug_id);
        return call;
@@ -533,7 +727,7 @@ void rxrpc_release_call(struct rxrpc_call *call)
        del_timer_sync(&call->resend_timer);
        del_timer_sync(&call->ack_timer);
        del_timer_sync(&call->lifetimer);
-       call->deadspan.expires = jiffies + rxrpc_dead_call_timeout * HZ;
+       call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
        add_timer(&call->deadspan);
 
        _leave("");
@@ -665,6 +859,9 @@ static void rxrpc_cleanup_call(struct rxrpc_call *call)
                rxrpc_put_connection(call->conn);
        }
 
+       /* Remove the call from the hash */
+       rxrpc_call_hash_del(call);
+
        if (call->acks_window) {
                _debug("kill Tx window %d",
                       CIRC_CNT(call->acks_head, call->acks_tail,
index 7bf5b5b9e8b9400af1cbaecaeaf9a8c222670492..6631f4f1e39be713029c8b9b504db4ea741fb3e6 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
+/*
+ * Time till a connection expires after last use (in seconds).
+ */
+unsigned rxrpc_connection_expiry = 10 * 60;
+
 static void rxrpc_connection_reaper(struct work_struct *work);
 
 LIST_HEAD(rxrpc_connections);
 DEFINE_RWLOCK(rxrpc_connection_lock);
-static unsigned long rxrpc_connection_timeout = 10 * 60;
 static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
 
 /*
@@ -862,7 +866,7 @@ static void rxrpc_connection_reaper(struct work_struct *work)
 
                spin_lock(&conn->trans->client_lock);
                write_lock(&conn->trans->conn_lock);
-               reap_time = conn->put_time + rxrpc_connection_timeout;
+               reap_time = conn->put_time + rxrpc_connection_expiry;
 
                if (atomic_read(&conn->usage) > 0) {
                        ;
@@ -916,7 +920,7 @@ void __exit rxrpc_destroy_all_connections(void)
 {
        _enter("");
 
-       rxrpc_connection_timeout = 0;
+       rxrpc_connection_expiry = 0;
        cancel_delayed_work(&rxrpc_connection_reap);
        rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
 
index a9206087b4d7a87ced5cef6c77b69fc2bb86a11c..db57458c824c87b463ceff200be22c07df936b73 100644 (file)
@@ -83,6 +83,7 @@ void rxrpc_UDP_error_report(struct sock *sk)
 
                if (mtu == 0) {
                        /* they didn't give us a size, estimate one */
+                       mtu = peer->if_mtu;
                        if (mtu > 1500) {
                                mtu >>= 1;
                                if (mtu < 1500)
index 529572f18d1fa56b71a3154c9d822445fffef358..73742647c1354ebc76cdc44289fcc5948ca188ac 100644 (file)
@@ -25,8 +25,6 @@
 #include <net/net_namespace.h>
 #include "ar-internal.h"
 
-unsigned long rxrpc_ack_timeout = 1;
-
 const char *rxrpc_pkts[] = {
        "?00",
        "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
@@ -349,8 +347,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
         * it */
        if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
                _proto("ACK Requested on %%%u", serial);
-               rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
-                                 !(sp->hdr.flags & RXRPC_MORE_PACKETS));
+               rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
        }
 
        switch (sp->hdr.type) {
@@ -526,36 +523,38 @@ protocol_error:
  * post an incoming packet to the appropriate call/socket to deal with
  * - must get rid of the sk_buff, either by freeing it or by queuing it
  */
-static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
+static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
                                      struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp;
-       struct rxrpc_call *call;
-       struct rb_node *p;
-       __be32 call_id;
-
-       _enter("%p,%p", conn, skb);
 
-       read_lock_bh(&conn->lock);
+       _enter("%p,%p", call, skb);
 
        sp = rxrpc_skb(skb);
 
-       /* look at extant calls by channel number first */
-       call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
-       if (!call || call->call_id != sp->hdr.callNumber)
-               goto call_not_extant;
-
        _debug("extant call [%d]", call->state);
-       ASSERTCMP(call->conn, ==, conn);
 
        read_lock(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_LOCALLY_ABORTED:
-               if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+               if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) {
                        rxrpc_queue_call(call);
+                       goto free_unlock;
+               }
        case RXRPC_CALL_REMOTELY_ABORTED:
        case RXRPC_CALL_NETWORK_ERROR:
        case RXRPC_CALL_DEAD:
+               goto dead_call;
+       case RXRPC_CALL_COMPLETE:
+       case RXRPC_CALL_CLIENT_FINAL_ACK:
+               /* complete server call */
+               if (call->conn->in_clientflag)
+                       goto dead_call;
+               /* resend last packet of a completed call */
+               _debug("final ack again");
+               rxrpc_get_call(call);
+               set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+               rxrpc_queue_call(call);
                goto free_unlock;
        default:
                break;
@@ -563,7 +562,6 @@ static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
 
        read_unlock(&call->state_lock);
        rxrpc_get_call(call);
-       read_unlock_bh(&conn->lock);
 
        if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
            sp->hdr.flags & RXRPC_JUMBO_PACKET)
@@ -574,78 +572,16 @@ static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
        rxrpc_put_call(call);
        goto done;
 
-call_not_extant:
-       /* search the completed calls in case what we're dealing with is
-        * there */
-       _debug("call not extant");
-
-       call_id = sp->hdr.callNumber;
-       p = conn->calls.rb_node;
-       while (p) {
-               call = rb_entry(p, struct rxrpc_call, conn_node);
-
-               if (call_id < call->call_id)
-                       p = p->rb_left;
-               else if (call_id > call->call_id)
-                       p = p->rb_right;
-               else
-                       goto found_completed_call;
-       }
-
 dead_call:
-       /* it's a either a really old call that we no longer remember or its a
-        * new incoming call */
-       read_unlock_bh(&conn->lock);
-
-       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
-           sp->hdr.seq == cpu_to_be32(1)) {
-               _debug("incoming call");
-               skb_queue_tail(&conn->trans->local->accept_queue, skb);
-               rxrpc_queue_work(&conn->trans->local->acceptor);
-               goto done;
-       }
-
-       _debug("dead call");
-       skb->priority = RX_CALL_DEAD;
-       rxrpc_reject_packet(conn->trans->local, skb);
-       goto done;
-
-       /* resend last packet of a completed call
-        * - client calls may have been aborted or ACK'd
-        * - server calls may have been aborted
-        */
-found_completed_call:
-       _debug("completed call");
-
-       if (atomic_read(&call->usage) == 0)
-               goto dead_call;
-
-       /* synchronise any state changes */
-       read_lock(&call->state_lock);
-       ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
-                   call->state, >=, RXRPC_CALL_COMPLETE);
-
-       if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
-           call->state == RXRPC_CALL_REMOTELY_ABORTED ||
-           call->state == RXRPC_CALL_DEAD) {
-               read_unlock(&call->state_lock);
-               goto dead_call;
-       }
-
-       if (call->conn->in_clientflag) {
-               read_unlock(&call->state_lock);
-               goto dead_call; /* complete server call */
+       if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
+               skb->priority = RX_CALL_DEAD;
+               rxrpc_reject_packet(call->conn->trans->local, skb);
+               goto unlock;
        }
-
-       _debug("final ack again");
-       rxrpc_get_call(call);
-       set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
-       rxrpc_queue_call(call);
-
 free_unlock:
-       read_unlock(&call->state_lock);
-       read_unlock_bh(&conn->lock);
        rxrpc_free_skb(skb);
+unlock:
+       read_unlock(&call->state_lock);
 done:
        _leave("");
 }
@@ -664,17 +600,42 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
        rxrpc_queue_conn(conn);
 }
 
+static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
+                                              struct sk_buff *skb,
+                                              struct rxrpc_skb_priv *sp)
+{
+       struct rxrpc_peer *peer;
+       struct rxrpc_transport *trans;
+       struct rxrpc_connection *conn;
+
+       peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr,
+                               udp_hdr(skb)->source);
+       if (IS_ERR(peer))
+               goto cant_find_conn;
+
+       trans = rxrpc_find_transport(local, peer);
+       rxrpc_put_peer(peer);
+       if (!trans)
+               goto cant_find_conn;
+
+       conn = rxrpc_find_connection(trans, &sp->hdr);
+       rxrpc_put_transport(trans);
+       if (!conn)
+               goto cant_find_conn;
+
+       return conn;
+cant_find_conn:
+       return NULL;
+}
+
 /*
  * handle data received on the local endpoint
  * - may be called in interrupt context
  */
 void rxrpc_data_ready(struct sock *sk, int count)
 {
-       struct rxrpc_connection *conn;
-       struct rxrpc_transport *trans;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local;
-       struct rxrpc_peer *peer;
        struct sk_buff *skb;
        int ret;
 
@@ -749,27 +710,34 @@ void rxrpc_data_ready(struct sock *sk, int count)
            (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
                goto bad_message;
 
-       peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source);
-       if (IS_ERR(peer))
-               goto cant_route_call;
+       if (sp->hdr.callNumber == 0) {
+               /* This is a connection-level packet. These should be
+                * fairly rare, so the extra overhead of looking them up the
+                * old-fashioned way doesn't really hurt */
+               struct rxrpc_connection *conn;
 
-       trans = rxrpc_find_transport(local, peer);
-       rxrpc_put_peer(peer);
-       if (!trans)
-               goto cant_route_call;
+               conn = rxrpc_conn_from_local(local, skb, sp);
+               if (!conn)
+                       goto cant_route_call;
 
-       conn = rxrpc_find_connection(trans, &sp->hdr);
-       rxrpc_put_transport(trans);
-       if (!conn)
-               goto cant_route_call;
-
-       _debug("CONN %p {%d}", conn, conn->debug_id);
-
-       if (sp->hdr.callNumber == 0)
+               _debug("CONN %p {%d}", conn, conn->debug_id);
                rxrpc_post_packet_to_conn(conn, skb);
-       else
-               rxrpc_post_packet_to_call(conn, skb);
-       rxrpc_put_connection(conn);
+               rxrpc_put_connection(conn);
+       } else {
+               struct rxrpc_call *call;
+               u8 in_clientflag = 0;
+
+               if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+                       in_clientflag = RXRPC_CLIENT_INITIATED;
+               call = rxrpc_find_call_hash(in_clientflag, sp->hdr.cid,
+                                           sp->hdr.callNumber, sp->hdr.epoch,
+                                           sp->hdr.serviceId, local, AF_INET,
+                                           (u8 *)&ip_hdr(skb)->saddr);
+               if (call)
+                       rxrpc_post_packet_to_call(call, skb);
+               else
+                       goto cant_route_call;
+       }
        rxrpc_put_local(local);
        return;
 
@@ -790,8 +758,10 @@ cant_route_call:
                skb->priority = RX_CALL_DEAD;
        }
 
-       _debug("reject");
-       rxrpc_reject_packet(local, skb);
+       if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
+               _debug("reject type %d", sp->hdr.type);
+               rxrpc_reject_packet(local, skb);
+       }
        rxrpc_put_local(local);
        _leave(" [no call]");
        return;
index 5f43675ee1df3822ecf98e432797f9b498047b58..c831d44b0841a07233c20881a1fc516ab425041d 100644 (file)
@@ -396,9 +396,20 @@ struct rxrpc_call {
 #define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
        unsigned long           ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
 
+       struct hlist_node       hash_node;
+       unsigned long           hash_key;       /* Full hash key */
+       u8                      in_clientflag;  /* Copy of conn->in_clientflag for hashing */
+       struct rxrpc_local      *local;         /* Local endpoint. Used for hashing. */
+       sa_family_t             proto;          /* Frame protocol */
        /* the following should all be in net order */
        __be32                  cid;            /* connection ID + channel index  */
        __be32                  call_id;        /* call ID on connection  */
+       __be32                  epoch;          /* epoch of this connection */
+       __be16                  service_id;     /* service ID */
+       union {                                 /* Peer IP address for hashing */
+               __be32  ipv4_addr;
+               __u8    ipv6_addr[16];          /* Anticipates eventual IPv6 support */
+       } peer_ip;
 };
 
 /*
@@ -433,6 +444,13 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * ar-ack.c
  */
+extern unsigned rxrpc_requested_ack_delay;
+extern unsigned rxrpc_soft_ack_delay;
+extern unsigned rxrpc_idle_ack_delay;
+extern unsigned rxrpc_rx_window_size;
+extern unsigned rxrpc_rx_mtu;
+extern unsigned rxrpc_rx_jumbo_max;
+
 void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
 void rxrpc_process_call(struct work_struct *);
@@ -440,10 +458,14 @@ void rxrpc_process_call(struct work_struct *);
 /*
  * ar-call.c
  */
+extern unsigned rxrpc_max_call_lifetime;
+extern unsigned rxrpc_dead_call_expiry;
 extern struct kmem_cache *rxrpc_call_jar;
 extern struct list_head rxrpc_calls;
 extern rwlock_t rxrpc_call_lock;
 
+struct rxrpc_call *rxrpc_find_call_hash(u8,  __be32, __be32, __be32,
+                                       __be16, void *, sa_family_t, const u8 *);
 struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
                                         struct rxrpc_transport *,
                                         struct rxrpc_conn_bundle *,
@@ -460,6 +482,7 @@ void __exit rxrpc_destroy_all_calls(void);
 /*
  * ar-connection.c
  */
+extern unsigned rxrpc_connection_expiry;
 extern struct list_head rxrpc_connections;
 extern rwlock_t rxrpc_connection_lock;
 
@@ -493,7 +516,6 @@ void rxrpc_UDP_error_handler(struct work_struct *);
 /*
  * ar-input.c
  */
-extern unsigned long rxrpc_ack_timeout;
 extern const char *rxrpc_pkts[];
 
 void rxrpc_data_ready(struct sock *, int);
@@ -504,6 +526,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
  * ar-local.c
  */
 extern rwlock_t rxrpc_local_lock;
+
 struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
 void rxrpc_put_local(struct rxrpc_local *);
 void __exit rxrpc_destroy_all_locals(void);
@@ -522,7 +545,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
 /*
  * ar-output.c
  */
-extern int rxrpc_resend_timeout;
+extern unsigned rxrpc_resend_timeout;
 
 int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
 int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
@@ -572,6 +595,8 @@ void rxrpc_packet_destructor(struct sk_buff *);
 /*
  * ar-transport.c
  */
+extern unsigned rxrpc_transport_expiry;
+
 struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
                                            struct rxrpc_peer *, gfp_t);
 void rxrpc_put_transport(struct rxrpc_transport *);
@@ -579,6 +604,17 @@ void __exit rxrpc_destroy_all_transports(void);
 struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
                                             struct rxrpc_peer *);
 
+/*
+ * sysctl.c
+ */
+#ifdef CONFIG_SYSCTL
+extern int __init rxrpc_sysctl_init(void);
+extern void rxrpc_sysctl_exit(void);
+#else
+static inline int __init rxrpc_sysctl_init(void) { return 0; }
+static inline void rxrpc_sysctl_exit(void) {}
+#endif
+
 /*
  * debug tracing
  */
index d0e8f1c1898a092437bb035208c9c616f16a17ae..0b4b9a79f5abd0fb24e413551550aba25e7ce8e4 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-int rxrpc_resend_timeout = 4;
+/*
+ * Time till packet resend (in jiffies).
+ */
+unsigned rxrpc_resend_timeout = 4 * HZ;
 
 static int rxrpc_send_data(struct kiocb *iocb,
                           struct rxrpc_sock *rx,
@@ -487,7 +490,7 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
 
        sp->need_resend = false;
-       sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
+       sp->resend_at = jiffies + rxrpc_resend_timeout;
        if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
                _debug("run timer");
                call->resend_timer.expires = sp->resend_at;
@@ -666,6 +669,7 @@ static int rxrpc_send_data(struct kiocb *iocb,
                /* add the packet to the send queue if it's now full */
                if (sp->remain <= 0 || (segment == 0 && !more)) {
                        struct rxrpc_connection *conn = call->conn;
+                       uint32_t seq;
                        size_t pad;
 
                        /* pad out if we're using security */
@@ -678,11 +682,12 @@ static int rxrpc_send_data(struct kiocb *iocb,
                                        memset(skb_put(skb, pad), 0, pad);
                        }
 
+                       seq = atomic_inc_return(&call->sequence);
+
                        sp->hdr.epoch = conn->epoch;
                        sp->hdr.cid = call->cid;
                        sp->hdr.callNumber = call->call_id;
-                       sp->hdr.seq =
-                               htonl(atomic_inc_return(&call->sequence));
+                       sp->hdr.seq = htonl(seq);
                        sp->hdr.serial =
                                htonl(atomic_inc_return(&conn->serial));
                        sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
@@ -697,6 +702,8 @@ static int rxrpc_send_data(struct kiocb *iocb,
                        else if (CIRC_SPACE(call->acks_head, call->acks_tail,
                                            call->acks_winsz) > 1)
                                sp->hdr.flags |= RXRPC_MORE_PACKETS;
+                       if (more && seq & 1)
+                               sp->hdr.flags |= RXRPC_REQUEST_ACK;
 
                        ret = rxrpc_secure_packet(
                                call, skb, skb->mark,
index 34b5490dde655ccdbac5dcbbc7b0df1b88ab42ca..e9aaa65c07784cf9991f5bef279e4dab49a6912f 100644 (file)
@@ -180,16 +180,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
                if (copy > len - copied)
                        copy = len - copied;
 
-               if (skb->ip_summed == CHECKSUM_UNNECESSARY ||
-                   skb->ip_summed == CHECKSUM_PARTIAL) {
-                       ret = skb_copy_datagram_iovec(skb, offset,
-                                                     msg->msg_iov, copy);
-               } else {
-                       ret = skb_copy_and_csum_datagram_iovec(skb, offset,
-                                                              msg->msg_iov);
-                       if (ret == -EINVAL)
-                               goto csum_copy_error;
-               }
+               ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);
 
                if (ret < 0)
                        goto copy_error;
@@ -348,20 +339,6 @@ copy_error:
        _leave(" = %d", ret);
        return ret;
 
-csum_copy_error:
-       _debug("csum error");
-       release_sock(&rx->sk);
-       if (continue_call)
-               rxrpc_put_call(continue_call);
-       rxrpc_kill_skb(skb);
-       if (!(flags & MSG_PEEK)) {
-               if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
-                       BUG();
-       }
-       skb_kill_datagram(&rx->sk, skb, flags);
-       rxrpc_put_call(call);
-       return -EAGAIN;
-
 wait_interrupted:
        ret = sock_intr_errno(timeo);
 wait_error:
index de755e04d29ce1495a0f0c9459b487da85c0b413..4cfab49e329dbba1a2cfa05c1faeff66fd016031 100644 (file)
@@ -83,9 +83,14 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
                rxrpc_request_final_ACK(call);
        } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
                   test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
+               /* We previously soft-ACK'd some received packets that have now
+                * been consumed, so send a hard-ACK if no more packets are
+                * immediately forthcoming to allow the transmitter to free up
+                * its Tx buffer space.
+                */
                _debug("send Rx idle ACK");
                __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
-                                   true);
+                                   false);
        }
 
        spin_unlock_bh(&call->lock);
index 92df566930b9a05c23d109cb9e42806698cbb535..1976dec84f297cfb126df6bcd53129f1d518001b 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
+/*
+ * Time after last use at which transport record is cleaned up.
+ */
+unsigned rxrpc_transport_expiry = 3600 * 24;
+
 static void rxrpc_transport_reaper(struct work_struct *work);
 
 static LIST_HEAD(rxrpc_transports);
 static DEFINE_RWLOCK(rxrpc_transport_lock);
-static unsigned long rxrpc_transport_timeout = 3600 * 24;
 static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
 
 /*
@@ -235,7 +239,7 @@ static void rxrpc_transport_reaper(struct work_struct *work)
                if (likely(atomic_read(&trans->usage) > 0))
                        continue;
 
-               reap_time = trans->put_time + rxrpc_transport_timeout;
+               reap_time = trans->put_time + rxrpc_transport_expiry;
                if (reap_time <= now)
                        list_move_tail(&trans->link, &graveyard);
                else if (reap_time < earliest)
@@ -271,7 +275,7 @@ void __exit rxrpc_destroy_all_transports(void)
 {
        _enter("");
 
-       rxrpc_transport_timeout = 0;
+       rxrpc_transport_expiry = 0;
        cancel_delayed_work(&rxrpc_transport_reap);
        rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
 
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
new file mode 100644 (file)
index 0000000..50a98a9
--- /dev/null
@@ -0,0 +1,146 @@
+/* sysctls for configuring RxRPC operating parameters
+ *
+ * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sysctl.h>
+#include <net/sock.h>
+#include <net/af_rxrpc.h>
+#include "ar-internal.h"
+
+static struct ctl_table_header *rxrpc_sysctl_reg_table;
+static const unsigned zero = 0;
+static const unsigned one = 1;
+static const unsigned four = 4;
+static const unsigned n_65535 = 65535;
+static const unsigned n_max_acks = RXRPC_MAXACKS;
+
+/*
+ * RxRPC operating parameters.
+ *
+ * See Documentation/networking/rxrpc.txt and the variable definitions for more
+ * information on the individual parameters.
+ */
+static struct ctl_table rxrpc_sysctl_table[] = {
+       /* Values measured in milliseconds but used in jiffies */
+       {
+               .procname       = "req_ack_delay",
+               .data           = &rxrpc_requested_ack_delay,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+               .extra1         = (void *)&zero,
+       },
+       {
+               .procname       = "soft_ack_delay",
+               .data           = &rxrpc_soft_ack_delay,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+               .extra1         = (void *)&one,
+       },
+       {
+               .procname       = "idle_ack_delay",
+               .data           = &rxrpc_idle_ack_delay,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+               .extra1         = (void *)&one,
+       },
+       {
+               .procname       = "resend_timeout",
+               .data           = &rxrpc_resend_timeout,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_ms_jiffies,
+               .extra1         = (void *)&one,
+       },
+
+       /* Values measured in seconds but used in jiffies */
+       {
+               .procname       = "max_call_lifetime",
+               .data           = &rxrpc_max_call_lifetime,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+               .extra1         = (void *)&one,
+       },
+       {
+               .procname       = "dead_call_expiry",
+               .data           = &rxrpc_dead_call_expiry,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+               .extra1         = (void *)&one,
+       },
+
+       /* Values measured in seconds */
+       {
+               .procname       = "connection_expiry",
+               .data           = &rxrpc_connection_expiry,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&one,
+       },
+       {
+               .procname       = "transport_expiry",
+               .data           = &rxrpc_transport_expiry,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&one,
+       },
+
+       /* Non-time values */
+       {
+               .procname       = "rx_window_size",
+               .data           = &rxrpc_rx_window_size,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&one,
+               .extra2         = (void *)&n_max_acks,
+       },
+       {
+               .procname       = "rx_mtu",
+               .data           = &rxrpc_rx_mtu,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&one,
+               .extra2         = (void *)&n_65535,
+       },
+       {
+               .procname       = "rx_jumbo_max",
+               .data           = &rxrpc_rx_jumbo_max,
+               .maxlen         = sizeof(unsigned),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (void *)&one,
+               .extra2         = (void *)&four,
+       },
+
+       { }
+};
+
+int __init rxrpc_sysctl_init(void)
+{
+       rxrpc_sysctl_reg_table = register_net_sysctl(&init_net, "net/rxrpc",
+                                                    rxrpc_sysctl_table);
+       if (!rxrpc_sysctl_reg_table)
+               return -ENOMEM;
+       return 0;
+}
+
+void rxrpc_sysctl_exit(void)
+{
+       if (rxrpc_sysctl_reg_table)
+               unregister_net_sysctl_table(rxrpc_sysctl_reg_table);
+}
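Once registered, each entry in the table above appears as a file under /proc/sys/net/rxrpc/.  For the proc_dointvec_minmax entries, .extra1 and .extra2 point at the lower and upper bounds respectively, as the rx_window_size (1..RXRPC_MAXACKS) and rx_jumbo_max (1..4) entries show.  A stand-alone sketch of that convention (the tunable and table names are illustrative):

#include <linux/sysctl.h>

static unsigned int example_tunable = 16;
static const unsigned int example_min = 1;
static const unsigned int example_max = 65535;

/* proc_dointvec_minmax rejects writes outside [.extra1, .extra2]. */
static struct ctl_table example_table[] = {
	{
		.procname	= "example_tunable",
		.data		= &example_tunable,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= (void *)&example_min,
		.extra2		= (void *)&example_max,
	},
	{ }
};

/* Registered in the same way as above, e.g.
 *	register_net_sysctl(&init_net, "net/example", example_table);
 */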
index 72bdc71663458e8cc131ab392b6af7673f35207d..8a5ba5add4bcd60e59a9b2468df88812212012f4 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+void tcf_hash_destroy(struct tc_action *a)
 {
+       struct tcf_common *p = a->priv;
+       struct tcf_hashinfo *hinfo = a->ops->hinfo;
+
        spin_lock_bh(&hinfo->lock);
        hlist_del(&p->tcfc_head);
        spin_unlock_bh(&hinfo->lock);
@@ -42,18 +45,22 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
-int tcf_hash_release(struct tcf_common *p, int bind,
-                    struct tcf_hashinfo *hinfo)
+int tcf_hash_release(struct tc_action *a, int bind)
 {
+       struct tcf_common *p = a->priv;
        int ret = 0;
 
        if (p) {
                if (bind)
                        p->tcfc_bindcnt--;
+               else if (p->tcfc_bindcnt > 0)
+                       return -EPERM;
 
                p->tcfc_refcnt--;
                if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
-                       tcf_hash_destroy(p, hinfo);
+                       if (a->ops->cleanup)
+                               a->ops->cleanup(a, bind);
+                       tcf_hash_destroy(a);
                        ret = 1;
                }
        }
@@ -118,6 +125,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
        struct tcf_common *p;
        struct nlattr *nest;
        int i = 0, n_i = 0;
+       int ret = -EINVAL;
 
        nest = nla_nest_start(skb, a->order);
        if (nest == NULL)
@@ -127,10 +135,13 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
        for (i = 0; i < (hinfo->hmask + 1); i++) {
                head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
                hlist_for_each_entry_safe(p, n, head, tcfc_head) {
-                       if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) {
+                       a->priv = p;
+                       ret = tcf_hash_release(a, 0);
+                       if (ret == ACT_P_DELETED) {
                                module_put(a->ops->owner);
                                n_i++;
-                       }
+                       } else if (ret < 0)
+                               goto nla_put_failure;
                }
        }
        if (nla_put_u32(skb, TCA_FCNT, n_i))
@@ -140,7 +151,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
        return n_i;
 nla_put_failure:
        nla_nest_cancel(skb, nest);
-       return -EINVAL;
+       return ret;
 }
 
 static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
@@ -198,7 +209,7 @@ int tcf_hash_search(struct tc_action *a, u32 index)
 }
 EXPORT_SYMBOL(tcf_hash_search);
 
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
+int tcf_hash_check(u32 index, struct tc_action *a, int bind)
 {
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct tcf_common *p = NULL;
@@ -207,19 +218,30 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
                        p->tcfc_bindcnt++;
                p->tcfc_refcnt++;
                a->priv = p;
+               return 1;
        }
-       return p;
+       return 0;
 }
 EXPORT_SYMBOL(tcf_hash_check);
 
-struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
-                                  struct tc_action *a, int size, int bind)
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
+{
+       struct tcf_common *pc = a->priv;
+       if (est)
+               gen_kill_estimator(&pc->tcfc_bstats,
+                                  &pc->tcfc_rate_est);
+       kfree_rcu(pc, tcfc_rcu);
+}
+EXPORT_SYMBOL(tcf_hash_cleanup);
+
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+                   int size, int bind)
 {
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct tcf_common *p = kzalloc(size, GFP_KERNEL);
 
        if (unlikely(!p))
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        p->tcfc_refcnt = 1;
        if (bind)
                p->tcfc_bindcnt = 1;
@@ -234,17 +256,19 @@ struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
                                            &p->tcfc_lock, est);
                if (err) {
                        kfree(p);
-                       return ERR_PTR(err);
+                       return err;
                }
        }
 
        a->priv = (void *) p;
-       return p;
+       return 0;
 }
 EXPORT_SYMBOL(tcf_hash_create);
 
-void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+void tcf_hash_insert(struct tc_action *a)
 {
+       struct tcf_common *p = a->priv;
+       struct tcf_hashinfo *hinfo = a->ops->hinfo;
        unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
 
        spin_lock_bh(&hinfo->lock);
@@ -256,12 +280,13 @@ EXPORT_SYMBOL(tcf_hash_insert);
 static LIST_HEAD(act_base);
 static DEFINE_RWLOCK(act_mod_lock);
 
-int tcf_register_action(struct tc_action_ops *act)
+int tcf_register_action(struct tc_action_ops *act, unsigned int mask)
 {
        struct tc_action_ops *a;
+       int err;
 
-       /* Must supply act, dump, cleanup and init */
-       if (!act->act || !act->dump || !act->cleanup || !act->init)
+       /* Must supply act, dump and init */
+       if (!act->act || !act->dump || !act->init)
                return -EINVAL;
 
        /* Supply defaults */
@@ -270,10 +295,21 @@ int tcf_register_action(struct tc_action_ops *act)
        if (!act->walk)
                act->walk = tcf_generic_walker;
 
+       act->hinfo = kmalloc(sizeof(struct tcf_hashinfo), GFP_KERNEL);
+       if (!act->hinfo)
+               return -ENOMEM;
+       err = tcf_hashinfo_init(act->hinfo, mask);
+       if (err) {
+               kfree(act->hinfo);
+               return err;
+       }
+
        write_lock(&act_mod_lock);
        list_for_each_entry(a, &act_base, head) {
                if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
                        write_unlock(&act_mod_lock);
+                       tcf_hashinfo_destroy(act->hinfo);
+                       kfree(act->hinfo);
                        return -EEXIST;
                }
        }
@@ -292,6 +328,8 @@ int tcf_unregister_action(struct tc_action_ops *act)
        list_for_each_entry(a, &act_base, head) {
                if (a == act) {
                        list_del(&act->head);
+                       tcf_hashinfo_destroy(act->hinfo);
+                       kfree(act->hinfo);
                        err = 0;
                        break;
                }
@@ -368,16 +406,21 @@ exec_done:
 }
 EXPORT_SYMBOL(tcf_action_exec);
 
-void tcf_action_destroy(struct list_head *actions, int bind)
+int tcf_action_destroy(struct list_head *actions, int bind)
 {
        struct tc_action *a, *tmp;
+       int ret = 0;
 
        list_for_each_entry_safe(a, tmp, actions, list) {
-               if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
+               ret = tcf_hash_release(a, bind);
+               if (ret == ACT_P_DELETED)
                        module_put(a->ops->owner);
+               else if (ret < 0)
+                       return ret;
                list_del(&a->list);
                kfree(a);
        }
+       return ret;
 }
 
 int
@@ -642,6 +685,20 @@ act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
        return rtnl_unicast(skb, net, portid);
 }
 
+static struct tc_action *create_a(int i)
+{
+       struct tc_action *act;
+
+       act = kzalloc(sizeof(*act), GFP_KERNEL);
+       if (act == NULL) {
+               pr_debug("create_a: failed to alloc!\n");
+               return NULL;
+       }
+       act->order = i;
+       INIT_LIST_HEAD(&act->list);
+       return act;
+}
+
 static struct tc_action *
 tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
 {
@@ -661,11 +718,10 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
        index = nla_get_u32(tb[TCA_ACT_INDEX]);
 
        err = -ENOMEM;
-       a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
+       a = create_a(0);
        if (a == NULL)
                goto err_out;
 
-       INIT_LIST_HEAD(&a->list);
        err = -EINVAL;
        a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
        if (a->ops == NULL) /* could happen in batch of actions */
@@ -695,20 +751,6 @@ static void cleanup_a(struct list_head *actions)
        }
 }
 
-static struct tc_action *create_a(int i)
-{
-       struct tc_action *act;
-
-       act = kzalloc(sizeof(*act), GFP_KERNEL);
-       if (act == NULL) {
-               pr_debug("create_a: failed to alloc!\n");
-               return NULL;
-       }
-       act->order = i;
-       INIT_LIST_HEAD(&act->list);
-       return act;
-}
-
 static int tca_action_flush(struct net *net, struct nlattr *nla,
                            struct nlmsghdr *n, u32 portid)
 {
@@ -720,18 +762,12 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        struct nlattr *nest;
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct nlattr *kind;
-       struct tc_action *a = create_a(0);
+       struct tc_action a;
        int err = -ENOMEM;
 
-       if (a == NULL) {
-               pr_debug("tca_action_flush: couldnt create tc_action\n");
-               return err;
-       }
-
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb) {
                pr_debug("tca_action_flush: failed skb alloc\n");
-               kfree(a);
                return err;
        }
 
@@ -743,8 +779,10 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
 
        err = -EINVAL;
        kind = tb[TCA_ACT_KIND];
-       a->ops = tc_lookup_action(kind);
-       if (a->ops == NULL) /*some idjot trying to flush unknown action */
+       memset(&a, 0, sizeof(struct tc_action));
+       INIT_LIST_HEAD(&a.list);
+       a.ops = tc_lookup_action(kind);
+       if (a.ops == NULL) /*some idjot trying to flush unknown action */
                goto err_out;
 
        nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
@@ -759,7 +797,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        if (nest == NULL)
                goto out_module_put;
 
-       err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
+       err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
        if (err < 0)
                goto out_module_put;
        if (err == 0)
@@ -769,8 +807,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
 
        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        nlh->nlmsg_flags |= NLM_F_ROOT;
-       module_put(a->ops->owner);
-       kfree(a);
+       module_put(a.ops->owner);
        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
@@ -779,11 +816,10 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
        return err;
 
 out_module_put:
-       module_put(a->ops->owner);
+       module_put(a.ops->owner);
 err_out:
 noflush_out:
        kfree_skb(skb);
-       kfree(a);
        return err;
 }
 
@@ -805,7 +841,11 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
        }
 
        /* now do the delete */
-       tcf_action_destroy(actions, 0);
+       ret = tcf_action_destroy(actions, 0);
+       if (ret < 0) {
+               kfree_skb(skb);
+               return ret;
+       }
 
        ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
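Taken together, the act_api changes above make tcf_hash_check(), tcf_hash_create(), tcf_hash_release() and tcf_hash_insert() operate on the struct tc_action handle itself, and move hashinfo allocation into tcf_register_action(ops, mask).  A rough sketch of what an action module's init path looks like after this conversion, mirroring the act_csum/act_gact hunks below (struct tcf_example, example_act_init and EXAMPLE_TAB_MASK are illustrative, not a real action, and the parameter list is simplified):

#include <net/netlink.h>
#include <net/act_api.h>

#define EXAMPLE_TAB_MASK	15	/* mirrors CSUM_TAB_MASK/GACT_TAB_MASK */

struct tcf_example {			/* hypothetical per-instance state */
	struct tcf_common	common;
};

static int example_act_init(struct nlattr *est, struct tc_action *a,
			    u32 index, int ovr, int bind)
{
	int ret = 0;

	if (!tcf_hash_check(index, a, bind)) {
		/* No existing instance: allocate one behind a->priv. */
		ret = tcf_hash_create(index, est, a,
				      sizeof(struct tcf_example), bind);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)		/* don't override defaults */
			return 0;
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}

	/* ... parse parameters into a->priv here ... */

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(a);
	return ret;
}

/* Registration now supplies the hash mask instead of a static hashinfo:
 *	tcf_register_action(&act_example_ops, EXAMPLE_TAB_MASK);
 */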
index 2210187c45c2772a9f21dae081af50eeabcf6366..edbf40dac709df3e88f33948a1c77800dbe25b75 100644 (file)
@@ -37,7 +37,6 @@
 #include <net/tc_act/tc_csum.h>
 
 #define CSUM_TAB_MASK 15
-static struct tcf_hashinfo csum_hash_info;
 
 static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
@@ -48,7 +47,6 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
 {
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tc_csum *parm;
-       struct tcf_common *pc;
        struct tcf_csum *p;
        int ret = 0, err;
 
@@ -63,38 +61,31 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               if (ret)
+                       return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)/* dont override defaults */
                        return 0;
-               tcf_hash_release(pc, bind, a->ops->hinfo);
+               tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
        }
 
-       p = to_tcf_csum(pc);
+       p = to_tcf_csum(a);
        spin_lock_bh(&p->tcf_lock);
        p->tcf_action = parm->action;
        p->update_flags = parm->update_flags;
        spin_unlock_bh(&p->tcf_lock);
 
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
 
        return ret;
 }
 
-static int tcf_csum_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_csum *p = a->priv;
-       return tcf_hash_release(&p->common, bind, &csum_hash_info);
-}
-
 /**
  * tcf_csum_skb_nextlayer - Get next layer pointer
  * @skb: sk_buff to use
@@ -569,12 +560,10 @@ nla_put_failure:
 
 static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
-       .hinfo          = &csum_hash_info,
        .type           = TCA_ACT_CSUM,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
-       .cleanup        = tcf_csum_cleanup,
        .init           = tcf_csum_init,
 };
 
@@ -583,11 +572,7 @@ MODULE_LICENSE("GPL");
 
 static int __init csum_init_module(void)
 {
-       int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
-       if (err)
-               return err;
-
-       return tcf_register_action(&act_csum_ops);
+       return tcf_register_action(&act_csum_ops, CSUM_TAB_MASK);
 }
 
 static void __exit csum_cleanup_module(void)
index a0eed30d58111ab49e11f9cc97c4dc0d30e93c9e..d6bcbd9f7791eb25cd250358b21b73b7bb80c3a0 100644 (file)
@@ -24,7 +24,6 @@
 #include <net/tc_act/tc_gact.h>
 
 #define GACT_TAB_MASK  15
-static struct tcf_hashinfo gact_hash_info;
 
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
@@ -57,7 +56,6 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_GACT_MAX + 1];
        struct tc_gact *parm;
        struct tcf_gact *gact;
-       struct tcf_common *pc;
        int ret = 0;
        int err;
 #ifdef CONFIG_GACT_PROB
@@ -86,21 +84,20 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
        }
 #endif
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+               if (ret)
+                       return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)/* dont override defaults */
                        return 0;
-               tcf_hash_release(pc, bind, a->ops->hinfo);
+               tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
        }
 
-       gact = to_gact(pc);
+       gact = to_gact(a);
 
        spin_lock_bh(&gact->tcf_lock);
        gact->tcf_action = parm->action;
@@ -113,19 +110,10 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
        spin_unlock_bh(&gact->tcf_lock);
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
        return ret;
 }
 
-static int tcf_gact_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_gact *gact = a->priv;
-
-       if (gact)
-               return tcf_hash_release(&gact->common, bind, a->ops->hinfo);
-       return 0;
-}
-
 static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
                    struct tcf_result *res)
 {
@@ -191,12 +179,10 @@ nla_put_failure:
 
 static struct tc_action_ops act_gact_ops = {
        .kind           =       "gact",
-       .hinfo          =       &gact_hash_info,
        .type           =       TCA_ACT_GACT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_gact,
        .dump           =       tcf_gact_dump,
-       .cleanup        =       tcf_gact_cleanup,
        .init           =       tcf_gact_init,
 };
 
@@ -206,21 +192,17 @@ MODULE_LICENSE("GPL");
 
 static int __init gact_init_module(void)
 {
-       int err = tcf_hashinfo_init(&gact_hash_info, GACT_TAB_MASK);
-       if (err)
-               return err;
 #ifdef CONFIG_GACT_PROB
        pr_info("GACT probability on\n");
 #else
        pr_info("GACT probability NOT on\n");
 #endif
-       return tcf_register_action(&act_gact_ops);
+       return tcf_register_action(&act_gact_ops, GACT_TAB_MASK);
 }
 
 static void __exit gact_cleanup_module(void)
 {
        tcf_unregister_action(&act_gact_ops);
-       tcf_hashinfo_destroy(&gact_hash_info);
 }
 
 module_init(gact_init_module);
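The ->init() conversion is just as uniform: tcf_hash_check() and tcf_hash_create() now operate directly on the struct tc_action, so the intermediate struct tcf_common pointer and the IS_ERR()/PTR_ERR() handling go away, and tcf_hash_release()/tcf_hash_insert() take the action itself. A condensed sketch of the shared shape, with to_foo()/priv standing in for the per-action cast such as to_gact() above:

        if (!tcf_hash_check(parm->index, a, bind)) {
                ret = tcf_hash_create(parm->index, est, a, sizeof(*priv), bind);
                if (ret)
                        return ret;             /* create failed, plain errno */
                ret = ACT_P_CREATED;
        } else {
                if (bind)                       /* don't override defaults */
                        return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
        }

        priv = to_foo(a);                       /* hypothetical per-action cast */
        /* ... update priv under its tcf_lock ... */

        if (ret == ACT_P_CREATED)
                tcf_hash_insert(a);
        return ret;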
index 0a6d621740273ffddec7dffc1837f483be8bb0e2..8a64a0734aeebd29894111b4e93d288076b1df35 100644 (file)
@@ -29,7 +29,6 @@
 
 
 #define IPT_TAB_MASK     15
-static struct tcf_hashinfo ipt_hash_info;
 
 static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
 {
@@ -69,22 +68,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
        module_put(par.target->me);
 }
 
-static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
+static void tcf_ipt_release(struct tc_action *a, int bind)
 {
-       int ret = 0;
-       if (ipt) {
-               if (bind)
-                       ipt->tcf_bindcnt--;
-               ipt->tcf_refcnt--;
-               if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) {
-                       ipt_destroy_target(ipt->tcfi_t);
-                       kfree(ipt->tcfi_tname);
-                       kfree(ipt->tcfi_t);
-                       tcf_hash_destroy(&ipt->common, &ipt_hash_info);
-                       ret = ACT_P_DELETED;
-               }
-       }
-       return ret;
+       struct tcf_ipt *ipt = to_ipt(a);
+       ipt_destroy_target(ipt->tcfi_t);
+       kfree(ipt->tcfi_tname);
+       kfree(ipt->tcfi_t);
 }
 
 static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -99,7 +88,6 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
 {
        struct nlattr *tb[TCA_IPT_MAX + 1];
        struct tcf_ipt *ipt;
-       struct tcf_common *pc;
        struct xt_entry_target *td, *t;
        char *tname;
        int ret = 0, err;
@@ -125,21 +113,20 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        if (tb[TCA_IPT_INDEX] != NULL)
                index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
-       pc = tcf_hash_check(index, a, bind);
-       if (!pc) {
-               pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+       if (!tcf_hash_check(index, a, bind)) {
+               ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+               if (ret)
+                       return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind) /* don't override defaults */
                        return 0;
-               tcf_ipt_release(to_ipt(pc), bind);
+               tcf_hash_release(a, bind);
 
                if (!ovr)
                        return -EEXIST;
        }
-       ipt = to_ipt(pc);
+       ipt = to_ipt(a);
 
        hook = nla_get_u32(tb[TCA_IPT_HOOK]);
 
@@ -170,7 +157,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        ipt->tcfi_hook  = hook;
        spin_unlock_bh(&ipt->tcf_lock);
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
        return ret;
 
 err3:
@@ -178,21 +165,11 @@ err3:
 err2:
        kfree(tname);
 err1:
-       if (ret == ACT_P_CREATED) {
-               if (est)
-                       gen_kill_estimator(&pc->tcfc_bstats,
-                                          &pc->tcfc_rate_est);
-               kfree_rcu(pc, tcfc_rcu);
-       }
+       if (ret == ACT_P_CREATED)
+               tcf_hash_cleanup(a, est);
        return err;
 }
 
-static int tcf_ipt_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_ipt *ipt = a->priv;
-       return tcf_ipt_release(ipt, bind);
-}
-
 static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
                   struct tcf_result *res)
 {
@@ -284,23 +261,21 @@ nla_put_failure:
 
 static struct tc_action_ops act_ipt_ops = {
        .kind           =       "ipt",
-       .hinfo          =       &ipt_hash_info,
        .type           =       TCA_ACT_IPT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_ipt,
        .dump           =       tcf_ipt_dump,
-       .cleanup        =       tcf_ipt_cleanup,
+       .cleanup        =       tcf_ipt_release,
        .init           =       tcf_ipt_init,
 };
 
 static struct tc_action_ops act_xt_ops = {
        .kind           =       "xt",
-       .hinfo          =       &ipt_hash_info,
        .type           =       TCA_ACT_XT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_ipt,
        .dump           =       tcf_ipt_dump,
-       .cleanup        =       tcf_ipt_cleanup,
+       .cleanup        =       tcf_ipt_release,
        .init           =       tcf_ipt_init,
 };
 
@@ -311,20 +286,16 @@ MODULE_ALIAS("act_xt");
 
 static int __init ipt_init_module(void)
 {
-       int ret1, ret2, err;
-       err = tcf_hashinfo_init(&ipt_hash_info, IPT_TAB_MASK);
-       if (err)
-               return err;
+       int ret1, ret2;
 
-       ret1 = tcf_register_action(&act_xt_ops);
+       ret1 = tcf_register_action(&act_xt_ops, IPT_TAB_MASK);
        if (ret1 < 0)
                printk("Failed to load xt action\n");
-       ret2 = tcf_register_action(&act_ipt_ops);
+       ret2 = tcf_register_action(&act_ipt_ops, IPT_TAB_MASK);
        if (ret2 < 0)
                printk("Failed to load ipt action\n");
 
        if (ret1 < 0 && ret2 < 0) {
-               tcf_hashinfo_destroy(&ipt_hash_info);
                return ret1;
        } else
                return 0;
@@ -334,7 +305,6 @@ static void __exit ipt_cleanup_module(void)
 {
        tcf_unregister_action(&act_xt_ops);
        tcf_unregister_action(&act_ipt_ops);
-       tcf_hashinfo_destroy(&ipt_hash_info);
 }
 
 module_init(ipt_init_module);
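Reference and bind counting, and removal from the hash table, are now done centrally by tcf_hash_release(a, bind), which invokes the action's ->cleanup() only when the last reference drops. The per-action callback therefore shrinks to a void function that frees nothing but the action's own private state, as tcf_ipt_release() does above. A hypothetical action owning a single allocated buffer would need no more than:

static void tcf_foo_release(struct tc_action *a, int bind)
{
        struct tcf_foo *f = to_foo(a);  /* hypothetical per-action cast */

        kfree(f->data);                 /* only the action's own allocation */
}

/* wired up via .cleanup = tcf_foo_release in the tc_action_ops */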
index 0b2c6d39d39671f9f96f57528e35588816a62e88..4f912c0e225b674dfa117060ee547b99feb972ca 100644 (file)
 
 #define MIRRED_TAB_MASK     7
 static LIST_HEAD(mirred_list);
-static struct tcf_hashinfo mirred_hash_info;
 
-static int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static void tcf_mirred_release(struct tc_action *a, int bind)
 {
-       if (m) {
-               if (bind)
-                       m->tcf_bindcnt--;
-               m->tcf_refcnt--;
-               if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
-                       list_del(&m->tcfm_list);
-                       if (m->tcfm_dev)
-                               dev_put(m->tcfm_dev);
-                       tcf_hash_destroy(&m->common, &mirred_hash_info);
-                       return 1;
-               }
-       }
-       return 0;
+       struct tcf_mirred *m = to_mirred(a);
+       list_del(&m->tcfm_list);
+       if (m->tcfm_dev)
+               dev_put(m->tcfm_dev);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -61,7 +51,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
        struct tc_mirred *parm;
        struct tcf_mirred *m;
-       struct tcf_common *pc;
        struct net_device *dev;
        int ret, ok_push = 0;
 
@@ -101,21 +90,20 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                dev = NULL;
        }
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
+       if (!tcf_hash_check(parm->index, a, bind)) {
                if (dev == NULL)
                        return -EINVAL;
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+               if (ret)
+                       return ret;
                ret = ACT_P_CREATED;
        } else {
                if (!ovr) {
-                       tcf_mirred_release(to_mirred(pc), bind);
+                       tcf_hash_release(a, bind);
                        return -EEXIST;
                }
        }
-       m = to_mirred(pc);
+       m = to_mirred(a);
 
        spin_lock_bh(&m->tcf_lock);
        m->tcf_action = parm->action;
@@ -131,21 +119,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        spin_unlock_bh(&m->tcf_lock);
        if (ret == ACT_P_CREATED) {
                list_add(&m->tcfm_list, &mirred_list);
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
        }
 
        return ret;
 }
 
-static int tcf_mirred_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_mirred *m = a->priv;
-
-       if (m)
-               return tcf_mirred_release(m, bind);
-       return 0;
-}
-
 static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
                      struct tcf_result *res)
 {
@@ -254,12 +233,11 @@ static struct notifier_block mirred_device_notifier = {
 
 static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
-       .hinfo          =       &mirred_hash_info,
        .type           =       TCA_ACT_MIRRED,
        .owner          =       THIS_MODULE,
        .act            =       tcf_mirred,
        .dump           =       tcf_mirred_dump,
-       .cleanup        =       tcf_mirred_cleanup,
+       .cleanup        =       tcf_mirred_release,
        .init           =       tcf_mirred_init,
 };
 
@@ -273,19 +251,13 @@ static int __init mirred_init_module(void)
        if (err)
                return err;
 
-       err = tcf_hashinfo_init(&mirred_hash_info, MIRRED_TAB_MASK);
-       if (err) {
-               unregister_netdevice_notifier(&mirred_device_notifier);
-               return err;
-       }
        pr_info("Mirror/redirect action on\n");
-       return tcf_register_action(&act_mirred_ops);
+       return tcf_register_action(&act_mirred_ops, MIRRED_TAB_MASK);
 }
 
 static void __exit mirred_cleanup_module(void)
 {
        tcf_unregister_action(&act_mirred_ops);
-       tcf_hashinfo_destroy(&mirred_hash_info);
        unregister_netdevice_notifier(&mirred_device_notifier);
 }
 
index 81f0404bb33555244faf74504af4a8becf00d65a..270a030d5fd099ee7b6f6d74d51b6015aa690647 100644 (file)
@@ -31,8 +31,6 @@
 
 #define NAT_TAB_MASK   15
 
-static struct tcf_hashinfo nat_hash_info;
-
 static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
        [TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
 };
@@ -44,7 +42,6 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;
-       struct tcf_common *pc;
 
        if (nla == NULL)
                return -EINVAL;
@@ -57,20 +54,19 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                return -EINVAL;
        parm = nla_data(tb[TCA_NAT_PARMS]);
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               if (ret)
+                       return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind)
                        return 0;
-               tcf_hash_release(pc, bind, a->ops->hinfo);
+               tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
        }
-       p = to_tcf_nat(pc);
+       p = to_tcf_nat(a);
 
        spin_lock_bh(&p->tcf_lock);
        p->old_addr = parm->old_addr;
@@ -82,18 +78,11 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        spin_unlock_bh(&p->tcf_lock);
 
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
 
        return ret;
 }
 
-static int tcf_nat_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_nat *p = a->priv;
-
-       return tcf_hash_release(&p->common, bind, &nat_hash_info);
-}
-
 static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
                   struct tcf_result *res)
 {
@@ -293,12 +282,10 @@ nla_put_failure:
 
 static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
-       .hinfo          =       &nat_hash_info,
        .type           =       TCA_ACT_NAT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_nat,
        .dump           =       tcf_nat_dump,
-       .cleanup        =       tcf_nat_cleanup,
        .init           =       tcf_nat_init,
 };
 
@@ -307,16 +294,12 @@ MODULE_LICENSE("GPL");
 
 static int __init nat_init_module(void)
 {
-       int err = tcf_hashinfo_init(&nat_hash_info, NAT_TAB_MASK);
-       if (err)
-               return err;
-       return tcf_register_action(&act_nat_ops);
+       return tcf_register_action(&act_nat_ops, NAT_TAB_MASK);
 }
 
 static void __exit nat_cleanup_module(void)
 {
        tcf_unregister_action(&act_nat_ops);
-       tcf_hashinfo_destroy(&nat_hash_info);
 }
 
 module_init(nat_init_module);
index be3f0f6875bb1b594f35685336d41d1a3fe62515..5f9bcb2e080bcd666c7c374a837916a227a0e41c 100644 (file)
@@ -25,8 +25,6 @@
 
 #define PEDIT_TAB_MASK 15
 
-static struct tcf_hashinfo pedit_hash_info;
-
 static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
        [TCA_PEDIT_PARMS]       = { .len = sizeof(struct tc_pedit) },
 };
@@ -39,7 +37,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        struct tc_pedit *parm;
        int ret = 0, err;
        struct tcf_pedit *p;
-       struct tcf_common *pc;
        struct tc_pedit_key *keys = NULL;
        int ksize;
 
@@ -57,26 +54,22 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
                return -EINVAL;
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
+       if (!tcf_hash_check(parm->index, a, bind)) {
                if (!parm->nkeys)
                        return -EINVAL;
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
-               p = to_pedit(pc);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               if (ret)
+                       return ret;
+               p = to_pedit(a);
                keys = kmalloc(ksize, GFP_KERNEL);
                if (keys == NULL) {
-                       if (est)
-                               gen_kill_estimator(&pc->tcfc_bstats,
-                                                  &pc->tcfc_rate_est);
-                       kfree_rcu(pc, tcfc_rcu);
+                       tcf_hash_cleanup(a, est);
                        return -ENOMEM;
                }
                ret = ACT_P_CREATED;
        } else {
-               p = to_pedit(pc);
-               tcf_hash_release(pc, bind, a->ops->hinfo);
+               p = to_pedit(a);
+               tcf_hash_release(a, bind);
                if (bind)
                        return 0;
                if (!ovr)
@@ -100,22 +93,15 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        memcpy(p->tcfp_keys, parm->keys, ksize);
        spin_unlock_bh(&p->tcf_lock);
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
        return ret;
 }
 
-static int tcf_pedit_cleanup(struct tc_action *a, int bind)
+static void tcf_pedit_cleanup(struct tc_action *a, int bind)
 {
        struct tcf_pedit *p = a->priv;
-
-       if (p) {
-               struct tc_pedit_key *keys = p->tcfp_keys;
-               if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) {
-                       kfree(keys);
-                       return 1;
-               }
-       }
-       return 0;
+       struct tc_pedit_key *keys = p->tcfp_keys;
+       kfree(keys);
 }
 
 static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
@@ -230,7 +216,6 @@ nla_put_failure:
 
 static struct tc_action_ops act_pedit_ops = {
        .kind           =       "pedit",
-       .hinfo          =       &pedit_hash_info,
        .type           =       TCA_ACT_PEDIT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_pedit,
@@ -245,15 +230,11 @@ MODULE_LICENSE("GPL");
 
 static int __init pedit_init_module(void)
 {
-       int err = tcf_hashinfo_init(&pedit_hash_info, PEDIT_TAB_MASK);
-       if (err)
-               return err;
-       return tcf_register_action(&act_pedit_ops);
+       return tcf_register_action(&act_pedit_ops, PEDIT_TAB_MASK);
 }
 
 static void __exit pedit_cleanup_module(void)
 {
-       tcf_hashinfo_destroy(&pedit_hash_info);
        tcf_unregister_action(&act_pedit_ops);
 }
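The other recurring helper is tcf_hash_cleanup(a, est): it undoes a successful tcf_hash_create() by killing the rate estimator attached through est and freeing the half-initialised action via RCU, which is exactly what the removed open-coded gen_kill_estimator()/kfree_rcu() pairs did. Condensed from the pedit change above, the error path for a late allocation now reads:

        ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
        if (ret)
                return ret;
        p = to_pedit(a);

        keys = kmalloc(ksize, GFP_KERNEL);
        if (keys == NULL) {
                tcf_hash_cleanup(a, est);       /* undo the create */
                return -ENOMEM;
        }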
 
index 1778209a332fa1ac251c02beaa97a49cfec747f3..0566e4606a4ac86710ebbc04a80c9ad1729849cd 100644 (file)
@@ -41,7 +41,6 @@ struct tcf_police {
        container_of(pc, struct tcf_police, common)
 
 #define POL_TAB_MASK     15
-static struct tcf_hashinfo police_hash_info;
 
 /* old policer structure from before tc actions */
 struct tc_police_compat {
@@ -234,7 +233,7 @@ override:
 
        police->tcfp_t_c = ktime_to_ns(ktime_get());
        police->tcf_index = parm->index ? parm->index :
-               tcf_hash_new_index(a->ops->hinfo);
+               tcf_hash_new_index(hinfo);
        h = tcf_hash(police->tcf_index, POL_TAB_MASK);
        spin_lock_bh(&hinfo->lock);
        hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -253,14 +252,6 @@ failure:
        return err;
 }
 
-static int tcf_act_police_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_police *p = a->priv;
-       if (p)
-               return tcf_hash_release(&p->common, bind, &police_hash_info);
-       return 0;
-}
-
 static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
 {
@@ -357,12 +348,10 @@ MODULE_LICENSE("GPL");
 
 static struct tc_action_ops act_police_ops = {
        .kind           =       "police",
-       .hinfo          =       &police_hash_info,
        .type           =       TCA_ID_POLICE,
        .owner          =       THIS_MODULE,
        .act            =       tcf_act_police,
        .dump           =       tcf_act_police_dump,
-       .cleanup        =       tcf_act_police_cleanup,
        .init           =       tcf_act_police_locate,
        .walk           =       tcf_act_police_walker
 };
@@ -370,19 +359,12 @@ static struct tc_action_ops act_police_ops = {
 static int __init
 police_init_module(void)
 {
-       int err = tcf_hashinfo_init(&police_hash_info, POL_TAB_MASK);
-       if (err)
-               return err;
-       err = tcf_register_action(&act_police_ops);
-       if (err)
-               tcf_hashinfo_destroy(&police_hash_info);
-       return err;
+       return tcf_register_action(&act_police_ops, POL_TAB_MASK);
 }
 
 static void __exit
 police_cleanup_module(void)
 {
-       tcf_hashinfo_destroy(&police_hash_info);
        tcf_unregister_action(&act_police_ops);
 }
 
index 8ef2f1fcbfba20d9dcf853f21e1101b0ff363108..992c2317ce8871c84c45f2d387c5a3b5408d11fb 100644 (file)
@@ -25,7 +25,6 @@
 #include <net/tc_act/tc_defact.h>
 
 #define SIMP_TAB_MASK     7
-static struct tcf_hashinfo simp_hash_info;
 
 #define SIMP_MAX_DATA  32
 static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
@@ -47,20 +46,10 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
        return d->tcf_action;
 }
 
-static int tcf_simp_release(struct tcf_defact *d, int bind)
+static void tcf_simp_release(struct tc_action *a, int bind)
 {
-       int ret = 0;
-       if (d) {
-               if (bind)
-                       d->tcf_bindcnt--;
-               d->tcf_refcnt--;
-               if (d->tcf_bindcnt <= 0 && d->tcf_refcnt <= 0) {
-                       kfree(d->tcfd_defdata);
-                       tcf_hash_destroy(&d->common, &simp_hash_info);
-                       ret = 1;
-               }
-       }
-       return ret;
+       struct tcf_defact *d = to_defact(a);
+       kfree(d->tcfd_defdata);
 }
 
 static int alloc_defdata(struct tcf_defact *d, char *defdata)
@@ -94,7 +83,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_DEF_MAX + 1];
        struct tc_defact *parm;
        struct tcf_defact *d;
-       struct tcf_common *pc;
        char *defdata;
        int ret = 0, err;
 
@@ -114,29 +102,25 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        parm = nla_data(tb[TCA_DEF_PARMS]);
        defdata = nla_data(tb[TCA_DEF_DATA]);
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+               if (ret)
+                       return ret;
 
-               d = to_defact(pc);
+               d = to_defact(a);
                ret = alloc_defdata(d, defdata);
                if (ret < 0) {
-                       if (est)
-                               gen_kill_estimator(&pc->tcfc_bstats,
-                                                  &pc->tcfc_rate_est);
-                       kfree_rcu(pc, tcfc_rcu);
+                       tcf_hash_cleanup(a, est);
                        return ret;
                }
                d->tcf_action = parm->action;
                ret = ACT_P_CREATED;
        } else {
-               d = to_defact(pc);
+               d = to_defact(a);
 
                if (bind)
                        return 0;
-               tcf_simp_release(d, bind);
+               tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
 
@@ -144,19 +128,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        }
 
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
        return ret;
 }
 
-static int tcf_simp_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_defact *d = a->priv;
-
-       if (d)
-               return tcf_simp_release(d, bind);
-       return 0;
-}
-
 static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
                         int bind, int ref)
 {
@@ -187,12 +162,11 @@ nla_put_failure:
 
 static struct tc_action_ops act_simp_ops = {
        .kind           =       "simple",
-       .hinfo          =       &simp_hash_info,
        .type           =       TCA_ACT_SIMP,
        .owner          =       THIS_MODULE,
        .act            =       tcf_simp,
        .dump           =       tcf_simp_dump,
-       .cleanup        =       tcf_simp_cleanup,
+       .cleanup        =       tcf_simp_release,
        .init           =       tcf_simp_init,
 };
 
@@ -202,23 +176,15 @@ MODULE_LICENSE("GPL");
 
 static int __init simp_init_module(void)
 {
-       int err, ret;
-       err = tcf_hashinfo_init(&simp_hash_info, SIMP_TAB_MASK);
-       if (err)
-               return err;
-
-       ret = tcf_register_action(&act_simp_ops);
+       int ret;
+       ret = tcf_register_action(&act_simp_ops, SIMP_TAB_MASK);
        if (!ret)
                pr_info("Simple TC action Loaded\n");
-       else
-               tcf_hashinfo_destroy(&simp_hash_info);
-
        return ret;
 }
 
 static void __exit simp_cleanup_module(void)
 {
-       tcf_hashinfo_destroy(&simp_hash_info);
        tcf_unregister_action(&act_simp_ops);
 }
 
index 98725080b5aa73a1d402a704a5a316eead4410a8..fcfeeaf838beb9e75f07f7cbda7fb2b73237a17f 100644 (file)
@@ -28,7 +28,6 @@
 #include <net/tc_act/tc_skbedit.h>
 
 #define SKBEDIT_TAB_MASK     15
-static struct tcf_hashinfo skbedit_hash_info;
 
 static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
                       struct tcf_result *res)
@@ -65,7 +64,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
-       struct tcf_common *pc;
        u32 flags = 0, *priority = NULL, *mark = NULL;
        u16 *queue_mapping = NULL;
        int ret = 0, err;
@@ -100,19 +98,18 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
-       pc = tcf_hash_check(parm->index, a, bind);
-       if (!pc) {
-               pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
-               if (IS_ERR(pc))
-                       return PTR_ERR(pc);
+       if (!tcf_hash_check(parm->index, a, bind)) {
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+               if (ret)
+                       return ret;
 
-               d = to_skbedit(pc);
+               d = to_skbedit(a);
                ret = ACT_P_CREATED;
        } else {
-               d = to_skbedit(pc);
+               d = to_skbedit(a);
                if (bind)
                        return 0;
-               tcf_hash_release(pc, bind, a->ops->hinfo);
+               tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
        }
@@ -132,19 +129,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        spin_unlock_bh(&d->tcf_lock);
 
        if (ret == ACT_P_CREATED)
-               tcf_hash_insert(pc, a->ops->hinfo);
+               tcf_hash_insert(a);
        return ret;
 }
 
-static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
-{
-       struct tcf_skbedit *d = a->priv;
-
-       if (d)
-               return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
-       return 0;
-}
-
 static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
                            int bind, int ref)
 {
@@ -186,12 +174,10 @@ nla_put_failure:
 
 static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
-       .hinfo          =       &skbedit_hash_info,
        .type           =       TCA_ACT_SKBEDIT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_skbedit,
        .dump           =       tcf_skbedit_dump,
-       .cleanup        =       tcf_skbedit_cleanup,
        .init           =       tcf_skbedit_init,
 };
 
@@ -201,15 +187,11 @@ MODULE_LICENSE("GPL");
 
 static int __init skbedit_init_module(void)
 {
-       int err = tcf_hashinfo_init(&skbedit_hash_info, SKBEDIT_TAB_MASK);
-       if (err)
-               return err;
-       return tcf_register_action(&act_skbedit_ops);
+       return tcf_register_action(&act_skbedit_ops, SKBEDIT_TAB_MASK);
 }
 
 static void __exit skbedit_cleanup_module(void)
 {
-       tcf_hashinfo_destroy(&skbedit_hash_info);
        tcf_unregister_action(&act_skbedit_ops);
 }
 
index a366537f82c6b7981472cdcc51e977a139f3b540..63a3ce75c02ee959fe67d6b203e01420d86b03ad 100644 (file)
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
+#define HTSIZE 256
 
 struct fw_head {
-       struct fw_filter *ht[HTSIZE];
-       u32 mask;
+       u32                     mask;
+       struct fw_filter        *ht[HTSIZE];
 };
 
 struct fw_filter {
@@ -46,30 +46,11 @@ struct fw_filter {
        struct tcf_exts         exts;
 };
 
-static inline int fw_hash(u32 handle)
+static u32 fw_hash(u32 handle)
 {
-       if (HTSIZE == 4096)
-               return ((handle >> 24) & 0xFFF) ^
-                      ((handle >> 12) & 0xFFF) ^
-                      (handle & 0xFFF);
-       else if (HTSIZE == 2048)
-               return ((handle >> 22) & 0x7FF) ^
-                      ((handle >> 11) & 0x7FF) ^
-                      (handle & 0x7FF);
-       else if (HTSIZE == 1024)
-               return ((handle >> 20) & 0x3FF) ^
-                      ((handle >> 10) & 0x3FF) ^
-                      (handle & 0x3FF);
-       else if (HTSIZE == 512)
-               return (handle >> 27) ^
-                      ((handle >> 18) & 0x1FF) ^
-                      ((handle >> 9) & 0x1FF) ^
-                      (handle & 0x1FF);
-       else if (HTSIZE == 256) {
-               u8 *t = (u8 *) &handle;
-               return t[0] ^ t[1] ^ t[2] ^ t[3];
-       } else
-               return handle & (HTSIZE - 1);
+       handle ^= (handle >> 16);
+       handle ^= (handle >> 8);
+       return handle % HTSIZE;
 }
 
 static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
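The old fw_hash() special-cased every table size PAGE_SIZE could produce; with HTSIZE fixed at 256 it collapses to an XOR fold that, like the old 256-entry branch, ends up XORing all four bytes of the handle into the bucket index. A stand-alone user-space harness (hypothetical, not kernel code) to see where a handle lands:

#include <stdio.h>
#include <stdint.h>

#define HTSIZE 256

static uint32_t fw_hash(uint32_t handle)
{
        handle ^= (handle >> 16);
        handle ^= (handle >> 8);
        return handle % HTSIZE;         /* HTSIZE is 256, so this keeps the low byte */
}

int main(void)
{
        uint32_t handles[] = { 0x1, 0x100, 0x10000, 0x1000000, 0x12345678 };
        size_t i;

        for (i = 0; i < sizeof(handles) / sizeof(handles[0]); i++)
                printf("handle 0x%08x -> bucket %u\n",
                       (unsigned int)handles[i],
                       (unsigned int)fw_hash(handles[i]));
        return 0;
}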
index a07d55e75698cf52068ebd54fb651a45d264817a..a0b84e0e22deb4c9e998499b1e202ebaafda95f8 100644 (file)
@@ -1304,6 +1304,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
        struct gnet_dump d;
        struct qdisc_size_table *stab;
 
+       cond_resched();
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
@@ -1435,9 +1436,9 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        s_idx = cb->args[0];
        s_q_idx = q_idx = cb->args[1];
 
-       rcu_read_lock();
        idx = 0;
-       for_each_netdev_rcu(net, dev) {
+       ASSERT_RTNL();
+       for_each_netdev(net, dev) {
                struct netdev_queue *dev_queue;
 
                if (idx < s_idx)
@@ -1460,8 +1461,6 @@ cont:
        }
 
 done:
-       rcu_read_unlock();
-
        cb->args[0] = idx;
        cb->args[1] = q_idx;
 
@@ -1618,6 +1617,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
        struct gnet_dump d;
        const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
 
+       cond_resched();
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
        if (!nlh)
                goto out_nlmsg_trim;
index 1f9c31411f1998dec927fccf05362f09408558b4..8449b337f9e3c9991b36144b98965a08d463cdf9 100644 (file)
@@ -623,8 +623,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
                if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
                        goto nla_put_failure;
        }
-       nla_nest_end(skb, nest);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
        nla_nest_cancel(skb, nest);
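The repeated `return nla_nest_end(skb, nest);` substitution in the qdisc dump paths works because nla_nest_end() already returns the skb's total data length, i.e. the same skb->len these functions used to return by hand after closing the nest. The resulting dump tail, sketched for a hypothetical qdisc (TCA_FOO_LIMIT is a made-up attribute):

static int foo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct nlattr *nest;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_u32(skb, TCA_FOO_LIMIT, 42))        /* hypothetical attribute */
                goto nla_put_failure;

        /* closes the nest and returns skb->len in one step */
        return nla_nest_end(skb, nest);

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}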
index 2f80d01d42a6d8b971345229d407f062df921b04..ead526467ccae574d6afddb7a5146fc7507e68a9 100644 (file)
@@ -1563,8 +1563,7 @@ static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
                goto nla_put_failure;
        if (cbq_dump_attr(skb, &q->link) < 0)
                goto nla_put_failure;
-       nla_nest_end(skb, nest);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
        nla_nest_cancel(skb, nest);
@@ -1599,8 +1598,7 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
                goto nla_put_failure;
        if (cbq_dump_attr(skb, cl) < 0)
                goto nla_put_failure;
-       nla_nest_end(skb, nest);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
        nla_nest_cancel(skb, nest);
index 21e251766eb1a099c8f603c1f7bb5d33ec5b04a2..23c682b42f99ecb86ca8a103005a52d692a21d38 100644 (file)
@@ -781,8 +781,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;
 
-       nla_nest_end(skb, opts);
-       return skb->len;
+       return nla_nest_end(skb, opts);
 
 nla_put_failure:
        return -1;
index ba5bc929eac732900c1d6e3150ad224f3a50a1ae..0bf432c782c1f17cdb8bbb1b8ece16848e63c27e 100644 (file)
@@ -450,8 +450,7 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
                        q->flows_cnt))
                goto nla_put_failure;
 
-       nla_nest_end(skb, opts);
-       return skb->len;
+       return nla_nest_end(skb, opts);
 
 nla_put_failure:
        return -1;
index e82e43b69c335bccbd58e6ba4ef10b56acd8f5a6..e1543b03e39d10c9b952a7ccf454cbd76dd09f09 100644 (file)
@@ -310,6 +310,7 @@ void netif_carrier_on(struct net_device *dev)
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
+               atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
@@ -328,6 +329,7 @@ void netif_carrier_off(struct net_device *dev)
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
+               atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
        }
 }
index c4075610502cf3f53f4ac3f1bc1be7b096e7f488..ec8aeaac1dd7ad7a077fddac3535c18064351b77 100644 (file)
@@ -1353,8 +1353,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
                goto nla_put_failure;
        if (hfsc_dump_curves(skb, cl) < 0)
                goto nla_put_failure;
-       nla_nest_end(skb, nest);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
  nla_put_failure:
        nla_nest_cancel(skb, nest);
index 647680b1c625ec9ca41adff446cd2bc32bc76a54..edee03d922e28678cc4f4ba843f600236cf06f08 100644 (file)
@@ -691,8 +691,7 @@ static int hhf_dump(struct Qdisc *sch, struct sk_buff *skb)
            nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, q->hhf_non_hh_weight))
                goto nla_put_failure;
 
-       nla_nest_end(skb, opts);
-       return skb->len;
+       return nla_nest_end(skb, opts);
 
 nla_put_failure:
        return -1;
index 722e137df244dfef1fbd1ab8e8b477546990acde..9f949abcacef1680dcdc15579c7fe56611f21852 100644 (file)
@@ -1062,12 +1062,13 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 
 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
-       spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
        struct htb_sched *q = qdisc_priv(sch);
        struct nlattr *nest;
        struct tc_htb_glob gopt;
 
-       spin_lock_bh(root_lock);
+       /* It's safe not to acquire the qdisc lock: as we hold RTNL,
+        * no change can happen to the qdisc parameters.
+        */
 
        gopt.direct_pkts = q->direct_pkts;
        gopt.version = HTB_VER;
@@ -1081,13 +1082,10 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
            nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
                goto nla_put_failure;
-       nla_nest_end(skb, nest);
 
-       spin_unlock_bh(root_lock);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
-       spin_unlock_bh(root_lock);
        nla_nest_cancel(skb, nest);
        return -1;
 }
@@ -1096,11 +1094,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
                          struct sk_buff *skb, struct tcmsg *tcm)
 {
        struct htb_class *cl = (struct htb_class *)arg;
-       spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
        struct nlattr *nest;
        struct tc_htb_opt opt;
 
-       spin_lock_bh(root_lock);
+       /* It's safe not to acquire the qdisc lock: as we hold RTNL,
+        * no change can happen to the class parameters.
+        */
        tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
        tcm->tcm_handle = cl->common.classid;
        if (!cl->level && cl->un.leaf.q)
@@ -1128,12 +1127,9 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
            nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
                goto nla_put_failure;
 
-       nla_nest_end(skb, nest);
-       spin_unlock_bh(root_lock);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
-       spin_unlock_bh(root_lock);
        nla_nest_cancel(skb, nest);
        return -1;
 }
index bce1665239b8ff7e55901b216fb705bd6b31229e..62871c14e1f93fef31e5795ad2b8ae2f064399a4 100644 (file)
@@ -100,8 +100,7 @@ static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
-       nla_nest_end(skb, nest);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
        nla_nest_cancel(skb, nest);
index de1059af6da14c563115147852c4f52b80a9387d..f1669a00f5710c297310f4a1da4a1b4e72cd3cab 100644 (file)
@@ -117,6 +117,11 @@ struct netem_sched_data {
                LOST_IN_BURST_PERIOD,
        } _4_state_model;
 
+       enum {
+               GOOD_STATE = 1,
+               BAD_STATE,
+       } GE_state_model;
+
        /* Correlated Loss Generation models */
        struct clgstate {
                /* state of the Markov chain */
@@ -272,15 +277,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
        struct clgstate *clg = &q->clg;
 
        switch (clg->state) {
-       case 1:
+       case GOOD_STATE:
                if (prandom_u32() < clg->a1)
-                       clg->state = 2;
+                       clg->state = BAD_STATE;
                if (prandom_u32() < clg->a4)
                        return true;
                break;
-       case 2:
+       case BAD_STATE:
                if (prandom_u32() < clg->a2)
-                       clg->state = 1;
+                       clg->state = GOOD_STATE;
                if (prandom_u32() > clg->a3)
                        return true;
        }
@@ -689,9 +694,8 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        return 0;
 }
 
-static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
+static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 {
-       struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_corr *c = nla_data(attr);
 
        init_crandom(&q->delay_cor, c->delay_corr);
@@ -699,27 +703,24 @@ static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
        init_crandom(&q->dup_cor, c->dup_corr);
 }
 
-static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
+static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
 {
-       struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_reorder *r = nla_data(attr);
 
        q->reorder = r->probability;
        init_crandom(&q->reorder_cor, r->correlation);
 }
 
-static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
+static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
 {
-       struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_corrupt *r = nla_data(attr);
 
        q->corrupt = r->probability;
        init_crandom(&q->corrupt_cor, r->correlation);
 }
 
-static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
 {
-       struct netem_sched_data *q = qdisc_priv(sch);
        const struct tc_netem_rate *r = nla_data(attr);
 
        q->rate = r->rate;
@@ -732,9 +733,8 @@ static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
                q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
 }
 
-static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
 {
-       struct netem_sched_data *q = qdisc_priv(sch);
        const struct nlattr *la;
        int rem;
 
@@ -752,7 +752,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
 
                        q->loss_model = CLG_4_STATES;
 
-                       q->clg.state = 1;
+                       q->clg.state = TX_IN_GAP_PERIOD;
                        q->clg.a1 = gi->p13;
                        q->clg.a2 = gi->p31;
                        q->clg.a3 = gi->p32;
@@ -770,7 +770,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
                        }
 
                        q->loss_model = CLG_GILB_ELL;
-                       q->clg.state = 1;
+                       q->clg.state = GOOD_STATE;
                        q->clg.a1 = ge->p;
                        q->clg.a2 = ge->r;
                        q->clg.a3 = ge->h;
@@ -821,6 +821,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        struct netem_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_NETEM_MAX + 1];
        struct tc_netem_qopt *qopt;
+       struct clgstate old_clg;
+       int old_loss_model = CLG_RANDOM;
        int ret;
 
        if (opt == NULL)
@@ -831,6 +833,33 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (ret < 0)
                return ret;
 
+       /* backup q->clg and q->loss_model */
+       old_clg = q->clg;
+       old_loss_model = q->loss_model;
+
+       if (tb[TCA_NETEM_LOSS]) {
+               ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
+               if (ret) {
+                       q->loss_model = old_loss_model;
+                       return ret;
+               }
+       } else {
+               q->loss_model = CLG_RANDOM;
+       }
+
+       if (tb[TCA_NETEM_DELAY_DIST]) {
+               ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
+               if (ret) {
+                       /* recover clg and loss_model, in case
+                        * q->clg and q->loss_model were modified
+                        * by get_loss_clg()
+                        */
+                       q->clg = old_clg;
+                       q->loss_model = old_loss_model;
+                       return ret;
+               }
+       }
+
        sch->limit = qopt->limit;
 
        q->latency = qopt->latency;
@@ -848,22 +877,16 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
                q->reorder = ~0;
 
        if (tb[TCA_NETEM_CORR])
-               get_correlation(sch, tb[TCA_NETEM_CORR]);
-
-       if (tb[TCA_NETEM_DELAY_DIST]) {
-               ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
-               if (ret)
-                       return ret;
-       }
+               get_correlation(q, tb[TCA_NETEM_CORR]);
 
        if (tb[TCA_NETEM_REORDER])
-               get_reorder(sch, tb[TCA_NETEM_REORDER]);
+               get_reorder(q, tb[TCA_NETEM_REORDER]);
 
        if (tb[TCA_NETEM_CORRUPT])
-               get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
+               get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
 
        if (tb[TCA_NETEM_RATE])
-               get_rate(sch, tb[TCA_NETEM_RATE]);
+               get_rate(q, tb[TCA_NETEM_RATE]);
 
        if (tb[TCA_NETEM_RATE64])
                q->rate = max_t(u64, q->rate,
@@ -872,10 +895,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_NETEM_ECN])
                q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 
-       q->loss_model = CLG_RANDOM;
-       if (tb[TCA_NETEM_LOSS])
-               ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
-
        return ret;
 }
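netem_change() now runs the two option parsers that can fail, get_loss_clg() and get_dist_table(), before any other state is touched, snapshotting q->clg and q->loss_model first so a failure leaves the qdisc exactly as it was. The resulting control flow, condensed from the hunks above:

        old_clg = q->clg;                       /* snapshot current loss state */
        old_loss_model = q->loss_model;

        if (tb[TCA_NETEM_LOSS]) {
                ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
                if (ret) {
                        q->loss_model = old_loss_model;
                        return ret;
                }
        } else {
                q->loss_model = CLG_RANDOM;
        }

        if (tb[TCA_NETEM_DELAY_DIST]) {
                ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
                if (ret) {
                        /* get_loss_clg() may already have rewritten both */
                        q->clg = old_clg;
                        q->loss_model = old_loss_model;
                        return ret;
                }
        }

        /* only infallible assignments follow, so no further unwinding is needed */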
 
index 4f505a006896578ebdac1392af1dc064500665c9..18ff634337092d5f8e7cba8fe817542f1455738f 100644 (file)
 struct tbf_sched_data {
 /* Parameters */
        u32             limit;          /* Maximal length of backlog: bytes */
+       u32             max_size;
        s64             buffer;         /* Token bucket depth/rate: MUST BE >= MTU/B */
        s64             mtu;
-       u32             max_size;
        struct psched_ratecfg rate;
        struct psched_ratecfg peak;
-       bool peak_present;
 
 /* Variables */
        s64     tokens;                 /* Current number of B tokens */
@@ -222,6 +221,11 @@ static unsigned int tbf_drop(struct Qdisc *sch)
        return len;
 }
 
+static bool tbf_peak_present(const struct tbf_sched_data *q)
+{
+       return q->peak.rate_bytes_ps;
+}
+
 static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -238,7 +242,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                now = ktime_to_ns(ktime_get());
                toks = min_t(s64, now - q->t_c, q->buffer);
 
-               if (q->peak_present) {
+               if (tbf_peak_present(q)) {
                        ptoks = toks + q->ptokens;
                        if (ptoks > q->mtu)
                                ptoks = q->mtu;
@@ -366,6 +370,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
                } else {
                        max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
                }
+       } else {
+               memset(&peak, 0, sizeof(peak));
        }
 
        if (max_size < psched_mtu(qdisc_dev(sch)))
@@ -410,12 +416,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        q->ptokens = q->mtu;
 
        memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
-       if (qopt->peakrate.rate) {
-               memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
-               q->peak_present = true;
-       } else {
-               q->peak_present = false;
-       }
+       memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
 
        sch_tree_unlock(sch);
        err = 0;
@@ -458,7 +459,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
 
        opt.limit = q->limit;
        psched_ratecfg_getrate(&opt.rate, &q->rate);
-       if (q->peak_present)
+       if (tbf_peak_present(q))
                psched_ratecfg_getrate(&opt.peakrate, &q->peak);
        else
                memset(&opt.peakrate, 0, sizeof(opt.peakrate));
@@ -469,13 +470,12 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
            nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
                goto nla_put_failure;
-       if (q->peak_present &&
+       if (tbf_peak_present(q) &&
            q->peak.rate_bytes_ps >= (1ULL << 32) &&
            nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
                goto nla_put_failure;
 
-       nla_nest_end(skb, nest);
-       return skb->len;
+       return nla_nest_end(skb, nest);
 
 nla_put_failure:
        nla_nest_cancel(skb, nest);
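In sch_tbf the peak_present flag is gone: whether a peak rate is configured is derived on demand by tbf_peak_present() from q->peak.rate_bytes_ps. That predicate is only trustworthy because tbf_change() now zeroes the local `peak` when no peak rate was supplied before it is unconditionally copied into q->peak; without the added memset(), leftover bytes could make the check read true. Restated with the invariant spelled out:

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
        /* non-zero only if tbf_change() configured a peak rate; the
         * memset() in the "no peak rate" branch guarantees zero otherwise
         */
        return q->peak.rate_bytes_ps;
}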
index ee13d28d39d10702096f733ee8d88aa862459952..4f6d6f9d127474b457cf274a1a0977bb75c6e8dc 100644 (file)
@@ -1319,8 +1319,7 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
                        break;
        }
 
-       if (trans_next != NULL)
-               asoc->peer.retran_path = trans_next;
+       asoc->peer.retran_path = trans_next;
 
        pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
                 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
index d0810dc5f079f67de9bf8192fa3968fd81e245f0..1d348d15b33de4c3b20f7f071442c05058e7dcc3 100644 (file)
@@ -652,5 +652,4 @@ void sctp_transport_immediate_rtx(struct sctp_transport *t)
                if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
                        sctp_transport_hold(t);
        }
-       return;
 }
index a19ae1968d379d70ad36e51039d79469cfe30d18..f25eaa30b6907dd9d8c332c54a05cc38c843f7ff 100644 (file)
@@ -594,7 +594,7 @@ void sock_release(struct socket *sock)
        }
 
        if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
-               printk(KERN_ERR "sock_release: fasync list not empty!\n");
+               pr_err("%s: fasync list not empty!\n", __func__);
 
        if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
                return;
@@ -1266,8 +1266,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
                static int warned;
                if (!warned) {
                        warned = 1;
-                       printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
-                              current->comm);
+                       pr_info("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
+                               current->comm);
                }
                family = PF_PACKET;
        }
@@ -2600,8 +2600,7 @@ int sock_register(const struct net_proto_family *ops)
        int err;
 
        if (ops->family >= NPROTO) {
-               printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
-                      NPROTO);
+               pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO);
                return -ENOBUFS;
        }
 
@@ -2615,7 +2614,7 @@ int sock_register(const struct net_proto_family *ops)
        }
        spin_unlock(&net_family_lock);
 
-       printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
+       pr_info("NET: Registered protocol family %d\n", ops->family);
        return err;
 }
 EXPORT_SYMBOL(sock_register);
@@ -2643,7 +2642,7 @@ void sock_unregister(int family)
 
        synchronize_rcu();
 
-       printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
+       pr_info("NET: Unregistered protocol family %d\n", family);
 }
 EXPORT_SYMBOL(sock_unregister);
 
index 60b00ab93d74159cbc18e1caa1e469a7e38b651c..a74acf9ee804b43496d2c53c5ebb26160378eaa6 100644 (file)
@@ -37,6 +37,8 @@
 #ifndef _TIPC_ADDR_H
 #define _TIPC_ADDR_H
 
+#include "core.h"
+
 #define TIPC_ZONE_MASK         0xff000000u
 #define TIPC_CLUSTER_MASK      0xfffff000u
 
index bf860d9e75af2363e6538ee525c60bf7c0821b6c..95ab5ef92920fddf34c02478973b98ebbe96ba59 100644 (file)
@@ -41,9 +41,9 @@
 #include "bcast.h"
 #include "name_distr.h"
 
-#define MAX_PKT_DEFAULT_MCAST 1500     /* bcast link max packet size (fixed) */
-
-#define BCLINK_WIN_DEFAULT 20          /* bcast link window size (default) */
+#define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
+#define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
+#define        BCBEARER                MAX_BEARERS
 
 /**
  * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
@@ -356,9 +356,9 @@ static void bclink_peek_nack(struct tipc_msg *msg)
 }
 
 /*
- * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
  */
-int tipc_bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff *buf)
 {
        int res;
 
@@ -370,7 +370,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
                goto exit;
        }
 
-       res = tipc_link_send_buf(bcl, buf);
+       res = __tipc_link_xmit(bcl, buf);
        if (likely(res >= 0)) {
                bclink_set_last_sent();
                bcl->stats.queue_sz_counts++;
@@ -399,19 +399,18 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
         */
 
        if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-               tipc_link_send_proto_msg(
-                       node->active_links[node->addr & 1],
-                       STATE_MSG, 0, 0, 0, 0, 0);
+               tipc_link_proto_xmit(node->active_links[node->addr & 1],
+                                    STATE_MSG, 0, 0, 0, 0, 0);
                bcl->stats.sent_acks++;
        }
 }
 
 /**
- * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
  *
  * tipc_net_lock is read_locked, no other locks set
  */
-void tipc_bclink_recv_pkt(struct sk_buff *buf)
+void tipc_bclink_rcv(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
@@ -468,7 +467,7 @@ receive:
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
-                               tipc_port_recv_mcast(buf, NULL);
+                               tipc_port_mcast_rcv(buf, NULL);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
@@ -478,12 +477,12 @@ receive:
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);
-                       tipc_link_recv_bundle(buf);
+                       tipc_link_bundle_rcv(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        int ret;
-                       ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
-                                                     &node->bclink.reasm_tail,
-                                                     &buf);
+                       ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
+                                                &node->bclink.reasm_tail,
+                                                &buf);
                        if (ret == LINK_REASM_ERROR)
                                goto unlock;
                        spin_lock_bh(&bc_lock);
@@ -503,7 +502,7 @@ receive:
                        bclink_accept_pkt(node, seqno);
                        spin_unlock_bh(&bc_lock);
                        tipc_node_unlock(node);
-                       tipc_named_recv(buf);
+                       tipc_named_rcv(buf);
                } else {
                        spin_lock_bh(&bc_lock);
                        bclink_accept_pkt(node, seqno);
@@ -669,9 +668,8 @@ void tipc_bcbearer_sort(void)
        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-               struct tipc_bearer *b = &tipc_bearers[b_index];
-
-               if (!b->active || !b->nodes.count)
+               struct tipc_bearer *b = bearer_list[b_index];
+               if (!b || !b->nodes.count)
                        continue;
 
                if (!bp_temp[b->priority].primary)
@@ -785,8 +783,8 @@ void tipc_bclink_init(void)
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-       spin_lock_init(&bcbearer->bearer.lock);
        bcl->b_ptr = &bcbearer->bearer;
+       bearer_list[BCBEARER] = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
 }
@@ -797,6 +795,7 @@ void tipc_bclink_stop(void)
        tipc_link_purge_queues(bcl);
        spin_unlock_bh(&bc_lock);
 
+       bearer_list[BCBEARER] = NULL;
        memset(bclink, 0, sizeof(*bclink));
        memset(bcbearer, 0, sizeof(*bcbearer));
 }
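On the TIPC side, the fixed tipc_bearers[] array of embedded structs (keyed by an `active` flag) becomes bearer_list[], an array of pointers with one extra slot, BCBEARER == MAX_BEARERS, which tipc_bclink_init()/tipc_bclink_stop() use to publish and withdraw the broadcast pseudo-bearer. Unused slots are simply NULL, so every lookup NULL-checks the entry instead of testing `active`; a hypothetical helper, assuming only the declarations shown in this diff, would iterate like this:

static unsigned int count_registered_bearers(void)
{
        unsigned int i, n = 0;

        for (i = 0; i <= BCBEARER; i++)         /* include the broadcast slot */
                if (bearer_list[i])
                        n++;
        return n;
}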
index 6ee587b469fd3fd9df70a7f086f1f5485dc5caa6..a80ef54b818e221a98bd9bd69a3adf28d0001f88 100644 (file)
@@ -90,8 +90,8 @@ void tipc_bclink_add_node(u32 addr);
 void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-int  tipc_bclink_send_msg(struct sk_buff *buf);
-void tipc_bclink_recv_pkt(struct sk_buff *buf);
+int  tipc_bclink_xmit(struct sk_buff *buf);
+void tipc_bclink_rcv(struct sk_buff *buf);
 u32  tipc_bclink_get_last_sent(void);
 u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
 void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
index 574b86193b15a8251dc85555cc03a8e5f9af1768..3fef7eb776dc12934654b2bfa7500cfa75138ad4 100644 (file)
@@ -49,9 +49,9 @@ static struct tipc_media * const media_info_array[] = {
        NULL
 };
 
-struct tipc_bearer tipc_bearers[MAX_BEARERS];
+struct tipc_bearer *bearer_list[MAX_BEARERS + 1];
 
-static void bearer_disable(struct tipc_bearer *b_ptr);
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -177,8 +177,9 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
        struct tipc_bearer *b_ptr;
        u32 i;
 
-       for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
-               if (b_ptr->active && (!strcmp(b_ptr->name, name)))
+       for (i = 0; i < MAX_BEARERS; i++) {
+               b_ptr = bearer_list[i];
+               if (b_ptr && (!strcmp(b_ptr->name, name)))
                        return b_ptr;
        }
        return NULL;
@@ -200,8 +201,10 @@ struct sk_buff *tipc_bearer_get_names(void)
        read_lock_bh(&tipc_net_lock);
        for (i = 0; media_info_array[i] != NULL; i++) {
                for (j = 0; j < MAX_BEARERS; j++) {
-                       b = &tipc_bearers[j];
-                       if (b->active && (b->media == media_info_array[i])) {
+                       b = bearer_list[j];
+                       if (!b)
+                               continue;
+                       if (b->media == media_info_array[i]) {
                                tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
                                                    b->name,
                                                    strlen(b->name) + 1);
@@ -284,16 +287,17 @@ restart:
        bearer_id = MAX_BEARERS;
        with_this_prio = 1;
        for (i = MAX_BEARERS; i-- != 0; ) {
-               if (!tipc_bearers[i].active) {
+               b_ptr = bearer_list[i];
+               if (!b_ptr) {
                        bearer_id = i;
                        continue;
                }
-               if (!strcmp(name, tipc_bearers[i].name)) {
+               if (!strcmp(name, b_ptr->name)) {
                        pr_warn("Bearer <%s> rejected, already enabled\n",
                                name);
                        goto exit;
                }
-               if ((tipc_bearers[i].priority == priority) &&
+               if ((b_ptr->priority == priority) &&
                    (++with_this_prio > 2)) {
                        if (priority-- == 0) {
                                pr_warn("Bearer <%s> rejected, duplicate priority\n",
@@ -311,7 +315,11 @@ restart:
                goto exit;
        }
 
-       b_ptr = &tipc_bearers[bearer_id];
+       b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
+       if (!b_ptr) {
+               res = -ENOMEM;
+               goto exit;
+       }
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
        res = m_ptr->enable_media(b_ptr);
@@ -324,19 +332,20 @@ restart:
        b_ptr->identity = bearer_id;
        b_ptr->tolerance = m_ptr->tolerance;
        b_ptr->window = m_ptr->window;
+       b_ptr->domain = disc_domain;
        b_ptr->net_plane = bearer_id + 'A';
-       b_ptr->active = 1;
        b_ptr->priority = priority;
-       INIT_LIST_HEAD(&b_ptr->links);
-       spin_lock_init(&b_ptr->lock);
 
-       res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr, disc_domain);
+       res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
        if (res) {
-               bearer_disable(b_ptr);
+               bearer_disable(b_ptr, false);
                pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
                        name);
                goto exit;
        }
+
+       bearer_list[bearer_id] = b_ptr;
+
        pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
                name,
                tipc_addr_string_fill(addr_string, disc_domain), priority);
@@ -350,20 +359,11 @@ exit:
  */
 static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
 {
-       struct tipc_link *l_ptr;
-       struct tipc_link *temp_l_ptr;
-
        read_lock_bh(&tipc_net_lock);
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       spin_lock_bh(&b_ptr->lock);
-       list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
-               struct tipc_node *n_ptr = l_ptr->owner;
-
-               spin_lock_bh(&n_ptr->lock);
-               tipc_link_reset(l_ptr);
-               spin_unlock_bh(&n_ptr->lock);
-       }
-       spin_unlock_bh(&b_ptr->lock);
+       tipc_disc_delete(b_ptr->link_req);
+       tipc_link_reset_list(b_ptr->identity);
+       tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
        read_unlock_bh(&tipc_net_lock);
        return 0;
 }
@@ -373,26 +373,24 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
  *
  * Note: This routine assumes caller holds tipc_net_lock.
  */
-static void bearer_disable(struct tipc_bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
 {
-       struct tipc_link *l_ptr;
-       struct tipc_link *temp_l_ptr;
-       struct tipc_link_req *temp_req;
+       u32 i;
 
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
-       spin_lock_bh(&b_ptr->lock);
        b_ptr->media->disable_media(b_ptr);
-       list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
-               tipc_link_delete(l_ptr);
-       }
-       temp_req = b_ptr->link_req;
-       b_ptr->link_req = NULL;
-       spin_unlock_bh(&b_ptr->lock);
 
-       if (temp_req)
-               tipc_disc_delete(temp_req);
+       tipc_link_delete_list(b_ptr->identity, shutting_down);
+       if (b_ptr->link_req)
+               tipc_disc_delete(b_ptr->link_req);
 
-       memset(b_ptr, 0, sizeof(struct tipc_bearer));
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (b_ptr == bearer_list[i]) {
+                       bearer_list[i] = NULL;
+                       break;
+               }
+       }
+       kfree(b_ptr);
 }
 
 int tipc_disable_bearer(const char *name)
@@ -406,7 +404,7 @@ int tipc_disable_bearer(const char *name)
                pr_warn("Attempt to disable unknown bearer <%s>\n", name);
                res = -EINVAL;
        } else {
-               bearer_disable(b_ptr);
+               bearer_disable(b_ptr, false);
                res = 0;
        }
        write_unlock_bh(&tipc_net_lock);
@@ -585,7 +583,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                        break;
        case NETDEV_DOWN:
        case NETDEV_CHANGEMTU:
+               tipc_reset_bearer(b_ptr);
+               break;
        case NETDEV_CHANGEADDR:
+               tipc_l2_media_addr_set(b_ptr, &b_ptr->addr,
+                                      (char *)dev->dev_addr);
                tipc_reset_bearer(b_ptr);
                break;
        case NETDEV_UNREGISTER:
@@ -599,7 +601,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 }
 
 static struct packet_type tipc_packet_type __read_mostly = {
-       .type = __constant_htons(ETH_P_TIPC),
+       .type = htons(ETH_P_TIPC),
        .func = tipc_l2_rcv_msg,
 };
 
@@ -627,10 +629,14 @@ void tipc_bearer_cleanup(void)
 
 void tipc_bearer_stop(void)
 {
+       struct tipc_bearer *b_ptr;
        u32 i;
 
        for (i = 0; i < MAX_BEARERS; i++) {
-               if (tipc_bearers[i].active)
-                       bearer_disable(&tipc_bearers[i]);
+               b_ptr = bearer_list[i];
+               if (b_ptr) {
+                       bearer_disable(b_ptr, true);
+                       bearer_list[i] = NULL;
+               }
        }
 }
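
To make the new enable-side ordering in bearer.c easier to follow, here is a condensed sketch rather than the real tipc_enable_bearer(): name validation, priority handling, locking, media enabling and logging are all omitted, and example_enable() is a hypothetical name. The point it illustrates is that a bearer is now allocated dynamically, fully initialised, and only published in bearer_list[] after its discovery object has been created.

static int example_enable(struct tipc_media *m_ptr, const char *name,
                          u32 bearer_id, u32 disc_domain, u32 priority)
{
        struct tipc_bearer *b_ptr;
        int res;

        b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
        if (!b_ptr)
                return -ENOMEM;
        strcpy(b_ptr->name, name);
        b_ptr->media = m_ptr;
        b_ptr->identity = bearer_id;
        b_ptr->domain = disc_domain;    /* discovery domain now lives in the bearer */
        b_ptr->priority = priority;
        res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
        if (res) {
                kfree(b_ptr);           /* never published, so a plain kfree suffices */
                return res;
        }
        bearer_list[bearer_id] = b_ptr; /* publish only once setup has succeeded */
        return 0;
}
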
index 4f5db9ad5bf639056bc56f1284d7400e87095dcc..ba48145e871dd8dcd357a193b61e9234e5ad7f0d 100644 (file)
@@ -107,10 +107,8 @@ struct tipc_media {
 
 /**
  * struct tipc_bearer - Generic TIPC bearer structure
- * @dev: ptr to associated network device
- * @usr_handle: pointer to additional media-specific information about bearer
+ * @media_ptr: pointer to additional media-specific information about bearer
  * @mtu: max packet size bearer can support
- * @lock: spinlock for controlling access to bearer
  * @addr: media-specific address associated with bearer
  * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
@@ -118,10 +116,9 @@ struct tipc_media {
  * @priority: default link priority for bearer
  * @window: default window size for bearer
  * @tolerance: default link tolerance for bearer
+ * @domain: network domain to which links can be established
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
- * @links: list of non-congested links associated with bearer
- * @active: non-zero if bearer structure is represents a bearer
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
  * @nodes: indicates which nodes in cluster can be reached through bearer
  *
@@ -134,16 +131,14 @@ struct tipc_bearer {
        u32 mtu;                                /* initialized by media */
        struct tipc_media_addr addr;            /* initialized by media */
        char name[TIPC_MAX_BEARER_NAME];
-       spinlock_t lock;
        struct tipc_media *media;
        struct tipc_media_addr bcast_addr;
        u32 priority;
        u32 window;
        u32 tolerance;
+       u32 domain;
        u32 identity;
        struct tipc_link_req *link_req;
-       struct list_head links;
-       int active;
        char net_plane;
        struct tipc_node_map nodes;
 };
@@ -155,7 +150,7 @@ struct tipc_bearer_names {
 
 struct tipc_link;
 
-extern struct tipc_bearer tipc_bearers[];
+extern struct tipc_bearer *bearer_list[];
 
 /*
  * TIPC routines available to supported media types
index e6d721692ae016bbc900ac909406ca0a645848b0..4b981c053823e90cc31963277aedd8c3682bc1a0 100644 (file)
 #define REPLY_TRUNCATED "<truncated>\n"
 
 static DEFINE_MUTEX(config_mutex);
-static struct tipc_server cfgsrv;
 
 static const void *req_tlv_area;       /* request message TLV area */
 static int req_tlv_space;              /* request message TLV area size */
 static int rep_headroom;               /* reply message headroom to use */
 
-
 struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
 {
        struct sk_buff *buf;
@@ -185,18 +183,6 @@ static struct sk_buff *cfg_set_own_addr(void)
        return tipc_cfg_reply_none();
 }
 
-static struct sk_buff *cfg_set_remote_mng(void)
-{
-       u32 value;
-
-       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
-               return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
-       value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
-       tipc_remote_management = (value != 0);
-       return tipc_cfg_reply_none();
-}
-
 static struct sk_buff *cfg_set_max_ports(void)
 {
        u32 value;
@@ -247,21 +233,10 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        /* Check command authorization */
        if (likely(in_own_node(orig_node))) {
                /* command is permitted */
-       } else if (cmd >= 0x8000) {
+       } else {
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                          " (cannot be done remotely)");
                goto exit;
-       } else if (!tipc_remote_management) {
-               rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
-               goto exit;
-       } else if (cmd >= 0x4000) {
-               u32 domain = 0;
-
-               if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
-                   (domain != orig_node)) {
-                       rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
-                       goto exit;
-               }
        }
 
        /* Call appropriate processing routine */
@@ -310,18 +285,12 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_NODE_ADDR:
                rep_tlv_buf = cfg_set_own_addr();
                break;
-       case TIPC_CMD_SET_REMOTE_MNG:
-               rep_tlv_buf = cfg_set_remote_mng();
-               break;
        case TIPC_CMD_SET_MAX_PORTS:
                rep_tlv_buf = cfg_set_max_ports();
                break;
        case TIPC_CMD_SET_NETID:
                rep_tlv_buf = cfg_set_netid();
                break;
-       case TIPC_CMD_GET_REMOTE_MNG:
-               rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
-               break;
        case TIPC_CMD_GET_MAX_PORTS:
                rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
                break;
@@ -345,6 +314,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
        case TIPC_CMD_SET_MAX_PUBL:
        case TIPC_CMD_GET_MAX_PUBL:
        case TIPC_CMD_SET_LOG_SIZE:
+       case TIPC_CMD_SET_REMOTE_MNG:
+       case TIPC_CMD_GET_REMOTE_MNG:
        case TIPC_CMD_DUMP_LOG:
                rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                          " (obsolete command)");
@@ -369,75 +340,3 @@ exit:
        mutex_unlock(&config_mutex);
        return rep_tlv_buf;
 }
-
-static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
-                              void *usr_data, void *buf, size_t len)
-{
-       struct tipc_cfg_msg_hdr *req_hdr;
-       struct tipc_cfg_msg_hdr *rep_hdr;
-       struct sk_buff *rep_buf;
-
-       /* Validate configuration message header (ignore invalid message) */
-       req_hdr = (struct tipc_cfg_msg_hdr *)buf;
-       if ((len < sizeof(*req_hdr)) ||
-           (len != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
-           (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
-               pr_warn("Invalid configuration message discarded\n");
-               return;
-       }
-
-       /* Generate reply for request (if can't, return request) */
-       rep_buf = tipc_cfg_do_cmd(addr->addr.id.node, ntohs(req_hdr->tcm_type),
-                                 buf + sizeof(*req_hdr),
-                                 len - sizeof(*req_hdr),
-                                 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
-       if (rep_buf) {
-               skb_push(rep_buf, sizeof(*rep_hdr));
-               rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
-               memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
-               rep_hdr->tcm_len = htonl(rep_buf->len);
-               rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
-               tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
-                                 rep_buf->len);
-               kfree_skb(rep_buf);
-       }
-}
-
-static struct sockaddr_tipc cfgsrv_addr __read_mostly = {
-       .family                 = AF_TIPC,
-       .addrtype               = TIPC_ADDR_NAMESEQ,
-       .addr.nameseq.type      = TIPC_CFG_SRV,
-       .addr.nameseq.lower     = 0,
-       .addr.nameseq.upper     = 0,
-       .scope                  = TIPC_ZONE_SCOPE
-};
-
-static struct tipc_server cfgsrv __read_mostly = {
-       .saddr                  = &cfgsrv_addr,
-       .imp                    = TIPC_CRITICAL_IMPORTANCE,
-       .type                   = SOCK_RDM,
-       .max_rcvbuf_size        = 64 * 1024,
-       .name                   = "cfg_server",
-       .tipc_conn_recvmsg      = cfg_conn_msg_event,
-       .tipc_conn_new          = NULL,
-       .tipc_conn_shutdown     = NULL
-};
-
-int tipc_cfg_init(void)
-{
-       return tipc_server_start(&cfgsrv);
-}
-
-void tipc_cfg_reinit(void)
-{
-       tipc_server_stop(&cfgsrv);
-
-       cfgsrv_addr.addr.nameseq.lower = tipc_own_addr;
-       cfgsrv_addr.addr.nameseq.upper = tipc_own_addr;
-       tipc_server_start(&cfgsrv);
-}
-
-void tipc_cfg_stop(void)
-{
-       tipc_server_stop(&cfgsrv);
-}
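
The net effect of the config.c hunks is easiest to see in isolation: with remote management removed, the only authorization question left in tipc_cfg_do_cmd() is whether the command originated on the local node. A minimal sketch of that check follows; example_check_origin() is a hypothetical helper, since the real test is written inline.

static struct sk_buff *example_check_origin(u32 orig_node)
{
        if (likely(in_own_node(orig_node)))
                return NULL;    /* permitted; caller goes on to dispatch the command */

        /* Anything else is refused, regardless of the old remote-management setting */
        return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                           " (cannot be done remotely)");
}
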
index 1f252f3fa0586b103fc129059c0df3906b1f9365..47b1bf18161215afbbe7020845c9dd92c79381bd 100644 (file)
@@ -64,9 +64,4 @@ static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
 struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
                                const void *req_tlv_area, int req_tlv_space,
                                int headroom);
-
-int  tipc_cfg_init(void);
-void tipc_cfg_reinit(void);
-void tipc_cfg_stop(void);
-
 #endif
index 80c20647b3d29fd75d2ebb8c760c2aa75214c515..50d57429ebcaf82b8d36bcf49f6fa1585664180a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/core.c: TIPC module code
  *
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2013, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -50,7 +50,6 @@ int tipc_random __read_mostly;
 u32 tipc_own_addr __read_mostly;
 int tipc_max_ports __read_mostly;
 int tipc_net_id __read_mostly;
-int tipc_remote_management __read_mostly;
 int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
 
 /**
@@ -85,7 +84,6 @@ static void tipc_core_stop(void)
        tipc_net_stop();
        tipc_bearer_cleanup();
        tipc_netlink_stop();
-       tipc_cfg_stop();
        tipc_subscr_stop();
        tipc_nametbl_stop();
        tipc_ref_table_stop();
@@ -130,18 +128,12 @@ static int tipc_core_start(void)
        if (err)
                goto out_subscr;
 
-       err = tipc_cfg_init();
-       if (err)
-               goto out_cfg;
-
        err = tipc_bearer_setup();
        if (err)
                goto out_bearer;
 
        return 0;
 out_bearer:
-       tipc_cfg_stop();
-out_cfg:
        tipc_subscr_stop();
 out_subscr:
        tipc_unregister_sysctl();
@@ -166,7 +158,6 @@ static int __init tipc_init(void)
        pr_info("Activated (version " TIPC_MOD_VER ")\n");
 
        tipc_own_addr = 0;
-       tipc_remote_management = 1;
        tipc_max_ports = CONFIG_TIPC_PORTS;
        tipc_net_id = 4711;
 
index 4dfe137587bbd35a519b87a107f4e665a625bdaa..8985bbcb942bdb3d6ef839c3249d4e547c2f75ce 100644 (file)
@@ -79,7 +79,6 @@ int tipc_snprintf(char *buf, int len, const char *fmt, ...);
 extern u32 tipc_own_addr __read_mostly;
 extern int tipc_max_ports __read_mostly;
 extern int tipc_net_id __read_mostly;
-extern int tipc_remote_management __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 
 /*
index 412ff41b861166e5511ae3ad8c875fa27039ac6b..542fe3413dc4e8d06d97eb2d5a409d2a7b26459a 100644 (file)
@@ -48,7 +48,6 @@
  * struct tipc_link_req - information about an ongoing link setup request
  * @bearer: bearer issuing requests
  * @dest: destination address for request messages
- * @domain: network domain to which links can be established
  * @num_nodes: number of nodes currently discovered (i.e. with an active link)
  * @lock: spinlock for controlling access to requests
  * @buf: request message to be (repeatedly) sent
@@ -58,7 +57,6 @@
 struct tipc_link_req {
        struct tipc_bearer *bearer;
        struct tipc_media_addr dest;
-       u32 domain;
        int num_nodes;
        spinlock_t lock;
        struct sk_buff *buf;
@@ -69,14 +67,13 @@ struct tipc_link_req {
 /**
  * tipc_disc_init_msg - initialize a link setup message
  * @type: message type (request or response)
- * @dest_domain: network domain of node(s) which should respond to message
  * @b_ptr: ptr to bearer issuing message
  */
-static struct sk_buff *tipc_disc_init_msg(u32 type, u32 dest_domain,
-                                         struct tipc_bearer *b_ptr)
+static struct sk_buff *tipc_disc_init_msg(u32 type, struct tipc_bearer *b_ptr)
 {
        struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
        struct tipc_msg *msg;
+       u32 dest_domain = b_ptr->domain;
 
        if (buf) {
                msg = buf_msg(buf);
@@ -110,11 +107,11 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
 }
 
 /**
- * tipc_disc_recv_msg - handle incoming link setup message (request or response)
+ * tipc_disc_rcv - handle incoming link setup message (request or response)
  * @buf: buffer containing message
  * @b_ptr: bearer that message arrived on
  */
-void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 {
        struct tipc_node *n_ptr;
        struct tipc_link *link;
@@ -149,7 +146,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
        }
        if (!tipc_in_scope(dest, tipc_own_addr))
                return;
-       if (!tipc_in_scope(b_ptr->link_req->domain, orig))
+       if (!tipc_in_scope(b_ptr->domain, orig))
                return;
 
        /* Locate structure corresponding to requesting node */
@@ -242,7 +239,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
        link_fully_up = link_working_working(link);
 
        if ((type == DSC_REQ_MSG) && !link_fully_up) {
-               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
+               rbuf = tipc_disc_init_msg(DSC_RESP_MSG, b_ptr);
                if (rbuf) {
                        tipc_bearer_send(b_ptr, rbuf, &media_addr);
                        kfree_skb(rbuf);
@@ -306,7 +303,7 @@ static void disc_timeout(struct tipc_link_req *req)
        spin_lock_bh(&req->lock);
 
        /* Stop searching if only desired node has been found */
-       if (tipc_node(req->domain) && req->num_nodes) {
+       if (tipc_node(req->bearer->domain) && req->num_nodes) {
                req->timer_intv = TIPC_LINK_REQ_INACTIVE;
                goto exit;
        }
@@ -342,8 +339,7 @@ exit:
  *
  * Returns 0 if successful, otherwise -errno.
  */
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
-                    u32 dest_domain)
+int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
 {
        struct tipc_link_req *req;
 
@@ -351,7 +347,7 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
        if (!req)
                return -ENOMEM;
 
-       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
+       req->buf = tipc_disc_init_msg(DSC_REQ_MSG, b_ptr);
        if (!req->buf) {
                kfree(req);
                return -ENOMSG;
@@ -359,7 +355,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
 
        memcpy(&req->dest, dest, sizeof(*dest));
        req->bearer = b_ptr;
-       req->domain = dest_domain;
        req->num_nodes = 0;
        req->timer_intv = TIPC_LINK_REQ_INIT;
        spin_lock_init(&req->lock);
index 75b67c403aa3629643fd547e4245c1c6dacbc335..07f34729459dcacb93b71d8a56c69263db5f563b 100644 (file)
 
 struct tipc_link_req;
 
-int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
-                    u32 dest_domain);
+int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
 void tipc_disc_delete(struct tipc_link_req *req);
 void tipc_disc_add_dest(struct tipc_link_req *req);
 void tipc_disc_remove_dest(struct tipc_link_req *req);
-void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
 
 #endif
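
Because the discovery domain now travels with the bearer instead of with each tipc_link_req, both the request builder (tipc_disc_init_msg()) and the receive filter in tipc_disc_rcv() read it from struct tipc_bearer. A minimal sketch of that filter, with example_peer_in_disc_scope() as a hypothetical wrapper around the inline check:

/* Would a discovery message from 'orig' be accepted on this bearer? */
static int example_peer_in_disc_scope(struct tipc_bearer *b_ptr, u32 orig)
{
        return tipc_in_scope(b_ptr->domain, orig);
}
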
index da6018beb6ebc149b72efd816f4d7154bc6cdfd7..c5190ab75290d04202b99a3e923a69fe1a9dad38 100644 (file)
@@ -77,19 +77,19 @@ static const char *link_unk_evt = "Unknown link event ";
 
 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                                       struct sk_buff *buf);
-static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int  tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
                                 struct sk_buff **buf);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
-static int  link_send_sections_long(struct tipc_port *sender,
-                                   struct iovec const *msg_sect,
-                                   unsigned int len, u32 destnode);
+static int  tipc_link_iovec_long_xmit(struct tipc_port *sender,
+                                     struct iovec const *msg_sect,
+                                     unsigned int len, u32 destnode);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
-static void tipc_link_send_sync(struct tipc_link *l);
-static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
+static void tipc_link_sync_xmit(struct tipc_link *l);
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 
 /*
  *  Simple link routines
@@ -147,11 +147,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
 /**
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
- *
- * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
- * with tipc_link_delete().  (There is no risk that the node will be deleted by
- * another thread because tipc_link_delete() always cancels the link timer before
- * tipc_node_delete() is called.)
  */
 static void link_timeout(struct tipc_link *l_ptr)
 {
@@ -213,8 +208,8 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
  * Returns pointer to link.
  */
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-                             struct tipc_bearer *b_ptr,
-                             const struct tipc_media_addr *media_addr)
+                                  struct tipc_bearer *b_ptr,
+                                  const struct tipc_media_addr *media_addr)
 {
        struct tipc_link *l_ptr;
        struct tipc_msg *msg;
@@ -279,41 +274,44 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
        k_init_timer(&l_ptr->timer, (Handler)link_timeout,
                     (unsigned long)l_ptr);
-       list_add_tail(&l_ptr->link_list, &b_ptr->links);
 
        link_state_event(l_ptr, STARTING_EVT);
 
        return l_ptr;
 }
 
-/**
- * tipc_link_delete - delete a link
- * @l_ptr: pointer to link
- *
- * Note: 'tipc_net_lock' is write_locked, bearer is locked.
- * This routine must not grab the node lock until after link timer cancellation
- * to avoid a potential deadlock situation.
- */
-void tipc_link_delete(struct tipc_link *l_ptr)
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 {
-       if (!l_ptr) {
-               pr_err("Attempt to delete non-existent link\n");
-               return;
-       }
-
-       k_cancel_timer(&l_ptr->timer);
+       struct tipc_link *l_ptr;
+       struct tipc_node *n_ptr;
 
-       tipc_node_lock(l_ptr->owner);
-       tipc_link_reset(l_ptr);
-       tipc_node_detach_link(l_ptr->owner, l_ptr);
-       tipc_link_purge_queues(l_ptr);
-       list_del_init(&l_ptr->link_list);
-       tipc_node_unlock(l_ptr->owner);
-       k_term_timer(&l_ptr->timer);
-       kfree(l_ptr);
+       rcu_read_lock();
+       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+               spin_lock_bh(&n_ptr->lock);
+               l_ptr = n_ptr->links[bearer_id];
+               if (l_ptr) {
+                       tipc_link_reset(l_ptr);
+                       if (shutting_down || !tipc_node_is_up(n_ptr)) {
+                               tipc_node_detach_link(l_ptr->owner, l_ptr);
+                               tipc_link_reset_fragments(l_ptr);
+                               spin_unlock_bh(&n_ptr->lock);
+
+                               /* Nobody else can access this link now: */
+                               del_timer_sync(&l_ptr->timer);
+                               kfree(l_ptr);
+                       } else {
+                               /* Detach/delete when failover is finished: */
+                               l_ptr->flags |= LINK_STOPPED;
+                               spin_unlock_bh(&n_ptr->lock);
+                               del_timer_sync(&l_ptr->timer);
+                       }
+                       continue;
+               }
+               spin_unlock_bh(&n_ptr->lock);
+       }
+       rcu_read_unlock();
 }
 
-
 /**
  * link_schedule_port - schedule port for deferred sending
  * @l_ptr: pointer to link
@@ -330,8 +328,6 @@ static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
        spin_lock_bh(&tipc_port_list_lock);
        p_ptr = tipc_port_lock(origport);
        if (p_ptr) {
-               if (!p_ptr->wakeup)
-                       goto exit;
                if (!list_empty(&p_ptr->wait_list))
                        goto exit;
                p_ptr->congested = 1;
@@ -366,7 +362,7 @@ void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
                list_del_init(&p_ptr->wait_list);
                spin_lock_bh(p_ptr->lock);
                p_ptr->congested = 0;
-               p_ptr->wakeup(p_ptr);
+               tipc_port_wakeup(p_ptr);
                win -= p_ptr->waiting_pkts;
                spin_unlock_bh(p_ptr->lock);
        }
@@ -461,6 +457,21 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        link_reset_statistics(l_ptr);
 }
 
+void tipc_link_reset_list(unsigned int bearer_id)
+{
+       struct tipc_link *l_ptr;
+       struct tipc_node *n_ptr;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+               spin_lock_bh(&n_ptr->lock);
+               l_ptr = n_ptr->links[bearer_id];
+               if (l_ptr)
+                       tipc_link_reset(l_ptr);
+               spin_unlock_bh(&n_ptr->lock);
+       }
+       rcu_read_unlock();
+}
 
 static void link_activate(struct tipc_link *l_ptr)
 {
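
With the per-bearer links list gone from struct tipc_bearer, operations that used to walk that list are keyed by bearer identity and iterate tipc_node_list under RCU instead, as tipc_link_reset_list() above and tipc_link_delete_list() earlier in this file show. A small sketch of the two call sites, using hypothetical wrapper names:

static void example_on_bearer_reset(struct tipc_bearer *b_ptr)
{
        tipc_link_reset_list(b_ptr->identity);          /* as in tipc_reset_bearer() */
}

static void example_on_bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
{
        tipc_link_delete_list(b_ptr->identity, shutting_down);  /* as in bearer_disable() */
}
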
@@ -479,7 +490,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        struct tipc_link *other;
        u32 cont_intv = l_ptr->continuity_interval;
 
-       if (!l_ptr->started && (event != STARTING_EVT))
+       if (l_ptr->flags & LINK_STOPPED)
+               return;
+
+       if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
                return;         /* Not yet. */
 
        /* Check whether changeover is going on */
@@ -499,12 +513,12 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        if (l_ptr->next_in_no != l_ptr->checkpoint) {
                                l_ptr->checkpoint = l_ptr->next_in_no;
                                if (tipc_bclink_acks_missing(l_ptr->owner)) {
-                                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                                0, 0, 0, 0, 0);
+                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
+                                                            0, 0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
-                                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                                1, 0, 0, 0, 0);
+                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
+                                                            1, 0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
@@ -512,7 +526,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        }
                        l_ptr->state = WORKING_UNKNOWN;
                        l_ptr->fsm_msg_cnt = 0;
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv / 4);
                        break;
@@ -522,7 +536,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
-                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+                                            0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -544,7 +559,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
-                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+                                            0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -554,14 +570,14 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                l_ptr->fsm_msg_cnt = 0;
                                l_ptr->checkpoint = l_ptr->next_in_no;
                                if (tipc_bclink_acks_missing(l_ptr->owner)) {
-                                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                                0, 0, 0, 0, 0);
+                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
+                                                            0, 0, 0, 0, 0);
                                        l_ptr->fsm_msg_cnt++;
                                }
                                link_set_timer(l_ptr, cont_intv);
                        } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
-                               tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                        1, 0, 0, 0, 0);
+                               tipc_link_proto_xmit(l_ptr, STATE_MSG,
+                                                    1, 0, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv / 4);
                        } else {        /* Link has failed */
@@ -570,8 +586,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                                tipc_link_reset(l_ptr);
                                l_ptr->state = RESET_UNKNOWN;
                                l_ptr->fsm_msg_cnt = 0;
-                               tipc_link_send_proto_msg(l_ptr, RESET_MSG,
-                                                        0, 0, 0, 0, 0);
+                               tipc_link_proto_xmit(l_ptr, RESET_MSG,
+                                                    0, 0, 0, 0, 0);
                                l_ptr->fsm_msg_cnt++;
                                link_set_timer(l_ptr, cont_intv);
                        }
@@ -591,24 +607,25 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
-                               tipc_link_send_sync(l_ptr);
+                               tipc_link_sync_xmit(l_ptr);
                        link_set_timer(l_ptr, cont_intv);
                        break;
                case RESET_MSG:
                        l_ptr->state = RESET_RESET;
                        l_ptr->fsm_msg_cnt = 0;
-                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+                                            1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
                case STARTING_EVT:
-                       l_ptr->started = 1;
+                       l_ptr->flags |= LINK_STARTED;
                        /* fall through */
                case TIMEOUT_EVT:
-                       tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -626,16 +643,17 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        l_ptr->state = WORKING_WORKING;
                        l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
-                               tipc_link_send_sync(l_ptr);
+                               tipc_link_sync_xmit(l_ptr);
                        link_set_timer(l_ptr, cont_intv);
                        break;
                case RESET_MSG:
                        break;
                case TIMEOUT_EVT:
-                       tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+                                            0, 0, 0, 0, 0);
                        l_ptr->fsm_msg_cnt++;
                        link_set_timer(l_ptr, cont_intv);
                        break;
@@ -721,11 +739,11 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
 }
 
 /*
- * tipc_link_send_buf() is the 'full path' for messages, called from
- * inside TIPC when the 'fast path' in tipc_send_buf
+ * tipc_link_xmit() is the 'full path' for messages, called from
+ * inside TIPC when the 'fast path' in tipc_send_xmit
  * has failed, and from link_send()
  */
-int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        u32 size = msg_size(msg);
@@ -753,7 +771,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 
        /* Fragmentation needed ? */
        if (size > max_packet)
-               return link_send_long_buf(l_ptr, buf);
+               return tipc_link_frag_xmit(l_ptr, buf);
 
        /* Packet can be queued or sent. */
        if (likely(!link_congested(l_ptr))) {
@@ -797,11 +815,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 }
 
 /*
- * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
- * not been selected yet, and the the owner node is not locked
+ * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
+ * has not been selected yet, and the owner node is not locked
  * Called by TIPC internal users, e.g. the name distributor
  */
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
 {
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
@@ -813,7 +831,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
                tipc_node_lock(n_ptr);
                l_ptr = n_ptr->active_links[selector & 1];
                if (l_ptr)
-                       res = tipc_link_send_buf(l_ptr, buf);
+                       res = __tipc_link_xmit(l_ptr, buf);
                else
                        kfree_skb(buf);
                tipc_node_unlock(n_ptr);
@@ -825,14 +843,14 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
 }
 
 /*
- * tipc_link_send_sync - synchronize broadcast link endpoints.
+ * tipc_link_sync_xmit - synchronize broadcast link endpoints.
  *
  * Give a newly added peer node the sequence number where it should
  * start receiving and acking broadcast packets.
  *
  * Called with node locked
  */
-static void tipc_link_send_sync(struct tipc_link *l)
+static void tipc_link_sync_xmit(struct tipc_link *l)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -849,14 +867,14 @@ static void tipc_link_send_sync(struct tipc_link *l)
 }
 
 /*
- * tipc_link_recv_sync - synchronize broadcast link endpoints.
+ * tipc_link_sync_rcv - synchronize broadcast link endpoints.
  * Receive the sequence number where we should start receiving and
  * acking broadcast packets from a newly added peer node, and open
  * up for reception of such packets.
  *
  * Called with node locked
  */
-static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
 
@@ -866,7 +884,7 @@ static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
 }
 
 /*
- * tipc_link_send_names - send name table entries to new neighbor
+ * tipc_link_names_xmit - send name table entries to new neighbor
  *
  * Send routine for bulk delivery of name table messages when contact
  * with a new neighbor occurs. No link congestion checking is performed
@@ -874,7 +892,7 @@ static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
  * small enough not to require fragmentation.
  * Called without any locks held.
  */
-void tipc_link_send_names(struct list_head *message_list, u32 dest)
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
 {
        struct tipc_node *n_ptr;
        struct tipc_link *l_ptr;
@@ -909,13 +927,13 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
 }
 
 /*
- * link_send_buf_fast: Entry for data messages where the
+ * tipc_link_xmit_fast: Entry for data messages where the
  * destination link is known and the header is complete,
  * inclusive total message length. Very time critical.
  * Link is locked. Returns user data length.
  */
-static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
-                             u32 *used_max_pkt)
+static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
+                              u32 *used_max_pkt)
 {
        struct tipc_msg *msg = buf_msg(buf);
        int res = msg_data_sz(msg);
@@ -931,18 +949,18 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
                else
                        *used_max_pkt = l_ptr->max_pkt;
        }
-       return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
+       return __tipc_link_xmit(l_ptr, buf);  /* All other cases */
 }
 
 /*
- * tipc_link_send_sections_fast: Entry for messages where the
+ * tipc_link_iovec_xmit_fast: Entry for messages where the
  * destination processor is known and the header is complete,
  * except for total message length.
  * Returns user data length or errno.
  */
-int tipc_link_send_sections_fast(struct tipc_port *sender,
-                                struct iovec const *msg_sect,
-                                unsigned int len, u32 destaddr)
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+                             struct iovec const *msg_sect,
+                             unsigned int len, u32 destaddr)
 {
        struct tipc_msg *hdr = &sender->phdr;
        struct tipc_link *l_ptr;
@@ -968,8 +986,8 @@ again:
                l_ptr = node->active_links[selector];
                if (likely(l_ptr)) {
                        if (likely(buf)) {
-                               res = link_send_buf_fast(l_ptr, buf,
-                                                        &sender->max_pkt);
+                               res = tipc_link_xmit_fast(l_ptr, buf,
+                                                         &sender->max_pkt);
 exit:
                                tipc_node_unlock(node);
                                read_unlock_bh(&tipc_net_lock);
@@ -995,24 +1013,21 @@ exit:
                        if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
                                goto again;
 
-                       return link_send_sections_long(sender, msg_sect, len,
-                                                      destaddr);
+                       return tipc_link_iovec_long_xmit(sender, msg_sect,
+                                                        len, destaddr);
                }
                tipc_node_unlock(node);
        }
        read_unlock_bh(&tipc_net_lock);
 
        /* Couldn't find a link to the destination node */
-       if (buf)
-               return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
-       if (res >= 0)
-               return tipc_port_reject_sections(sender, hdr, msg_sect,
-                                                len, TIPC_ERR_NO_NODE);
-       return res;
+       kfree_skb(buf);
+       tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
+       return -ENETUNREACH;
 }
 
 /*
- * link_send_sections_long(): Entry for long messages where the
+ * tipc_link_iovec_long_xmit(): Entry for long messages where the
  * destination node is known and the header is complete,
  * inclusive total message length.
  * Link and bearer congestion status have been checked to be ok,
@@ -1025,9 +1040,9 @@ exit:
  *
  * Returns user data length or errno.
  */
-static int link_send_sections_long(struct tipc_port *sender,
-                                  struct iovec const *msg_sect,
-                                  unsigned int len, u32 destaddr)
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+                                    struct iovec const *msg_sect,
+                                    unsigned int len, u32 destaddr)
 {
        struct tipc_link *l_ptr;
        struct tipc_node *node;
@@ -1146,8 +1161,9 @@ error:
        } else {
 reject:
                kfree_skb_list(buf_chain);
-               return tipc_port_reject_sections(sender, hdr, msg_sect,
-                                                len, TIPC_ERR_NO_NODE);
+               tipc_port_iovec_reject(sender, hdr, msg_sect, len,
+                                      TIPC_ERR_NO_NODE);
+               return -ENETUNREACH;
        }
 
        /* Append chain of fragments to send queue & send them */
@@ -1441,15 +1457,10 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                u32 seq_no;
                u32 ackd;
                u32 released = 0;
-               int type;
 
                head = head->next;
                buf->next = NULL;
 
-               /* Ensure bearer is still enabled */
-               if (unlikely(!b_ptr->active))
-                       goto discard;
-
                /* Ensure message is well-formed */
                if (unlikely(!link_recv_buf_validate(buf)))
                        goto discard;
@@ -1463,9 +1474,9 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
 
                if (unlikely(msg_non_seq(msg))) {
                        if (msg_user(msg) ==  LINK_CONFIG)
-                               tipc_disc_recv_msg(buf, b_ptr);
+                               tipc_disc_rcv(buf, b_ptr);
                        else
-                               tipc_bclink_recv_pkt(buf);
+                               tipc_bclink_rcv(buf);
                        continue;
                }
 
@@ -1489,7 +1500,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
                        msg_user(msg) == LINK_PROTOCOL &&
                        (msg_type(msg) == RESET_MSG ||
-                                       msg_type(msg) == ACTIVATE_MSG) &&
+                        msg_type(msg) == ACTIVATE_MSG) &&
                        !msg_redundant_link(msg))
                        n_ptr->block_setup &= ~WAIT_PEER_DOWN;
 
@@ -1508,7 +1519,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                while ((crs != l_ptr->next_out) &&
                       less_eq(buf_seqno(crs), ackd)) {
                        struct sk_buff *next = crs->next;
-
                        kfree_skb(crs);
                        crs = next;
                        released++;
@@ -1521,18 +1531,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                /* Try sending any messages link endpoint has pending */
                if (unlikely(l_ptr->next_out))
                        tipc_link_push_queue(l_ptr);
+
                if (unlikely(!list_empty(&l_ptr->waiting_ports)))
                        tipc_link_wakeup_ports(l_ptr, 0);
+
                if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
                        l_ptr->stats.sent_acks++;
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
-               /* Now (finally!) process the incoming message */
-protocol_check:
+               /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
-                               link_recv_proto_msg(l_ptr, buf);
+                               tipc_link_proto_rcv(l_ptr, buf);
                                head = link_insert_deferred_queue(l_ptr, head);
                                tipc_node_unlock(n_ptr);
                                continue;
@@ -1561,67 +1572,65 @@ protocol_check:
                l_ptr->next_in_no++;
                if (unlikely(l_ptr->oldest_deferred_in))
                        head = link_insert_deferred_queue(l_ptr, head);
-deliver:
-               if (likely(msg_isdata(msg))) {
-                       tipc_node_unlock(n_ptr);
-                       tipc_port_recv_msg(buf);
-                       continue;
+
+               /* Deliver packet/message to correct user: */
+               if (unlikely(msg_user(msg) ==  CHANGEOVER_PROTOCOL)) {
+                       if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
+                               tipc_node_unlock(n_ptr);
+                               continue;
+                       }
+                       msg = buf_msg(buf);
+               } else if (msg_user(msg) == MSG_FRAGMENTER) {
+                       int rc;
+
+                       l_ptr->stats.recv_fragments++;
+                       rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
+                                               &l_ptr->reasm_tail,
+                                               &buf);
+                       if (rc == LINK_REASM_COMPLETE) {
+                               l_ptr->stats.recv_fragmented++;
+                               msg = buf_msg(buf);
+                       } else {
+                               if (rc == LINK_REASM_ERROR)
+                                       tipc_link_reset(l_ptr);
+                               tipc_node_unlock(n_ptr);
+                               continue;
+                       }
                }
+
                switch (msg_user(msg)) {
-                       int ret;
+               case TIPC_LOW_IMPORTANCE:
+               case TIPC_MEDIUM_IMPORTANCE:
+               case TIPC_HIGH_IMPORTANCE:
+               case TIPC_CRITICAL_IMPORTANCE:
+                       tipc_node_unlock(n_ptr);
+                       tipc_port_rcv(buf);
+                       continue;
                case MSG_BUNDLER:
                        l_ptr->stats.recv_bundles++;
                        l_ptr->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(n_ptr);
-                       tipc_link_recv_bundle(buf);
+                       tipc_link_bundle_rcv(buf);
                        continue;
                case NAME_DISTRIBUTOR:
                        n_ptr->bclink.recv_permitted = true;
                        tipc_node_unlock(n_ptr);
-                       tipc_named_recv(buf);
-                       continue;
-               case BCAST_PROTOCOL:
-                       tipc_link_recv_sync(n_ptr, buf);
-                       tipc_node_unlock(n_ptr);
+                       tipc_named_rcv(buf);
                        continue;
                case CONN_MANAGER:
                        tipc_node_unlock(n_ptr);
-                       tipc_port_recv_proto_msg(buf);
+                       tipc_port_proto_rcv(buf);
                        continue;
-               case MSG_FRAGMENTER:
-                       l_ptr->stats.recv_fragments++;
-                       ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
-                                                     &l_ptr->reasm_tail,
-                                                     &buf);
-                       if (ret == LINK_REASM_COMPLETE) {
-                               l_ptr->stats.recv_fragmented++;
-                               msg = buf_msg(buf);
-                               goto deliver;
-                       }
-                       if (ret == LINK_REASM_ERROR)
-                               tipc_link_reset(l_ptr);
-                       tipc_node_unlock(n_ptr);
-                       continue;
-               case CHANGEOVER_PROTOCOL:
-                       type = msg_type(msg);
-                       if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
-                               msg = buf_msg(buf);
-                               seq_no = msg_seqno(msg);
-                               if (type == ORIGINAL_MSG)
-                                       goto deliver;
-                               goto protocol_check;
-                       }
+               case BCAST_PROTOCOL:
+                       tipc_link_sync_rcv(n_ptr, buf);
                        break;
                default:
                        kfree_skb(buf);
-                       buf = NULL;
                        break;
                }
                tipc_node_unlock(n_ptr);
-               tipc_net_route_msg(buf);
                continue;
 unlock_discard:
-
                tipc_node_unlock(n_ptr);
 discard:
                kfree_skb(buf);
@@ -1688,7 +1697,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
        u32 seq_no = buf_seqno(buf);
 
        if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
-               link_recv_proto_msg(l_ptr, buf);
+               tipc_link_proto_rcv(l_ptr, buf);
                return;
        }
 
@@ -1711,7 +1720,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                l_ptr->stats.deferred_recv++;
                TIPC_SKB_CB(buf)->deferred = true;
                if ((l_ptr->deferred_inqueue_sz % 16) == 1)
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
        } else
                l_ptr->stats.duplicates++;
 }
@@ -1719,9 +1728,8 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 /*
  * Send protocol message to the other endpoint.
  */
-void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
-                             int probe_msg, u32 gap, u32 tolerance,
-                             u32 priority, u32 ack_mtu)
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+                         u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
 {
        struct sk_buff *buf = NULL;
        struct tipc_msg *msg = l_ptr->pmsg;
@@ -1820,7 +1828,7 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
  * Note that network plane id propagates through the network, and may
  * change at any time. The node with lowest address rules
  */
-static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
        u32 rec_gap = 0;
        u32 max_pkt_info;
@@ -1939,8 +1947,8 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
                                                      msg_last_bcast(msg));
 
                if (rec_gap || (msg_probe(msg))) {
-                       tipc_link_send_proto_msg(l_ptr, STATE_MSG,
-                                                0, rec_gap, 0, 0, max_pkt_ack);
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
+                                            0, max_pkt_ack);
                }
                if (msg_seq_gap(msg)) {
                        l_ptr->stats.recv_nacks++;
@@ -1979,7 +1987,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
        }
        skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
        skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
-       tipc_link_send_buf(tunnel, buf);
+       __tipc_link_xmit(tunnel, buf);
 }
 
 
@@ -2012,7 +2020,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
                if (buf) {
                        skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
                        msg_set_size(&tunnel_hdr, INT_H_SIZE);
-                       tipc_link_send_buf(tunnel, buf);
+                       __tipc_link_xmit(tunnel, buf);
                } else {
                        pr_warn("%sunable to send changeover msg\n",
                                link_co_err);
@@ -2046,7 +2054,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
        }
 }
 
-/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
+/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
  * duplicate of the first link's send queue via the new link. This way, we
  * are guaranteed that currently queued packets from a socket are delivered
  * before future traffic from the same socket, even if this is using the
@@ -2055,7 +2063,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
  * and sequence order is preserved per sender/receiver socket pair.
  * Owner node is locked.
  */
-void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
                              struct tipc_link *tunnel)
 {
        struct sk_buff *iter;
@@ -2085,7 +2093,7 @@ void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
                skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
                skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
                                               length);
-               tipc_link_send_buf(tunnel, outbuf);
+               __tipc_link_xmit(tunnel, outbuf);
                if (!tipc_link_is_up(l_ptr))
                        return;
                iter = iter->next;
@@ -2112,89 +2120,114 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
        return eb;
 }
 
-/*  tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
- *  via other link as result of a failover (ORIGINAL_MSG) or
- *  a new active link (DUPLICATE_MSG). Failover packets are
- *  returned to the active link for delivery upwards.
+
+
+/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
+ * Owner node is locked.
+ */
+static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
+                             struct sk_buff *t_buf)
+{
+       struct sk_buff *buf;
+
+       if (!tipc_link_is_up(l_ptr))
+               return;
+
+       buf = buf_extract(t_buf, INT_H_SIZE);
+       if (buf == NULL) {
+               pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
+               return;
+       }
+
+       /* Add buffer to deferred queue, if applicable: */
+       link_handle_out_of_seq_msg(l_ptr, buf);
+}
+
+/*  tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
  *  Owner node is locked.
  */
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
-                               struct sk_buff **buf)
+static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
+                                             struct sk_buff *t_buf)
 {
-       struct sk_buff *tunnel_buf = *buf;
-       struct tipc_link *dest_link;
+       struct tipc_msg *t_msg = buf_msg(t_buf);
+       struct sk_buff *buf = NULL;
        struct tipc_msg *msg;
-       struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
-       u32 msg_typ = msg_type(tunnel_msg);
-       u32 msg_count = msg_msgcnt(tunnel_msg);
-       u32 bearer_id = msg_bearer_id(tunnel_msg);
 
-       if (bearer_id >= MAX_BEARERS)
-               goto exit;
-       dest_link = (*l_ptr)->owner->links[bearer_id];
-       if (!dest_link)
-               goto exit;
-       if (dest_link == *l_ptr) {
-               pr_err("Unexpected changeover message on link <%s>\n",
-                      (*l_ptr)->name);
-               goto exit;
-       }
-       *l_ptr = dest_link;
-       msg = msg_get_wrapped(tunnel_msg);
+       if (tipc_link_is_up(l_ptr))
+               tipc_link_reset(l_ptr);
 
-       if (msg_typ == DUPLICATE_MSG) {
-               if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
-                       goto exit;
-               *buf = buf_extract(tunnel_buf, INT_H_SIZE);
-               if (*buf == NULL) {
-                       pr_warn("%sduplicate msg dropped\n", link_co_err);
+       /* First failover packet? */
+       if (l_ptr->exp_msg_count == START_CHANGEOVER)
+               l_ptr->exp_msg_count = msg_msgcnt(t_msg);
+
+       /* Should there be an inner packet? */
+       if (l_ptr->exp_msg_count) {
+               l_ptr->exp_msg_count--;
+               buf = buf_extract(t_buf, INT_H_SIZE);
+               if (buf == NULL) {
+                       pr_warn("%sno inner failover pkt\n", link_co_err);
                        goto exit;
                }
-               kfree_skb(tunnel_buf);
-               return 1;
-       }
+               msg = buf_msg(buf);
 
-       /* First original message ?: */
-       if (tipc_link_is_up(dest_link)) {
-               pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
-                       dest_link->name);
-               tipc_link_reset(dest_link);
-               dest_link->exp_msg_count = msg_count;
-               if (!msg_count)
-                       goto exit;
-       } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
-               dest_link->exp_msg_count = msg_count;
-               if (!msg_count)
+               if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
+                       kfree_skb(buf);
+                       buf = NULL;
                        goto exit;
+               }
+               if (msg_user(msg) == MSG_FRAGMENTER) {
+                       l_ptr->stats.recv_fragments++;
+                       tipc_link_frag_rcv(&l_ptr->reasm_head,
+                                          &l_ptr->reasm_tail,
+                                          &buf);
+               }
+       }
+exit:
+       if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
+               tipc_node_detach_link(l_ptr->owner, l_ptr);
+               kfree(l_ptr);
        }
+       return buf;
+}
 
-       /* Receive original message */
-       if (dest_link->exp_msg_count == 0) {
-               pr_warn("%sgot too many tunnelled messages\n", link_co_err);
+/*  tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
+ *  via other link as result of a failover (ORIGINAL_MSG) or
+ *  a new active link (DUPLICATE_MSG). Failover packets are
+ *  returned to the active link for delivery upwards.
+ *  Owner node is locked.
+ */
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+                               struct sk_buff **buf)
+{
+       struct sk_buff *t_buf = *buf;
+       struct tipc_link *l_ptr;
+       struct tipc_msg *t_msg = buf_msg(t_buf);
+       u32 bearer_id = msg_bearer_id(t_msg);
+
+       *buf = NULL;
+
+       if (bearer_id >= MAX_BEARERS)
                goto exit;
-       }
-       dest_link->exp_msg_count--;
-       if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+
+       l_ptr = n_ptr->links[bearer_id];
+       if (!l_ptr)
                goto exit;
-       } else {
-               *buf = buf_extract(tunnel_buf, INT_H_SIZE);
-               if (*buf != NULL) {
-                       kfree_skb(tunnel_buf);
-                       return 1;
-               } else {
-                       pr_warn("%soriginal msg dropped\n", link_co_err);
-               }
-       }
+
+       if (msg_type(t_msg) == DUPLICATE_MSG)
+               tipc_link_dup_rcv(l_ptr, t_buf);
+       else if (msg_type(t_msg) == ORIGINAL_MSG)
+               *buf = tipc_link_failover_rcv(l_ptr, t_buf);
+       else
+               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
 exit:
-       *buf = NULL;
-       kfree_skb(tunnel_buf);
-       return 0;
+       kfree_skb(t_buf);
+       return *buf != NULL;
 }
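The rewritten tunnel path changes the calling convention: tipc_link_tunnel_rcv() now always consumes the tunnel buffer itself, and its return value only says whether *buf was repopulated with an extracted ORIGINAL_MSG inner packet for normal delivery. A hypothetical caller sketch (the matching tipc_rcv() hunk is not shown in this excerpt, so the surrounding control flow is illustrative only):

        if (msg_user(msg) == CHANGEOVER_PROTOCOL) {
                if (!tipc_link_tunnel_rcv(n_ptr, &buf))
                        goto unlock_discard;    /* duplicate absorbed, or nothing extracted */
                msg = buf_msg(buf);             /* continue delivery with the inner packet */
        }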
 
 /*
  *  Bundler functionality:
  */
-void tipc_link_recv_bundle(struct sk_buff *buf)
+void tipc_link_bundle_rcv(struct sk_buff *buf)
 {
        u32 msgcount = msg_msgcnt(buf_msg(buf));
        u32 pos = INT_H_SIZE;
@@ -2217,11 +2250,11 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
  */
 
 /*
- * link_send_long_buf: Entry for buffers needing fragmentation.
+ * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
  * The buffer is complete, inclusive total message length.
  * Returns user data length.
  */
-static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
 {
        struct sk_buff *buf_chain = NULL;
        struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
@@ -2284,12 +2317,11 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
        return dsz;
 }
 
-/*
- * tipc_link_recv_fragment(): Called with node lock on. Returns
+/* tipc_link_frag_rcv(): Called with node lock on. Returns
  * the reassembled buffer if message is complete.
  */
-int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
-                           struct sk_buff **fbuf)
+int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
+                      struct sk_buff **fbuf)
 {
        struct sk_buff *frag = *fbuf;
        struct tipc_msg *msg = buf_msg(frag);
@@ -2303,6 +2335,7 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
                        goto out_free;
                *head = frag;
                skb_frag_list_init(*head);
+               *fbuf = NULL;
                return 0;
        } else if (*head &&
                   skb_try_coalesce(*head, frag, &headstolen, &delta)) {
@@ -2322,10 +2355,12 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
                *tail = *head = NULL;
                return LINK_REASM_COMPLETE;
        }
+       *fbuf = NULL;
        return 0;
 out_free:
        pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
        kfree_skb(*fbuf);
+       *fbuf = NULL;
        return LINK_REASM_ERROR;
 }
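The renamed receive routine keeps a simple contract with its callers, which the added *fbuf = NULL assignments make explicit: the fragment buffer is always consumed, and the caller only branches on the return code. A minimal caller sketch using the LINK_REASM_* codes from link.h (the failover path above relies on the same behaviour):

        switch (tipc_link_frag_rcv(&l_ptr->reasm_head, &l_ptr->reasm_tail, &buf)) {
        case LINK_REASM_COMPLETE:
                /* buf now carries the fully reassembled message for delivery */
                break;
        case LINK_REASM_ERROR:
                /* reassembly state has been dropped; recovery is caller-specific */
                break;
        default:
                /* 0: fragment absorbed and buf set to NULL, wait for more */
                break;
        }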
 
@@ -2359,35 +2394,41 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
        l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
 }
 
-/**
- * link_find_link - locate link by name
- * @name: ptr to link name string
- * @node: ptr to area to be filled with ptr to associated node
- *
+/* tipc_link_find_owner - locate owner node of link by link's name
+ * @name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
  * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
  * this also prevents link deletion.
  *
- * Returns pointer to link (or 0 if invalid link name).
+ * Returns pointer to node owning the link, or 0 if no matching link is found.
  */
-static struct tipc_link *link_find_link(const char *name,
-                                       struct tipc_node **node)
+static struct tipc_node *tipc_link_find_owner(const char *link_name,
+                                             unsigned int *bearer_id)
 {
        struct tipc_link *l_ptr;
        struct tipc_node *n_ptr;
+       struct tipc_node *found_node = 0;
        int i;
 
-       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+       *bearer_id = 0;
+       rcu_read_lock();
+       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+               tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
                        l_ptr = n_ptr->links[i];
-                       if (l_ptr && !strcmp(l_ptr->name, name))
-                               goto found;
+                       if (l_ptr && !strcmp(l_ptr->name, link_name)) {
+                               *bearer_id = i;
+                               found_node = n_ptr;
+                               break;
+                       }
                }
+               tipc_node_unlock(n_ptr);
+               if (found_node)
+                       break;
        }
-       l_ptr = NULL;
-       n_ptr = NULL;
-found:
-       *node = n_ptr;
-       return l_ptr;
+       rcu_read_unlock();
+
+       return found_node;
 }
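The new lookup only identifies the owner node and the bearer slot; since the RCU walk releases the node lock again before returning, callers must re-validate the link pointer under the node lock before touching it, as link_cmd_set_value() does below. The pattern, in short (error value illustrative):

        node = tipc_link_find_owner(name, &bearer_id);
        if (!node)
                return -EINVAL;
        tipc_node_lock(node);
        l_ptr = node->links[bearer_id];
        if (l_ptr) {
                /* operate on l_ptr only while the node lock is held */
        }
        tipc_node_unlock(node);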
 
 /**
@@ -2429,32 +2470,33 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        struct tipc_link *l_ptr;
        struct tipc_bearer *b_ptr;
        struct tipc_media *m_ptr;
+       int bearer_id;
        int res = 0;
 
-       l_ptr = link_find_link(name, &node);
-       if (l_ptr) {
-               /*
-                * acquire node lock for tipc_link_send_proto_msg().
-                * see "TIPC locking policy" in net.c.
-                */
+       node = tipc_link_find_owner(name, &bearer_id);
+       if (node) {
                tipc_node_lock(node);
-               switch (cmd) {
-               case TIPC_CMD_SET_LINK_TOL:
-                       link_set_supervision_props(l_ptr, new_value);
-                       tipc_link_send_proto_msg(l_ptr,
-                               STATE_MSG, 0, 0, new_value, 0, 0);
-                       break;
-               case TIPC_CMD_SET_LINK_PRI:
-                       l_ptr->priority = new_value;
-                       tipc_link_send_proto_msg(l_ptr,
-                               STATE_MSG, 0, 0, 0, new_value, 0);
-                       break;
-               case TIPC_CMD_SET_LINK_WINDOW:
-                       tipc_link_set_queue_limits(l_ptr, new_value);
-                       break;
-               default:
-                       res = -EINVAL;
-                       break;
+               l_ptr = node->links[bearer_id];
+
+               if (l_ptr) {
+                       switch (cmd) {
+                       case TIPC_CMD_SET_LINK_TOL:
+                               link_set_supervision_props(l_ptr, new_value);
+                               tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+                                                    new_value, 0, 0);
+                               break;
+                       case TIPC_CMD_SET_LINK_PRI:
+                               l_ptr->priority = new_value;
+                               tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+                                                    0, new_value, 0);
+                               break;
+                       case TIPC_CMD_SET_LINK_WINDOW:
+                               tipc_link_set_queue_limits(l_ptr, new_value);
+                               break;
+                       default:
+                               res = -EINVAL;
+                               break;
+                       }
                }
                tipc_node_unlock(node);
                return res;
@@ -2549,6 +2591,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
        char *link_name;
        struct tipc_link *l_ptr;
        struct tipc_node *node;
+       unsigned int bearer_id;
 
        if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
                return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -2559,15 +2602,19 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
                        return tipc_cfg_reply_error_string("link not found");
                return tipc_cfg_reply_none();
        }
-
        read_lock_bh(&tipc_net_lock);
-       l_ptr = link_find_link(link_name, &node);
-       if (!l_ptr) {
+       node = tipc_link_find_owner(link_name, &bearer_id);
+       if (!node) {
                read_unlock_bh(&tipc_net_lock);
                return tipc_cfg_reply_error_string("link not found");
        }
-
        tipc_node_lock(node);
+       l_ptr = node->links[bearer_id];
+       if (!l_ptr) {
+               tipc_node_unlock(node);
+               read_unlock_bh(&tipc_net_lock);
+               return tipc_cfg_reply_error_string("link not found");
+       }
        link_reset_statistics(l_ptr);
        tipc_node_unlock(node);
        read_unlock_bh(&tipc_net_lock);
@@ -2597,18 +2644,27 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
        struct tipc_node *node;
        char *status;
        u32 profile_total = 0;
+       unsigned int bearer_id;
        int ret;
 
        if (!strcmp(name, tipc_bclink_name))
                return tipc_bclink_stats(buf, buf_size);
 
        read_lock_bh(&tipc_net_lock);
-       l = link_find_link(name, &node);
-       if (!l) {
+       node = tipc_link_find_owner(name, &bearer_id);
+       if (!node) {
                read_unlock_bh(&tipc_net_lock);
                return 0;
        }
        tipc_node_lock(node);
+
+       l = node->links[bearer_id];
+       if (!l) {
+               tipc_node_unlock(node);
+               read_unlock_bh(&tipc_net_lock);
+               return 0;
+       }
+
        s = &l->stats;
 
        if (tipc_link_is_active(l))
index 3b6aa65b608c119311d3ef25429fbce2764f9a46..8c0b49b5b2ee6b0751f248cf740254a33e424c6a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/link.h: Include file for TIPC link code
  *
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2013, Ericsson AB
  * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
 #include "msg.h"
 #include "node.h"
 
-/*
- * Link reassembly status codes
+/* Link reassembly status codes
  */
 #define LINK_REASM_ERROR       -1
 #define LINK_REASM_COMPLETE    1
 
-/*
- * Out-of-range value for link sequence numbers
+/* Out-of-range value for link sequence numbers
  */
 #define INVALID_LINK_SEQ 0x10000
 
-/*
- * Link states
+/* Link working states
  */
 #define WORKING_WORKING 560810u
 #define WORKING_UNKNOWN 560811u
 #define RESET_UNKNOWN   560812u
 #define RESET_RESET     560813u
 
-/*
- * Starting value for maximum packet size negotiation on unicast links
+/* Link endpoint execution states
+ */
+#define LINK_STARTED    0x0001
+#define LINK_STOPPED    0x0002
+
+/* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
  */
 #define MAX_PKT_DEFAULT 1500
@@ -102,8 +103,7 @@ struct tipc_stats {
  * @media_addr: media address to use when sending messages over link
  * @timer: link timer
  * @owner: pointer to peer node
- * @link_list: adjacent links in bearer's list of links
- * @started: indicates if link has been started
+ * @flags: execution state flags for link endpoint instance
  * @checkpoint: reference point for triggering link continuity checking
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
@@ -149,10 +149,9 @@ struct tipc_link {
        struct tipc_media_addr media_addr;
        struct timer_list timer;
        struct tipc_node *owner;
-       struct list_head link_list;
 
        /* Management and link supervision data */
-       int started;
+       unsigned int flags;
        u32 checkpoint;
        u32 peer_session;
        u32 peer_bearer_id;
@@ -215,10 +214,9 @@ struct tipc_port;
 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *l_ptr);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
-                             struct tipc_link *dest);
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
 int tipc_link_is_up(struct tipc_link *l_ptr);
 int tipc_link_is_active(struct tipc_link *l_ptr);
@@ -231,23 +229,24 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
                                          int req_tlv_space);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
-void tipc_link_send_names(struct list_head *message_list, u32 dest);
+void tipc_link_reset_list(unsigned int bearer_id);
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest);
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_send_sections_fast(struct tipc_port *sender,
-                                struct iovec const *msg_sect,
-                                unsigned int len, u32 destnode);
-void tipc_link_recv_bundle(struct sk_buff *buf);
-int  tipc_link_recv_fragment(struct sk_buff **reasm_head,
-                            struct sk_buff **reasm_tail,
-                            struct sk_buff **fbuf);
-void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
-                             u32 gap, u32 tolerance, u32 priority,
-                             u32 acked_mtu);
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+                             struct iovec const *msg_sect,
+                             unsigned int len, u32 destnode);
+void tipc_link_bundle_rcv(struct sk_buff *buf);
+int tipc_link_frag_rcv(struct sk_buff **reasm_head,
+                      struct sk_buff **reasm_tail,
+                      struct sk_buff **fbuf);
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+                         u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
 void tipc_link_push_queue(struct tipc_link *l_ptr);
 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
-                  struct sk_buff *buf);
+                       struct sk_buff *buf);
 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
 void tipc_link_retransmit(struct tipc_link *l_ptr,
index e0d08055754ea656ab1afdd5dee59a51f16bb163..aff8041dc1573e3fea829e286e2675e322350b2e 100644 (file)
@@ -131,16 +131,24 @@ static void named_cluster_distribute(struct sk_buff *buf)
 {
        struct sk_buff *buf_copy;
        struct tipc_node *n_ptr;
+       struct tipc_link *l_ptr;
 
-       list_for_each_entry(n_ptr, &tipc_node_list, list) {
-               if (tipc_node_active_links(n_ptr)) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
+               spin_lock_bh(&n_ptr->lock);
+               l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+               if (l_ptr) {
                        buf_copy = skb_copy(buf, GFP_ATOMIC);
-                       if (!buf_copy)
+                       if (!buf_copy) {
+                               spin_unlock_bh(&n_ptr->lock);
                                break;
+                       }
                        msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
-                       tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+                       __tipc_link_xmit(l_ptr, buf_copy);
                }
+               spin_unlock_bh(&n_ptr->lock);
        }
+       rcu_read_unlock();
 
        kfree_skb(buf);
 }
@@ -262,7 +270,7 @@ void tipc_named_node_up(unsigned long nodearg)
        named_distribute(&message_list, node, &publ_zone, max_item_buf);
        read_unlock_bh(&tipc_nametbl_lock);
 
-       tipc_link_send_names(&message_list, node);
+       tipc_link_names_xmit(&message_list, node);
 }
 
 /**
@@ -293,9 +301,9 @@ static void named_purge_publ(struct publication *publ)
 }
 
 /**
- * tipc_named_recv - process name table update message sent by another node
+ * tipc_named_rcv - process name table update message sent by another node
  */
-void tipc_named_recv(struct sk_buff *buf)
+void tipc_named_rcv(struct sk_buff *buf)
 {
        struct publication *publ;
        struct tipc_msg *msg = buf_msg(buf);
index 1e41bdd4f2553a13a8a8f6ebeb75604ab9997fdb..9b312ccfd43e7da41bcab4ca33d5f0f4d5be86cf 100644 (file)
@@ -42,7 +42,7 @@
 void tipc_named_publish(struct publication *publ);
 void tipc_named_withdraw(struct publication *publ);
 void tipc_named_node_up(unsigned long node);
-void tipc_named_recv(struct sk_buff *buf);
+void tipc_named_rcv(struct sk_buff *buf);
 void tipc_named_reinit(void);
 
 #endif
index 7d305ecc09c2bf053376bb147c5cf917113022ae..0374a817631e47fc87a6f9d9ec04c34009063e53 100644 (file)
@@ -146,19 +146,19 @@ void tipc_net_route_msg(struct sk_buff *buf)
        if (tipc_in_scope(dnode, tipc_own_addr)) {
                if (msg_isdata(msg)) {
                        if (msg_mcast(msg))
-                               tipc_port_recv_mcast(buf, NULL);
+                               tipc_port_mcast_rcv(buf, NULL);
                        else if (msg_destport(msg))
-                               tipc_port_recv_msg(buf);
+                               tipc_port_rcv(buf);
                        else
                                net_route_named_msg(buf);
                        return;
                }
                switch (msg_user(msg)) {
                case NAME_DISTRIBUTOR:
-                       tipc_named_recv(buf);
+                       tipc_named_rcv(buf);
                        break;
                case CONN_MANAGER:
-                       tipc_port_recv_proto_msg(buf);
+                       tipc_port_proto_rcv(buf);
                        break;
                default:
                        kfree_skb(buf);
@@ -168,7 +168,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
 
        /* Handle message for another node */
        skb_trim(buf, msg_size(msg));
-       tipc_link_send(buf, dnode, msg_link_selector(msg));
+       tipc_link_xmit(buf, dnode, msg_link_selector(msg));
 }
 
 void tipc_net_start(u32 addr)
@@ -182,8 +182,6 @@ void tipc_net_start(u32 addr)
        tipc_bclink_init();
        write_unlock_bh(&tipc_net_lock);
 
-       tipc_cfg_reinit();
-
        pr_info("Started in network mode\n");
        pr_info("Own node address %s, network identity %u\n",
                tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
@@ -191,15 +189,14 @@ void tipc_net_start(u32 addr)
 
 void tipc_net_stop(void)
 {
-       struct tipc_node *node, *t_node;
-
        if (!tipc_own_addr)
                return;
+
        write_lock_bh(&tipc_net_lock);
        tipc_bearer_stop();
        tipc_bclink_stop();
-       list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
-               tipc_node_delete(node);
+       tipc_node_stop();
        write_unlock_bh(&tipc_net_lock);
+
        pr_info("Left network mode\n");
 }
index efe4d41bf11bacb258e3096dc4619e97828c2904..1d3a4999a70ff96a751f3908d0d8e274af63a962 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, 2012 Ericsson AB
- * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static DEFINE_SPINLOCK(node_create_lock);
-
 static struct hlist_head node_htable[NODE_HTABLE_SIZE];
 LIST_HEAD(tipc_node_list);
 static u32 tipc_num_nodes;
-
-static atomic_t tipc_num_links = ATOMIC_INIT(0);
+static u32 tipc_num_links;
+static DEFINE_SPINLOCK(node_list_lock);
 
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
@@ -73,37 +71,26 @@ struct tipc_node *tipc_node_find(u32 addr)
        if (unlikely(!in_own_cluster_exact(addr)))
                return NULL;
 
-       hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
-               if (node->addr == addr)
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+               if (node->addr == addr) {
+                       rcu_read_unlock();
                        return node;
+               }
        }
+       rcu_read_unlock();
        return NULL;
 }
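The hash referred to in the comment above is not part of this hunk; the "trivial power-of-two bitmask technique" presumably amounts to masking the low bits of the node address into the node_htable index, roughly:

        /* Sketch only - the real tipc_hashfn()/NODE_HTABLE_SIZE definitions
         * are outside this excerpt; the table size is assumed a power of two.
         */
        static unsigned int tipc_hashfn(u32 addr)
        {
                return addr & (NODE_HTABLE_SIZE - 1);
        }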
 
-/**
- * tipc_node_create - create neighboring node
- *
- * Currently, this routine is called by neighbor discovery code, which holds
- * net_lock for reading only.  We must take node_create_lock to ensure a node
- * isn't created twice if two different bearers discover the node at the same
- * time.  (It would be preferable to switch to holding net_lock in write mode,
- * but this is a non-trivial change.)
- */
 struct tipc_node *tipc_node_create(u32 addr)
 {
        struct tipc_node *n_ptr, *temp_node;
 
-       spin_lock_bh(&node_create_lock);
-
-       n_ptr = tipc_node_find(addr);
-       if (n_ptr) {
-               spin_unlock_bh(&node_create_lock);
-               return n_ptr;
-       }
+       spin_lock_bh(&node_list_lock);
 
        n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
        if (!n_ptr) {
-               spin_unlock_bh(&node_create_lock);
+               spin_unlock_bh(&node_list_lock);
                pr_warn("Node creation failed, no memory\n");
                return NULL;
        }
@@ -114,31 +101,41 @@ struct tipc_node *tipc_node_create(u32 addr)
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->nsub);
 
-       hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+       hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
-       list_for_each_entry(temp_node, &tipc_node_list, list) {
+       list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
                if (n_ptr->addr < temp_node->addr)
                        break;
        }
-       list_add_tail(&n_ptr->list, &temp_node->list);
+       list_add_tail_rcu(&n_ptr->list, &temp_node->list);
        n_ptr->block_setup = WAIT_PEER_DOWN;
        n_ptr->signature = INVALID_NODE_SIG;
 
        tipc_num_nodes++;
 
-       spin_unlock_bh(&node_create_lock);
+       spin_unlock_bh(&node_list_lock);
        return n_ptr;
 }
 
-void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *n_ptr)
 {
-       list_del(&n_ptr->list);
-       hlist_del(&n_ptr->hash);
-       kfree(n_ptr);
+       list_del_rcu(&n_ptr->list);
+       hlist_del_rcu(&n_ptr->hash);
+       kfree_rcu(n_ptr, rcu);
 
        tipc_num_nodes--;
 }
 
+void tipc_node_stop(void)
+{
+       struct tipc_node *node, *t_node;
+
+       spin_lock_bh(&node_list_lock);
+       list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+               tipc_node_delete(node);
+       spin_unlock_bh(&node_list_lock);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
@@ -162,7 +159,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
                pr_info("New link <%s> becomes standby\n", l_ptr->name);
                return;
        }
-       tipc_link_dup_send_queue(active[0], l_ptr);
+       tipc_link_dup_queue_xmit(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) {
                active[0] = l_ptr;
                return;
@@ -243,15 +240,25 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
        n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
-       atomic_inc(&tipc_num_links);
+       spin_lock_bh(&node_list_lock);
+       tipc_num_links++;
+       spin_unlock_bh(&node_list_lock);
        n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-       n_ptr->links[l_ptr->b_ptr->identity] = NULL;
-       atomic_dec(&tipc_num_links);
-       n_ptr->link_cnt--;
+       int i;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (l_ptr != n_ptr->links[i])
+                       continue;
+               n_ptr->links[i] = NULL;
+               spin_lock_bh(&node_list_lock);
+               tipc_num_links--;
+               spin_unlock_bh(&node_list_lock);
+               n_ptr->link_cnt--;
+       }
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
@@ -335,27 +342,28 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
                return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
                                                   " (network address)");
 
-       read_lock_bh(&tipc_net_lock);
+       spin_lock_bh(&node_list_lock);
        if (!tipc_num_nodes) {
-               read_unlock_bh(&tipc_net_lock);
+               spin_unlock_bh(&node_list_lock);
                return tipc_cfg_reply_none();
        }
 
        /* For now, get space for all other nodes */
        payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
        if (payload_size > 32768u) {
-               read_unlock_bh(&tipc_net_lock);
+               spin_unlock_bh(&node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many nodes)");
        }
+       spin_unlock_bh(&node_list_lock);
+
        buf = tipc_cfg_reply_alloc(payload_size);
-       if (!buf) {
-               read_unlock_bh(&tipc_net_lock);
+       if (!buf)
                return NULL;
-       }
 
        /* Add TLVs for all nodes in scope */
-       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
                if (!tipc_in_scope(domain, n_ptr->addr))
                        continue;
                node_info.addr = htonl(n_ptr->addr);
@@ -363,8 +371,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
                tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
                                    &node_info, sizeof(node_info));
        }
-
-       read_unlock_bh(&tipc_net_lock);
+       rcu_read_unlock();
        return buf;
 }
 
@@ -387,21 +394,19 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
        if (!tipc_own_addr)
                return tipc_cfg_reply_none();
 
-       read_lock_bh(&tipc_net_lock);
-
+       spin_lock_bh(&node_list_lock);
        /* Get space for all unicast links + broadcast link */
-       payload_size = TLV_SPACE(sizeof(link_info)) *
-               (atomic_read(&tipc_num_links) + 1);
+       payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
        if (payload_size > 32768u) {
-               read_unlock_bh(&tipc_net_lock);
+               spin_unlock_bh(&node_list_lock);
                return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
                                                   " (too many links)");
        }
+       spin_unlock_bh(&node_list_lock);
+
        buf = tipc_cfg_reply_alloc(payload_size);
-       if (!buf) {
-               read_unlock_bh(&tipc_net_lock);
+       if (!buf)
                return NULL;
-       }
 
        /* Add TLV for broadcast link */
        link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
@@ -410,7 +415,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
        tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
        /* Add TLVs for any other links in scope */
-       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
                u32 i;
 
                if (!tipc_in_scope(domain, n_ptr->addr))
@@ -427,7 +433,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
                }
                tipc_node_unlock(n_ptr);
        }
-
-       read_unlock_bh(&tipc_net_lock);
+       rcu_read_unlock();
        return buf;
 }
index 63e2e8ead2fe5d08d50a4d26f0337b79a9961c8b..7cbb8cec1a932f881cd636a71edb0342070530ae 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/node.h: Include file for TIPC node management routines
  *
  * Copyright (c) 2000-2006, Ericsson AB
- * Copyright (c) 2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2005, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@
  * @link_cnt: number of links to node
  * @signature: node instance identifier
  * @bclink: broadcast-related info
+ * @rcu: rcu struct for tipc_node
  *    @acked: sequence # of last outbound b'cast message acknowledged by node
  *    @last_in: sequence # of last in-sequence b'cast message received from node
  *    @last_sent: sequence # of last b'cast message sent by node
@@ -89,6 +90,7 @@ struct tipc_node {
        int working_links;
        int block_setup;
        u32 signature;
+       struct rcu_head rcu;
        struct {
                u32 acked;
                u32 last_in;
@@ -107,7 +109,7 @@ extern struct list_head tipc_node_list;
 
 struct tipc_node *tipc_node_find(u32 addr);
 struct tipc_node *tipc_node_create(u32 addr);
-void tipc_node_delete(struct tipc_node *n_ptr);
+void tipc_node_stop(void);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
index b742b2654525e2b4c7bd7974c0b7ae6f0356156f..5c14c7801ee65095d809d502cab9f78d33d51e5e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/port.c: TIPC port code
  *
- * Copyright (c) 1992-2007, Ericsson AB
+ * Copyright (c) 1992-2007, 2014, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -38,6 +38,7 @@
 #include "config.h"
 #include "port.h"
 #include "name_table.h"
+#include "socket.h"
 
 /* Connection management: */
 #define PROBING_INTERVAL 3600000       /* [ms] => 1 h */
@@ -54,17 +55,6 @@ static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
 static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
 static void port_timeout(unsigned long ref);
 
-
-static u32 port_peernode(struct tipc_port *p_ptr)
-{
-       return msg_destnode(&p_ptr->phdr);
-}
-
-static u32 port_peerport(struct tipc_port *p_ptr)
-{
-       return msg_destport(&p_ptr->phdr);
-}
-
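The two static helpers removed here keep appearing below under new tipc_port_* names; presumably they move to port.h as inline accessors so the socket code can share them (the same seems to apply to tipc_port_unreliable() used further down). Assumed shape of the port.h counterparts, not shown in this excerpt:

        static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
        {
                return msg_destnode(&p_ptr->phdr);
        }

        static inline u32 tipc_port_peerport(struct tipc_port *p_ptr)
        {
                return msg_destport(&p_ptr->phdr);
        }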
 /**
  * tipc_port_peer_msg - verify message was sent by connected port's peer
  *
@@ -76,33 +66,32 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
        u32 peernode;
        u32 orignode;
 
-       if (msg_origport(msg) != port_peerport(p_ptr))
+       if (msg_origport(msg) != tipc_port_peerport(p_ptr))
                return 0;
 
        orignode = msg_orignode(msg);
-       peernode = port_peernode(p_ptr);
+       peernode = tipc_port_peernode(p_ptr);
        return (orignode == peernode) ||
                (!orignode && (peernode == tipc_own_addr)) ||
                (!peernode && (orignode == tipc_own_addr));
 }
 
 /**
- * tipc_multicast - send a multicast message to local and remote destinations
+ * tipc_port_mcast_xmit - send a multicast message to local and remote
+ * destinations
  */
-int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
-                  struct iovec const *msg_sect, unsigned int len)
+int tipc_port_mcast_xmit(struct tipc_port *oport,
+                        struct tipc_name_seq const *seq,
+                        struct iovec const *msg_sect,
+                        unsigned int len)
 {
        struct tipc_msg *hdr;
        struct sk_buff *buf;
        struct sk_buff *ibuf = NULL;
        struct tipc_port_list dports = {0, NULL, };
-       struct tipc_port *oport = tipc_port_deref(ref);
        int ext_targets;
        int res;
 
-       if (unlikely(!oport))
-               return -EINVAL;
-
        /* Create multicast message */
        hdr = &oport->phdr;
        msg_set_type(hdr, TIPC_MCAST_MSG);
@@ -131,7 +120,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
                                return -ENOMEM;
                        }
                }
-               res = tipc_bclink_send_msg(buf);
+               res = tipc_bclink_xmit(buf);
                if ((res < 0) && (dports.count != 0))
                        kfree_skb(ibuf);
        } else {
@@ -140,7 +129,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
 
        if (res >= 0) {
                if (ibuf)
-                       tipc_port_recv_mcast(ibuf, &dports);
+                       tipc_port_mcast_rcv(ibuf, &dports);
        } else {
                tipc_port_list_free(&dports);
        }
@@ -148,11 +137,11 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
 }
 
 /**
- * tipc_port_recv_mcast - deliver multicast message to all destination ports
+ * tipc_port_mcast_rcv - deliver multicast message to all destination ports
  *
  * If there is no port list, perform a lookup to create one
  */
-void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
 {
        struct tipc_msg *msg;
        struct tipc_port_list dports = {0, NULL, };
@@ -176,7 +165,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
                msg_set_destnode(msg, tipc_own_addr);
                if (dp->count == 1) {
                        msg_set_destport(msg, dp->ports[0]);
-                       tipc_port_recv_msg(buf);
+                       tipc_port_rcv(buf);
                        tipc_port_list_free(dp);
                        return;
                }
@@ -191,7 +180,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
                        if ((index == 0) && (cnt != 0))
                                item = item->next;
                        msg_set_destport(buf_msg(b), item->ports[index]);
-                       tipc_port_recv_msg(b);
+                       tipc_port_rcv(b);
                }
        }
 exit:
@@ -199,40 +188,32 @@ exit:
        tipc_port_list_free(dp);
 }
 
-/**
- * tipc_createport - create a generic TIPC port
+
+void tipc_port_wakeup(struct tipc_port *port)
+{
+       tipc_sock_wakeup(tipc_port_to_sock(port));
+}
+
+/* tipc_port_init - initiate TIPC port and lock it

  *
- * Returns pointer to (locked) TIPC port, or NULL if unable to create it
+ * Returns obtained reference if initialization is successful, zero otherwise
  */
-struct tipc_port *tipc_createport(struct sock *sk,
-                                 u32 (*dispatcher)(struct tipc_port *,
-                                 struct sk_buff *),
-                                 void (*wakeup)(struct tipc_port *),
-                                 const u32 importance)
+u32 tipc_port_init(struct tipc_port *p_ptr,
+                  const unsigned int importance)
 {
-       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        u32 ref;
 
-       p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
-       if (!p_ptr) {
-               pr_warn("Port creation failed, no memory\n");
-               return NULL;
-       }
        ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
        if (!ref) {
-               pr_warn("Port creation failed, ref. table exhausted\n");
-               kfree(p_ptr);
-               return NULL;
+               pr_warn("Port registration failed, ref. table exhausted\n");
+               return 0;
        }
 
-       p_ptr->sk = sk;
        p_ptr->max_pkt = MAX_PKT_DEFAULT;
        p_ptr->ref = ref;
        INIT_LIST_HEAD(&p_ptr->wait_list);
        INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
-       p_ptr->dispatcher = dispatcher;
-       p_ptr->wakeup = wakeup;
        k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
        INIT_LIST_HEAD(&p_ptr->publications);
        INIT_LIST_HEAD(&p_ptr->port_list);
@@ -248,10 +229,10 @@ struct tipc_port *tipc_createport(struct sock *sk,
        msg_set_origport(msg, ref);
        list_add_tail(&p_ptr->port_list, &ports);
        spin_unlock_bh(&tipc_port_list_lock);
-       return p_ptr;
+       return ref;
 }
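With the dispatcher/wakeup callbacks gone and the kfree() dropped from tipc_port_destroy() below, the port is evidently meant to be embedded in the socket (see tipc_port_to_sock() and tipc_sk_rcv() later in this file). A hedged sketch of the expected socket-side lifecycle; the struct and field names here are assumptions:

        struct tipc_sock *tsk = tipc_sk(sk);            /* assumed accessor */
        u32 ref = tipc_port_init(&tsk->port, TIPC_LOW_IMPORTANCE);

        if (!ref)
                return -ENOMEM;                         /* ref table exhausted */
        /* ... socket runs; tipc_sk_rcv() delivers via the embedded port ... */
        tipc_port_destroy(&tsk->port);                  /* no kfree(): freed with the socket */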
 
-int tipc_deleteport(struct tipc_port *p_ptr)
+void tipc_port_destroy(struct tipc_port *p_ptr)
 {
        struct sk_buff *buf = NULL;
 
@@ -272,67 +253,7 @@ int tipc_deleteport(struct tipc_port *p_ptr)
        list_del(&p_ptr->wait_list);
        spin_unlock_bh(&tipc_port_list_lock);
        k_term_timer(&p_ptr->timer);
-       kfree(p_ptr);
        tipc_net_route_msg(buf);
-       return 0;
-}
-
-static int port_unreliable(struct tipc_port *p_ptr)
-{
-       return msg_src_droppable(&p_ptr->phdr);
-}
-
-int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
-{
-       struct tipc_port *p_ptr;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       *isunreliable = port_unreliable(p_ptr);
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
-int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
-{
-       struct tipc_port *p_ptr;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       msg_set_src_droppable(&p_ptr->phdr, (isunreliable != 0));
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
-static int port_unreturnable(struct tipc_port *p_ptr)
-{
-       return msg_dest_droppable(&p_ptr->phdr);
-}
-
-int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
-{
-       struct tipc_port *p_ptr;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       *isunrejectable = port_unreturnable(p_ptr);
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
-int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
-{
-       struct tipc_port *p_ptr;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       msg_set_dest_droppable(&p_ptr->phdr, (isunrejectable != 0));
-       tipc_port_unlock(p_ptr);
-       return 0;
 }
 
 /*
@@ -350,8 +271,8 @@ static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE,
-                             port_peernode(p_ptr));
-               msg_set_destport(msg, port_peerport(p_ptr));
+                             tipc_port_peernode(p_ptr));
+               msg_set_destport(msg, tipc_port_peerport(p_ptr));
                msg_set_origport(msg, p_ptr->ref);
                msg_set_msgcnt(msg, ack);
        }
@@ -422,17 +343,17 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
        /* send returned message & dispose of rejected message */
        src_node = msg_prevnode(msg);
        if (in_own_node(src_node))
-               tipc_port_recv_msg(rbuf);
+               tipc_port_rcv(rbuf);
        else
-               tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
+               tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
 exit:
        kfree_skb(buf);
        return data_sz;
 }
 
-int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                             struct iovec const *msg_sect, unsigned int len,
-                             int err)
+int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
+                          struct iovec const *msg_sect, unsigned int len,
+                          int err)
 {
        struct sk_buff *buf;
        int res;
@@ -519,7 +440,7 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 er
        return buf;
 }
 
-void tipc_port_recv_proto_msg(struct sk_buff *buf)
+void tipc_port_proto_rcv(struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_port *p_ptr;
@@ -547,13 +468,12 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
        /* Process protocol message sent by peer */
        switch (msg_type(msg)) {
        case CONN_ACK:
-               wakeable = tipc_port_congested(p_ptr) && p_ptr->congested &&
-                       p_ptr->wakeup;
+               wakeable = tipc_port_congested(p_ptr) && p_ptr->congested;
                p_ptr->acked += msg_msgcnt(msg);
                if (!tipc_port_congested(p_ptr)) {
                        p_ptr->congested = 0;
                        if (wakeable)
-                               p_ptr->wakeup(p_ptr);
+                               tipc_port_wakeup(p_ptr);
                }
                break;
        case CONN_PROBE:
@@ -584,8 +504,8 @@ static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
                ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
 
        if (p_ptr->connected) {
-               u32 dport = port_peerport(p_ptr);
-               u32 destnode = port_peernode(p_ptr);
+               u32 dport = tipc_port_peerport(p_ptr);
+               u32 destnode = tipc_port_peernode(p_ptr);
 
                ret += tipc_snprintf(buf + ret, len - ret,
                                     " connected to <%u.%u.%u:%u>",
@@ -673,34 +593,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
        tipc_net_route_msg(buf);
 }
 
-int tipc_portimportance(u32 ref, unsigned int *importance)
-{
-       struct tipc_port *p_ptr;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       *importance = (unsigned int)msg_importance(&p_ptr->phdr);
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
-int tipc_set_portimportance(u32 ref, unsigned int imp)
-{
-       struct tipc_port *p_ptr;
-
-       if (imp > TIPC_CRITICAL_IMPORTANCE)
-               return -EINVAL;
-
-       p_ptr = tipc_port_lock(ref);
-       if (!p_ptr)
-               return -EINVAL;
-       msg_set_importance(&p_ptr->phdr, (u32)imp);
-       tipc_port_unlock(p_ptr);
-       return 0;
-}
-
-
 int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
                 struct tipc_name_seq const *seq)
 {
@@ -760,7 +652,7 @@ int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
        return res;
 }
 
-int tipc_connect(u32 ref, struct tipc_portid const *peer)
+int tipc_port_connect(u32 ref, struct tipc_portid const *peer)
 {
        struct tipc_port *p_ptr;
        int res;
@@ -768,17 +660,17 @@ int tipc_connect(u32 ref, struct tipc_portid const *peer)
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       res = __tipc_connect(ref, p_ptr, peer);
+       res = __tipc_port_connect(ref, p_ptr, peer);
        tipc_port_unlock(p_ptr);
        return res;
 }
 
 /*
- * __tipc_connect - connect to a remote peer
+ * __tipc_port_connect - connect to a remote peer
  *
  * Port must be locked.
  */
-int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
                        struct tipc_portid const *peer)
 {
        struct tipc_msg *msg;
@@ -815,7 +707,7 @@ exit:
  *
  * Port must be locked.
  */
-int __tipc_disconnect(struct tipc_port *tp_ptr)
+int __tipc_port_disconnect(struct tipc_port *tp_ptr)
 {
        if (tp_ptr->connected) {
                tp_ptr->connected = 0;
@@ -828,10 +720,10 @@ int __tipc_disconnect(struct tipc_port *tp_ptr)
 }
 
 /*
- * tipc_disconnect(): Disconnect port form peer.
+ * tipc_port_disconnect(): Disconnect port from peer.
  *                    This is a node local operation.
  */
-int tipc_disconnect(u32 ref)
+int tipc_port_disconnect(u32 ref)
 {
        struct tipc_port *p_ptr;
        int res;
@@ -839,15 +731,15 @@ int tipc_disconnect(u32 ref)
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return -EINVAL;
-       res = __tipc_disconnect(p_ptr);
+       res = __tipc_port_disconnect(p_ptr);
        tipc_port_unlock(p_ptr);
        return res;
 }
 
 /*
- * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect
  */
-int tipc_shutdown(u32 ref)
+int tipc_port_shutdown(u32 ref)
 {
        struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
@@ -859,13 +751,13 @@ int tipc_shutdown(u32 ref)
        buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
        tipc_port_unlock(p_ptr);
        tipc_net_route_msg(buf);
-       return tipc_disconnect(ref);
+       return tipc_port_disconnect(ref);
 }
 
 /**
- * tipc_port_recv_msg - receive message from lower layer and deliver to port user
+ * tipc_port_rcv - receive message from lower layer and deliver to port user
  */
-int tipc_port_recv_msg(struct sk_buff *buf)
+int tipc_port_rcv(struct sk_buff *buf)
 {
        struct tipc_port *p_ptr;
        struct tipc_msg *msg = buf_msg(buf);
@@ -882,7 +774,7 @@ int tipc_port_recv_msg(struct sk_buff *buf)
        /* validate destination & pass to port, otherwise reject message */
        p_ptr = tipc_port_lock(destport);
        if (likely(p_ptr)) {
-               err = p_ptr->dispatcher(p_ptr, buf);
+               err = tipc_sk_rcv(&tipc_port_to_sock(p_ptr)->sk, buf);
                tipc_port_unlock(p_ptr);
                if (likely(!err))
                        return dsz;
@@ -894,43 +786,43 @@ int tipc_port_recv_msg(struct sk_buff *buf)
 }
 
 /*
- *  tipc_port_recv_sections(): Concatenate and deliver sectioned
- *                        message for this node.
+ *  tipc_port_iovec_rcv: Concatenate and deliver sectioned
+ *                       message for this node.
  */
-static int tipc_port_recv_sections(struct tipc_port *sender,
-                                  struct iovec const *msg_sect,
-                                  unsigned int len)
+static int tipc_port_iovec_rcv(struct tipc_port *sender,
+                              struct iovec const *msg_sect,
+                              unsigned int len)
 {
        struct sk_buff *buf;
        int res;
 
        res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (likely(buf))
-               tipc_port_recv_msg(buf);
+               tipc_port_rcv(buf);
        return res;
 }
 
 /**
  * tipc_send - send message sections on connection
  */
-int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
+int tipc_send(struct tipc_port *p_ptr,
+             struct iovec const *msg_sect,
+             unsigned int len)
 {
-       struct tipc_port *p_ptr;
        u32 destnode;
        int res;
 
-       p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || !p_ptr->connected)
+       if (!p_ptr->connected)
                return -EINVAL;
 
        p_ptr->congested = 1;
        if (!tipc_port_congested(p_ptr)) {
-               destnode = port_peernode(p_ptr);
+               destnode = tipc_port_peernode(p_ptr);
                if (likely(!in_own_node(destnode)))
-                       res = tipc_link_send_sections_fast(p_ptr, msg_sect,
-                                                          len, destnode);
+                       res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+                                                       destnode);
                else
-                       res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+                       res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
 
                if (likely(res != -ELINKCONG)) {
                        p_ptr->congested = 0;
@@ -939,7 +831,7 @@ int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
                        return res;
                }
        }
-       if (port_unreliable(p_ptr)) {
+       if (tipc_port_unreliable(p_ptr)) {
                p_ptr->congested = 0;
                return len;
        }
@@ -949,17 +841,18 @@ int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
 /**
  * tipc_send2name - send message sections to port name
  */
-int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
-                  struct iovec const *msg_sect, unsigned int len)
+int tipc_send2name(struct tipc_port *p_ptr,
+                  struct tipc_name const *name,
+                  unsigned int domain,
+                  struct iovec const *msg_sect,
+                  unsigned int len)
 {
-       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        u32 destnode = domain;
        u32 destport;
        int res;
 
-       p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || p_ptr->connected)
+       if (p_ptr->connected)
                return -EINVAL;
 
        msg = &p_ptr->phdr;
@@ -974,39 +867,39 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 
        if (likely(destport || destnode)) {
                if (likely(in_own_node(destnode)))
-                       res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+                       res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
                else if (tipc_own_addr)
-                       res = tipc_link_send_sections_fast(p_ptr, msg_sect,
-                                                          len, destnode);
+                       res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+                                                       destnode);
                else
-                       res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
-                                                       len, TIPC_ERR_NO_NODE);
+                       res = tipc_port_iovec_reject(p_ptr, msg, msg_sect,
+                                                    len, TIPC_ERR_NO_NODE);
                if (likely(res != -ELINKCONG)) {
                        if (res > 0)
                                p_ptr->sent++;
                        return res;
                }
-               if (port_unreliable(p_ptr)) {
+               if (tipc_port_unreliable(p_ptr))
                        return len;
-               }
+
                return -ELINKCONG;
        }
-       return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
-                                        TIPC_ERR_NO_NAME);
+       return tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
+                                     TIPC_ERR_NO_NAME);
 }
 
 /**
  * tipc_send2port - send message sections to port identity
  */
-int tipc_send2port(u32 ref, struct tipc_portid const *dest,
-                  struct iovec const *msg_sect, unsigned int len)
+int tipc_send2port(struct tipc_port *p_ptr,
+                  struct tipc_portid const *dest,
+                  struct iovec const *msg_sect,
+                  unsigned int len)
 {
-       struct tipc_port *p_ptr;
        struct tipc_msg *msg;
        int res;
 
-       p_ptr = tipc_port_deref(ref);
-       if (!p_ptr || p_ptr->connected)
+       if (p_ptr->connected)
                return -EINVAL;
 
        msg = &p_ptr->phdr;
@@ -1017,20 +910,20 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
        msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
        if (in_own_node(dest->node))
-               res =  tipc_port_recv_sections(p_ptr, msg_sect, len);
+               res =  tipc_port_iovec_rcv(p_ptr, msg_sect, len);
        else if (tipc_own_addr)
-               res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
-                                                  dest->node);
+               res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+                                               dest->node);
        else
-               res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+               res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
                                                TIPC_ERR_NO_NODE);
        if (likely(res != -ELINKCONG)) {
                if (res > 0)
                        p_ptr->sent++;
                return res;
        }
-       if (port_unreliable(p_ptr)) {
+       if (tipc_port_unreliable(p_ptr))
                return len;
-       }
+
        return -ELINKCONG;
 }
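The three send routines above share one fallback once the link reports congestion (-ELINKCONG): an unreliable port, i.e. one whose source-droppable bit is set, reports the full length as sent and lets the message be dropped, while a reliable port returns the congestion error to the caller. A minimal stand-alone sketch of that decision, using hypothetical names and a plain flag instead of the real header bit:

/* Illustrative only: models the -ELINKCONG fallback used by the TIPC
 * send routines above. All names and values here are hypothetical
 * stand-ins, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_ELINKCONG 113      /* stand-in error value for link congestion */

struct demo_port {
	bool unreliable;        /* corresponds to tipc_port_unreliable(port) */
	bool congested;
};

/* Returns bytes "sent" on success, a negative value on error. */
static int demo_send(struct demo_port *p, unsigned int len)
{
	if (p->congested) {
		/* Unreliable ports silently drop: report success anyway. */
		if (p->unreliable)
			return len;
		return -DEMO_ELINKCONG; /* reliable ports see the congestion */
	}
	return len;
}

int main(void)
{
	struct demo_port rel   = { .unreliable = false, .congested = true };
	struct demo_port unrel = { .unreliable = true,  .congested = true };

	printf("reliable:   %d\n", demo_send(&rel, 100));   /* -113 */
	printf("unreliable: %d\n", demo_send(&unrel, 100)); /* 100  */
	return 0;
}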
index 34f12bd4074e49ff8b4c7809bfe0bc706941ec7c..a00397393bd1d9179bd779339500e34bd710aa5b 100644
@@ -1,7 +1,7 @@
 /*
  * net/tipc/port.h: Include file for TIPC port code
  *
- * Copyright (c) 1994-2007, Ericsson AB
+ * Copyright (c) 1994-2007, 2014, Ericsson AB
  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -48,7 +48,6 @@
 
 /**
  * struct tipc_port - TIPC port structure
- * @sk: pointer to socket handle
  * @lock: pointer to spinlock for controlling access to port
  * @connected: non-zero if port is currently connected to a peer port
  * @conn_type: TIPC type used when connection was established
@@ -60,8 +59,6 @@
  * @ref: unique reference to port in TIPC object registry
  * @phdr: preformatted message header used when sending messages
  * @port_list: adjacent ports in TIPC's global list of ports
- * @dispatcher: ptr to routine which handles received messages
- * @wakeup: ptr to routine to call when port is no longer congested
  * @wait_list: adjacent ports in list of ports waiting on link congestion
  * @waiting_pkts:
  * @sent: # of non-empty messages sent by port
@@ -74,7 +71,6 @@
  * @subscription: "node down" subscription used to terminate failed connections
  */
 struct tipc_port {
-       struct sock *sk;
        spinlock_t *lock;
        int connected;
        u32 conn_type;
@@ -86,8 +82,6 @@ struct tipc_port {
        u32 ref;
        struct tipc_msg phdr;
        struct list_head port_list;
-       u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
-       void (*wakeup)(struct tipc_port *);
        struct list_head wait_list;
        u32 waiting_pkts;
        u32 sent;
@@ -106,68 +100,71 @@ struct tipc_port_list;
 /*
  * TIPC port manipulation routines
  */
-struct tipc_port *tipc_createport(struct sock *sk,
-                                 u32 (*dispatcher)(struct tipc_port *,
-                                 struct sk_buff *),
-                                 void (*wakeup)(struct tipc_port *),
-                                 const u32 importance);
+u32 tipc_port_init(struct tipc_port *p_ptr,
+                  const unsigned int importance);
 
 int tipc_reject_msg(struct sk_buff *buf, u32 err);
 
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
-int tipc_deleteport(struct tipc_port *p_ptr);
-
-int tipc_portimportance(u32 portref, unsigned int *importance);
-int tipc_set_portimportance(u32 portref, unsigned int importance);
-
-int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
-int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
-
-int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
-int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+void tipc_port_destroy(struct tipc_port *p_ptr);
 
 int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
                 struct tipc_name_seq const *name_seq);
+
 int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
                  struct tipc_name_seq const *name_seq);
 
-int tipc_connect(u32 portref, struct tipc_portid const *port);
+int tipc_port_connect(u32 portref, struct tipc_portid const *port);
 
-int tipc_disconnect(u32 portref);
+int tipc_port_disconnect(u32 portref);
 
-int tipc_shutdown(u32 ref);
+int tipc_port_shutdown(u32 ref);
 
+void tipc_port_wakeup(struct tipc_port *port);
 
 /*
  * The following routines require that the port be locked on entry
  */
-int __tipc_disconnect(struct tipc_port *tp_ptr);
-int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
+int __tipc_port_disconnect(struct tipc_port *tp_ptr);
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
                   struct tipc_portid const *peer);
 int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
 /*
  * TIPC messaging routines
  */
-int tipc_port_recv_msg(struct sk_buff *buf);
-int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
-
-int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
-                  struct iovec const *msg_sect, unsigned int len);
+int tipc_port_rcv(struct sk_buff *buf);
+
+int tipc_send(struct tipc_port *port,
+             struct iovec const *msg_sect,
+             unsigned int len);
+
+int tipc_send2name(struct tipc_port *port,
+                  struct tipc_name const *name,
+                  u32 domain,
+                  struct iovec const *msg_sect,
+                  unsigned int len);
+
+int tipc_send2port(struct tipc_port *port,
+                  struct tipc_portid const *dest,
+                  struct iovec const *msg_sect,
+                  unsigned int len);
+
+int tipc_port_mcast_xmit(struct tipc_port *port,
+                        struct tipc_name_seq const *seq,
+                        struct iovec const *msg,
+                        unsigned int len);
+
+int tipc_port_iovec_reject(struct tipc_port *p_ptr,
+                          struct tipc_msg *hdr,
+                          struct iovec const *msg_sect,
+                          unsigned int len,
+                          int err);
 
-int tipc_send2port(u32 portref, struct tipc_portid const *dest,
-                  struct iovec const *msg_sect, unsigned int len);
-
-int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
-                  struct iovec const *msg, unsigned int len);
-
-int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                             struct iovec const *msg_sect, unsigned int len,
-                             int err);
 struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
+void tipc_port_proto_rcv(struct sk_buff *buf);
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp);
 void tipc_port_reinit(void);
 
 /**
@@ -188,14 +185,53 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
        spin_unlock_bh(p_ptr->lock);
 }
 
-static inline struct tipc_port *tipc_port_deref(u32 ref)
+static inline int tipc_port_congested(struct tipc_port *p_ptr)
 {
-       return (struct tipc_port *)tipc_ref_deref(ref);
+       return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
 }
 
-static inline int tipc_port_congested(struct tipc_port *p_ptr)
+
+static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
 {
-       return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
+       return msg_destnode(&p_ptr->phdr);
+}
+
+static inline u32 tipc_port_peerport(struct tipc_port *p_ptr)
+{
+       return msg_destport(&p_ptr->phdr);
+}
+
+static inline bool tipc_port_unreliable(struct tipc_port *port)
+{
+       return msg_src_droppable(&port->phdr) != 0;
+}
+
+static inline void tipc_port_set_unreliable(struct tipc_port *port,
+                                           bool unreliable)
+{
+       msg_set_src_droppable(&port->phdr, unreliable ? 1 : 0);
+}
+
+static inline bool tipc_port_unreturnable(struct tipc_port *port)
+{
+       return msg_dest_droppable(&port->phdr) != 0;
+}
+
+static inline void tipc_port_set_unreturnable(struct tipc_port *port,
+                                            bool unreturnable)
+{
+       msg_set_dest_droppable(&port->phdr, unreturnable ? 1 : 0);
+}
+
+
+static inline int tipc_port_importance(struct tipc_port *port)
+{
+       return msg_importance(&port->phdr);
+}
+
+static inline void tipc_port_set_importance(struct tipc_port *port, int imp)
+{
+       msg_set_importance(&port->phdr, (u32)imp);
 }
 
 #endif
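The inline helpers added to port.h above replace the old reference-based getter/setter calls by reading and writing bits directly in the port's preformatted message header (phdr). A small stand-alone model of that accessor pattern; all names below are hypothetical and a plain flag word stands in for the real struct tipc_msg fields:

/* Illustrative only: models how tipc_port_unreliable()/_set_unreliable()
 * keep a port property as a bit in the preformatted header rather than
 * as a separate field. All names below are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_hdr {
	uint32_t flags;
};

#define DEMO_SRC_DROPPABLE  (1u << 0)
#define DEMO_DEST_DROPPABLE (1u << 1)

struct demo_port {
	struct demo_hdr phdr;   /* preformatted header, reused for every send */
};

static inline bool demo_port_unreliable(const struct demo_port *p)
{
	return (p->phdr.flags & DEMO_SRC_DROPPABLE) != 0;
}

static inline void demo_port_set_unreliable(struct demo_port *p, bool on)
{
	if (on)
		p->phdr.flags |= DEMO_SRC_DROPPABLE;
	else
		p->phdr.flags &= ~DEMO_SRC_DROPPABLE;
}

int main(void)
{
	struct demo_port p = { .phdr = { .flags = 0 } };

	demo_port_set_unreliable(&p, true);
	printf("unreliable: %d\n", demo_port_unreliable(&p)); /* 1 */
	return 0;
}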
index de3d593e2fee08c384ca2970d7a0fc695ff4d5fc..3d4ecd754eeef578e708e8219d8dc636093ccdfe 100644
@@ -89,7 +89,7 @@ struct ref_table {
 
 static struct ref_table tipc_ref_table;
 
-static DEFINE_RWLOCK(ref_table_lock);
+static DEFINE_SPINLOCK(ref_table_lock);
 
 /**
  * tipc_ref_table_init - create reference table for objects
@@ -159,7 +159,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
        }
 
        /* take a free entry, if available; otherwise initialize a new entry */
-       write_lock_bh(&ref_table_lock);
+       spin_lock_bh(&ref_table_lock);
        if (tipc_ref_table.first_free) {
                index = tipc_ref_table.first_free;
                entry = &(tipc_ref_table.entries[index]);
@@ -175,7 +175,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
        } else {
                ref = 0;
        }
-       write_unlock_bh(&ref_table_lock);
+       spin_unlock_bh(&ref_table_lock);
 
        /*
         * Grab the lock so no one else can modify this entry
@@ -216,7 +216,7 @@ void tipc_ref_discard(u32 ref)
        index = ref & index_mask;
        entry = &(tipc_ref_table.entries[index]);
 
-       write_lock_bh(&ref_table_lock);
+       spin_lock_bh(&ref_table_lock);
 
        if (!entry->object) {
                pr_err("Attempt to discard ref. to non-existent obj\n");
@@ -242,7 +242,7 @@ void tipc_ref_discard(u32 ref)
        tipc_ref_table.last_free = index;
 
 exit:
-       write_unlock_bh(&ref_table_lock);
+       spin_unlock_bh(&ref_table_lock);
 }
 
 /**
@@ -264,20 +264,3 @@ void *tipc_ref_lock(u32 ref)
        }
        return NULL;
 }
-
-
-/**
- * tipc_ref_deref - return pointer referenced object (without locking it)
- */
-void *tipc_ref_deref(u32 ref)
-{
-       if (likely(tipc_ref_table.entries)) {
-               struct reference *entry;
-
-               entry = &tipc_ref_table.entries[ref &
-                                               tipc_ref_table.index_mask];
-               if (likely(entry->ref == ref))
-                       return entry->object;
-       }
-       return NULL;
-}
index 5bc8e7ab84de8192ca71c97a0ae415474f8e266c..d01aa1df63b86e391ed61a2fb2b31ff245fdc87e 100644
@@ -44,6 +44,5 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock);
 void tipc_ref_discard(u32 ref);
 
 void *tipc_ref_lock(u32 ref);
-void *tipc_ref_deref(u32 ref);
 
 #endif
index 0ed0eaa62f29e7148052907892ff90ebd20d2fed..29b7f26a12cf6b29c0bac4c65354385fad036391 100644
@@ -1,7 +1,7 @@
 /*
  * net/tipc/socket.c: TIPC socket API
  *
- * Copyright (c) 2001-2007, 2012 Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2014, Ericsson AB
  * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
  * All rights reserved.
  *
 #include "port.h"
 
 #include <linux/export.h>
-#include <net/sock.h>
 
 #define SS_LISTENING   -1      /* socket is listening */
 #define SS_READY       -2      /* socket is connectionless */
 
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
 
-struct tipc_sock {
-       struct sock sk;
-       struct tipc_port *p;
-       struct tipc_portid peer_name;
-       unsigned int conn_timeout;
-};
-
-#define tipc_sk(sk) ((struct tipc_sock *)(sk))
-#define tipc_sk_port(sk) (tipc_sk(sk)->p)
-
 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
-static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
-static void wakeupdispatch(struct tipc_port *tport);
 static void tipc_data_ready(struct sock *sk, int len);
 static void tipc_write_space(struct sock *sk);
-static int release(struct socket *sock);
-static int accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_release(struct socket *sock);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -115,6 +102,8 @@ static struct proto tipc_proto_kern;
  *   - port reference
  */
 
+#include "socket.h"
+
 /**
  * advance_rx_queue - discard first buffer in socket receive queue
  *
@@ -150,13 +139,15 @@ static void reject_rx_queue(struct sock *sk)
  *
  * Returns 0 on success, errno otherwise
  */
-static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
-                         int kern)
+static int tipc_sk_create(struct net *net, struct socket *sock,
+                         int protocol, int kern)
 {
        const struct proto_ops *ops;
        socket_state state;
        struct sock *sk;
-       struct tipc_port *tp_ptr;
+       struct tipc_sock *tsk;
+       struct tipc_port *port;
+       u32 ref;
 
        /* Validate arguments */
        if (unlikely(protocol != 0))
@@ -189,10 +180,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
        if (sk == NULL)
                return -ENOMEM;
 
-       /* Allocate TIPC port for socket to use */
-       tp_ptr = tipc_createport(sk, &dispatch, &wakeupdispatch,
-                                TIPC_LOW_IMPORTANCE);
-       if (unlikely(!tp_ptr)) {
+       tsk = tipc_sk(sk);
+       port = &tsk->port;
+
+       ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE);
+       if (!ref) {
+               pr_warn("Socket registration failed, ref. table exhausted\n");
                sk_free(sk);
                return -ENOMEM;
        }
@@ -206,17 +199,14 @@ static int tipc_sk_create(struct net *net, struct socket *sock, int protocol,
        sk->sk_rcvbuf = sysctl_tipc_rmem[1];
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
-       tipc_sk(sk)->p = tp_ptr;
        tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
-
-       spin_unlock_bh(tp_ptr->lock);
+       tipc_port_unlock(port);
 
        if (sock->state == SS_READY) {
-               tipc_set_portunreturnable(tp_ptr->ref, 1);
+               tipc_port_set_unreturnable(port, true);
                if (sock->type == SOCK_DGRAM)
-                       tipc_set_portunreliable(tp_ptr->ref, 1);
+                       tipc_port_set_unreliable(port, true);
        }
-
        return 0;
 }
 
@@ -254,7 +244,7 @@ int tipc_sock_create_local(int type, struct socket **res)
  */
 void tipc_sock_release_local(struct socket *sock)
 {
-       release(sock);
+       tipc_release(sock);
        sock->ops = NULL;
        sock_release(sock);
 }
@@ -280,7 +270,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
        if (ret < 0)
                return ret;
 
-       ret = accept(sock, *newsock, flags);
+       ret = tipc_accept(sock, *newsock, flags);
        if (ret < 0) {
                sock_release(*newsock);
                return ret;
@@ -290,7 +280,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
 }
 
 /**
- * release - destroy a TIPC socket
+ * tipc_release - destroy a TIPC socket
  * @sock: socket to destroy
  *
  * This routine cleans up any messages that are still queued on the socket.
@@ -305,10 +295,11 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
  *
  * Returns 0 on success, errno otherwise
  */
-static int release(struct socket *sock)
+static int tipc_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport;
+       struct tipc_sock *tsk;
+       struct tipc_port *port;
        struct sk_buff *buf;
        int res;
 
@@ -319,7 +310,8 @@ static int release(struct socket *sock)
        if (sk == NULL)
                return 0;
 
-       tport = tipc_sk_port(sk);
+       tsk = tipc_sk(sk);
+       port = &tsk->port;
        lock_sock(sk);
 
        /*
@@ -336,17 +328,16 @@ static int release(struct socket *sock)
                        if ((sock->state == SS_CONNECTING) ||
                            (sock->state == SS_CONNECTED)) {
                                sock->state = SS_DISCONNECTING;
-                               tipc_disconnect(tport->ref);
+                               tipc_port_disconnect(port->ref);
                        }
                        tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
                }
        }
 
-       /*
-        * Delete TIPC port; this ensures no more messages are queued
-        * (also disconnects an active connection & sends a 'FIN-' to peer)
+       /* Destroy TIPC port; also disconnects an active connection and
+        * sends a 'FIN-' to peer.
         */
-       res = tipc_deleteport(tport);
+       tipc_port_destroy(port);
 
        /* Discard any remaining (connection-based) messages in receive queue */
        __skb_queue_purge(&sk->sk_receive_queue);
@@ -362,7 +353,7 @@ static int release(struct socket *sock)
 }
 
 /**
- * bind - associate or disassocate TIPC name(s) with a socket
+ * tipc_bind - associate or disassociate TIPC name(s) with a socket
  * @sock: socket structure
  * @uaddr: socket address describing name(s) and desired operation
  * @uaddr_len: size of socket address data structure
@@ -376,16 +367,17 @@ static int release(struct socket *sock)
  * NOTE: This routine doesn't need to take the socket lock since it doesn't
  *       access any non-constant socket information.
  */
-static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
+                    int uaddr_len)
 {
        struct sock *sk = sock->sk;
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
-       struct tipc_port *tport = tipc_sk_port(sock->sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
        int res = -EINVAL;
 
        lock_sock(sk);
        if (unlikely(!uaddr_len)) {
-               res = tipc_withdraw(tport, 0, NULL);
+               res = tipc_withdraw(&tsk->port, 0, NULL);
                goto exit;
        }
 
@@ -413,15 +405,15 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
        }
 
        res = (addr->scope > 0) ?
-               tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
-               tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+               tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) :
+               tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq);
 exit:
        release_sock(sk);
        return res;
 }
 
 /**
- * get_name - get port ID of socket or peer socket
+ * tipc_getname - get port ID of socket or peer socket
  * @sock: socket structure
  * @uaddr: area for returned socket address
  * @uaddr_len: area for returned length of socket address
@@ -433,21 +425,21 @@ exit:
  *       accesses socket information that is unchanging (or which changes in
  *       a completely predictable manner).
  */
-static int get_name(struct socket *sock, struct sockaddr *uaddr,
-                   int *uaddr_len, int peer)
+static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
+                       int *uaddr_len, int peer)
 {
        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
-       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct tipc_sock *tsk = tipc_sk(sock->sk);
 
        memset(addr, 0, sizeof(*addr));
        if (peer) {
                if ((sock->state != SS_CONNECTED) &&
                        ((peer != 2) || (sock->state != SS_DISCONNECTING)))
                        return -ENOTCONN;
-               addr->addr.id.ref = tsock->peer_name.ref;
-               addr->addr.id.node = tsock->peer_name.node;
+               addr->addr.id.ref = tipc_port_peerport(&tsk->port);
+               addr->addr.id.node = tipc_port_peernode(&tsk->port);
        } else {
-               addr->addr.id.ref = tsock->p->ref;
+               addr->addr.id.ref = tsk->port.ref;
                addr->addr.id.node = tipc_own_addr;
        }
 
@@ -461,7 +453,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
 }
 
 /**
- * poll - read and possibly block on pollmask
+ * tipc_poll - read and possibly block on pollmask
  * @file: file structure associated with the socket
  * @sock: socket for which to calculate the poll bits
  * @wait: ???
@@ -500,22 +492,23 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
  * imply that the operation will succeed, merely that it should be performed
  * and will not block.
  */
-static unsigned int poll(struct file *file, struct socket *sock,
-                        poll_table *wait)
+static unsigned int tipc_poll(struct file *file, struct socket *sock,
+                             poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       struct tipc_sock *tsk = tipc_sk(sk);
        u32 mask = 0;
 
        sock_poll_wait(file, sk_sleep(sk), wait);
 
        switch ((int)sock->state) {
        case SS_UNCONNECTED:
-               if (!tipc_sk_port(sk)->congested)
+               if (!tsk->port.congested)
                        mask |= POLLOUT;
                break;
        case SS_READY:
        case SS_CONNECTED:
-               if (!tipc_sk_port(sk)->congested)
+               if (!tsk->port.congested)
                        mask |= POLLOUT;
                /* fall thru' */
        case SS_CONNECTING:
@@ -565,7 +558,7 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
        DEFINE_WAIT(wait);
        int done;
 
@@ -581,14 +574,15 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
                        return sock_intr_errno(*timeo_p);
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               done = sk_wait_event(sk, timeo_p, !tport->congested);
+               done = sk_wait_event(sk, timeo_p, !tsk->port.congested);
                finish_wait(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
 }
 
+
 /**
- * send_msg - send message in connectionless manner
+ * tipc_sendmsg - send message in connectionless manner
  * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
  * @m: message to send
@@ -601,11 +595,12 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
  *
  * Returns the number of bytes sent on success, or errno otherwise
  */
-static int send_msg(struct kiocb *iocb, struct socket *sock,
-                   struct msghdr *m, size_t total_len)
+static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
+                       struct msghdr *m, size_t total_len)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        int needs_conn;
        long timeo;
@@ -632,13 +627,13 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                        res = -EISCONN;
                        goto exit;
                }
-               if (tport->published) {
+               if (tsk->port.published) {
                        res = -EOPNOTSUPP;
                        goto exit;
                }
                if (dest->addrtype == TIPC_ADDR_NAME) {
-                       tport->conn_type = dest->addr.name.name.type;
-                       tport->conn_instance = dest->addr.name.name.instance;
+                       tsk->port.conn_type = dest->addr.name.name.type;
+                       tsk->port.conn_instance = dest->addr.name.name.instance;
                }
 
                /* Abort any pending connection attempts (very unlikely) */
@@ -651,13 +646,13 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                        res = dest_name_check(dest, m);
                        if (res)
                                break;
-                       res = tipc_send2name(tport->ref,
+                       res = tipc_send2name(port,
                                             &dest->addr.name.name,
                                             dest->addr.name.domain,
                                             m->msg_iov,
                                             total_len);
                } else if (dest->addrtype == TIPC_ADDR_ID) {
-                       res = tipc_send2port(tport->ref,
+                       res = tipc_send2port(port,
                                             &dest->addr.id,
                                             m->msg_iov,
                                             total_len);
@@ -669,10 +664,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                        res = dest_name_check(dest, m);
                        if (res)
                                break;
-                       res = tipc_multicast(tport->ref,
-                                            &dest->addr.nameseq,
-                                            m->msg_iov,
-                                            total_len);
+                       res = tipc_port_mcast_xmit(port,
+                                                  &dest->addr.nameseq,
+                                                  m->msg_iov,
+                                                  total_len);
                }
                if (likely(res != -ELINKCONG)) {
                        if (needs_conn && (res >= 0))
@@ -693,7 +688,8 @@ exit:
 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        DEFINE_WAIT(wait);
        int done;
 
@@ -712,14 +708,14 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                done = sk_wait_event(sk, timeo_p,
-                                    (!tport->congested || !tport->connected));
+                                    (!port->congested || !port->connected));
                finish_wait(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
 }
 
 /**
- * send_packet - send a connection-oriented message
+ * tipc_send_packet - send a connection-oriented message
  * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
  * @m: message to send
@@ -729,18 +725,18 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
  *
  * Returns the number of bytes sent on success, or errno otherwise
  */
-static int send_packet(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *m, size_t total_len)
+static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t total_len)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        int res = -EINVAL;
        long timeo;
 
        /* Handle implied connection establishment */
        if (unlikely(dest))
-               return send_msg(iocb, sock, m, total_len);
+               return tipc_sendmsg(iocb, sock, m, total_len);
 
        if (total_len > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
@@ -758,7 +754,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
        do {
-               res = tipc_send(tport->ref, m->msg_iov, total_len);
+               res = tipc_send(&tsk->port, m->msg_iov, total_len);
                if (likely(res != -ELINKCONG))
                        break;
                res = tipc_wait_for_sndpkt(sock, &timeo);
@@ -772,7 +768,7 @@ exit:
 }
 
 /**
- * send_stream - send stream-oriented data
+ * tipc_send_stream - send stream-oriented data
  * @iocb: (unused)
  * @sock: socket structure
  * @m: data to send
@@ -783,11 +779,11 @@ exit:
  * Returns the number of bytes sent on success (or partial success),
  * or errno if no data sent
  */
-static int send_stream(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *m, size_t total_len)
+static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t total_len)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
        struct msghdr my_msg;
        struct iovec my_iov;
        struct iovec *curr_iov;
@@ -804,7 +800,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
        /* Handle special cases where there is no connection */
        if (unlikely(sock->state != SS_CONNECTED)) {
                if (sock->state == SS_UNCONNECTED)
-                       res = send_packet(NULL, sock, m, total_len);
+                       res = tipc_send_packet(NULL, sock, m, total_len);
                else
                        res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
                goto exit;
@@ -835,21 +831,22 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
        my_msg.msg_name = NULL;
        bytes_sent = 0;
 
-       hdr_size = msg_hdr_sz(&tport->phdr);
+       hdr_size = msg_hdr_sz(&tsk->port.phdr);
 
        while (curr_iovlen--) {
                curr_start = curr_iov->iov_base;
                curr_left = curr_iov->iov_len;
 
                while (curr_left) {
-                       bytes_to_send = tport->max_pkt - hdr_size;
+                       bytes_to_send = tsk->port.max_pkt - hdr_size;
                        if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
                                bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
                        if (curr_left < bytes_to_send)
                                bytes_to_send = curr_left;
                        my_iov.iov_base = curr_start;
                        my_iov.iov_len = bytes_to_send;
-                       res = send_packet(NULL, sock, &my_msg, bytes_to_send);
+                       res = tipc_send_packet(NULL, sock, &my_msg,
+                                              bytes_to_send);
                        if (res < 0) {
                                if (bytes_sent)
                                        res = bytes_sent;
@@ -870,27 +867,25 @@ exit:
 
 /**
  * auto_connect - complete connection setup to a remote port
- * @sock: socket structure
+ * @tsk: tipc socket structure
  * @msg: peer's response message
  *
  * Returns 0 on success, errno otherwise
  */
-static int auto_connect(struct socket *sock, struct tipc_msg *msg)
+static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg)
 {
-       struct tipc_sock *tsock = tipc_sk(sock->sk);
-       struct tipc_port *p_ptr;
+       struct tipc_port *port = &tsk->port;
+       struct socket *sock = tsk->sk.sk_socket;
+       struct tipc_portid peer;
 
-       tsock->peer_name.ref = msg_origport(msg);
-       tsock->peer_name.node = msg_orignode(msg);
-       p_ptr = tipc_port_deref(tsock->p->ref);
-       if (!p_ptr)
-               return -EINVAL;
+       peer.ref = msg_origport(msg);
+       peer.node = msg_orignode(msg);
 
-       __tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
+       __tipc_port_connect(port->ref, port, &peer);
 
        if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
                return -EINVAL;
-       msg_set_importance(&p_ptr->phdr, (u32)msg_importance(msg));
+       msg_set_importance(&port->phdr, (u32)msg_importance(msg));
        sock->state = SS_CONNECTED;
        return 0;
 }
@@ -1021,7 +1016,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
 }
 
 /**
- * recv_msg - receive packet-oriented message
+ * tipc_recvmsg - receive packet-oriented message
  * @iocb: (unused)
  * @m: descriptor for message info
  * @buf_len: total size of user buffer area
@@ -1032,11 +1027,12 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
  *
  * Returns size of returned message data, errno otherwise
  */
-static int recv_msg(struct kiocb *iocb, struct socket *sock,
-                   struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
+                       struct msghdr *m, size_t buf_len, int flags)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        struct sk_buff *buf;
        struct tipc_msg *msg;
        long timeo;
@@ -1079,7 +1075,7 @@ restart:
        set_orig_addr(m, msg);
 
        /* Capture ancillary data (optional) */
-       res = anc_data_recv(m, msg, tport);
+       res = anc_data_recv(m, msg, port);
        if (res)
                goto exit;
 
@@ -1105,8 +1101,8 @@ restart:
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
                if ((sock->state != SS_READY) &&
-                   (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
-                       tipc_acknowledge(tport->ref, tport->conn_unacked);
+                   (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                       tipc_acknowledge(port->ref, port->conn_unacked);
                advance_rx_queue(sk);
        }
 exit:
@@ -1115,7 +1111,7 @@ exit:
 }
 
 /**
- * recv_stream - receive stream-oriented data
+ * tipc_recv_stream - receive stream-oriented data
  * @iocb: (unused)
  * @m: descriptor for message info
  * @buf_len: total size of user buffer area
@@ -1126,11 +1122,12 @@ exit:
  *
  * Returns size of returned message data, errno otherwise
  */
-static int recv_stream(struct kiocb *iocb, struct socket *sock,
-                      struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t buf_len, int flags)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        struct sk_buff *buf;
        struct tipc_msg *msg;
        long timeo;
@@ -1175,7 +1172,7 @@ restart:
        /* Optionally capture sender's address & ancillary data of first msg */
        if (sz_copied == 0) {
                set_orig_addr(m, msg);
-               res = anc_data_recv(m, msg, tport);
+               res = anc_data_recv(m, msg, port);
                if (res)
                        goto exit;
        }
@@ -1213,8 +1210,8 @@ restart:
 
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
-               if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
-                       tipc_acknowledge(tport->ref, tport->conn_unacked);
+               if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                       tipc_acknowledge(port->ref, port->conn_unacked);
                advance_rx_queue(sk);
        }
 
@@ -1266,17 +1263,19 @@ static void tipc_data_ready(struct sock *sk, int len)
 
 /**
  * filter_connect - Handle all incoming messages for a connection-based socket
- * @tsock: TIPC socket
+ * @tsk: TIPC socket
  * @msg: message
  *
  * Returns TIPC error status code and socket error status code
  * once it encounters some errors
  */
-static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
+static u32 filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
 {
-       struct socket *sock = tsock->sk.sk_socket;
+       struct sock *sk = &tsk->sk;
+       struct tipc_port *port = &tsk->port;
+       struct socket *sock = sk->sk_socket;
        struct tipc_msg *msg = buf_msg(*buf);
-       struct sock *sk = &tsock->sk;
+
        u32 retval = TIPC_ERR_NO_PORT;
        int res;
 
@@ -1286,10 +1285,10 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
        switch ((int)sock->state) {
        case SS_CONNECTED:
                /* Accept only connection-based messages sent by peer */
-               if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
+               if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) {
                        if (unlikely(msg_errcode(msg))) {
                                sock->state = SS_DISCONNECTING;
-                               __tipc_disconnect(tsock->p);
+                               __tipc_port_disconnect(port);
                        }
                        retval = TIPC_OK;
                }
@@ -1306,7 +1305,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
                if (unlikely(!msg_connected(msg)))
                        break;
 
-               res = auto_connect(sock, msg);
+               res = auto_connect(tsk, msg);
                if (res) {
                        sock->state = SS_DISCONNECTING;
                        sk->sk_err = -res;
@@ -1385,6 +1384,7 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
 static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
        struct socket *sock = sk->sk_socket;
+       struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *msg = buf_msg(buf);
        unsigned int limit = rcvbuf_limit(sk, buf);
        u32 res = TIPC_OK;
@@ -1397,7 +1397,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
                if (msg_connected(msg))
                        return TIPC_ERR_NO_PORT;
        } else {
-               res = filter_connect(tipc_sk(sk), &buf);
+               res = filter_connect(tsk, &buf);
                if (res != TIPC_OK || buf == NULL)
                        return res;
        }
@@ -1435,17 +1435,16 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
 }
 
 /**
- * dispatch - handle incoming message
- * @tport: TIPC port that received message
+ * tipc_sk_rcv - handle incoming message
+ * @sk:  socket receiving message
  * @buf: message
  *
  * Called with port lock already taken.
  *
  * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
  */
-static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
+u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf)
 {
-       struct sock *sk = tport->sk;
        u32 res;
 
        /*
@@ -1468,19 +1467,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
        return res;
 }
 
-/**
- * wakeupdispatch - wake up port after congestion
- * @tport: port to wakeup
- *
- * Called with port lock already taken.
- */
-static void wakeupdispatch(struct tipc_port *tport)
-{
-       struct sock *sk = tport->sk;
-
-       sk->sk_write_space(sk);
-}
-
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
 {
        struct sock *sk = sock->sk;
@@ -1504,7 +1490,7 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
 }
 
 /**
- * connect - establish a connection to another TIPC port
+ * tipc_connect - establish a connection to another TIPC port
  * @sock: socket structure
  * @dest: socket address for destination port
  * @destlen: size of socket address data structure
@@ -1512,8 +1498,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
  *
  * Returns 0 on success, errno otherwise
  */
-static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
-                  int flags)
+static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+                       int destlen, int flags)
 {
        struct sock *sk = sock->sk;
        struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
@@ -1554,7 +1540,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
                if (!timeout)
                        m.msg_flags = MSG_DONTWAIT;
 
-               res = send_msg(NULL, sock, &m, 0);
+               res = tipc_sendmsg(NULL, sock, &m, 0);
                if ((res < 0) && (res != -EWOULDBLOCK))
                        goto exit;
 
@@ -1585,13 +1571,13 @@ exit:
 }
 
 /**
- * listen - allow socket to listen for incoming connections
+ * tipc_listen - allow socket to listen for incoming connections
  * @sock: socket structure
  * @len: (unused)
  *
  * Returns 0 on success, errno otherwise
  */
-static int listen(struct socket *sock, int len)
+static int tipc_listen(struct socket *sock, int len)
 {
        struct sock *sk = sock->sk;
        int res;
@@ -1646,20 +1632,20 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 }
 
 /**
- * accept - wait for connection request
+ * tipc_accept - wait for connection request
  * @sock: listening socket
  * @newsock: new socket that is to be connected
  * @flags: file-related flags associated with socket
  *
  * Returns 0 on success, errno otherwise
  */
-static int accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
 {
        struct sock *new_sk, *sk = sock->sk;
        struct sk_buff *buf;
-       struct tipc_sock *new_tsock;
-       struct tipc_port *new_tport;
+       struct tipc_port *new_port;
        struct tipc_msg *msg;
+       struct tipc_portid peer;
        u32 new_ref;
        long timeo;
        int res;
@@ -1670,7 +1656,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
                res = -EINVAL;
                goto exit;
        }
-
        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        res = tipc_wait_for_accept(sock, timeo);
        if (res)
@@ -1683,9 +1668,8 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
                goto exit;
 
        new_sk = new_sock->sk;
-       new_tsock = tipc_sk(new_sk);
-       new_tport = new_tsock->p;
-       new_ref = new_tport->ref;
+       new_port = &tipc_sk(new_sk)->port;
+       new_ref = new_port->ref;
        msg = buf_msg(buf);
 
        /* we lock on new_sk; but lockdep sees the lock on sk */
@@ -1698,15 +1682,15 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
        reject_rx_queue(new_sk);
 
        /* Connect new socket to it's peer */
-       new_tsock->peer_name.ref = msg_origport(msg);
-       new_tsock->peer_name.node = msg_orignode(msg);
-       tipc_connect(new_ref, &new_tsock->peer_name);
+       peer.ref = msg_origport(msg);
+       peer.node = msg_orignode(msg);
+       tipc_port_connect(new_ref, &peer);
        new_sock->state = SS_CONNECTED;
 
-       tipc_set_portimportance(new_ref, msg_importance(msg));
+       tipc_port_set_importance(new_port, msg_importance(msg));
        if (msg_named(msg)) {
-               new_tport->conn_type = msg_nametype(msg);
-               new_tport->conn_instance = msg_nameinst(msg);
+               new_port->conn_type = msg_nametype(msg);
+               new_port->conn_instance = msg_nameinst(msg);
        }
 
        /*
@@ -1717,21 +1701,20 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
                struct msghdr m = {NULL,};
 
                advance_rx_queue(sk);
-               send_packet(NULL, new_sock, &m, 0);
+               tipc_send_packet(NULL, new_sock, &m, 0);
        } else {
                __skb_dequeue(&sk->sk_receive_queue);
                __skb_queue_head(&new_sk->sk_receive_queue, buf);
                skb_set_owner_r(buf, new_sk);
        }
        release_sock(new_sk);
-
 exit:
        release_sock(sk);
        return res;
 }
 
 /**
- * shutdown - shutdown socket connection
+ * tipc_shutdown - shutdown socket connection
  * @sock: socket structure
  * @how: direction to close (must be SHUT_RDWR)
  *
@@ -1739,10 +1722,11 @@ exit:
  *
  * Returns 0 on success, errno otherwise
  */
-static int shutdown(struct socket *sock, int how)
+static int tipc_shutdown(struct socket *sock, int how)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        struct sk_buff *buf;
        int res;
 
@@ -1763,10 +1747,10 @@ restart:
                                kfree_skb(buf);
                                goto restart;
                        }
-                       tipc_disconnect(tport->ref);
+                       tipc_port_disconnect(port->ref);
                        tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
                } else {
-                       tipc_shutdown(tport->ref);
+                       tipc_port_shutdown(port->ref);
                }
 
                sock->state = SS_DISCONNECTING;
@@ -1792,7 +1776,7 @@ restart:
 }
 
 /**
- * setsockopt - set socket option
+ * tipc_setsockopt - set socket option
  * @sock: socket structure
  * @lvl: option level
  * @opt: option identifier
@@ -1804,11 +1788,12 @@ restart:
  *
  * Returns 0 on success, errno otherwise
  */
-static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
-                     unsigned int ol)
+static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
+                          char __user *ov, unsigned int ol)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        u32 value;
        int res;
 
@@ -1826,16 +1811,16 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
 
        switch (opt) {
        case TIPC_IMPORTANCE:
-               res = tipc_set_portimportance(tport->ref, value);
+               tipc_port_set_importance(port, value);
                break;
        case TIPC_SRC_DROPPABLE:
                if (sock->type != SOCK_STREAM)
-                       res = tipc_set_portunreliable(tport->ref, value);
+                       tipc_port_set_unreliable(port, value);
                else
                        res = -ENOPROTOOPT;
                break;
        case TIPC_DEST_DROPPABLE:
-               res = tipc_set_portunreturnable(tport->ref, value);
+               tipc_port_set_unreturnable(port, value);
                break;
        case TIPC_CONN_TIMEOUT:
                tipc_sk(sk)->conn_timeout = value;
@@ -1851,7 +1836,7 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
 }
 
 /**
- * getsockopt - get socket option
+ * tipc_getsockopt - get socket option
  * @sock: socket structure
  * @lvl: option level
  * @opt: option identifier
@@ -1863,11 +1848,12 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
  *
  * Returns 0 on success, errno otherwise
  */
-static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
-                     int __user *ol)
+static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
+                          char __user *ov, int __user *ol)
 {
        struct sock *sk = sock->sk;
-       struct tipc_port *tport = tipc_sk_port(sk);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
        int len;
        u32 value;
        int res;
@@ -1884,13 +1870,13 @@ static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
 
        switch (opt) {
        case TIPC_IMPORTANCE:
-               res = tipc_portimportance(tport->ref, &value);
+               value = tipc_port_importance(port);
                break;
        case TIPC_SRC_DROPPABLE:
-               res = tipc_portunreliable(tport->ref, &value);
+               value = tipc_port_unreliable(port);
                break;
        case TIPC_DEST_DROPPABLE:
-               res = tipc_portunreturnable(tport->ref, &value);
+               value = tipc_port_unreturnable(port);
                break;
        case TIPC_CONN_TIMEOUT:
                value = tipc_sk(sk)->conn_timeout;
@@ -1925,20 +1911,20 @@ static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
 static const struct proto_ops msg_ops = {
        .owner          = THIS_MODULE,
        .family         = AF_TIPC,
-       .release        = release,
-       .bind           = bind,
-       .connect        = connect,
+       .release        = tipc_release,
+       .bind           = tipc_bind,
+       .connect        = tipc_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
-       .getname        = get_name,
-       .poll           = poll,
+       .getname        = tipc_getname,
+       .poll           = tipc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
-       .shutdown       = shutdown,
-       .setsockopt     = setsockopt,
-       .getsockopt     = getsockopt,
-       .sendmsg        = send_msg,
-       .recvmsg        = recv_msg,
+       .shutdown       = tipc_shutdown,
+       .setsockopt     = tipc_setsockopt,
+       .getsockopt     = tipc_getsockopt,
+       .sendmsg        = tipc_sendmsg,
+       .recvmsg        = tipc_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage
 };
@@ -1946,20 +1932,20 @@ static const struct proto_ops msg_ops = {
 static const struct proto_ops packet_ops = {
        .owner          = THIS_MODULE,
        .family         = AF_TIPC,
-       .release        = release,
-       .bind           = bind,
-       .connect        = connect,
+       .release        = tipc_release,
+       .bind           = tipc_bind,
+       .connect        = tipc_connect,
        .socketpair     = sock_no_socketpair,
-       .accept         = accept,
-       .getname        = get_name,
-       .poll           = poll,
+       .accept         = tipc_accept,
+       .getname        = tipc_getname,
+       .poll           = tipc_poll,
        .ioctl          = sock_no_ioctl,
-       .listen         = listen,
-       .shutdown       = shutdown,
-       .setsockopt     = setsockopt,
-       .getsockopt     = getsockopt,
-       .sendmsg        = send_packet,
-       .recvmsg        = recv_msg,
+       .listen         = tipc_listen,
+       .shutdown       = tipc_shutdown,
+       .setsockopt     = tipc_setsockopt,
+       .getsockopt     = tipc_getsockopt,
+       .sendmsg        = tipc_send_packet,
+       .recvmsg        = tipc_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage
 };
@@ -1967,20 +1953,20 @@ static const struct proto_ops packet_ops = {
 static const struct proto_ops stream_ops = {
        .owner          = THIS_MODULE,
        .family         = AF_TIPC,
-       .release        = release,
-       .bind           = bind,
-       .connect        = connect,
+       .release        = tipc_release,
+       .bind           = tipc_bind,
+       .connect        = tipc_connect,
        .socketpair     = sock_no_socketpair,
-       .accept         = accept,
-       .getname        = get_name,
-       .poll           = poll,
+       .accept         = tipc_accept,
+       .getname        = tipc_getname,
+       .poll           = tipc_poll,
        .ioctl          = sock_no_ioctl,
-       .listen         = listen,
-       .shutdown       = shutdown,
-       .setsockopt     = setsockopt,
-       .getsockopt     = getsockopt,
-       .sendmsg        = send_stream,
-       .recvmsg        = recv_stream,
+       .listen         = tipc_listen,
+       .shutdown       = tipc_shutdown,
+       .setsockopt     = tipc_setsockopt,
+       .getsockopt     = tipc_getsockopt,
+       .sendmsg        = tipc_send_stream,
+       .recvmsg        = tipc_recv_stream,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage
 };
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
new file mode 100644
index 0000000..74e5c7f
--- /dev/null
@@ -0,0 +1,72 @@
+/* net/tipc/socket.h: Include file for TIPC socket code
+ *
+ * Copyright (c) 2014, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SOCK_H
+#define _TIPC_SOCK_H
+
+#include "port.h"
+#include <net/sock.h>
+
+/**
+ * struct tipc_sock - TIPC socket structure
+ * @sk: socket - interacts with 'port' and with user via the socket API
+ * @port: port - interacts with 'sk' and with the rest of the TIPC stack
+ * @peer_name: the peer of the connection, if any
+ * @conn_timeout: the time we can wait for an unresponded setup request
+ */
+
+struct tipc_sock {
+       struct sock sk;
+       struct tipc_port port;
+       unsigned int conn_timeout;
+};
+
+static inline struct tipc_sock *tipc_sk(const struct sock *sk)
+{
+       return container_of(sk, struct tipc_sock, sk);
+}
+
+static inline struct tipc_sock *tipc_port_to_sock(const struct tipc_port *port)
+{
+       return container_of(port, struct tipc_sock, port);
+}
+
+static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
+{
+       tsk->sk.sk_write_space(&tsk->sk);
+}
+
+u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf);
+
+#endif
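
A brief usage sketch, not part of the patch, for the container_of() accessors defined above: given either the embedded struct sock or the embedded tipc_port, the owning tipc_sock can be recovered without any separate back-pointer. The function name and locals below are hypothetical.

/* Hypothetical helper; it assumes net/tipc/socket.h above is included
 * and only exercises the accessors defined there.
 */
static void example_wakeup(struct tipc_port *port)
{
        /* recover the enclosing tipc_sock from its embedded port ... */
        struct tipc_sock *tsk = tipc_port_to_sock(port);
        /* ... and its embedded struct sock refers back to the same object */
        struct sock *sk = &tsk->sk;

        WARN_ON(tipc_sk(sk) != tsk);

        /* equivalent to tipc_sock_wakeup(tsk) */
        sk->sk_write_space(sk);
}
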
index 11ee4ed04f73ea2ae0dd1ae6293837785d8fd5f8..3e02ade508d8804d4158a03ad5de474d64bf7594 100644 (file)
@@ -7,7 +7,7 @@
 
 
 static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
-                             struct net_device *dev)
+                             struct net_device *dev, bool notify)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
@@ -27,22 +27,24 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
        err = rdev_stop_ap(rdev, dev);
        if (!err) {
                wdev->beacon_interval = 0;
-               wdev->channel = NULL;
+               memset(&wdev->chandef, 0, sizeof(wdev->chandef));
                wdev->ssid_len = 0;
                rdev_set_qos_map(rdev, dev, NULL);
+               if (notify)
+                       nl80211_send_ap_stopped(wdev);
        }
 
        return err;
 }
 
 int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
-                    struct net_device *dev)
+                    struct net_device *dev, bool notify)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        int err;
 
        wdev_lock(wdev);
-       err = __cfg80211_stop_ap(rdev, dev);
+       err = __cfg80211_stop_ap(rdev, dev, notify);
        wdev_unlock(wdev);
 
        return err;
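
The new bool argument records who initiated the AP shutdown: internal teardown paths pass true so the NL80211_CMD_STOP_AP event (added further down in the nl80211.c hunks) reaches userspace, while the nl80211 stop-AP handler passes false because the requester already knows. A minimal caller sketch; rdev and dev are placeholders for the caller's own pointers.

/* Illustrative caller only, not taken from the patch. */
static int example_teardown_ap(struct cfg80211_registered_device *rdev,
                               struct net_device *dev)
{
        /* internal teardown (e.g. cfg80211_leave()): notify userspace */
        return cfg80211_stop_ap(rdev, dev, true);
        /* the NL80211_CMD_STOP_AP handler passes false instead */
}
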
index 78559b5bbd1fe1d98c533f18dd1cc4fce4087baa..9c9501a35fb5c6a43a142132306998405e086774 100644 (file)
@@ -490,6 +490,62 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
        return r;
 }
 
+static unsigned int cfg80211_get_chans_dfs_cac_time(struct wiphy *wiphy,
+                                                   u32 center_freq,
+                                                   u32 bandwidth)
+{
+       struct ieee80211_channel *c;
+       u32 start_freq, end_freq, freq;
+       unsigned int dfs_cac_ms = 0;
+
+       start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
+       end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
+
+       for (freq = start_freq; freq <= end_freq; freq += 20) {
+               c = ieee80211_get_channel(wiphy, freq);
+               if (!c)
+                       return 0;
+
+               if (c->flags & IEEE80211_CHAN_DISABLED)
+                       return 0;
+
+               if (!(c->flags & IEEE80211_CHAN_RADAR))
+                       continue;
+
+               if (c->dfs_cac_ms > dfs_cac_ms)
+                       dfs_cac_ms = c->dfs_cac_ms;
+       }
+
+       return dfs_cac_ms;
+}
+
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
+                             const struct cfg80211_chan_def *chandef)
+{
+       int width;
+       unsigned int t1 = 0, t2 = 0;
+
+       if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+               return 0;
+
+       width = cfg80211_chandef_get_width(chandef);
+       if (width < 0)
+               return 0;
+
+       t1 = cfg80211_get_chans_dfs_cac_time(wiphy,
+                                            chandef->center_freq1,
+                                            width);
+
+       if (!chandef->center_freq2)
+               return t1;
+
+       t2 = cfg80211_get_chans_dfs_cac_time(wiphy,
+                                            chandef->center_freq2,
+                                            width);
+
+       return max(t1, t2);
+}
 
 static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
                                        u32 center_freq, u32 bandwidth,
@@ -642,7 +698,8 @@ int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
 void
 cfg80211_get_chan_state(struct wireless_dev *wdev,
                        struct ieee80211_channel **chan,
-                       enum cfg80211_chan_mode *chanmode)
+                       enum cfg80211_chan_mode *chanmode,
+                       u8 *radar_detect)
 {
        *chan = NULL;
        *chanmode = CHAN_MODE_UNDEFINED;
@@ -660,6 +717,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
                                     !wdev->ibss_dfs_possible)
                                  ? CHAN_MODE_SHARED
                                  : CHAN_MODE_EXCLUSIVE;
+
+                       /* consider worst-case - IBSS can try to return to the
+                        * original user-specified channel as creator */
+                       if (wdev->ibss_dfs_possible)
+                               *radar_detect |= BIT(wdev->chandef.width);
                        return;
                }
                break;
@@ -674,33 +736,36 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
                if (wdev->cac_started) {
-                       *chan = wdev->channel;
+                       *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
+                       *radar_detect |= BIT(wdev->chandef.width);
                } else if (wdev->beacon_interval) {
-                       *chan = wdev->channel;
+                       *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
+
+                       if (cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                         &wdev->chandef))
+                               *radar_detect |= BIT(wdev->chandef.width);
                }
                return;
        case NL80211_IFTYPE_MESH_POINT:
                if (wdev->mesh_id_len) {
-                       *chan = wdev->channel;
+                       *chan = wdev->chandef.chan;
                        *chanmode = CHAN_MODE_SHARED;
+
+                       if (cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                         &wdev->chandef))
+                               *radar_detect |= BIT(wdev->chandef.width);
                }
                return;
        case NL80211_IFTYPE_MONITOR:
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_WDS:
-               /* these interface types don't really have a channel */
-               return;
        case NL80211_IFTYPE_P2P_DEVICE:
-               if (wdev->wiphy->features &
-                               NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL)
-                       *chanmode = CHAN_MODE_EXCLUSIVE;
+               /* these interface types don't really have a channel */
                return;
        case NL80211_IFTYPE_UNSPECIFIED:
        case NUM_NL80211_IFTYPES:
                WARN_ON(1);
        }
-
-       return;
 }
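
cfg80211_chandef_dfs_cac_time() above walks every 20 MHz slice of the channel definition, keeps the largest per-channel dfs_cac_ms, and repeats the scan for the second segment of an 80+80 MHz chandef, returning the maximum of the two. A standalone sketch of that "longest CAC wins" loop, with get_cac_ms() as a hypothetical stand-in for the ieee80211_get_channel() lookup:

/* Illustration only; get_cac_ms() is a made-up callback standing in for
 * looking up a channel and reading its dfs_cac_ms.
 */
static unsigned int max_cac_ms(u32 start_freq, u32 end_freq,
                               unsigned int (*get_cac_ms)(u32 freq))
{
        unsigned int cac_ms = 0;
        u32 freq;

        for (freq = start_freq; freq <= end_freq; freq += 20) {
                unsigned int c = get_cac_ms(freq);

                if (c > cac_ms)
                        cac_ms = c;     /* the longest CAC requirement wins */
        }
        return cac_ms;
}
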
index a3bf18d116095f14f4238bc792d22e5f51f2dd26..086cddd03ba6edd79d1609ecf713146bc756c1ff 100644 (file)
@@ -737,7 +737,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
 }
 EXPORT_SYMBOL(cfg80211_unregister_wdev);
 
-static struct device_type wiphy_type = {
+static const struct device_type wiphy_type = {
        .name   = "wlan",
 };
 
@@ -783,7 +783,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
                break;
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_P2P_GO:
-               cfg80211_stop_ap(rdev, dev);
+               cfg80211_stop_ap(rdev, dev, true);
                break;
        default:
                break;
index f1d193b557b69a5c021b76130f04bf58655288dd..5b1fdcadd46985548f4a04f4f64ddaccbc935661 100644 (file)
@@ -166,7 +166,6 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
        mutex_unlock(&wdev->mtx);
 }
 
-#define ASSERT_RDEV_LOCK(rdev) ASSERT_RTNL()
 #define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
 
 static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
@@ -211,6 +210,7 @@ struct cfg80211_event {
                } dc;
                struct {
                        u8 bssid[ETH_ALEN];
+                       struct ieee80211_channel *channel;
                } ij;
        };
 };
@@ -245,10 +245,6 @@ void cfg80211_bss_age(struct cfg80211_registered_device *dev,
                       unsigned long age_secs);
 
 /* IBSS */
-int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
-                        struct net_device *dev,
-                        struct cfg80211_ibss_params *params,
-                        struct cfg80211_cached_keys *connkeys);
 int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
                       struct net_device *dev,
                       struct cfg80211_ibss_params *params,
@@ -258,7 +254,8 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
                          struct net_device *dev, bool nowext);
 int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
                        struct net_device *dev, bool nowext);
-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+                           struct ieee80211_channel *channel);
 int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
                            struct wireless_dev *wdev);
 
@@ -281,7 +278,7 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
 
 /* AP */
 int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
-                    struct net_device *dev);
+                    struct net_device *dev, bool notify);
 
 /* MLME */
 int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
@@ -400,6 +397,9 @@ void cfg80211_set_dfs_state(struct wiphy *wiphy,
 
 void cfg80211_dfs_channels_update_work(struct work_struct *work);
 
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
+                             const struct cfg80211_chan_def *chandef);
 
 static inline int
 cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
@@ -443,7 +443,8 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
 void
 cfg80211_get_chan_state(struct wireless_dev *wdev,
                        struct ieee80211_channel **chan,
-                       enum cfg80211_chan_mode *chanmode);
+                       enum cfg80211_chan_mode *chanmode,
+                       u8 *radar_detect);
 
 int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
                                 struct cfg80211_chan_def *chandef);
index 9a8217d2a90882f872451833cabe30159a64953f..b35da8dc85deff4640ecb0975cc829326b68034c 100644 (file)
@@ -66,6 +66,7 @@ function parse_reg_rule()
        units = $8
        sub(/\)/, "", units)
        sub(/,/, "", units)
+       dfs_cac = $9
        if (units == "mW") {
                if (power == 100) {
                        power = 20
@@ -78,7 +79,12 @@ function parse_reg_rule()
                } else {
                        print "Unknown power value in database!"
                }
+       } else {
+               dfs_cac = $8
        }
+       sub(/,/, "", dfs_cac)
+       sub(/\(/, "", dfs_cac)
+       sub(/\)/, "", dfs_cac)
        flagstr = ""
        for (i=8; i<=NF; i++)
                flagstr = flagstr $i
@@ -105,11 +111,13 @@ function parse_reg_rule()
                        flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
                } else if (flagarray[arg] == "NO-IR") {
                        flags = flags "\n\t\t\tNL80211_RRF_NO_IR | "
+               } else if (flagarray[arg] == "AUTO-BW") {
+                       flags = flags "\n\t\t\tNL80211_RRF_AUTO_BW | "
                }
 
        }
        flags = flags "0"
-       printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
+       printf "\t\tREG_RULE_EXT(%d, %d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, dfs_cac, flags
        rules++
 }
 
index f911c5f9f903d8ccdd791aaf26d950b19ad6dd9f..a6b5bdad039c7450f276d1e56994661e2e41952c 100644 (file)
@@ -14,7 +14,8 @@
 #include "rdev-ops.h"
 
 
-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+                           struct ieee80211_channel *channel)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_bss *bss;
@@ -28,8 +29,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
        if (!wdev->ssid_len)
                return;
 
-       bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
-                              wdev->ssid, wdev->ssid_len,
+       bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
                               WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
 
        if (WARN_ON(!bss))
@@ -54,21 +54,26 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
 #endif
 }
 
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+                         struct ieee80211_channel *channel, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
        struct cfg80211_event *ev;
        unsigned long flags;
 
-       trace_cfg80211_ibss_joined(dev, bssid);
+       trace_cfg80211_ibss_joined(dev, bssid, channel);
+
+       if (WARN_ON(!channel))
+               return;
 
        ev = kzalloc(sizeof(*ev), gfp);
        if (!ev)
                return;
 
        ev->type = EVENT_IBSS_JOINED;
-       memcpy(ev->cr.bssid, bssid, ETH_ALEN);
+       memcpy(ev->ij.bssid, bssid, ETH_ALEN);
+       ev->ij.channel = channel;
 
        spin_lock_irqsave(&wdev->event_lock, flags);
        list_add_tail(&ev->list, &wdev->event_list);
@@ -77,10 +82,10 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
 }
 EXPORT_SYMBOL(cfg80211_ibss_joined);
 
-int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
-                        struct net_device *dev,
-                        struct cfg80211_ibss_params *params,
-                        struct cfg80211_cached_keys *connkeys)
+static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
+                               struct net_device *dev,
+                               struct cfg80211_ibss_params *params,
+                               struct cfg80211_cached_keys *connkeys)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct ieee80211_channel *check_chan;
@@ -117,17 +122,17 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
 
        wdev->ibss_fixed = params->channel_fixed;
        wdev->ibss_dfs_possible = params->userspace_handles_dfs;
+       wdev->chandef = params->chandef;
 #ifdef CONFIG_CFG80211_WEXT
        wdev->wext.ibss.chandef = params->chandef;
 #endif
        check_chan = params->chandef.chan;
        if (params->userspace_handles_dfs) {
-               /* use channel NULL to check for radar even if the current
-                * channel is not a radar channel - it might decide to change
-                * to DFS channel later.
+               /* Check for radar even if the current channel is not
+                * a radar channel - it might decide to change to a DFS
+                * channel later.
                 */
                radar_detect_width = BIT(params->chandef.width);
-               check_chan = NULL;
        }
 
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
@@ -200,6 +205,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
 
        wdev->current_bss = NULL;
        wdev->ssid_len = 0;
+       memset(&wdev->chandef, 0, sizeof(wdev->chandef));
 #ifdef CONFIG_CFG80211_WEXT
        if (!nowext)
                wdev->wext.ibss.ssid_len = 0;
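
cfg80211_ibss_joined() now takes the channel the IBSS actually formed on; it is stored in the queued event (ev->ij.channel) and lets __cfg80211_ibss_joined() look the BSS up by channel and BSSID instead of by SSID. A driver-side sketch of the updated call; dev, bssid and chan are placeholders for whatever the driver tracked during the join:

/* Hypothetical driver notification using the new prototype. */
static void example_notify_ibss(struct net_device *dev, const u8 *bssid,
                                struct ieee80211_channel *chan)
{
        /* chan must not be NULL; the core now WARNs and drops the event */
        cfg80211_ibss_joined(dev, bssid, chan, GFP_KERNEL);
}
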
index 885862447b63c3434c0784924de2b984fcfedc25..5af5cc6b2c4c2406475a3063a69eef80cc14691f 100644 (file)
@@ -195,7 +195,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
        if (!err) {
                memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
                wdev->mesh_id_len = setup->mesh_id_len;
-               wdev->channel = setup->chandef.chan;
+               wdev->chandef = setup->chandef;
        }
 
        return err;
@@ -236,6 +236,12 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
                if (!netif_running(wdev->netdev))
                        return -ENETDOWN;
 
+               /* cfg80211_can_use_chan() calls
+                * cfg80211_can_use_iftype_chan() with no radar
+                * detection, so if we're trying to use a radar
+                * channel here, something is wrong.
+                */
+               WARN_ON_ONCE(chandef->chan->flags & IEEE80211_CHAN_RADAR);
                err = cfg80211_can_use_chan(rdev, wdev, chandef->chan,
                                            CHAN_MODE_SHARED);
                if (err)
@@ -244,7 +250,7 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
                err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
                                                     chandef->chan);
                if (!err)
-                       wdev->channel = chandef->chan;
+                       wdev->chandef = *chandef;
 
                return err;
        }
@@ -276,7 +282,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
        err = rdev_leave_mesh(rdev, dev);
        if (!err) {
                wdev->mesh_id_len = 0;
-               wdev->channel = NULL;
+               memset(&wdev->chandef, 0, sizeof(wdev->chandef));
                rdev_set_qos_map(rdev, dev, NULL);
        }
 
index 52cca05044a898b977927f2c9e540b00d6811634..c52ff59a3e96d7cabb892bff220b86c580069a43 100644 (file)
@@ -772,13 +772,13 @@ void cfg80211_cac_event(struct net_device *netdev,
        if (WARN_ON(!wdev->cac_started))
                return;
 
-       if (WARN_ON(!wdev->channel))
+       if (WARN_ON(!wdev->chandef.chan))
                return;
 
        switch (event) {
        case NL80211_RADAR_CAC_FINISHED:
                timeout = wdev->cac_start_time +
-                         msecs_to_jiffies(IEEE80211_DFS_MIN_CAC_TIME_MS);
+                         msecs_to_jiffies(wdev->cac_time_ms);
                WARN_ON(!time_after_eq(jiffies, timeout));
                cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
                break;
index 4fe2e6e2bc7635daef9aee5752ed97eafb29012a..052c1bf8ffaceb92d3f117231a46fca78ed30216 100644 (file)
@@ -382,6 +382,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
        [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
                                   .len = IEEE80211_QOS_MAP_LEN_MAX },
+       [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
+       [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
+       [NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -590,6 +593,10 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
                        if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME,
                                        time))
                                goto nla_put_failure;
+                       if (nla_put_u32(msg,
+                                       NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
+                                       chan->dfs_cac_ms))
+                               goto nla_put_failure;
                }
        }
 
@@ -855,6 +862,19 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
        return 0;
 }
 
+static struct ieee80211_channel *nl80211_get_valid_chan(struct wiphy *wiphy,
+                                                       struct nlattr *tb)
+{
+       struct ieee80211_channel *chan;
+
+       if (tb == NULL)
+               return NULL;
+       chan = ieee80211_get_channel(wiphy, nla_get_u32(tb));
+       if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+               return NULL;
+       return chan;
+}
+
 static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
 {
        struct nlattr *nl_modes = nla_nest_start(msg, attr);
@@ -1586,6 +1606,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
                    (nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
                     nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
                        goto nla_put_failure;
+
+               if (dev->wiphy.max_ap_assoc_sta &&
+                   nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
+                               dev->wiphy.max_ap_assoc_sta))
+                       goto nla_put_failure;
+
                state->split_start++;
                break;
        case 11:
@@ -2035,10 +2061,12 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
                nla_for_each_nested(nl_txq_params,
                                    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
                                    rem_txq_params) {
-                       nla_parse(tb, NL80211_TXQ_ATTR_MAX,
-                                 nla_data(nl_txq_params),
-                                 nla_len(nl_txq_params),
-                                 txq_params_policy);
+                       result = nla_parse(tb, NL80211_TXQ_ATTR_MAX,
+                                          nla_data(nl_txq_params),
+                                          nla_len(nl_txq_params),
+                                          txq_params_policy);
+                       if (result)
+                               return result;
                        result = parse_txq_params(tb, &txq_params);
                        if (result)
                                return result;
@@ -3259,7 +3287,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
        if (!err) {
                wdev->preset_chandef = params.chandef;
                wdev->beacon_interval = params.beacon_interval;
-               wdev->channel = params.chandef.chan;
+               wdev->chandef = params.chandef;
                wdev->ssid_len = params.ssid_len;
                memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
        }
@@ -3304,7 +3332,7 @@ static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
 
-       return cfg80211_stop_ap(rdev, dev);
+       return cfg80211_stop_ap(rdev, dev, false);
 }
 
 static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -3902,8 +3930,8 @@ static struct net_device *get_vlan(struct genl_info *info,
        return ERR_PTR(ret);
 }
 
-static struct nla_policy
-nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+static const struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
        [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
        [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
 };
@@ -4590,6 +4618,7 @@ static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] =
        [NL80211_ATTR_FREQ_RANGE_MAX_BW]        = { .type = NLA_U32 },
        [NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]  = { .type = NLA_U32 },
        [NL80211_ATTR_POWER_RULE_MAX_EIRP]      = { .type = NLA_U32 },
+       [NL80211_ATTR_DFS_CAC_TIME]             = { .type = NLA_U32 },
 };
 
 static int parse_reg_rule(struct nlattr *tb[],
@@ -4625,6 +4654,10 @@ static int parse_reg_rule(struct nlattr *tb[],
                power_rule->max_antenna_gain =
                        nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
 
+       if (tb[NL80211_ATTR_DFS_CAC_TIME])
+               reg_rule->dfs_cac_ms =
+                       nla_get_u32(tb[NL80211_ATTR_DFS_CAC_TIME]);
+
        return 0;
 }
 
@@ -5086,6 +5119,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
                const struct ieee80211_reg_rule *reg_rule;
                const struct ieee80211_freq_range *freq_range;
                const struct ieee80211_power_rule *power_rule;
+               unsigned int max_bandwidth_khz;
 
                reg_rule = &regdom->reg_rules[i];
                freq_range = &reg_rule->freq_range;
@@ -5095,6 +5129,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
                if (!nl_reg_rule)
                        goto nla_put_failure_rcu;
 
+               max_bandwidth_khz = freq_range->max_bandwidth_khz;
+               if (!max_bandwidth_khz)
+                       max_bandwidth_khz = reg_get_max_bandwidth(regdom,
+                                                                 reg_rule);
+
                if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
                                reg_rule->flags) ||
                    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
@@ -5102,11 +5141,13 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
                    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
                                freq_range->end_freq_khz) ||
                    nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
-                               freq_range->max_bandwidth_khz) ||
+                               max_bandwidth_khz) ||
                    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
                                power_rule->max_antenna_gain) ||
                    nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
-                               power_rule->max_eirp))
+                               power_rule->max_eirp) ||
+                   nla_put_u32(msg, NL80211_ATTR_DFS_CAC_TIME,
+                               reg_rule->dfs_cac_ms))
                        goto nla_put_failure_rcu;
 
                nla_nest_end(msg, nl_reg_rule);
@@ -5178,9 +5219,11 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
 
        nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
                            rem_reg_rules) {
-               nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
-                         nla_data(nl_reg_rule), nla_len(nl_reg_rule),
-                         reg_rule_policy);
+               r = nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
+                             nla_data(nl_reg_rule), nla_len(nl_reg_rule),
+                             reg_rule_policy);
+               if (r)
+                       goto bad_reg;
                r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
                if (r)
                        goto bad_reg;
@@ -5443,6 +5486,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        enum ieee80211_band band;
        size_t ie_len;
        struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
+       s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
 
        if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
            !rdev->ops->sched_scan_start)
@@ -5477,11 +5521,40 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
        if (n_ssids > wiphy->max_sched_scan_ssids)
                return -EINVAL;
 
-       if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+       /*
+        * First, count the number of 'real' matchsets. Due to an issue with
+        * the old implementation, matchsets containing only the RSSI attribute
+        * (NL80211_SCHED_SCAN_MATCH_ATTR_RSSI) are considered as the 'default'
+        * RSSI for all matchsets, rather than their own matchset for reporting
+        * all APs with a strong RSSI. This is needed for compatibility with
+        * older userspace that treated a matchset with only the RSSI as the
+        * global RSSI for all other matchsets - if there are other matchsets.
+        */
+       if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
                nla_for_each_nested(attr,
                                    info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
-                                   tmp)
-                       n_match_sets++;
+                                   tmp) {
+                       struct nlattr *rssi;
+
+                       err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+                                       nla_data(attr), nla_len(attr),
+                                       nl80211_match_policy);
+                       if (err)
+                               return err;
+                       /* add other standalone attributes here */
+                       if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) {
+                               n_match_sets++;
+                               continue;
+                       }
+                       rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+                       if (rssi)
+                               default_match_rssi = nla_get_s32(rssi);
+               }
+       }
+
+       /* However, if there's no other matchset, add the RSSI one */
+       if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF)
+               n_match_sets = 1;
 
        if (n_match_sets > wiphy->max_match_sets)
                return -EINVAL;
@@ -5602,11 +5675,22 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
                                    tmp) {
                        struct nlattr *ssid, *rssi;
 
-                       nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
-                                 nla_data(attr), nla_len(attr),
-                                 nl80211_match_policy);
+                       err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+                                       nla_data(attr), nla_len(attr),
+                                       nl80211_match_policy);
+                       if (err)
+                               goto out_free;
                        ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
                        if (ssid) {
+                               if (WARN_ON(i >= n_match_sets)) {
+                                       /* this indicates a programming error,
+                                        * the loop above should have verified
+                                        * things properly
+                                        */
+                                       err = -EINVAL;
+                                       goto out_free;
+                               }
+
                                if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
                                        err = -EINVAL;
                                        goto out_free;
@@ -5615,19 +5699,32 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
                                       nla_data(ssid), nla_len(ssid));
                                request->match_sets[i].ssid.ssid_len =
                                        nla_len(ssid);
+                               /* special attribute - old implementation workaround */
+                               request->match_sets[i].rssi_thold =
+                                       default_match_rssi;
+                               rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+                               if (rssi)
+                                       request->match_sets[i].rssi_thold =
+                                               nla_get_s32(rssi);
                        }
-                       rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
-                       if (rssi)
-                               request->rssi_thold = nla_get_u32(rssi);
-                       else
-                               request->rssi_thold =
-                                                  NL80211_SCAN_RSSI_THOLD_OFF;
                        i++;
                }
+
+               /* there was no other matchset, so the RSSI one is alone */
+               if (i == 0)
+                       request->match_sets[0].rssi_thold = default_match_rssi;
+
+               request->min_rssi_thold = INT_MAX;
+               for (i = 0; i < n_match_sets; i++)
+                       request->min_rssi_thold =
+                               min(request->match_sets[i].rssi_thold,
+                                   request->min_rssi_thold);
+       } else {
+               request->min_rssi_thold = NL80211_SCAN_RSSI_THOLD_OFF;
        }
 
-       if (info->attrs[NL80211_ATTR_IE]) {
-               request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+       if (ie_len) {
+               request->ie_len = ie_len;
                memcpy((void *)request->ie,
                       nla_data(info->attrs[NL80211_ATTR_IE]),
                       request->ie_len);
@@ -5682,6 +5779,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_chan_def chandef;
        enum nl80211_dfs_regions dfs_region;
+       unsigned int cac_time_ms;
        int err;
 
        dfs_region = reg_get_dfs_region(wdev->wiphy);
@@ -5717,11 +5815,17 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (err)
                return err;
 
-       err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
+       cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
+       if (WARN_ON(!cac_time_ms))
+               cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+
+       err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef,
+                                              cac_time_ms);
        if (!err) {
-               wdev->channel = chandef.chan;
+               wdev->chandef = chandef;
                wdev->cac_started = true;
                wdev->cac_start_time = jiffies;
+               wdev->cac_time_ms = cac_time_ms;
        }
        return err;
 }
@@ -5751,10 +5855,15 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 
                /* useless if AP is not running */
                if (!wdev->beacon_interval)
-                       return -EINVAL;
+                       return -ENOTCONN;
                break;
        case NL80211_IFTYPE_ADHOC:
+               if (!wdev->ssid_len)
+                       return -ENOTCONN;
+               break;
        case NL80211_IFTYPE_MESH_POINT:
+               if (!wdev->mesh_id_len)
+                       return -ENOTCONN;
                break;
        default:
                return -EOPNOTSUPP;
@@ -5822,17 +5931,22 @@ skip_beacons:
        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
                return -EINVAL;
 
-       if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP ||
-           dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO ||
-           dev->ieee80211_ptr->iftype == NL80211_IFTYPE_ADHOC) {
+       switch (dev->ieee80211_ptr->iftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_ADHOC:
+       case NL80211_IFTYPE_MESH_POINT:
                err = cfg80211_chandef_dfs_required(wdev->wiphy,
                                                    &params.chandef);
-               if (err < 0) {
+               if (err < 0)
                        return err;
-               } else if (err) {
+               if (err) {
                        radar_detect_width = BIT(params.chandef.width);
                        params.radar_required = true;
                }
+               break;
+       default:
+               break;
        }
 
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
@@ -6192,9 +6306,9 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
-       chan = ieee80211_get_channel(&rdev->wiphy,
-               nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
-       if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
+       chan = nl80211_get_valid_chan(&rdev->wiphy,
+                                     info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+       if (!chan)
                return -EINVAL;
 
        ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -6347,9 +6461,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 
        bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
 
-       chan = ieee80211_get_channel(&rdev->wiphy,
-               nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
-       if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
+       chan = nl80211_get_valid_chan(&rdev->wiphy,
+                                     info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+       if (!chan)
                return -EINVAL;
 
        ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -6985,6 +7099,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 
        if (info->attrs[NL80211_ATTR_MAC])
                connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+       else if (info->attrs[NL80211_ATTR_MAC_HINT])
+               connect.bssid_hint =
+                       nla_data(info->attrs[NL80211_ATTR_MAC_HINT]);
        connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
        connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
 
@@ -7003,11 +7120,14 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
-               connect.channel =
-                       ieee80211_get_channel(wiphy,
-                           nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
-               if (!connect.channel ||
-                   connect.channel->flags & IEEE80211_CHAN_DISABLED)
+               connect.channel = nl80211_get_valid_chan(
+                       wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+               if (!connect.channel)
+                       return -EINVAL;
+       } else if (info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]) {
+               connect.channel_hint = nl80211_get_valid_chan(
+                       wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]);
+               if (!connect.channel_hint)
                        return -EINVAL;
        }
 
@@ -7174,6 +7294,7 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
        struct cfg80211_registered_device *rdev = info->user_ptr[0];
        struct net_device *dev = info->user_ptr[1];
        u8 action_code, dialog_token;
+       u32 peer_capability = 0;
        u16 status_code;
        u8 *peer;
 
@@ -7192,9 +7313,12 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
        action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
        status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
        dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+       if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY])
+               peer_capability =
+                       nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]);
 
        return rdev_tdls_mgmt(rdev, dev, peer, action_code,
-                             dialog_token, status_code,
+                             dialog_token, status_code, peer_capability,
                              nla_data(info->attrs[NL80211_ATTR_IE]),
                              nla_len(info->attrs[NL80211_ATTR_IE]));
 }
@@ -7421,6 +7545,7 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
        [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
                                .len = NL80211_MAX_SUPP_HT_RATES },
        [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+       [NL80211_TXRATE_GI] = { .type = NLA_U8 },
 };
 
 static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
@@ -7467,16 +7592,19 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
         * directly to the enum ieee80211_band values used in cfg80211.
         */
        BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
-       nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
-       {
+       nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
                enum ieee80211_band band = nla_type(tx_rates);
+               int err;
+
                if (band < 0 || band >= IEEE80211_NUM_BANDS)
                        return -EINVAL;
                sband = rdev->wiphy.bands[band];
                if (sband == NULL)
                        return -EINVAL;
-               nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
-                         nla_len(tx_rates), nl80211_txattr_policy);
+               err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+                               nla_len(tx_rates), nl80211_txattr_policy);
+               if (err)
+                       return err;
                if (tb[NL80211_TXRATE_LEGACY]) {
                        mask.control[band].legacy = rateset_to_mask(
                                sband,
@@ -7501,6 +7629,12 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
                                        mask.control[band].vht_mcs))
                                return -EINVAL;
                }
+               if (tb[NL80211_TXRATE_GI]) {
+                       mask.control[band].gi =
+                               nla_get_u8(tb[NL80211_TXRATE_GI]);
+                       if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
+                               return -EINVAL;
+               }
 
                if (mask.control[band].legacy == 0) {
                        /* don't allow empty legacy rates if HT or VHT
@@ -7777,8 +7911,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
        return err;
 }
 
-static struct nla_policy
-nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
+static const struct nla_policy
+nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = {
        [NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
        [NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
        [NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
@@ -11107,7 +11241,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
                    wdev->iftype != NL80211_IFTYPE_MESH_POINT))
                return;
 
-       wdev->channel = chandef->chan;
+       wdev->chandef = *chandef;
+       wdev->preset_chandef = *chandef;
        nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
 }
 EXPORT_SYMBOL(cfg80211_ch_switch_notify);
@@ -11621,6 +11756,35 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
 }
 EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
 
+void nl80211_send_ap_stopped(struct wireless_dev *wdev)
+{
+       struct wiphy *wiphy = wdev->wiphy;
+       struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return;
+
+       hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STOP_AP);
+       if (!hdr)
+               goto out;
+
+       if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+           nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
+           nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+               goto out;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0,
+                               NL80211_MCGRP_MLME, GFP_KERNEL);
+       return;
+ out:
+       nlmsg_free(msg);
+}
+
 /* initialisation/exit functions */
 
 int nl80211_init(void)
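
Several of the nl80211.c hunks above share one pattern: nla_parse() on a nested attribute is no longer called with its return value ignored, so a malformed nest now fails the request instead of continuing silently. A generic sketch of that checked-parse pattern; maxattr, nested and policy are placeholders rather than a specific nl80211 attribute space:

/* Generic checked nested-parse helper, illustration only. */
static int example_parse_nested(struct nlattr **tb, int maxattr,
                                const struct nlattr *nested,
                                const struct nla_policy *policy)
{
        int err;

        err = nla_parse(tb, maxattr, nla_data(nested), nla_len(nested),
                        policy);
        if (err)
                return err;     /* propagate malformed-attribute errors */

        /* tb[] is now safe to use, as in the hunks above */
        return 0;
}
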
index 75799746d845f6fea6dcfa6b6f0b84bdc20aa14e..1e6df9630f42f11f815578a4bbc73fc7b037017d 100644 (file)
@@ -74,6 +74,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
                     enum nl80211_radar_event event,
                     struct net_device *netdev, gfp_t gfp);
 
+void nl80211_send_ap_stopped(struct wireless_dev *wdev);
+
 void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
 
 #endif /* __NET_WIRELESS_NL80211_H */
index c8e225947adb2601d31dbb0f4a1aa5a4392d8bc5..74d97d33c938e8250ef300c2c3fb82d2a39b63b6 100644 (file)
@@ -769,13 +769,16 @@ static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev,
 static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev,
                                 struct net_device *dev, u8 *peer,
                                 u8 action_code, u8 dialog_token,
-                                u16 status_code, const u8 *buf, size_t len)
+                                u16 status_code, u32 peer_capability,
+                                const u8 *buf, size_t len)
 {
        int ret;
        trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
-                            dialog_token, status_code, buf, len);
+                            dialog_token, status_code, peer_capability,
+                            buf, len);
        ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
-                                  dialog_token, status_code, buf, len);
+                                  dialog_token, status_code, peer_capability,
+                                  buf, len);
        trace_rdev_return_int(&rdev->wiphy, ret);
        return ret;
 }
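
rdev_tdls_mgmt() now forwards a 32-bit peer_capability value (taken from NL80211_ATTR_TDLS_PEER_CAPABILITY in nl80211_tdls_mgmt() above, defaulting to 0) to the driver, so the driver's tdls_mgmt op gains the same parameter. A minimal stub with the updated parameter list; the name and body are placeholders, not a real driver:

/* Hypothetical driver op stub matching the rdev_tdls_mgmt() call above. */
static int example_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
                             u8 *peer, u8 action_code, u8 dialog_token,
                             u16 status_code, u32 peer_capability,
                             const u8 *buf, size_t len)
{
        /* peer_capability is 0 when userspace did not supply the attribute */
        return -EOPNOTSUPP;
}
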
index f0541370e68eb9071caa429772a0a55d952c27d4..f59aaac586f8cf10905135324c3913646910a662 100644 (file)
@@ -91,10 +91,6 @@ static struct regulatory_request __rcu *last_request =
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
 
-static struct device_type reg_device_type = {
-       .uevent = reg_device_uevent,
-};
-
 /*
  * Central wireless core regulatory domains, we only need two,
  * the current one and a world regulatory domain in case we have no
@@ -244,19 +240,21 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reg_kfree_last_request(void)
+static void reg_free_request(struct regulatory_request *lr)
 {
-       struct regulatory_request *lr;
-
-       lr = get_last_request();
-
        if (lr != &core_request_world && lr)
                kfree_rcu(lr, rcu_head);
 }
 
 static void reg_update_last_request(struct regulatory_request *request)
 {
-       reg_kfree_last_request();
+       struct regulatory_request *lr;
+
+       lr = get_last_request();
+       if (lr == request)
+               return;
+
+       reg_free_request(lr);
        rcu_assign_pointer(last_request, request);
 }
 
@@ -487,11 +485,16 @@ static inline void reg_regdb_query(const char *alpha2) {}
 
 /*
  * This lets us keep regulatory code which is updated on a regulatory
- * basis in userspace. Country information is filled in by
- * reg_device_uevent
+ * basis in userspace.
  */
 static int call_crda(const char *alpha2)
 {
+       char country[12];
+       char *env[] = { country, NULL };
+
+       snprintf(country, sizeof(country), "COUNTRY=%c%c",
+                alpha2[0], alpha2[1]);
+
        if (!is_world_regdom((char *) alpha2))
                pr_info("Calling CRDA for country: %c%c\n",
                        alpha2[0], alpha2[1]);
@@ -501,7 +504,7 @@ static int call_crda(const char *alpha2)
        /* query internal regulatory database (if it exists) */
        reg_regdb_query(alpha2);
 
-       return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE);
+       return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
 }
 
 static enum reg_request_treatment
@@ -522,6 +525,71 @@ bool reg_is_valid_request(const char *alpha2)
        return alpha2_equal(lr->alpha2, alpha2);
 }
 
+static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy)
+{
+       struct regulatory_request *lr = get_last_request();
+
+       /*
+        * Follow the driver's regulatory domain, if present, unless a country
+        * IE has been processed or a user wants to help compliance further
+        */
+       if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+           lr->initiator != NL80211_REGDOM_SET_BY_USER &&
+           wiphy->regd)
+               return get_wiphy_regdom(wiphy);
+
+       return get_cfg80211_regdom();
+}
+
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+                                  const struct ieee80211_reg_rule *rule)
+{
+       const struct ieee80211_freq_range *freq_range = &rule->freq_range;
+       const struct ieee80211_freq_range *freq_range_tmp;
+       const struct ieee80211_reg_rule *tmp;
+       u32 start_freq, end_freq, idx, no;
+
+       for (idx = 0; idx < rd->n_reg_rules; idx++)
+               if (rule == &rd->reg_rules[idx])
+                       break;
+
+       if (idx == rd->n_reg_rules)
+               return 0;
+
+       /* get start_freq */
+       no = idx;
+
+       while (no) {
+               tmp = &rd->reg_rules[--no];
+               freq_range_tmp = &tmp->freq_range;
+
+               if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz)
+                       break;
+
+               freq_range = freq_range_tmp;
+       }
+
+       start_freq = freq_range->start_freq_khz;
+
+       /* get end_freq */
+       freq_range = &rule->freq_range;
+       no = idx;
+
+       while (no < rd->n_reg_rules - 1) {
+               tmp = &rd->reg_rules[++no];
+               freq_range_tmp = &tmp->freq_range;
+
+               if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz)
+                       break;
+
+               freq_range = freq_range_tmp;
+       }
+
+       end_freq = freq_range->end_freq_khz;
+
+       return end_freq - start_freq;
+}
+
 /* Sanity check on a regulatory rule */
 static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
 {
@@ -630,7 +698,9 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
  */
-static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
+static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
+                              const struct ieee80211_regdomain *rd2,
+                              const struct ieee80211_reg_rule *rule1,
                               const struct ieee80211_reg_rule *rule2,
                               struct ieee80211_reg_rule *intersected_rule)
 {
@@ -638,7 +708,7 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
        struct ieee80211_freq_range *freq_range;
        const struct ieee80211_power_rule *power_rule1, *power_rule2;
        struct ieee80211_power_rule *power_rule;
-       u32 freq_diff;
+       u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
        freq_range1 = &rule1->freq_range;
        freq_range2 = &rule2->freq_range;
@@ -652,8 +722,32 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
                                         freq_range2->start_freq_khz);
        freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
                                       freq_range2->end_freq_khz);
-       freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
-                                           freq_range2->max_bandwidth_khz);
+
+       max_bandwidth1 = freq_range1->max_bandwidth_khz;
+       max_bandwidth2 = freq_range2->max_bandwidth_khz;
+
+       if (rule1->flags & NL80211_RRF_AUTO_BW)
+               max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1);
+       if (rule2->flags & NL80211_RRF_AUTO_BW)
+               max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2);
+
+       freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2);
+
+       intersected_rule->flags = rule1->flags | rule2->flags;
+
+       /*
+        * If both rules request NL80211_RRF_AUTO_BW, keep AUTO_BW set in
+        * the intersected rule as well; the bandwidth will then be
+        * recalculated correctly later in handle_channel(). Otherwise
+        * clear the flag, since the maximum bandwidth has already been
+        * calculated here and no automatic calculation is needed.
+        */
+       if ((rule1->flags & NL80211_RRF_AUTO_BW) &&
+           (rule2->flags & NL80211_RRF_AUTO_BW))
+               intersected_rule->flags |= NL80211_RRF_AUTO_BW;
+       else
+               intersected_rule->flags &= ~NL80211_RRF_AUTO_BW;
 
        freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
        if (freq_range->max_bandwidth_khz > freq_diff)
@@ -664,7 +758,8 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
        power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain,
                power_rule2->max_antenna_gain);
 
-       intersected_rule->flags = rule1->flags | rule2->flags;
+       intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
+                                          rule2->dfs_cac_ms);
 
        if (!is_valid_reg_rule(intersected_rule))
                return -EINVAL;
@@ -713,7 +808,8 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
                rule1 = &rd1->reg_rules[x];
                for (y = 0; y < rd2->n_reg_rules; y++) {
                        rule2 = &rd2->reg_rules[y];
-                       if (!reg_rules_intersect(rule1, rule2, &dummy_rule))
+                       if (!reg_rules_intersect(rd1, rd2, rule1, rule2,
+                                                &dummy_rule))
                                num_rules++;
                }
        }
@@ -738,7 +834,8 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
                         * a memcpy()
                         */
                        intersected_rule = &rd->reg_rules[rule_idx];
-                       r = reg_rules_intersect(rule1, rule2, intersected_rule);
+                       r = reg_rules_intersect(rd1, rd2, rule1, rule2,
+                                               intersected_rule);
                        /*
                         * No need to memset here the intersected rule here as
                         * we're not using the stack anymore
@@ -821,18 +918,8 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
                                               u32 center_freq)
 {
        const struct ieee80211_regdomain *regd;
-       struct regulatory_request *lr = get_last_request();
 
-       /*
-        * Follow the driver's regulatory domain, if present, unless a country
-        * IE has been processed or a user wants to help complaince further
-        */
-       if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
-           lr->initiator != NL80211_REGDOM_SET_BY_USER &&
-           wiphy->regd)
-               regd = get_wiphy_regdom(wiphy);
-       else
-               regd = get_cfg80211_regdom();
+       regd = reg_get_regdomain(wiphy);
 
        return freq_reg_info_regd(wiphy, center_freq, regd);
 }
@@ -857,31 +944,42 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
 EXPORT_SYMBOL(reg_initiator_name);
 
 #ifdef CONFIG_CFG80211_REG_DEBUG
-static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
+static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
+                                   struct ieee80211_channel *chan,
                                    const struct ieee80211_reg_rule *reg_rule)
 {
        const struct ieee80211_power_rule *power_rule;
        const struct ieee80211_freq_range *freq_range;
-       char max_antenna_gain[32];
+       char max_antenna_gain[32], bw[32];
 
        power_rule = &reg_rule->power_rule;
        freq_range = &reg_rule->freq_range;
 
        if (!power_rule->max_antenna_gain)
-               snprintf(max_antenna_gain, 32, "N/A");
+               snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
        else
-               snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
+               snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d",
+                        power_rule->max_antenna_gain);
+
+       if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+               snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
+                        freq_range->max_bandwidth_khz,
+                        reg_get_max_bandwidth(regd, reg_rule));
+       else
+               snprintf(bw, sizeof(bw), "%d KHz",
+                        freq_range->max_bandwidth_khz);
 
        REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
                      chan->center_freq);
 
-       REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
+       REG_DBG_PRINT("%d KHz - %d KHz @ %s), (%s mBi, %d mBm)\n",
                      freq_range->start_freq_khz, freq_range->end_freq_khz,
-                     freq_range->max_bandwidth_khz, max_antenna_gain,
+                     bw, max_antenna_gain,
                      power_rule->max_eirp);
 }
 #else
-static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
+static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
+                                   struct ieee80211_channel *chan,
                                    const struct ieee80211_reg_rule *reg_rule)
 {
        return;
@@ -903,6 +1001,8 @@ static void handle_channel(struct wiphy *wiphy,
        const struct ieee80211_freq_range *freq_range = NULL;
        struct wiphy *request_wiphy = NULL;
        struct regulatory_request *lr = get_last_request();
+       const struct ieee80211_regdomain *regd;
+       u32 max_bandwidth_khz;
 
        request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
 
@@ -939,16 +1039,22 @@ static void handle_channel(struct wiphy *wiphy,
                return;
        }
 
-       chan_reg_rule_print_dbg(chan, reg_rule);
+       regd = reg_get_regdomain(wiphy);
+       chan_reg_rule_print_dbg(regd, chan, reg_rule);
 
        power_rule = &reg_rule->power_rule;
        freq_range = &reg_rule->freq_range;
 
-       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
+       max_bandwidth_khz = freq_range->max_bandwidth_khz;
+       /* Check if auto calculation requested */
+       if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+               max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+
+       if (max_bandwidth_khz < MHZ_TO_KHZ(40))
                bw_flags = IEEE80211_CHAN_NO_HT40;
-       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+       if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
-       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+       if (max_bandwidth_khz < MHZ_TO_KHZ(160))
                bw_flags |= IEEE80211_CHAN_NO_160MHZ;
 
        if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
@@ -977,6 +1083,14 @@ static void handle_channel(struct wiphy *wiphy,
                min_t(int, chan->orig_mag,
                      MBI_TO_DBI(power_rule->max_antenna_gain));
        chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
+
+       if (chan->flags & IEEE80211_CHAN_RADAR) {
+               if (reg_rule->dfs_cac_ms)
+                       chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
+               else
+                       chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+       }
+
        if (chan->orig_mpwr) {
                /*
                 * Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER
@@ -1334,6 +1448,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
        const struct ieee80211_reg_rule *reg_rule = NULL;
        const struct ieee80211_power_rule *power_rule = NULL;
        const struct ieee80211_freq_range *freq_range = NULL;
+       u32 max_bandwidth_khz;
 
        reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
                                      regd);
@@ -1346,16 +1461,21 @@ static void handle_channel_custom(struct wiphy *wiphy,
                return;
        }
 
-       chan_reg_rule_print_dbg(chan, reg_rule);
+       chan_reg_rule_print_dbg(regd, chan, reg_rule);
 
        power_rule = &reg_rule->power_rule;
        freq_range = &reg_rule->freq_range;
 
-       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
+       max_bandwidth_khz = freq_range->max_bandwidth_khz;
+       /* Check if auto calculation requested */
+       if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+               max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+
+       if (max_bandwidth_khz < MHZ_TO_KHZ(40))
                bw_flags = IEEE80211_CHAN_NO_HT40;
-       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+       if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
-       if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+       if (max_bandwidth_khz < MHZ_TO_KHZ(160))
                bw_flags |= IEEE80211_CHAN_NO_160MHZ;
 
        chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
@@ -1683,17 +1803,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
        struct wiphy *wiphy = NULL;
        enum reg_request_treatment treatment;
 
-       if (WARN_ON(!reg_request->alpha2))
-               return;
-
        if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
                wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
 
-       if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && !wiphy) {
-               kfree(reg_request);
-               return;
-       }
-
        switch (reg_request->initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
                reg_process_hint_core(reg_request);
@@ -1703,23 +1815,33 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                if (treatment == REG_REQ_IGNORE ||
                    treatment == REG_REQ_ALREADY_SET)
                        return;
-               schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
+               queue_delayed_work(system_power_efficient_wq,
+                                  &reg_timeout, msecs_to_jiffies(3142));
                return;
        case NL80211_REGDOM_SET_BY_DRIVER:
+               if (!wiphy)
+                       goto out_free;
                treatment = reg_process_hint_driver(wiphy, reg_request);
                break;
        case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+               if (!wiphy)
+                       goto out_free;
                treatment = reg_process_hint_country_ie(wiphy, reg_request);
                break;
        default:
                WARN(1, "invalid initiator %d\n", reg_request->initiator);
-               return;
+               goto out_free;
        }
 
        /* This is required so that the orig_* parameters are saved */
        if (treatment == REG_REQ_ALREADY_SET && wiphy &&
            wiphy->regulatory_flags & REGULATORY_STRICT_REG)
                wiphy_update_regulatory(wiphy, reg_request->initiator);
+
+       return;
+
+out_free:
+       kfree(reg_request);
 }
 
 /*
@@ -2147,31 +2269,49 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
        const struct ieee80211_reg_rule *reg_rule = NULL;
        const struct ieee80211_freq_range *freq_range = NULL;
        const struct ieee80211_power_rule *power_rule = NULL;
+       char bw[32], cac_time[32];
 
-       pr_info("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
+       pr_info("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n");
 
        for (i = 0; i < rd->n_reg_rules; i++) {
                reg_rule = &rd->reg_rules[i];
                freq_range = &reg_rule->freq_range;
                power_rule = &reg_rule->power_rule;
 
+               if (reg_rule->flags & NL80211_RRF_AUTO_BW)
+                       snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
+                                freq_range->max_bandwidth_khz,
+                                reg_get_max_bandwidth(rd, reg_rule));
+               else
+                       snprintf(bw, sizeof(bw), "%d KHz",
+                                freq_range->max_bandwidth_khz);
+
+               if (reg_rule->flags & NL80211_RRF_DFS)
+                       scnprintf(cac_time, sizeof(cac_time), "%u s",
+                                 reg_rule->dfs_cac_ms/1000);
+               else
+                       scnprintf(cac_time, sizeof(cac_time), "N/A");
+
                /*
                 * There may not be documentation for max antenna gain
                 * in certain regions
                 */
                if (power_rule->max_antenna_gain)
-                       pr_info("  (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
+                       pr_info("  (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n",
                                freq_range->start_freq_khz,
                                freq_range->end_freq_khz,
-                               freq_range->max_bandwidth_khz,
+                               bw,
                                power_rule->max_antenna_gain,
-                               power_rule->max_eirp);
+                               power_rule->max_eirp,
+                               cac_time);
                else
-                       pr_info("  (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
+                       pr_info("  (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n",
                                freq_range->start_freq_khz,
                                freq_range->end_freq_khz,
-                               freq_range->max_bandwidth_khz,
-                               power_rule->max_eirp);
+                               bw,
+                               power_rule->max_eirp,
+                               cac_time);
        }
 }
 
@@ -2244,9 +2384,6 @@ static int reg_set_rd_user(const struct ieee80211_regdomain *rd,
 {
        const struct ieee80211_regdomain *intersected_rd = NULL;
 
-       if (is_world_regdom(rd->alpha2))
-               return -EINVAL;
-
        if (!regdom_changes(rd->alpha2))
                return -EALREADY;
 
@@ -2294,7 +2431,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
 
        request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
        if (!request_wiphy) {
-               schedule_delayed_work(&reg_timeout, 0);
+               queue_delayed_work(system_power_efficient_wq,
+                                  &reg_timeout, 0);
                return -ENODEV;
        }
 
@@ -2354,7 +2492,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
 
        request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
        if (!request_wiphy) {
-               schedule_delayed_work(&reg_timeout, 0);
+               queue_delayed_work(system_power_efficient_wq,
+                                  &reg_timeout, 0);
                return -ENODEV;
        }
 
@@ -2433,26 +2572,6 @@ int set_regdom(const struct ieee80211_regdomain *rd)
        return 0;
 }
 
-int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct regulatory_request *lr;
-       u8 alpha2[2];
-       bool add = false;
-
-       rcu_read_lock();
-       lr = get_last_request();
-       if (lr && !lr->processed) {
-               memcpy(alpha2, lr->alpha2, 2);
-               add = true;
-       }
-       rcu_read_unlock();
-
-       if (add)
-               return add_uevent_var(env, "COUNTRY=%c%c",
-                                     alpha2[0], alpha2[1]);
-       return 0;
-}
-
 void wiphy_regulatory_register(struct wiphy *wiphy)
 {
        struct regulatory_request *lr;
@@ -2503,8 +2622,6 @@ int __init regulatory_init(void)
        if (IS_ERR(reg_pdev))
                return PTR_ERR(reg_pdev);
 
-       reg_pdev->dev.type = &reg_device_type;
-
        spin_lock_init(&reg_requests_lock);
        spin_lock_init(&reg_pending_beacons_lock);
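
The handle_channel() and print_rd_rules() hunks above call reg_get_max_bandwidth() whenever a rule carries NL80211_RRF_AUTO_BW; its body is added earlier in reg.c and is not part of the hunks shown here. Below is a minimal sketch, under the assumption that the helper widens a rule's usable bandwidth across frequency ranges that are contiguous with it in the same regdomain; the name sketch_auto_bw_khz and the loop structure are illustrative only, not the patch's implementation.

static unsigned int sketch_auto_bw_khz(const struct ieee80211_regdomain *rd,
                                       const struct ieee80211_reg_rule *rule)
{
        u32 start = rule->freq_range.start_freq_khz;
        u32 end = rule->freq_range.end_freq_khz;
        unsigned int i;
        bool grew;

        /* Grow the span while some other rule touches either edge. */
        do {
                grew = false;
                for (i = 0; i < rd->n_reg_rules; i++) {
                        const struct ieee80211_freq_range *fr =
                                &rd->reg_rules[i].freq_range;

                        if (fr->end_freq_khz == start &&
                            fr->start_freq_khz < start) {
                                start = fr->start_freq_khz;
                                grew = true;
                        } else if (fr->start_freq_khz == end &&
                                   fr->end_freq_khz > end) {
                                end = fr->end_freq_khz;
                                grew = true;
                        }
                }
        } while (grew);

        return end - start;
}
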
 
index 02bd8f4b0921dafca98c4e1bc23b729045b036db..37c180df34b72a1195aacb6d72b7b07ddc44a9ef 100644 (file)
@@ -26,7 +26,6 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy);
 int regulatory_hint_user(const char *alpha2,
                         enum nl80211_user_reg_hint_type user_reg_hint_type);
 
-int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 void wiphy_regulatory_register(struct wiphy *wiphy);
 void wiphy_regulatory_deregister(struct wiphy *wiphy);
 
@@ -34,6 +33,8 @@ int __init regulatory_init(void);
 void regulatory_exit(void);
 
 int set_regdom(const struct ieee80211_regdomain *rd);
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+                                  const struct ieee80211_reg_rule *rule);
 
 bool reg_last_request_cell_base(void);
 
index d1ed4aebbbb7dcc6dca3fccea4222e3eb7eb2fb0..7d09a712cb1f1353f13310f5c68b38e750d199a6 100644 (file)
@@ -659,9 +659,6 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
                        continue;
                if (ssidlen && ie[1] != ssidlen)
                        continue;
-               /* that would be odd ... */
-               if (bss->pub.beacon_ies)
-                       continue;
                if (WARN_ON_ONCE(bss->pub.hidden_beacon_bss))
                        continue;
                if (WARN_ON_ONCE(!list_empty(&bss->hidden_list)))
@@ -680,7 +677,8 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_internal_bss *
 cfg80211_bss_update(struct cfg80211_registered_device *dev,
-                   struct cfg80211_internal_bss *tmp)
+                   struct cfg80211_internal_bss *tmp,
+                   bool signal_valid)
 {
        struct cfg80211_internal_bss *found = NULL;
 
@@ -765,7 +763,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                }
 
                found->pub.beacon_interval = tmp->pub.beacon_interval;
-               found->pub.signal = tmp->pub.signal;
+               /*
+                * Don't update the signal if the beacon was heard on
+                * an adjacent channel.
+                */
+               if (signal_valid)
+                       found->pub.signal = tmp->pub.signal;
                found->pub.capability = tmp->pub.capability;
                found->ts = tmp->ts;
        } else {
@@ -869,13 +872,14 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss*
 cfg80211_inform_bss_width(struct wiphy *wiphy,
-                         struct ieee80211_channel *channel,
+                         struct ieee80211_channel *rx_channel,
                          enum nl80211_bss_scan_width scan_width,
                          const u8 *bssid, u64 tsf, u16 capability,
                          u16 beacon_interval, const u8 *ie, size_t ielen,
                          s32 signal, gfp_t gfp)
 {
        struct cfg80211_bss_ies *ies;
+       struct ieee80211_channel *channel;
        struct cfg80211_internal_bss tmp = {}, *res;
 
        if (WARN_ON(!wiphy))
@@ -885,7 +889,7 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
                        (signal < 0 || signal > 100)))
                return NULL;
 
-       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, channel);
+       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, rx_channel);
        if (!channel)
                return NULL;
 
@@ -913,7 +917,8 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
        rcu_assign_pointer(tmp.pub.beacon_ies, ies);
        rcu_assign_pointer(tmp.pub.ies, ies);
 
-       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp);
+       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp,
+                                 rx_channel == channel);
        if (!res)
                return NULL;
 
@@ -929,20 +934,21 @@ EXPORT_SYMBOL(cfg80211_inform_bss_width);
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 struct cfg80211_bss *
 cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
-                               struct ieee80211_channel *channel,
+                               struct ieee80211_channel *rx_channel,
                                enum nl80211_bss_scan_width scan_width,
                                struct ieee80211_mgmt *mgmt, size_t len,
                                s32 signal, gfp_t gfp)
 {
        struct cfg80211_internal_bss tmp = {}, *res;
        struct cfg80211_bss_ies *ies;
+       struct ieee80211_channel *channel;
        size_t ielen = len - offsetof(struct ieee80211_mgmt,
                                      u.probe_resp.variable);
 
        BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
                        offsetof(struct ieee80211_mgmt, u.beacon.variable));
 
-       trace_cfg80211_inform_bss_width_frame(wiphy, channel, scan_width, mgmt,
+       trace_cfg80211_inform_bss_width_frame(wiphy, rx_channel, scan_width, mgmt,
                                              len, signal);
 
        if (WARN_ON(!mgmt))
@@ -959,7 +965,7 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
                return NULL;
 
        channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-                                          ielen, channel);
+                                          ielen, rx_channel);
        if (!channel)
                return NULL;
 
@@ -983,7 +989,8 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
        tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
        tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
 
-       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp);
+       res = cfg80211_bss_update(wiphy_to_dev(wiphy), &tmp,
+                                 rx_channel == channel);
        if (!res)
                return NULL;
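
The scan.c changes distinguish the channel a frame was actually received on (rx_channel) from the channel cfg80211_get_bss_channel() derives from the frame's IEs, and only let the reported signal overwrite the stored one when the two match. A caller-side sketch follows; struct my_hw, rx_status_channel, rx_signal, mgmt and frame_len are hypothetical driver names, not part of this patch.

static void sketch_report_beacon(struct my_hw *my_hw,
                                 struct ieee80211_channel *rx_status_channel,
                                 struct ieee80211_mgmt *mgmt, size_t frame_len,
                                 s32 rx_signal)
{
        struct cfg80211_bss *bss;

        /* Pass the channel the frame was heard on; cfg80211 may attach the
         * BSS to the channel advertised in the IEs instead, and in that case
         * it keeps the previously stored signal value.
         */
        bss = cfg80211_inform_bss_width_frame(my_hw->wiphy, rx_status_channel,
                                              NL80211_BSS_CHAN_WIDTH_20,
                                              mgmt, frame_len,
                                              rx_signal, GFP_ATOMIC);
        if (bss)
                cfg80211_put_bss(my_hw->wiphy, bss);
}
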
 
index f04d4c32e96e144d37b49de5d1be69add8a55b2c..acdcb4a81817b7c78e8e721ff632284b9b806fa9 100644 (file)
@@ -64,7 +64,6 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
        int n_channels, err;
 
        ASSERT_RTNL();
-       ASSERT_RDEV_LOCK(rdev);
        ASSERT_WDEV_LOCK(wdev);
 
        if (rdev->scan_req || rdev->scan_msg)
index fbcc23edee5474459950b8566ecffd541ad4ed7d..aabccf13e07b6860ef92ddc637a7879a8f961aab 100644 (file)
@@ -1468,9 +1468,10 @@ TRACE_EVENT(rdev_sched_scan_start,
 TRACE_EVENT(rdev_tdls_mgmt,
        TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
                 u8 *peer, u8 action_code, u8 dialog_token,
-                u16 status_code, const u8 *buf, size_t len),
+                u16 status_code, u32 peer_capability,
+                const u8 *buf, size_t len),
        TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
-               buf, len),
+               peer_capability, buf, len),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                NETDEV_ENTRY
@@ -1478,6 +1479,7 @@ TRACE_EVENT(rdev_tdls_mgmt,
                __field(u8, action_code)
                __field(u8, dialog_token)
                __field(u16, status_code)
+               __field(u32, peer_capability)
                __dynamic_array(u8, buf, len)
        ),
        TP_fast_assign(
@@ -1487,13 +1489,15 @@ TRACE_EVENT(rdev_tdls_mgmt,
                __entry->action_code = action_code;
                __entry->dialog_token = dialog_token;
                __entry->status_code = status_code;
+               __entry->peer_capability = peer_capability;
                memcpy(__get_dynamic_array(buf), buf, len);
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", action_code: %u, "
-                 "dialog_token: %u, status_code: %u, buf: %#.2x ",
+                 "dialog_token: %u, status_code: %u, peer_capability: %u buf: %#.2x ",
                  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
                  __entry->action_code, __entry->dialog_token,
-                 __entry->status_code, ((u8 *)__get_dynamic_array(buf))[0])
+                 __entry->status_code, __entry->peer_capability,
+                 ((u8 *)__get_dynamic_array(buf))[0])
 );
 
 TRACE_EVENT(rdev_dump_survey,
@@ -2278,11 +2282,6 @@ DECLARE_EVENT_CLASS(cfg80211_rx_evt,
        TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
 );
 
-DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
-       TP_PROTO(struct net_device *netdev, const u8 *addr),
-       TP_ARGS(netdev, addr)
-);
-
 DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
        TP_PROTO(struct net_device *netdev, const u8 *addr),
        TP_ARGS(netdev, addr)
@@ -2293,6 +2292,24 @@ DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
        TP_ARGS(netdev, addr)
 );
 
+TRACE_EVENT(cfg80211_ibss_joined,
+       TP_PROTO(struct net_device *netdev, const u8 *bssid,
+                struct ieee80211_channel *channel),
+       TP_ARGS(netdev, bssid, channel),
+       TP_STRUCT__entry(
+               NETDEV_ENTRY
+               MAC_ENTRY(bssid)
+               CHAN_ENTRY
+       ),
+       TP_fast_assign(
+               NETDEV_ASSIGN;
+               MAC_ASSIGN(bssid, bssid);
+               CHAN_ASSIGN(channel);
+       ),
+       TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT,
+                 NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+);
+
 TRACE_EVENT(cfg80211_probe_status,
        TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
                 bool acked),
index d39c37104ae2f125c5def9c943dc65ab8a669f5a..e5872ff2c27ca8989ca6da7cfdf4d7041c29a72e 100644 (file)
@@ -11,6 +11,7 @@
 #include <net/ip.h>
 #include <net/dsfield.h>
 #include <linux/if_vlan.h>
+#include <linux/mpls.h>
 #include "core.h"
 #include "rdev-ops.h"
 
@@ -717,6 +718,21 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
        case htons(ETH_P_IPV6):
                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
                break;
+       case htons(ETH_P_MPLS_UC):
+       case htons(ETH_P_MPLS_MC): {
+               struct mpls_label mpls_tmp, *mpls;
+
+               mpls = skb_header_pointer(skb, sizeof(struct ethhdr),
+                                         sizeof(*mpls), &mpls_tmp);
+               if (!mpls)
+                       return 0;
+
+               return (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
+                       >> MPLS_LS_TC_SHIFT;
+       }
+       case htons(ETH_P_80221):
+               /* 802.21 is always network control traffic */
+               return 7;
        default:
                return 0;
        }
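
The new cfg80211_classify8021d() cases map the MPLS Traffic Class bits directly to an 802.1d user priority and treat 802.21 frames as network control (priority 7). A small stand-alone sketch of the same mask-and-shift, assuming the standard label stack entry layout (label in bits 31:12, TC in 11:9, S in 8, TTL in 7:0); the constants are defined locally for illustration rather than quoted from linux/mpls.h.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define LS_TC_MASK      0x00000e00u
#define LS_TC_SHIFT     9

int main(void)
{
        /* label 100, TC 5, bottom-of-stack, TTL 64, in network byte order */
        uint32_t entry = htonl((100u << 12) | (5u << 9) | (1u << 8) | 64u);
        uint32_t up = (ntohl(entry) & LS_TC_MASK) >> LS_TC_SHIFT;

        printf("user priority = %u\n", up);     /* prints 5 */
        return 0;
}
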
@@ -820,7 +836,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
                                                ev->dc.reason, true);
                        break;
                case EVENT_IBSS_JOINED:
-                       __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
+                       __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
+                                              ev->ij.channel);
                        break;
                }
                wdev_unlock(wdev);
@@ -837,7 +854,6 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
        struct wireless_dev *wdev;
 
        ASSERT_RTNL();
-       ASSERT_RDEV_LOCK(rdev);
 
        list_for_each_entry(wdev, &rdev->wdev_list, list)
                cfg80211_process_wdev_events(wdev);
@@ -850,7 +866,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
        int err;
        enum nl80211_iftype otype = dev->ieee80211_ptr->iftype;
 
-       ASSERT_RDEV_LOCK(rdev);
+       ASSERT_RTNL();
 
        /* don't support changing VLANs, you just re-create them */
        if (otype == NL80211_IFTYPE_AP_VLAN)
@@ -885,7 +901,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 
                switch (otype) {
                case NL80211_IFTYPE_AP:
-                       cfg80211_stop_ap(rdev, dev);
+                       cfg80211_stop_ap(rdev, dev, true);
                        break;
                case NL80211_IFTYPE_ADHOC:
                        cfg80211_leave_ibss(rdev, dev, false);
@@ -1268,7 +1284,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        enum cfg80211_chan_mode chmode;
        int num_different_channels = 0;
        int total = 1;
-       bool radar_required = false;
        int i, j;
 
        ASSERT_RTNL();
@@ -1276,35 +1291,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
        if (WARN_ON(hweight32(radar_detect) > 1))
                return -EINVAL;
 
-       switch (iftype) {
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_AP:
-       case NL80211_IFTYPE_AP_VLAN:
-       case NL80211_IFTYPE_MESH_POINT:
-       case NL80211_IFTYPE_P2P_GO:
-       case NL80211_IFTYPE_WDS:
-               /* if the interface could potentially choose a DFS channel,
-                * then mark DFS as required.
-                */
-               if (!chan) {
-                       if (chanmode != CHAN_MODE_UNDEFINED && radar_detect)
-                               radar_required = true;
-                       break;
-               }
-               radar_required = !!(chan->flags & IEEE80211_CHAN_RADAR);
-               break;
-       case NL80211_IFTYPE_P2P_CLIENT:
-       case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_P2P_DEVICE:
-       case NL80211_IFTYPE_MONITOR:
-               break;
-       case NUM_NL80211_IFTYPES:
-       case NL80211_IFTYPE_UNSPECIFIED:
-       default:
-               return -EINVAL;
-       }
-
-       if (radar_required && !radar_detect)
+       if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
                return -EINVAL;
 
        /* Always allow software iftypes */
@@ -1356,7 +1343,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                 */
                mutex_lock_nested(&wdev_iter->mtx, 1);
                __acquire(wdev_iter->mtx);
-               cfg80211_get_chan_state(wdev_iter, &ch, &chmode);
+               cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
                wdev_unlock(wdev_iter);
 
                switch (chmode) {
index 14c9a2583ba0926a2e14a777aaa5b69fd09f8963..86c331a65664a77bfe6c083224b7eae2c91f5b9c 100644 (file)
@@ -21,7 +21,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
        const u8 *prev_bssid = NULL;
        int err, i;
 
-       ASSERT_RDEV_LOCK(rdev);
+       ASSERT_RTNL();
        ASSERT_WDEV_LOCK(wdev);
 
        if (!netif_running(wdev->netdev))
index 6c7ac016ce3a7e780bf4f6361c61189d0d9b9f9e..85d1d476461257b3e248bd870cd9f7217fa21657 100644 (file)
 
 static struct kmem_cache *secpath_cachep __read_mostly;
 
+static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
+static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];
+
+int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
+{
+       int err = 0;
+
+       if (unlikely(afinfo == NULL))
+               return -EINVAL;
+       if (unlikely(afinfo->family >= NPROTO))
+               return -EAFNOSUPPORT;
+       spin_lock_bh(&xfrm_input_afinfo_lock);
+       if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
+               err = -ENOBUFS;
+       else
+               rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
+       spin_unlock_bh(&xfrm_input_afinfo_lock);
+       return err;
+}
+EXPORT_SYMBOL(xfrm_input_register_afinfo);
+
+int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
+{
+       int err = 0;
+
+       if (unlikely(afinfo == NULL))
+               return -EINVAL;
+       if (unlikely(afinfo->family >= NPROTO))
+               return -EAFNOSUPPORT;
+       spin_lock_bh(&xfrm_input_afinfo_lock);
+       if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
+               if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
+                       err = -EINVAL;
+               else
+                       RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL);
+       }
+       spin_unlock_bh(&xfrm_input_afinfo_lock);
+       synchronize_rcu();
+       return err;
+}
+EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
+
+static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
+{
+       struct xfrm_input_afinfo *afinfo;
+
+       if (unlikely(family >= NPROTO))
+               return NULL;
+       rcu_read_lock();
+       afinfo = rcu_dereference(xfrm_input_afinfo[family]);
+       if (unlikely(!afinfo))
+               rcu_read_unlock();
+       return afinfo;
+}
+
+static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo)
+{
+       rcu_read_unlock();
+}
+
+static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
+                      int err)
+{
+       int ret;
+       struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);
+
+       if (!afinfo)
+               return -EAFNOSUPPORT;
+
+       ret = afinfo->callback(skb, protocol, err);
+       xfrm_input_put_afinfo(afinfo);
+
+       return ret;
+}
+
 void __secpath_destroy(struct sec_path *sp)
 {
        int i;
@@ -108,7 +183,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        int err;
        __be32 seq;
        __be32 seq_hi;
-       struct xfrm_state *x;
+       struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
        unsigned int family;
@@ -120,9 +195,14 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                async = 1;
                x = xfrm_input_state(skb);
                seq = XFRM_SKB_CB(skb)->seq.input.low;
+               family = x->outer_mode->afinfo->family;
                goto resume;
        }
 
+       daddr = (xfrm_address_t *)(skb_network_header(skb) +
+                                  XFRM_SPI_SKB_CB(skb)->daddroff);
+       family = XFRM_SPI_SKB_CB(skb)->family;
+
        /* Allocate new secpath or COW existing one. */
        if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
                struct sec_path *sp;
@@ -137,10 +217,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                skb->sp = sp;
        }
 
-       daddr = (xfrm_address_t *)(skb_network_header(skb) +
-                                  XFRM_SPI_SKB_CB(skb)->daddroff);
-       family = XFRM_SPI_SKB_CB(skb)->family;
-
        seq = 0;
        if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
@@ -162,6 +238,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                skb->sp->xvec[skb->sp->len++] = x;
 
+               if (xfrm_tunnel_check(skb, x, family)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
+                       goto drop;
+               }
+
                spin_lock(&x->lock);
                if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
@@ -201,7 +282,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                if (nexthdr == -EINPROGRESS)
                        return 0;
-
 resume:
                spin_lock(&x->lock);
                if (nexthdr <= 0) {
@@ -263,6 +343,10 @@ resume:
                }
        } while (!err);
 
+       err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
+       if (err)
+               goto drop;
+
        nf_reset(skb);
 
        if (decaps) {
@@ -276,6 +360,7 @@ resume:
 drop_unlock:
        spin_unlock(&x->lock);
 drop:
+       xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
        kfree_skb(skb);
        return 0;
 }
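
The new xfrm_input_afinfo table gives each address family an RCU-protected callback which xfrm_rcv_cb() invokes on both the success path and the drop path of xfrm_input(). A usage sketch with hypothetical names follows; only the .family and .callback members referenced in this patch are filled in, and any other members of struct xfrm_input_afinfo are left out.

static int sketch_xfrm_input_cb(struct sk_buff *skb, u8 protocol, int err)
{
        /* Per-family post-processing of the decapsulated (or failed) packet. */
        return 0;
}

static struct xfrm_input_afinfo sketch_xfrm_input_afinfo = {
        .family         = AF_INET,
        .callback       = sketch_xfrm_input_cb,
};

static int __init sketch_init(void)
{
        return xfrm_input_register_afinfo(&sketch_xfrm_input_afinfo);
}

static void __exit sketch_exit(void)
{
        xfrm_input_unregister_afinfo(&sketch_xfrm_input_afinfo);
}
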
index 1d5c7bf29938231fa06428dc6236e36c1719860e..f02f511b710741e1779d389ac69bfc75a17ab42f 100644 (file)
@@ -39,8 +39,6 @@
 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
 #define XFRM_MAX_QUEUE_LEN     100
 
-static struct dst_entry *xfrm_policy_sk_bundles;
-
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
                                                __read_mostly;
@@ -661,7 +659,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
                hlist_add_head(&policy->bydst, chain);
        xfrm_pol_hold(policy);
        net->xfrm.policy_count[dir]++;
-       atomic_inc(&flow_cache_genid);
+       atomic_inc(&net->xfrm.flow_cache_genid);
 
        /* After previous checking, family can either be AF_INET or AF_INET6 */
        if (policy->family == AF_INET)
@@ -2109,13 +2107,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                goto no_transform;
                        }
 
-                       dst_hold(&xdst->u.dst);
-
-                       spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-                       xdst->u.dst.next = xfrm_policy_sk_bundles;
-                       xfrm_policy_sk_bundles = &xdst->u.dst;
-                       spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
                        route = xdst->route;
                }
        }
@@ -2549,33 +2540,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
        return dst;
 }
 
-static void __xfrm_garbage_collect(struct net *net)
-{
-       struct dst_entry *head, *next;
-
-       spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-       head = xfrm_policy_sk_bundles;
-       xfrm_policy_sk_bundles = NULL;
-       spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
-       while (head) {
-               next = head->next;
-               dst_free(head);
-               head = next;
-       }
-}
-
 void xfrm_garbage_collect(struct net *net)
 {
-       flow_cache_flush();
-       __xfrm_garbage_collect(net);
+       flow_cache_flush(net);
 }
 EXPORT_SYMBOL(xfrm_garbage_collect);
 
 static void xfrm_garbage_collect_deferred(struct net *net)
 {
-       flow_cache_flush_deferred();
-       __xfrm_garbage_collect(net);
+       flow_cache_flush_deferred(net);
 }
 
 static void xfrm_init_pmtu(struct dst_entry *dst)
@@ -2940,15 +2913,19 @@ static int __net_init xfrm_net_init(struct net *net)
        rv = xfrm_sysctl_init(net);
        if (rv < 0)
                goto out_sysctl;
+       rv = flow_cache_init(net);
+       if (rv < 0)
+               goto out;
 
        /* Initialize the per-net locks here */
        spin_lock_init(&net->xfrm.xfrm_state_lock);
        rwlock_init(&net->xfrm.xfrm_policy_lock);
-       spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
        mutex_init(&net->xfrm.xfrm_cfg_mutex);
 
        return 0;
 
+out:
+       xfrm_sysctl_fini(net);
 out_sysctl:
        xfrm_policy_fini(net);
 out_policy:
@@ -2961,6 +2938,7 @@ out_statistics:
 
 static void __net_exit xfrm_net_exit(struct net *net)
 {
+       flow_cache_fini(net);
        xfrm_sysctl_fini(net);
        xfrm_policy_fini(net);
        xfrm_state_fini(net);
index 40f1b3e92e7812e83127064237db07f18048b0a5..8e9c781a6bbaaba83e4af4a31ac7a07a70dd6c71 100644 (file)
@@ -161,6 +161,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
 int __xfrm_state_delete(struct xfrm_state *x);
 
 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
+bool km_is_alive(const struct km_event *c);
 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
 
 static DEFINE_SPINLOCK(xfrm_type_lock);
@@ -788,6 +789,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
        struct xfrm_state *best = NULL;
        u32 mark = pol->mark.v & pol->mark.m;
        unsigned short encap_family = tmpl->encap_family;
+       struct km_event c;
 
        to_put = NULL;
 
@@ -832,6 +834,17 @@ found:
                        error = -EEXIST;
                        goto out;
                }
+
+               c.net = net;
+               /* If the KMs have no listeners (yet...), avoid allocating an SA
+                * for each and every packet - garbage collection might not
+                * handle the flood.
+                */
+               if (!km_is_alive(&c)) {
+                       error = -ESRCH;
+                       goto out;
+               }
+
                x = xfrm_state_alloc(net);
                if (x == NULL) {
                        error = -ENOMEM;
@@ -1135,10 +1148,9 @@ out:
 EXPORT_SYMBOL(xfrm_state_add);
 
 #ifdef CONFIG_XFRM_MIGRATE
-static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
+static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
 {
        struct net *net = xs_net(orig);
-       int err = -ENOMEM;
        struct xfrm_state *x = xfrm_state_alloc(net);
        if (!x)
                goto out;
@@ -1192,15 +1204,13 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
        }
 
        if (orig->replay_esn) {
-               err = xfrm_replay_clone(x, orig);
-               if (err)
+               if (xfrm_replay_clone(x, orig))
                        goto error;
        }
 
        memcpy(&x->mark, &orig->mark, sizeof(x->mark));
 
-       err = xfrm_init_state(x);
-       if (err)
+       if (xfrm_init_state(x) < 0)
                goto error;
 
        x->props.flags = orig->props.flags;
@@ -1218,8 +1228,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
  error:
        xfrm_state_put(x);
 out:
-       if (errp)
-               *errp = err;
        return NULL;
 }
 
@@ -1274,9 +1282,8 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
                                      struct xfrm_migrate *m)
 {
        struct xfrm_state *xc;
-       int err;
 
-       xc = xfrm_state_clone(x, &err);
+       xc = xfrm_state_clone(x);
        if (!xc)
                return NULL;
 
@@ -1289,7 +1296,7 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
                   state is to be updated as it is a part of triplet */
                xfrm_state_insert(xc);
        } else {
-               if ((err = xfrm_state_add(xc)) < 0)
+               if (xfrm_state_add(xc) < 0)
                        goto error;
        }
 
@@ -1601,6 +1608,23 @@ unlock:
 }
 EXPORT_SYMBOL(xfrm_alloc_spi);
 
+static bool __xfrm_state_filter_match(struct xfrm_state *x,
+                                     struct xfrm_address_filter *filter)
+{
+       if (filter) {
+               if ((filter->family == AF_INET ||
+                    filter->family == AF_INET6) &&
+                   x->props.family != filter->family)
+                       return false;
+
+               return addr_match(&x->props.saddr, &filter->saddr,
+                                 filter->splen) &&
+                      addr_match(&x->id.daddr, &filter->daddr,
+                                 filter->dplen);
+       }
+       return true;
+}
+
 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                    int (*func)(struct xfrm_state *, int, void*),
                    void *data)
@@ -1623,6 +1647,8 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                state = container_of(x, struct xfrm_state, km);
                if (!xfrm_id_proto_match(state->id.proto, walk->proto))
                        continue;
+               if (!__xfrm_state_filter_match(state, walk->filter))
+                       continue;
                err = func(state, walk->seq, data);
                if (err) {
                        list_move_tail(&walk->all, &x->all);
@@ -1641,17 +1667,21 @@ out:
 }
 EXPORT_SYMBOL(xfrm_state_walk);
 
-void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
+                         struct xfrm_address_filter *filter)
 {
        INIT_LIST_HEAD(&walk->all);
        walk->proto = proto;
        walk->state = XFRM_STATE_DEAD;
        walk->seq = 0;
+       walk->filter = filter;
 }
 EXPORT_SYMBOL(xfrm_state_walk_init);
 
 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
 {
+       kfree(walk->filter);
+
        if (list_empty(&walk->all))
                return;
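
xfrm_state_walk_init() now takes an optional struct xfrm_address_filter, which __xfrm_state_filter_match() applies to every state and which xfrm_state_walk_done() kfree()s. A sketch of a filtered walk with hypothetical prefix values; the dump callback and its cookie are elided.

static int sketch_filtered_walk(struct net *net)
{
        struct xfrm_address_filter *filter;
        struct xfrm_state_walk walk;

        filter = kzalloc(sizeof(*filter), GFP_KERNEL);
        if (!filter)
                return -ENOMEM;

        filter->family = AF_INET;
        filter->saddr.a4 = htonl(0x0a000000);   /* 10.0.0.0/8 */
        filter->splen = 8;
        filter->daddr.a4 = htonl(0xc0000201);   /* 192.0.2.1/32 */
        filter->dplen = 32;

        xfrm_state_walk_init(&walk, IPPROTO_ESP, filter);
        /* ... xfrm_state_walk(net, &walk, <callback>, <data>) ... */
        xfrm_state_walk_done(&walk, net);       /* frees the filter */
        return 0;
}
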
 
@@ -1804,6 +1834,24 @@ int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address
 }
 EXPORT_SYMBOL(km_report);
 
+bool km_is_alive(const struct km_event *c)
+{
+       struct xfrm_mgr *km;
+       bool is_alive = false;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(km, &xfrm_km_list, list) {
+               if (km->is_alive && km->is_alive(c)) {
+                       is_alive = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return is_alive;
+}
+EXPORT_SYMBOL(km_is_alive);
+
 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
 {
        int err;
index 2f7ddc3a59b42db13c1df648b188a8dc312c5f8b..8f131c10a6f3d6793c6d0a049108ab66ccaa8664 100644 (file)
@@ -137,7 +137,8 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
        if (!rt)
                return 0;
 
-       if (p->id.proto != IPPROTO_ESP)
+       /* Only ESP and AH support the ESN feature. */
+       if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
                return -EINVAL;
 
        if (p->replay_window != 0)
@@ -881,6 +882,7 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
        return 0;
 }
 
+static const struct nla_policy xfrma_policy[XFRMA_MAX+1];
 static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
@@ -896,8 +898,31 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
        info.nlmsg_flags = NLM_F_MULTI;
 
        if (!cb->args[0]) {
+               struct nlattr *attrs[XFRMA_MAX+1];
+               struct xfrm_address_filter *filter = NULL;
+               u8 proto = 0;
+               int err;
+
                cb->args[0] = 1;
-               xfrm_state_walk_init(walk, 0);
+
+               err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
+                                 xfrma_policy);
+               if (err < 0)
+                       return err;
+
+               if (attrs[XFRMA_ADDRESS_FILTER]) {
+                       filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+                       if (filter == NULL)
+                               return -ENOMEM;
+
+                       memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+                              sizeof(*filter));
+               }
+
+               if (attrs[XFRMA_PROTO])
+                       proto = nla_get_u8(attrs[XFRMA_PROTO]);
+
+               xfrm_state_walk_init(walk, proto, filter);
        }
 
        (void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2303,6 +2328,8 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
        [XFRMA_TFCPAD]          = { .type = NLA_U32 },
        [XFRMA_REPLAY_ESN_VAL]  = { .len = sizeof(struct xfrm_replay_state_esn) },
        [XFRMA_SA_EXTRA_FLAGS]  = { .type = NLA_U32 },
+       [XFRMA_PROTO]           = { .type = NLA_U8 },
+       [XFRMA_ADDRESS_FILTER]  = { .len = sizeof(struct xfrm_address_filter) },
 };
 
 static const struct xfrm_link {
@@ -2976,6 +3003,11 @@ static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
        return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
 }
 
+static bool xfrm_is_alive(const struct km_event *c)
+{
+       return (bool)xfrm_acquire_is_on(c->net);
+}
+
 static struct xfrm_mgr netlink_mgr = {
        .id             = "netlink",
        .notify         = xfrm_send_state_notify,
@@ -2985,6 +3017,7 @@ static struct xfrm_mgr netlink_mgr = {
        .report         = xfrm_send_report,
        .migrate        = xfrm_send_migrate,
        .new_mapping    = xfrm_send_mapping,
+       .is_alive       = xfrm_is_alive,
 };
 
 static int __net_init xfrm_user_net_init(struct net *net)
index 9f0584710c858a8281d51f2c287dc3b43b30d77f..1450f85b946da462e5ef3576a50d979d15574b8f 100644 (file)
@@ -46,10 +46,11 @@ static inline void selinux_xfrm_notify_policyload(void)
 {
        struct net *net;
 
-       atomic_inc(&flow_cache_genid);
        rtnl_lock();
-       for_each_net(net)
+       for_each_net(net) {
+               atomic_inc(&net->xfrm.flow_cache_genid);
                rt_genid_bump_all(net);
+       }
        rtnl_unlock();
 }
 #else
index 65dc757f7f7b5c8d41bf49d95fe47df6b549c4fc..bb31813e43ddca8bd2bad4544e593d8c6df418f2 100644 (file)
@@ -87,9 +87,6 @@
        __attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs))))
 #endif
 
-#define CMD(_name, _func) { .name = _name, .func = _func, }
-#define OP(_op, _name)      [_op] = _name
-
 enum {
        CMD_OK,
        CMD_ERR,
@@ -145,32 +142,32 @@ static size_t pcap_map_size = 0;
 static char *pcap_ptr_va_start, *pcap_ptr_va_curr;
 
 static const char * const op_table[] = {
-       OP(BPF_ST, "st"),
-       OP(BPF_STX, "stx"),
-       OP(BPF_LD_B, "ldb"),
-       OP(BPF_LD_H, "ldh"),
-       OP(BPF_LD_W, "ld"),
-       OP(BPF_LDX, "ldx"),
-       OP(BPF_LDX_B, "ldxb"),
-       OP(BPF_JMP_JA, "ja"),
-       OP(BPF_JMP_JEQ, "jeq"),
-       OP(BPF_JMP_JGT, "jgt"),
-       OP(BPF_JMP_JGE, "jge"),
-       OP(BPF_JMP_JSET, "jset"),
-       OP(BPF_ALU_ADD, "add"),
-       OP(BPF_ALU_SUB, "sub"),
-       OP(BPF_ALU_MUL, "mul"),
-       OP(BPF_ALU_DIV, "div"),
-       OP(BPF_ALU_MOD, "mod"),
-       OP(BPF_ALU_NEG, "neg"),
-       OP(BPF_ALU_AND, "and"),
-       OP(BPF_ALU_OR, "or"),
-       OP(BPF_ALU_XOR, "xor"),
-       OP(BPF_ALU_LSH, "lsh"),
-       OP(BPF_ALU_RSH, "rsh"),
-       OP(BPF_MISC_TAX, "tax"),
-       OP(BPF_MISC_TXA, "txa"),
-       OP(BPF_RET, "ret"),
+       [BPF_ST]        = "st",
+       [BPF_STX]       = "stx",
+       [BPF_LD_B]      = "ldb",
+       [BPF_LD_H]      = "ldh",
+       [BPF_LD_W]      = "ld",
+       [BPF_LDX]       = "ldx",
+       [BPF_LDX_B]     = "ldxb",
+       [BPF_JMP_JA]    = "ja",
+       [BPF_JMP_JEQ]   = "jeq",
+       [BPF_JMP_JGT]   = "jgt",
+       [BPF_JMP_JGE]   = "jge",
+       [BPF_JMP_JSET]  = "jset",
+       [BPF_ALU_ADD]   = "add",
+       [BPF_ALU_SUB]   = "sub",
+       [BPF_ALU_MUL]   = "mul",
+       [BPF_ALU_DIV]   = "div",
+       [BPF_ALU_MOD]   = "mod",
+       [BPF_ALU_NEG]   = "neg",
+       [BPF_ALU_AND]   = "and",
+       [BPF_ALU_OR]    = "or",
+       [BPF_ALU_XOR]   = "xor",
+       [BPF_ALU_LSH]   = "lsh",
+       [BPF_ALU_RSH]   = "rsh",
+       [BPF_MISC_TAX]  = "tax",
+       [BPF_MISC_TXA]  = "txa",
+       [BPF_RET]       = "ret",
 };
 
 static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...)
@@ -1127,7 +1124,6 @@ static int cmd_step(char *num)
 static int cmd_select(char *num)
 {
        unsigned int which, i;
-       struct pcap_pkthdr *hdr;
        bool have_next = true;
 
        if (!pcap_loaded() || strlen(num) == 0)
@@ -1144,7 +1140,7 @@ static int cmd_select(char *num)
 
        for (i = 0; i < which && (have_next = pcap_next_pkt()); i++)
                /* noop */;
-       if (!have_next || (hdr = pcap_curr_pkt()) == NULL) {
+       if (!have_next || pcap_curr_pkt() == NULL) {
                rl_printf("no packet #%u available!\n", which);
                pcap_reset_pkt();
                return CMD_ERR;
@@ -1177,9 +1173,8 @@ static int cmd_breakpoint(char *subcmd)
 static int cmd_run(char *num)
 {
        static uint32_t pass = 0, fail = 0;
-       struct pcap_pkthdr *hdr;
        bool has_limit = true;
-       int ret, pkts = 0, i = 0;
+       int pkts = 0, i = 0;
 
        if (!bpf_prog_loaded() || !pcap_loaded())
                return CMD_ERR;
@@ -1189,10 +1184,10 @@ static int cmd_run(char *num)
                has_limit = false;
 
        do {
-               hdr = pcap_curr_pkt();
-               ret = bpf_run_all(bpf_image, bpf_prog_len,
-                                 (uint8_t *) hdr + sizeof(*hdr),
-                                 hdr->caplen, hdr->len);
+               struct pcap_pkthdr *hdr = pcap_curr_pkt();
+               int ret = bpf_run_all(bpf_image, bpf_prog_len,
+                                     (uint8_t *) hdr + sizeof(*hdr),
+                                     hdr->caplen, hdr->len);
                if (ret > 0)
                        pass++;
                else if (ret == 0)
@@ -1245,14 +1240,14 @@ static int cmd_quit(char *dontcare)
 }
 
 static const struct shell_cmd cmds[] = {
-       CMD("load",             cmd_load),
-       CMD("select",           cmd_select),
-       CMD("step",             cmd_step),
-       CMD("run",              cmd_run),
-       CMD("breakpoint",       cmd_breakpoint),
-       CMD("disassemble",      cmd_disassemble),
-       CMD("dump",             cmd_dump),
-       CMD("quit",             cmd_quit),
+       { .name = "load", .func = cmd_load },
+       { .name = "select", .func = cmd_select },
+       { .name = "step", .func = cmd_step },
+       { .name = "run", .func = cmd_run },
+       { .name = "breakpoint", .func = cmd_breakpoint },
+       { .name = "disassemble", .func = cmd_disassemble },
+       { .name = "dump", .func = cmd_dump },
+       { .name = "quit", .func = cmd_quit },
 };
 
 static int execf(char *arg)
@@ -1280,7 +1275,6 @@ out:
 static char *shell_comp_gen(const char *buf, int state)
 {
        static int list_index, len;
-       const char *name;
 
        if (!state) {
                list_index = 0;
@@ -1288,9 +1282,9 @@ static char *shell_comp_gen(const char *buf, int state)
        }
 
        for (; list_index < array_size(cmds); ) {
-               name = cmds[list_index].name;
-               list_index++;
+               const char *name = cmds[list_index].name;
 
+               list_index++;
                if (strncmp(name, buf, len) == 0)
                        return strdup(name);
        }
@@ -1322,16 +1316,9 @@ static void init_shell(FILE *fin, FILE *fout)
 {
        char file[128];
 
-       memset(file, 0, sizeof(file));
-       snprintf(file, sizeof(file) - 1,
-                "%s/.bpf_dbg_history", getenv("HOME"));
-
+       snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
        read_history(file);
 
-       memset(file, 0, sizeof(file));
-       snprintf(file, sizeof(file) - 1,
-                "%s/.bpf_dbg_init", getenv("HOME"));
-
        rl_instream = fin;
        rl_outstream = fout;
 
@@ -1348,37 +1335,41 @@ static void init_shell(FILE *fin, FILE *fout)
        rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap);
        rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap);
 
+       snprintf(file, sizeof(file), "%s/.bpf_dbg_init", getenv("HOME"));
        rl_read_init_file(file);
+
        rl_prep_terminal(0);
        rl_set_signals();
 
        signal(SIGINT, intr_shell);
 }
 
-static void exit_shell(void)
+static void exit_shell(FILE *fin, FILE *fout)
 {
        char file[128];
 
-       memset(file, 0, sizeof(file));
-       snprintf(file, sizeof(file) - 1,
-                "%s/.bpf_dbg_history", getenv("HOME"));
-
+       snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
        write_history(file);
+
        clear_history();
        rl_deprep_terminal();
 
        try_close_pcap();
+
+       if (fin != stdin)
+               fclose(fin);
+       if (fout != stdout)
+               fclose(fout);
 }
 
 static int run_shell_loop(FILE *fin, FILE *fout)
 {
        char *buf;
-       int ret;
 
        init_shell(fin, fout);
 
        while ((buf = readline("> ")) != NULL) {
-               ret = execf(buf);
+               int ret = execf(buf);
                if (ret == CMD_EX)
                        break;
                if (ret == CMD_OK && strlen(buf) > 0)
@@ -1387,7 +1378,7 @@ static int run_shell_loop(FILE *fin, FILE *fout)
                free(buf);
        }
 
-       exit_shell();
+       exit_shell(fin, fout);
        return 0;
 }
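
The init_shell()/exit_shell() cleanups drop the sizeof(file) - 1 idiom and the preceding memset() because snprintf() already reserves room for the terminating NUL within the size it is given and never writes past it. A stand-alone illustration of that guarantee:

#include <stdio.h>

int main(void)
{
        char buf[8];
        int n = snprintf(buf, sizeof(buf), "%s", "0123456789");

        /* n is 10 (the untruncated length); buf holds "0123456" plus NUL. */
        printf("wanted %d bytes, stored \"%s\"\n", n, buf);
        return 0;
}
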
 