Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Aug 2016 00:10:31 +0000 (20:10 -0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 5 Aug 2016 00:10:31 +0000 (20:10 -0400)
Pull base rdma updates from Doug Ledford:
 "Round one of 4.8 code: while this is mostly normal, there is a new
  driver in here (the driver was hosted outside the kernel for several
  years and is actually a fairly mature and well coded driver).  It
  amounts to 13,000 of the 16,000 lines of added code in here.

  Summary:

   - Updates/fixes for iw_cxgb4 driver
   - Updates/fixes for mlx5 driver
   - Add flow steering and RSS API
   - Add hardware stats to mlx4 and mlx5 drivers
   - Add firmware version API for RDMA driver use
   - Add the rxe driver (this is a software RoCE driver that makes any
     Ethernet device a RoCE device)
   - Fixes for i40iw driver
   - Support for send only multicast joins in the cma layer
   - Other minor fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (72 commits)
  Soft RoCE driver
  IB/core: Support for CMA multicast join flags
  IB/sa: Add cached attribute containing SM information to SA port
  IB/uverbs: Fix race between uverbs_close and remove_one
  IB/mthca: Clean up error unwind flow in mthca_reset()
  IB/mthca: NULL arg to pci_dev_put is OK
  IB/hfi1: NULL arg to sc_return_credits is OK
  IB/mlx4: Add diagnostic hardware counters
  net/mlx4: Query performance and diagnostics counters
  net/mlx4: Add diagnostic counters capability bit
  Use smaller 512 byte messages for portmapper messages
  IB/ipoib: Report SG feature regardless of HW UD CSUM capability
  IB/mlx4: Don't use GFP_ATOMIC for CQ resize struct
  IB/hfi1: Disable by default
  IB/rdmavt: Disable by default
  IB/mlx5: Fix port counter ID association to QP offset
  IB/mlx5: Fix iteration overrun in GSI qps
  i40iw: Add NULL check for puda buffer
  i40iw: Change dup_ack_thresh to u8
  i40iw: Remove unnecessary check for moving CQ head
  ...

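The "firmware version API" item corresponds to the show_fw_ver sysfs
attribute visible at the end of the sysfs.c hunks below. A sketch of the
driver-side hook, assuming the callback this series adds is
get_dev_fw_str(struct ib_device *, char *, size_t) on struct ib_device;
the mydrv_* names and firmware fields are illustrative only:

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

struct mydrv_dev {
        struct ib_device ibdev;
        u16 fw_major, fw_minor, fw_sub; /* hypothetical cached FW revision */
};

/* Format the device firmware version for the core's fw_ver attribute. */
static void mydrv_get_dev_fw_str(struct ib_device *ibdev, char *str,
                                 size_t str_len)
{
        struct mydrv_dev *dev = container_of(ibdev, struct mydrv_dev, ibdev);

        snprintf(str, str_len, "%u.%u.%u",
                 dev->fw_major, dev->fw_minor, dev->fw_sub);
}

/* Wired up before ib_register_device(), e.g.:
 *      dev->ibdev.get_dev_fw_str = mydrv_get_dev_fw_str;
 */
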
15 files changed:
MAINTAINERS
drivers/infiniband/core/cma.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/net/ethernet/mellanox/mlx4/fw.c
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/qp.h
include/rdma/ib_verbs.h

diff --combined MAINTAINERS
index f518e69e5f295238aae4f4f59f1acfc41863a39e,4daa6712eac1d958a75547d47ff91dfd8ccb65f5..5e1f03f0a526d5b40ebf8a491d56978fed3815ba
@@@ -213,7 -213,7 +213,7 @@@ S: Maintaine
  F:    drivers/media/dvb-frontends/a8293*
  
  AACRAID SCSI RAID DRIVER
 -M:    Adaptec OEM Raid Solutions <aacraid@adaptec.com>
 +M:    Adaptec OEM Raid Solutions <aacraid@microsemi.com>
  L:    linux-scsi@vger.kernel.org
  W:    http://www.adaptec.com/
  S:    Supported
@@@ -288,7 -288,6 +288,7 @@@ F: include/linux/acpi.
  F:    include/acpi/
  F:    Documentation/acpi/
  F:    Documentation/ABI/testing/sysfs-bus-acpi
 +F:    Documentation/ABI/testing/configfs-acpi
  F:    drivers/pci/*acpi*
  F:    drivers/pci/*/*acpi*
  F:    drivers/pci/*/*/*acpi*
@@@ -596,10 -595,6 +596,10 @@@ S:       Odd Fixe
  L:    linux-alpha@vger.kernel.org
  F:    arch/alpha/
  
 +ALPS PS/2 TOUCHPAD DRIVER
 +R:    Pali Rohár <pali.rohar@gmail.com>
 +F:    drivers/input/mouse/alps.*
 +
  ALTERA MAILBOX DRIVER
  M:    Ley Foon Tan <lftan@altera.com>
  L:    nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
@@@ -612,13 -607,6 +612,13 @@@ L:       linux-gpio@vger.kernel.or
  S:    Maintained
  F:    drivers/gpio/gpio-altera.c
  
 +ALTERA SYSTEM RESOURCE DRIVER FOR ARRIA10 DEVKIT
 +M:    Thor Thayer <tthayer@opensource.altera.com>
 +S:    Maintained
 +F:    drivers/gpio/gpio-altera-a10sr.c
 +F:    drivers/mfd/altera-a10sr.c
 +F:    include/linux/mfd/altera-a10sr.h
 +
  ALTERA TRIPLE SPEED ETHERNET DRIVER
  M:    Vince Bridgers <vbridger@opensource.altera.com>
  L:    netdev@vger.kernel.org
@@@ -778,11 -766,6 +778,11 @@@ W:       http://ez.analog.com/community/linux
  S:    Supported
  F:    drivers/dma/dma-axi-dmac.c
  
 +ANDROID CONFIG FRAGMENTS
 +M:    Rob Herring <robh@kernel.org>
 +S:    Supported
 +F:    kernel/configs/android*
 +
  ANDROID DRIVERS
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  M:    Arve Hjønnevåg <arve@android.com>
@@@ -852,9 -835,7 +852,9 @@@ M: Iyappan Subramanian <isubramanian@ap
  M:    Keyur Chudgar <kchudgar@apm.com>
  S:    Supported
  F:    drivers/net/ethernet/apm/xgene/
 +F:    drivers/net/phy/mdio-xgene.c
  F:    Documentation/devicetree/bindings/net/apm-xgene-enet.txt
 +F:    Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
  
  APTINA CAMERA SENSOR PLL
  M:    Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
@@@ -884,17 -865,9 +884,17 @@@ F:       Documentation/devicetree/bindings/di
  ARM HDLCD DRM DRIVER
  M:    Liviu Dudau <liviu.dudau@arm.com>
  S:    Supported
 -F:    drivers/gpu/drm/arm/
 +F:    drivers/gpu/drm/arm/hdlcd_*
  F:    Documentation/devicetree/bindings/display/arm,hdlcd.txt
  
 +ARM MALI-DP DRM DRIVER
 +M:    Liviu Dudau <liviu.dudau@arm.com>
 +M:    Brian Starkey <brian.starkey@arm.com>
 +M:    Mali DP Maintainers <malidp@foss.arm.com>
 +S:    Supported
 +F:    drivers/gpu/drm/arm/
 +F:    Documentation/devicetree/bindings/display/arm,malidp.txt
 +
  ARM MFM AND FLOPPY DRIVERS
  M:    Ian Molton <spyro@f2s.com>
  S:    Maintained
@@@ -1549,7 -1522,6 +1549,7 @@@ M:      David Brown <david.brown@linaro.org
  L:    linux-arm-msm@vger.kernel.org
  L:    linux-soc@vger.kernel.org
  S:    Maintained
 +F:    Documentation/devicetree/bindings/soc/qcom/
  F:    arch/arm/boot/dts/qcom-*.dts
  F:    arch/arm/boot/dts/qcom-*.dtsi
  F:    arch/arm/mach-qcom/
@@@ -1627,13 -1599,12 +1627,13 @@@ F:   arch/arm/mach-s3c24*
  F:    arch/arm/mach-s3c64xx/
  F:    arch/arm/mach-s5p*/
  F:    arch/arm/mach-exynos*/
 -F:    drivers/*/*s3c2410*
 -F:    drivers/*/*/*s3c2410*
 +F:    drivers/*/*s3c24*
 +F:    drivers/*/*/*s3c24*
 +F:    drivers/*/*s3c64xx*
 +F:    drivers/*/*s5pv210*
  F:    drivers/memory/samsung/*
  F:    drivers/soc/samsung/*
  F:    drivers/spi/spi-s3c*
 -F:    sound/soc/samsung/*
  F:    Documentation/arm/Samsung/
  F:    Documentation/devicetree/bindings/arm/samsung/
  F:    Documentation/devicetree/bindings/sram/samsung-sram.txt
@@@ -1648,8 -1619,7 +1648,8 @@@ F:      arch/arm/mach-s5pv210
  
  ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
  M:    Kyungmin Park <kyungmin.park@samsung.com>
 -M:    Kamil Debski <k.debski@samsung.com>
 +M:    Kamil Debski <kamil@wypas.org>
 +M:    Andrzej Hajda <a.hajda@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org
  L:    linux-media@vger.kernel.org
  S:    Maintained
@@@ -1657,9 -1627,8 +1657,9 @@@ F:      drivers/media/platform/s5p-g2d
  
  ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
  M:    Kyungmin Park <kyungmin.park@samsung.com>
 -M:    Kamil Debski <k.debski@samsung.com>
 +M:    Kamil Debski <kamil@wypas.org>
  M:    Jeongtae Park <jtp.park@samsung.com>
 +M:    Andrzej Hajda <a.hajda@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org
  L:    linux-media@vger.kernel.org
  S:    Maintained
@@@ -1674,13 -1643,6 +1674,13 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/platform/s5p-tv/
  
 +ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
 +M:    Kyungmin Park <kyungmin.park@samsung.com>
 +L:    linux-arm-kernel@lists.infradead.org
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    drivers/staging/media/platform/s5p-cec/
 +
  ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
  M:    Andrzej Pietrasiewicz <andrzej.p@samsung.com>
  M:    Jacek Anaszewski <j.anaszewski@samsung.com>
@@@ -1703,6 -1665,7 +1703,6 @@@ F:      arch/arm/boot/dts/sh
  F:    arch/arm/configs/shmobile_defconfig
  F:    arch/arm/include/debug/renesas-scif.S
  F:    arch/arm/mach-shmobile/
 -F:    drivers/sh/
  F:    drivers/soc/renesas/
  F:    include/linux/soc/renesas/
  
@@@ -1727,6 -1690,8 +1727,6 @@@ S:      Maintaine
  F:    drivers/edac/altera_edac.
  
  ARM/STI ARCHITECTURE
 -M:    Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
 -M:    Maxime Coquelin <maxime.coquelin@st.com>
  M:    Patrice Chotard <patrice.chotard@st.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    kernel@stlinux.com
@@@ -1759,7 -1724,6 +1759,7 @@@ F:      drivers/ata/ahci_st.
  
  ARM/STM32 ARCHITECTURE
  M:    Maxime Coquelin <mcoquelin.stm32@gmail.com>
 +M:    Alexandre Torgue <alexandre.torgue@st.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
@@@ -1770,7 -1734,8 +1770,7 @@@ ARM/TANGO ARCHITECTUR
  M:    Marc Gonzalez <marc_gonzalez@sigmadesigns.com>
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
 -F:    arch/arm/mach-tango/
 -F:    arch/arm/boot/dts/tango*
 +N:    tango
  
  ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -1857,6 -1822,7 +1857,6 @@@ L:      linux-arm-kernel@lists.infradead.or
  T:    git git://git.linaro.org/people/ulfh/clk.git
  S:    Maintained
  F:    drivers/clk/ux500/
 -F:    include/linux/platform_data/clk-ux500.h
  
  ARM/VERSATILE EXPRESS PLATFORM
  M:    Liviu Dudau <liviu.dudau@arm.com>
@@@ -2327,7 -2293,6 +2327,7 @@@ S:      Maintaine
  F:    Documentation/ABI/testing/sysfs-class-net-batman-adv
  F:    Documentation/ABI/testing/sysfs-class-net-mesh
  F:    Documentation/networking/batman-adv.txt
 +F:    include/uapi/linux/batman_adv.h
  F:    net/batman-adv/
  
  BAYCOM/HDLCDRV DRIVERS FOR AX.25
@@@ -2353,10 -2318,7 +2353,10 @@@ S:    Supporte
  F:    drivers/media/platform/sti/bdisp
  
  BEFS FILE SYSTEM
 -S:    Orphan
 +M:    Luis de Bethencourt <luisbg@osg.samsung.com>
 +M:    Salah Triki <salah.triki@gmail.com>
 +S:    Maintained
 +T:    git git://github.com/luisbg/linux-befs.git
  F:    Documentation/filesystems/befs.txt
  F:    fs/befs/
  
@@@ -2494,14 -2456,6 +2494,14 @@@ L:    netdev@vger.kernel.or
  S:    Supported
  F:    drivers/net/ethernet/broadcom/b44.*
  
 +BROADCOM B53 ETHERNET SWITCH DRIVER
 +M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    netdev@vger.kernel.org
 +L:    openwrt-devel@lists.openwrt.org (subscribers-only)
 +S:    Supported
 +F:    drivers/net/dsa/b53/*
 +F:    include/linux/platform_data/b53.h
 +
  BROADCOM GENET ETHERNET DRIVER
  M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    netdev@vger.kernel.org
@@@ -2526,14 -2480,17 +2526,14 @@@ BROADCOM BCM281XX/BCM11XXX/BCM216XX AR
  M:    Florian Fainelli <f.fainelli@gmail.com>
  M:    Ray Jui <rjui@broadcom.com>
  M:    Scott Branden <sbranden@broadcom.com>
 -L:    bcm-kernel-feedback-list@broadcom.com
 +M:    bcm-kernel-feedback-list@broadcom.com
  T:    git git://github.com/broadcom/mach-bcm
  S:    Maintained
 +N:    bcm281*
 +N:    bcm113*
 +N:    bcm216*
 +N:    kona
  F:    arch/arm/mach-bcm/
 -F:    arch/arm/boot/dts/bcm113*
 -F:    arch/arm/boot/dts/bcm216*
 -F:    arch/arm/boot/dts/bcm281*
 -F:    arch/arm64/boot/dts/broadcom/
 -F:    arch/arm/configs/bcm_defconfig
 -F:    drivers/mmc/host/sdhci-bcm-kona.c
 -F:    drivers/clocksource/bcm_kona_timer.c
  
  BROADCOM BCM2835 ARM ARCHITECTURE
  M:    Stephen Warren <swarren@wwwdotorg.org>
@@@ -2556,21 -2513,20 +2556,21 @@@ F:   arch/mips/include/asm/mach-bcm47xx/
  
  BROADCOM BCM5301X ARM ARCHITECTURE
  M:    Hauke Mehrtens <hauke@hauke-m.de>
 +M:    Rafał Miłecki <zajec5@gmail.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
  F:    arch/arm/mach-bcm/bcm_5301x.c
 -F:    arch/arm/boot/dts/bcm5301x.dtsi
 +F:    arch/arm/boot/dts/bcm5301x*.dtsi
  F:    arch/arm/boot/dts/bcm470*
  
  BROADCOM BCM63XX ARM ARCHITECTURE
  M:    Florian Fainelli <f.fainelli@gmail.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    bcm-kernel-feedback-list@broadcom.com
  T:    git git://github.com/broadcom/stblinux.git
  S:    Maintained
 -F:    arch/arm/mach-bcm/bcm63xx.c
 -F:    arch/arm/include/debug/bcm63xx.S
 +N:    bcm63xx
  
  BROADCOM BCM63XX/BCM33XX UDC DRIVER
  M:    Kevin Cernekee <cernekee@gmail.com>
@@@ -2582,8 -2538,8 +2582,8 @@@ BROADCOM BCM7XXX ARM ARCHITECTUR
  M:    Brian Norris <computersforpeace@gmail.com>
  M:    Gregory Fong <gregory.0xf0@gmail.com>
  M:    Florian Fainelli <f.fainelli@gmail.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    bcm-kernel-feedback-list@broadcom.com
  T:    git git://github.com/broadcom/stblinux.git
  S:    Maintained
  F:    arch/arm/mach-bcm/*brcmstb*
@@@ -2616,11 -2572,12 +2616,11 @@@ S:   Supporte
  F:    drivers/net/ethernet/broadcom/tg3.*
  
  BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
 -M:    Brett Rudley <brudley@broadcom.com>
 -M:    Arend van Spriel <arend@broadcom.com>
 -M:    Franky (Zhenhui) Lin <frankyl@broadcom.com>
 -M:    Hante Meuleman <meuleman@broadcom.com>
 +M:    Arend van Spriel <arend.vanspriel@broadcom.com>
 +M:    Franky Lin <franky.lin@broadcom.com>
 +M:    Hante Meuleman <hante.meuleman@broadcom.com>
  L:    linux-wireless@vger.kernel.org
 -L:    brcm80211-dev-list@broadcom.com
 +L:    brcm80211-dev-list.pdl@broadcom.com
  S:    Supported
  F:    drivers/net/wireless/broadcom/brcm80211/
  
@@@ -2640,13 -2597,13 +2640,13 @@@ BROADCOM IPROC ARM ARCHITECTUR
  M:    Ray Jui <rjui@broadcom.com>
  M:    Scott Branden <sbranden@broadcom.com>
  M:    Jon Mason <jonmason@broadcom.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    bcm-kernel-feedback-list@broadcom.com
  T:    git git://github.com/broadcom/cygnus-linux.git
  S:    Maintained
  N:    iproc
  N:    cygnus
 -N:    nsp
 +N:    bcm[-_]nsp
  N:    bcm9113*
  N:    bcm9583*
  N:    bcm9585*
@@@ -2657,9 -2614,6 +2657,9 @@@ N:      bcm583
  N:    bcm585*
  N:    bcm586*
  N:    bcm88312
 +F:    arch/arm64/boot/dts/broadcom/ns2*
 +F:    drivers/clk/bcm/clk-ns*
 +F:    drivers/pinctrl/bcm/pinctrl-ns*
  
  BROADCOM BRCMSTB GPIO DRIVER
  M:    Gregory Fong <gregory.0xf0@gmail.com>
@@@ -2704,8 -2658,8 +2704,8 @@@ F:      drivers/net/ethernet/broadcom/bcmsys
  
  BROADCOM VULCAN ARM64 SOC
  M:    Jayachandran C. <jchandra@broadcom.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -L:    bcm-kernel-feedback-list@broadcom.com
  S:    Maintained
  F:    arch/arm64/boot/dts/broadcom/vulcan*
  
@@@ -2822,9 -2776,9 +2822,9 @@@ F:      include/net/caif
  F:    net/caif/
  
  CALGARY x86-64 IOMMU
 -M:    Muli Ben-Yehuda <muli@il.ibm.com>
 -M:    "Jon D. Mason" <jdmason@kudzu.us>
 -L:    discuss@x86-64.org
 +M:    Muli Ben-Yehuda <mulix@mulix.org>
 +M:    Jon Mason <jdmason@kudzu.us>
 +L:    iommu@lists.linux-foundation.org
  S:    Maintained
  F:    arch/x86/kernel/pci-calgary_64.c
  F:    arch/x86/kernel/tce_64.c
@@@ -2855,7 -2809,6 +2855,7 @@@ W:      https://github.com/linux-ca
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/net/can/
  F:    drivers/net/can/
  F:    include/linux/can/dev.h
  F:    include/linux/can/platform/
@@@ -2863,7 -2816,7 +2863,7 @@@ F:      include/uapi/linux/can/error.
  F:    include/uapi/linux/can/netlink.h
  
  CAPABILITIES
 -M:    Serge Hallyn <serge.hallyn@canonical.com>
 +M:    Serge Hallyn <serge@hallyn.com>
  L:    linux-security-module@vger.kernel.org
  S:    Supported
  F:    include/linux/capability.h
@@@ -2895,22 -2848,6 +2895,22 @@@ F:    drivers/net/ieee802154/cc2520.
  F:    include/linux/spi/cc2520.h
  F:    Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
  
 +CEC DRIVER
 +M:    Hans Verkuil <hans.verkuil@cisco.com>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +W:    http://linuxtv.org
 +S:    Supported
 +F:    Documentation/cec.txt
 +F:    Documentation/DocBook/media/v4l/cec*
 +F:    drivers/staging/media/cec/
 +F:    drivers/media/cec-edid.c
 +F:    drivers/media/rc/keymaps/rc-cec.c
 +F:    include/media/cec.h
 +F:    include/media/cec-edid.h
 +F:    include/linux/cec.h
 +F:    include/linux/cec-funcs.h
 +
  CELL BROADBAND ENGINE ARCHITECTURE
  M:    Arnd Bergmann <arnd@arndb.de>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -3347,7 -3284,6 +3347,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
  S:    Maintained
  F:    Documentation/crypto/
 +F:    Documentation/devicetree/bindings/crypto/
  F:    Documentation/DocBook/crypto-API.tmpl
  F:    arch/*/crypto/
  F:    crypto/
@@@ -3738,8 -3674,6 +3738,8 @@@ M:      Support Opensource <support.opensour
  W:    http://www.dialog-semiconductor.com/products
  S:    Supported
  F:    Documentation/hwmon/da90??
 +F:    Documentation/devicetree/bindings/mfd/da90*.txt
 +F:    Documentation/devicetree/bindings/regulator/da92*.txt
  F:    Documentation/devicetree/bindings/sound/da[79]*.txt
  F:    drivers/gpio/gpio-da90??.c
  F:    drivers/hwmon/da90??-hwmon.c
@@@ -3760,10 -3694,8 +3760,10 @@@ F:    drivers/watchdog/da90??_wdt.
  F:    include/linux/mfd/da903x.h
  F:    include/linux/mfd/da9052/
  F:    include/linux/mfd/da9055/
 +F:    include/linux/mfd/da9062/
  F:    include/linux/mfd/da9063/
  F:    include/linux/mfd/da9150/
 +F:    include/linux/regulator/da9211.h
  F:    include/sound/da[79]*.h
  F:    sound/soc/codecs/da[79]*.[ch]
  
@@@ -3839,17 -3771,6 +3839,17 @@@ F:    include/linux/*fence.
  F:    Documentation/dma-buf-sharing.txt
  T:    git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
  
 +SYNC FILE FRAMEWORK
 +M:    Sumit Semwal <sumit.semwal@linaro.org>
 +R:    Gustavo Padovan <gustavo@padovan.org>
 +S:    Maintained
 +L:    linux-media@vger.kernel.org
 +L:    dri-devel@lists.freedesktop.org
 +F:    drivers/dma-buf/sync_file.c
 +F:    include/linux/sync_file.h
 +F:    Documentation/sync_file.txt
 +T:    git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 +
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <vinod.koul@intel.com>
  L:    dmaengine@vger.kernel.org
@@@ -3939,10 -3860,7 +3939,10 @@@ T:    git git://people.freedesktop.org/~ai
  S:    Maintained
  F:    drivers/gpu/drm/
  F:    drivers/gpu/vga/
 -F:    Documentation/DocBook/gpu.*
 +F:    Documentation/devicetree/bindings/display/
 +F:    Documentation/devicetree/bindings/gpu/
 +F:    Documentation/devicetree/bindings/video/
 +F:    Documentation/gpu/
  F:    include/drm/
  F:    include/uapi/drm/
  
@@@ -3994,7 -3912,6 +3994,7 @@@ S:      Supporte
  F:    drivers/gpu/drm/i915/
  F:    include/drm/i915*
  F:    include/uapi/drm/i915_drm.h
 +F:    Documentation/gpu/i915.rst
  
  DRM DRIVERS FOR ATMEL HLCDC
  M:    Boris Brezillon <boris.brezillon@free-electrons.com>
@@@ -4190,21 -4107,6 +4190,21 @@@ F:    drivers/gpu/drm/vc4
  F:    include/uapi/drm/vc4_drm.h
  F:    Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
  
 +DRM DRIVERS FOR TI OMAP
 +M:    Tomi Valkeinen <tomi.valkeinen@ti.com>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Maintained
 +F:    drivers/gpu/drm/omapdrm/
 +F:    Documentation/devicetree/bindings/display/ti/
 +
 +DRM DRIVERS FOR TI LCDC
 +M:    Jyri Sarha <jsarha@ti.com>
 +R:    Tomi Valkeinen <tomi.valkeinen@ti.com>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Maintained
 +F:    drivers/gpu/drm/tilcdc/
 +F:    Documentation/devicetree/bindings/display/tilcdc/
 +
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -4571,7 -4473,7 +4571,7 @@@ S:      Orpha
  F:    fs/efs/
  
  EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
 -M:    Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 +M:    Douglas Miller <dougmill@linux.vnet.ibm.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ethernet/ibm/ehea/
@@@ -4759,7 -4661,7 +4759,7 @@@ S:      Maintaine
  F:    drivers/staging/fbtft/
  
  FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 -M:    Vasu Dev <vasu.dev@intel.com>
 +M:    Johannes Thumshirn <jth@kernel.org>
  L:    fcoe-devel@open-fcoe.org
  W:    www.Open-FCoE.org
  S:    Supported
@@@ -4981,13 -4883,6 +4981,13 @@@ F:    drivers/net/ethernet/freescale/gianf
  X:    drivers/net/ethernet/freescale/gianfar_ptp.c
  F:    Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
  
 +FREESCALE QUICC ENGINE UCC HDLC DRIVER
 +M:    Zhao Qiang <qiang.zhao@nxp.com>
 +L:    netdev@vger.kernel.org
 +L:    linuxppc-dev@lists.ozlabs.org
 +S:    Maintained
 +F:    drivers/net/wan/fsl_ucc_hdlc*
 +
  FREESCALE QUICC ENGINE UCC UART DRIVER
  M:    Timur Tabi <timur@tabi.org>
  L:    linuxppc-dev@lists.ozlabs.org
@@@ -5043,13 -4938,6 +5043,13 @@@ F:    Documentation/filesystems/caching
  F:    fs/fscache/
  F:    include/linux/fscache*.h
  
 +FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
 +M:    Theodore Y. Ts'o <tytso@mit.edu>
 +M:    Jaegeuk Kim <jaegeuk@kernel.org>
 +S:    Supported
 +F:    fs/crypto/
 +F:    include/linux/fscrypto.h
 +
  F2FS FILE SYSTEM
  M:    Jaegeuk Kim <jaegeuk@kernel.org>
  M:    Changman Lee <cm224.lee@samsung.com>
@@@ -5104,15 -4992,6 +5104,15 @@@ L:    linux-scsi@vger.kernel.or
  S:    Odd Fixes (e.g., new signatures)
  F:    drivers/scsi/fdomain.*
  
 +GCC PLUGINS
 +M:    Kees Cook <keescook@chromium.org>
 +R:    Emese Revfy <re.emese@gmail.com>
 +L:    kernel-hardening@lists.openwall.com
 +S:    Maintained
 +F:    scripts/gcc-plugins/
 +F:    scripts/gcc-plugin.sh
 +F:    Documentation/gcc-plugins.txt
 +
  GCOV BASED KERNEL PROFILING
  M:    Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
  S:    Maintained
@@@ -5287,10 -5166,10 +5287,10 @@@ S:   Maintaine
  F:    drivers/media/usb/gspca/m5602/
  
  GSPCA PAC207 SONIXB SUBDRIVER
 -M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
 -S:    Maintained
 +S:    Odd Fixes
  F:    drivers/media/usb/gspca/pac207.c
  
  GSPCA SN9C20X SUBDRIVER
@@@ -5308,10 -5187,10 +5308,10 @@@ S:   Maintaine
  F:    drivers/media/usb/gspca/t613.c
  
  GSPCA USB WEBCAM DRIVER
 -M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
 -S:    Maintained
 +S:    Odd Fixes
  F:    drivers/media/usb/gspca/
  
  GUID PARTITION TABLE (GPT)
@@@ -5392,7 -5271,6 +5392,7 @@@ M:      Matt Mackall <mpm@selenic.com
  M:    Herbert Xu <herbert@gondor.apana.org.au>
  L:    linux-crypto@vger.kernel.org
  S:    Odd fixes
 +F:    Documentation/devicetree/bindings/rng/
  F:    Documentation/hw_random.txt
  F:    drivers/char/hw_random/
  F:    include/linux/hw_random.h
@@@ -5403,9 -5281,8 +5403,9 @@@ M:      Bjorn Andersson <bjorn.andersson@lin
  L:    linux-remoteproc@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
 +F:    Documentation/devicetree/bindings/hwlock/
  F:    Documentation/hwspinlock.txt
 -F:    drivers/hwspinlock/hwspinlock_*
 +F:    drivers/hwspinlock/
  F:    include/linux/hwspinlock.h
  
  HARMONY SOUND DRIVER
@@@ -5423,12 -5300,6 +5423,12 @@@ T:    git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/dvb-frontends/hd29l2*
  
 +HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
 +M:    Brian Boylston <brian.boylston@hpe.com>
 +S:    Supported
 +F:    Documentation/watchdog/hpwdt.txt
 +F:    drivers/watchdog/hpwdt.c
 +
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
  M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
@@@ -5547,15 -5418,6 +5547,15 @@@ F:    include/uapi/linux/if_hippi.
  F:    net/802/hippi.c
  F:    drivers/net/hippi/
  
 +HISILICON NETWORK SUBSYSTEM DRIVER
 +M:    Yisen Zhuang <yisen.zhuang@huawei.com>
 +M:    Salil Mehta <salil.mehta@huawei.com>
 +L:    netdev@vger.kernel.org
 +W:    http://www.hisilicon.com
 +S:    Maintained
 +F:    drivers/net/ethernet/hisilicon/
 +F:    Documentation/devicetree/bindings/net/hisilicon*.txt
 +
  HISILICON SAS Controller
  M:    John Garry <john.garry@huawei.com>
  W:    http://www.hisilicon.com
@@@ -5831,15 -5693,7 +5831,15 @@@ M:    Tyrel Datwyler <tyreld@linux.vnet.ib
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/ibmvscsi/ibmvscsi*
 -F:    drivers/scsi/ibmvscsi/viosrp.h
 +F:    include/scsi/viosrp.h
 +
 +IBM Power Virtual SCSI Device Target Driver
 +M:    Bryant G. Ly <bryantly@linux.vnet.ibm.com>
 +M:    Michael Cyr <mikecyr@linux.vnet.ibm.com>
 +L:    linux-scsi@vger.kernel.org
 +L:    target-devel@vger.kernel.org
 +S:    Supported
 +F:    drivers/scsi/ibmvscsi_tgt/
  
  IBM Power Virtual FC Device Drivers
  M:    Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
@@@ -5931,9 -5785,7 +5931,9 @@@ R:      Hartmut Knaack <knaack.h@gmx.de
  R:    Lars-Peter Clausen <lars@metafoo.de>
  R:    Peter Meerwald-Stadler <pmeerw@pmeerw.net>
  L:    linux-iio@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/
  F:    drivers/iio/
  F:    drivers/staging/iio/
  F:    include/linux/iio/
@@@ -6063,12 -5915,6 +6063,12 @@@ L:    platform-driver-x86@vger.kernel.or
  S:    Maintained
  F:    drivers/platform/x86/intel-hid.c
  
 +INTEL VIRTUAL BUTTON DRIVER
 +M:    AceLan Kao <acelan.kao@canonical.com>
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    drivers/platform/x86/intel-vbtn.c
 +
  INTEL IDLE DRIVER
  M:    Len Brown <lenb@kernel.org>
  L:    linux-pm@vger.kernel.org
@@@ -6171,12 -6017,6 +6171,12 @@@ L:     linux-rdma@vger.kernel.or
  S:     Supported
  F:     drivers/infiniband/hw/i40iw/
  
 +INTEL MERRIFIELD GPIO DRIVER
 +M:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-merrifield.c
 +
  INTEL-MID GPIO DRIVER
  M:    David Cohen <david.a.cohen@linux.intel.com>
  L:    linux-gpio@vger.kernel.org
@@@ -6301,7 -6141,6 +6301,7 @@@ M:      Joerg Roedel <joro@8bytes.org
  L:    iommu@lists.linux-foundation.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/iommu/
  F:    drivers/iommu/
  
  IP MASQUERADING
@@@ -6392,6 -6231,7 +6392,6 @@@ F:      Documentation/devicetree/bindings/in
  F:    drivers/irqchip/
  
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
 -M:    Jiang Liu <jiang.liu@linux.intel.com>
  M:    Marc Zyngier <marc.zyngier@arm.com>
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@@ -6868,7 -6708,6 +6868,7 @@@ M:      Jacek Anaszewski <j.anaszewski@samsu
  L:    linux-leds@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/leds/
  F:    drivers/leds/
  F:    include/linux/leds.h
  
@@@ -6924,7 -6763,6 +6924,7 @@@ S:      Maintaine
  F:    drivers/ata/
  F:    include/linux/ata.h
  F:    include/linux/libata.h
 +F:    Documentation/devicetree/bindings/ata/
  
  LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
  M:    Viresh Kumar <vireshk@kernel.org>
@@@ -7032,7 -6870,6 +7032,7 @@@ F:      drivers/crypto/nx
  F:    drivers/crypto/vmx/
  F:    drivers/net/ethernet/ibm/ibmveth.*
  F:    drivers/net/ethernet/ibm/ibmvnic.*
 +F:    drivers/pci/hotplug/pnv_php.c
  F:    drivers/pci/hotplug/rpa*
  F:    drivers/scsi/ibmvscsi/
  N:    opal
@@@ -7130,7 -6967,7 +7130,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  LINUX KERNEL DUMP TEST MODULE (LKDTM)
  M:    Kees Cook <keescook@chromium.org>
  S:    Maintained
 -F:    drivers/misc/lkdtm.c
 +F:    drivers/misc/lkdtm*
  
  LLC (802.2)
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
@@@ -7183,23 -7020,15 +7183,23 @@@ Q:   http://patchwork.linuxtv.org/project
  S:    Maintained
  F:    drivers/media/usb/dvb-usb-v2/lmedm04*
  
 -LOCKDEP AND LOCKSTAT
 +LOCKING PRIMITIVES
  M:    Peter Zijlstra <peterz@infradead.org>
  M:    Ingo Molnar <mingo@redhat.com>
  L:    linux-kernel@vger.kernel.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
  S:    Maintained
 -F:    Documentation/locking/lockdep*.txt
 -F:    Documentation/locking/lockstat.txt
 +F:    Documentation/locking/
  F:    include/linux/lockdep.h
 +F:    include/linux/spinlock*.h
 +F:    arch/*/include/asm/spinlock*.h
 +F:    include/linux/rwlock*.h
 +F:    include/linux/mutex*.h
 +F:    arch/*/include/asm/mutex*.h
 +F:    include/linux/rwsem*.h
 +F:    arch/*/include/asm/rwsem.h
 +F:    include/linux/seqlock.h
 +F:    lib/locking*.[ch]
  F:    kernel/locking/
  
  LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
@@@ -7340,12 -7169,6 +7340,12 @@@ W:    http://www.kernel.org/doc/man-page
  L:    linux-man@vger.kernel.org
  S:    Maintained
  
 +MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
 +M:    Andrew Lunn <andrew@lunn.ch>
 +M:    Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 +S:    Maintained
 +F:    drivers/net/dsa/mv88e6xxx/
 +
  MARVELL ARMADA DRM SUPPORT
  M:    Russell King <rmk+kernel@armlinux.org.uk>
  S:    Maintained
@@@ -7353,6 -7176,11 +7353,6 @@@ F:     drivers/gpu/drm/armada
  F:    include/uapi/drm/armada_drm.h
  F:    Documentation/devicetree/bindings/display/armada/
  
 -MARVELL 88E6352 DSA support
 -M:    Guenter Roeck <linux@roeck-us.net>
 -S:    Maintained
 -F:    drivers/net/dsa/mv88e6352.c
 -
  MARVELL CRYPTO DRIVER
  M:    Boris Brezillon <boris.brezillon@free-electrons.com>
  M:    Arnaud Ebalard <arno@natisbad.org>
@@@ -7439,13 -7267,6 +7439,13 @@@ F:    Documentation/devicetree/bindings/i2
  F:    drivers/hwmon/max6697.c
  F:    include/linux/platform_data/max6697.h
  
 +MAX9860 MONO AUDIO VOICE CODEC DRIVER
 +M:    Peter Rosin <peda@axentia.se>
 +L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/sound/max9860.txt
 +F:    sound/soc/codecs/max9860.*
 +
  MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
  M:    Krzysztof Kozlowski <k.kozlowski@samsung.com>
  L:    linux-pm@vger.kernel.org
@@@ -7495,16 -7316,6 +7495,16 @@@ L:    linux-iio@vger.kernel.or
  S:    Maintained
  F:    drivers/iio/potentiometer/mcp4531.c
  
 +MEDIA DRIVERS FOR RENESAS - FCP
 +M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 +L:    linux-media@vger.kernel.org
 +L:    linux-renesas-soc@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Supported
 +F:    Documentation/devicetree/bindings/media/renesas,fcp.txt
 +F:    drivers/media/platform/rcar-fcp.c
 +F:    include/media/rcar-fcp.h
 +
  MEDIA DRIVERS FOR RENESAS - VSP1
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  L:    linux-media@vger.kernel.org
@@@ -7514,18 -7325,8 +7514,18 @@@ S:    Supporte
  F:    Documentation/devicetree/bindings/media/renesas,vsp1.txt
  F:    drivers/media/platform/vsp1/
  
 +MEDIA DRIVERS FOR HELENE
 +M:    Abylay Ospan <aospan@netup.ru>
 +L:    linux-media@vger.kernel.org
 +W:    https://linuxtv.org
 +W:    http://netup.tv/
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Supported
 +F:    drivers/media/dvb-frontends/helene*
 +
  MEDIA DRIVERS FOR ASCOT2E
  M:    Sergey Kozlov <serjk@netup.ru>
 +M:    Abylay Ospan <aospan@netup.ru>
  L:    linux-media@vger.kernel.org
  W:    https://linuxtv.org
  W:    http://netup.tv/
@@@ -7535,7 -7336,6 +7535,7 @@@ F:      drivers/media/dvb-frontends/ascot2e
  
  MEDIA DRIVERS FOR CXD2841ER
  M:    Sergey Kozlov <serjk@netup.ru>
 +M:    Abylay Ospan <aospan@netup.ru>
  L:    linux-media@vger.kernel.org
  W:    https://linuxtv.org
  W:    http://netup.tv/
@@@ -7545,7 -7345,6 +7545,7 @@@ F:      drivers/media/dvb-frontends/cxd2841e
  
  MEDIA DRIVERS FOR HORUS3A
  M:    Sergey Kozlov <serjk@netup.ru>
 +M:    Abylay Ospan <aospan@netup.ru>
  L:    linux-media@vger.kernel.org
  W:    https://linuxtv.org
  W:    http://netup.tv/
@@@ -7555,7 -7354,6 +7555,7 @@@ F:      drivers/media/dvb-frontends/horus3a
  
  MEDIA DRIVERS FOR LNBH25
  M:    Sergey Kozlov <serjk@netup.ru>
 +M:    Abylay Ospan <aospan@netup.ru>
  L:    linux-media@vger.kernel.org
  W:    https://linuxtv.org
  W:    http://netup.tv/
@@@ -7565,7 -7363,6 +7565,7 @@@ F:      drivers/media/dvb-frontends/lnbh25
  
  MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
  M:    Sergey Kozlov <serjk@netup.ru>
 +M:    Abylay Ospan <aospan@netup.ru>
  L:    linux-media@vger.kernel.org
  W:    https://linuxtv.org
  W:    http://netup.tv/
@@@ -7623,7 -7420,7 +7623,7 @@@ F:      drivers/scsi/megaraid.
  F:    drivers/scsi/megaraid/
  
  MELLANOX ETHERNET DRIVER (mlx4_en)
 -M:    Eugenia Emantayev <eugenia@mellanox.com>
 +M:    Tariq Toukan <tariqt@mellanox.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  W:    http://www.mellanox.com
@@@ -7647,6 -7444,15 +7647,15 @@@ W:    http://www.mellanox.co
  Q:    http://patchwork.ozlabs.org/project/netdev/list/
  F:    drivers/net/ethernet/mellanox/mlxsw/
  
+ SOFT-ROCE DRIVER (rxe)
+ M:    Moni Shoua <monis@mellanox.com>
+ L:    linux-rdma@vger.kernel.org
+ S:    Supported
+ W:    https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+ Q:    http://patchwork.kernel.org/project/linux-rdma/list/
+ F:    drivers/infiniband/hw/rxe/
+ F:    include/uapi/rdma/rdma_user_rxe.h
+ 
  MEMBARRIER SUPPORT
  M:    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  M:    "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
@@@ -7675,7 -7481,6 +7684,7 @@@ Q:      http://patchwork.ozlabs.org/project/
  T:    git git://git.infradead.org/linux-mtd.git
  T:    git git://git.infradead.org/l2-mtd.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/mtd/
  F:    drivers/mtd/
  F:    include/linux/mtd/
  F:    include/uapi/mtd/
@@@ -7815,8 -7620,10 +7824,8 @@@ L:     linux-media@vger.kernel.or
  W:    https://linuxtv.org
  W:    http://palosaari.fi/linux/
  Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 -T:    git git://linuxtv.org/anttip/media_tree.git
  S:    Maintained
 -F:    drivers/staging/media/mn88472/
 -F:    drivers/media/dvb-frontends/mn88472.h
 +F:    drivers/media/dvb-frontends/mn88472*
  
  MN88473 MEDIA DRIVER
  M:    Antti Palosaari <crope@iki.fi>
@@@ -7942,7 -7749,6 +7951,7 @@@ M:      Ulf Hansson <ulf.hansson@linaro.org
  L:    linux-mmc@vger.kernel.org
  T:    git git://git.linaro.org/people/ulf.hansson/mmc.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/mmc/
  F:    drivers/mmc/
  F:    include/linux/mmc/
  F:    include/uapi/linux/mmc/
@@@ -8282,9 -8088,8 +8291,9 @@@ T:      git git://github.com/konis/nilfs2.gi
  S:    Supported
  F:    Documentation/filesystems/nilfs2.txt
  F:    fs/nilfs2/
 -F:    include/linux/nilfs2_fs.h
  F:    include/trace/events/nilfs2.h
 +F:    include/uapi/linux/nilfs2_api.h
 +F:    include/uapi/linux/nilfs2_ondisk.h
  
  NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER
  M:    YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
@@@ -8373,13 -8178,6 +8382,13 @@@ S:    Supporte
  F:    drivers/nvme/host/
  F:    include/linux/nvme.h
  
 +NVM EXPRESS TARGET DRIVER
 +M:    Christoph Hellwig <hch@lst.de>
 +M:    Sagi Grimberg <sagi@grimberg.me>
 +L:    linux-nvme@lists.infradead.org
 +S:    Supported
 +F:    drivers/nvme/target/
 +
  NVMEM FRAMEWORK
  M:    Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
  M:    Maxime Ripard <maxime.ripard@free-electrons.com>
@@@ -8902,7 -8700,6 +8911,7 @@@ L:      linux-pci@vger.kernel.or
  Q:    http://patchwork.ozlabs.org/project/linux-pci/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
  S:    Supported
 +F:    Documentation/devicetree/bindings/pci/
  F:    Documentation/PCI/
  F:    drivers/pci/
  F:    include/linux/pci*
@@@ -8966,13 -8763,6 +8975,13 @@@ L:    linux-arm-kernel@lists.infradead.or
  S:    Maintained
  F:    drivers/pci/host/*mvebu*
  
 +PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
 +M:    Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 +L:    linux-pci@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/pci/host/pci-aardvark.c
 +
  PCI DRIVER FOR NVIDIA TEGRA
  M:    Thierry Reding <thierry.reding@gmail.com>
  L:    linux-tegra@vger.kernel.org
@@@ -9055,15 -8845,6 +9064,15 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
  F:    drivers/pci/host/pci-xgene-msi.c
  
 +PCIE DRIVER FOR AXIS ARTPEC
 +M:    Niklas Cassel <niklas.cassel@axis.com>
 +M:    Jesper Nilsson <jesper.nilsson@axis.com>
 +L:    linux-arm-kernel@axis.com
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/axis,artpec*
 +F:    drivers/pci/host/*artpec*
 +
  PCIE DRIVER FOR HISILICON
  M:    Zhou Wang <wangzhou1@hisilicon.com>
  M:    Gabriele Paoloni <gabriele.paoloni@huawei.com>
@@@ -9187,7 -8968,6 +9196,7 @@@ L:      linux-gpio@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
  S:    Maintained
  F:    Documentation/devicetree/bindings/pinctrl/
 +F:    Documentation/pinctrl.txt
  F:    drivers/pinctrl/
  F:    include/linux/pinctrl/
  
@@@ -9310,8 -9090,6 +9319,8 @@@ M:      David Woodhouse <dwmw2@infradead.org
  L:    linux-pm@vger.kernel.org
  T:    git git://git.infradead.org/battery-2.6.git
  S:    Maintained
 +F:    Documentation/devicetree/bindings/power/
 +F:    Documentation/devicetree/bindings/power_supply/
  F:    include/linux/power_supply.h
  F:    drivers/power/
  X:    drivers/power/avs/
@@@ -9325,12 -9103,6 +9334,12 @@@ F:    drivers/firmware/psci.
  F:    include/linux/psci.h
  F:    include/uapi/linux/psci.h
  
 +POWERNV OPERATOR PANEL LCD DISPLAY DRIVER
 +M:    Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 +L:    linuxppc-dev@lists.ozlabs.org
 +S:    Maintained
 +F:    drivers/char/powernv-op-panel.c
 +
  PNP SUPPORT
  M:    "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
  S:    Maintained
@@@ -9455,13 -9227,6 +9464,13 @@@ F:    include/linux/tracehook.
  F:    include/uapi/linux/ptrace.h
  F:    kernel/ptrace.c
  
 +PULSE8-CEC DRIVER
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/staging/media/pulse8-cec
 +
  PVRUSB2 VIDEO4LINUX DRIVER
  M:    Mike Isely <isely@pobox.com>
  L:    pvrusb2@isely.net       (subscribers-only)
@@@ -9473,15 -9238,14 +9482,15 @@@ F:   Documentation/video4linux/README.pvr
  F:    drivers/media/usb/pvrusb2/
  
  PWC WEBCAM DRIVER
 -M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
 -S:    Maintained
 +S:    Odd Fixes
  F:    drivers/media/usb/pwc/*
  
  PWM FAN DRIVER
 -M:    Kamil Debski <k.debski@samsung.com>
 +M:    Kamil Debski <kamil@wypas.org>
 +M:    Lukasz Majewski <l.majewski@samsung.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@@ -9550,8 -9314,7 +9559,8 @@@ L:      rtc-linux@googlegroups.co
  S:    Maintained
  
  QAT DRIVER
 -M:    Tadeusz Struk <tadeusz.struk@intel.com>
 +M:    Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 +M:    Salvatore Benedetto <salvatore.benedetto@intel.com>
  L:    qat-linux@intel.com
  S:    Supported
  F:    drivers/crypto/qat/
@@@ -9693,14 -9456,14 +9702,14 @@@ F:   drivers/video/fbdev/aty/radeon
  F:    include/uapi/linux/radeonfb.h
  
  RADIOSHARK RADIO DRIVER
 -M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  S:    Maintained
  F:    drivers/media/radio/radio-shark.c
  
  RADIOSHARK2 RADIO DRIVER
 -M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  S:    Maintained
@@@ -9774,7 -9537,7 +9783,7 @@@ M:      Florian Fainelli <florian@openwrt.or
  S:    Maintained
  
  RDC R6040 FAST ETHERNET DRIVER
 -M:    Florian Fainelli <florian@openwrt.org>
 +M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ethernet/rdc/r6040.c
@@@ -9841,7 -9604,6 +9850,7 @@@ M:      Mark Brown <broonie@kernel.org
  L:    linux-kernel@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
  S:    Supported
 +F:    Documentation/devicetree/bindings/regmap/
  F:    drivers/base/regmap/
  F:    include/linux/regmap.h
  
@@@ -9851,9 -9613,8 +9860,9 @@@ M:      Bjorn Andersson <bjorn.andersson@lin
  L:    linux-remoteproc@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
  S:    Maintained
 -F:    drivers/remoteproc/
 +F:    Documentation/devicetree/bindings/remoteproc/
  F:    Documentation/remoteproc.txt
 +F:    drivers/remoteproc/
  F:    include/linux/remoteproc.h
  
  REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
@@@ -9927,6 -9688,7 +9936,6 @@@ F:      Documentation/ABI/*/sysfs-driver-hid
  
  ROCKER DRIVER
  M:    Jiri Pirko <jiri@resnulli.us>
 -M:    Scott Feldman <sfeldma@gmail.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/rocker/
@@@ -10138,9 -9900,7 +10147,9 @@@ S:    Maintaine
  F:    drivers/platform/x86/samsung-laptop.c
  
  SAMSUNG AUDIO (ASoC) DRIVERS
 +M:    Krzysztof Kozlowski <k.kozlowski@samsung.com>
  M:    Sangbeom Kim <sbkim73@samsung.com>
 +M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Supported
  F:    sound/soc/samsung/
@@@ -10229,8 -9989,7 +10238,8 @@@ T:    git https://github.com/lmajewski/lin
  F:    drivers/thermal/samsung/
  
  SAMSUNG USB2 PHY DRIVER
 -M:    Kamil Debski <k.debski@samsung.com>
 +M:    Kamil Debski <kamil@wypas.org>
 +M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/phy/samsung-phy.txt
@@@ -10246,7 -10005,6 +10255,7 @@@ SERIAL DRIVER
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  L:    linux-serial@vger.kernel.org
  S:    Maintained
 +F:    Documentation/devicetree/bindings/serial/
  F:    drivers/tty/serial/
  
  SYNOPSYS DESIGNWARE DMAC DRIVER
@@@ -10383,7 -10141,6 +10392,7 @@@ M:   "Martin K. Petersen" <martin.peterse
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
  L:    linux-scsi@vger.kernel.org
  S:    Maintained
 +F:    Documentation/devicetree/bindings/scsi/
  F:    drivers/scsi/
  F:    include/scsi/
  
@@@ -10455,13 -10212,6 +10464,13 @@@ F: tools/testing/selftests/seccomp/
  K:    \bsecure_computing
  K:    \bTIF_SECCOMP\b
  
 +SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER
 +M:    Al Cooper <alcooperx@gmail.com>
 +L:    linux-mmc@vger.kernel.org
 +L:    bcm-kernel-feedback-list@broadcom.com
 +S:    Maintained
 +F:    drivers/mmc/host/sdhci-brcmstb*
 +
  SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
  M:    Ben Dooks <ben-linux@fluff.org>
  M:    Jaehoon Chung <jh80.chung@samsung.com>
@@@ -10535,9 -10285,10 +10544,9 @@@ W:  http://www.avagotech.co
  S:    Supported
  F:    drivers/scsi/be2iscsi/
  
 -Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
 +Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
  M:    Sathya Perla <sathya.perla@broadcom.com>
  M:    Ajit Khaparde <ajit.khaparde@broadcom.com>
 -M:    Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
  M:    Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
  M:    Somnath Kotur <somnath.kotur@broadcom.com>
  L:    netdev@vger.kernel.org
@@@ -10803,7 -10554,7 +10812,7 @@@ SMACK SECURITY MODUL
  M:    Casey Schaufler <casey@schaufler-ca.com>
  L:    linux-security-module@vger.kernel.org
  W:    http://schaufler-ca.com
 -T:    git git://git.gitorious.org/smack-next/kernel.git
 +T:    git git://github.com/cschaufler/smack-next
  S:    Maintained
  F:    Documentation/security/Smack.txt
  F:    security/smack/
@@@ -10977,7 -10728,6 +10986,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  W:    http://alsa-project.org/main/index.php/ASoC
  S:    Supported
 +F:    Documentation/devicetree/bindings/sound/
  F:    Documentation/sound/alsa/soc/
  F:    sound/soc/
  F:    include/sound/soc*
@@@ -11053,7 -10803,6 +11062,7 @@@ L:   linux-spi@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git
  Q:    http://patchwork.kernel.org/project/spi-devel-general/list/
  S:    Maintained
 +F:    Documentation/devicetree/bindings/spi/
  F:    Documentation/spi/
  F:    drivers/spi/
  F:    include/linux/spi/
@@@ -11116,7 -10865,6 +11125,7 @@@ STAGING - INDUSTRIAL I
  M:    Jonathan Cameron <jic23@kernel.org>
  L:    linux-iio@vger.kernel.org
  S:    Odd Fixes
 +F:    Documentation/devicetree/bindings/staging/iio/
  F:    drivers/staging/iio/
  
  STAGING - LIRC (LINUX INFRARED REMOTE CONTROL) DRIVERS
@@@ -11571,6 -11319,11 +11580,6 @@@ F:  Documentation/thermal/cpu-cooling-ap
  F:    drivers/thermal/cpu_cooling.c
  F:    include/linux/cpu_cooling.h
  
 -THINGM BLINK(1) USB RGB LED DRIVER
 -M:    Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 -S:    Maintained
 -F:    drivers/hid/hid-thingm.c
 -
  THINKPAD ACPI EXTRAS DRIVER
  M:    Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
  L:    ibm-acpi-devel@lists.sourceforge.net
@@@ -12021,12 -11774,6 +12030,12 @@@ S: Supporte
  F:    Documentation/scsi/ufs.txt
  F:    drivers/scsi/ufs/
  
 +UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
 +M:    Joao Pinto <Joao.Pinto@synopsys.com>
 +L:    linux-scsi@vger.kernel.org
 +S:    Supported
 +F:    drivers/scsi/ufs/*dwc*
 +
  UNSORTED BLOCK IMAGES (UBI)
  M:    Artem Bityutskiy <dedekind1@gmail.com>
  M:    Richard Weinberger <richard@nod.at>
@@@ -12052,7 -11799,8 +12061,7 @@@ S:   Maintaine
  F:    drivers/net/wireless/ath/ar5523/
  
  USB ATTACHED SCSI
 -M:    Hans de Goede <hdegoede@redhat.com>
 -M:    Gerd Hoffmann <kraxel@redhat.com>
 +M:    Oliver Neukum <oneukum@suse.com>
  L:    linux-usb@vger.kernel.org
  L:    linux-scsi@vger.kernel.org
  S:    Maintained
@@@ -12551,7 -12299,7 +12560,7 @@@ S:   Maintaine
  F:    drivers/net/vmxnet3/
  
  VMware PVSCSI driver
 -M:    Arvind Kumar <arvindkumar@vmware.com>
 +M:    Jim Gill <jgill@vmware.com>
  M:    VMware PV-Drivers <pv-drivers@vmware.com>
  L:    linux-scsi@vger.kernel.org
  S:    Maintained
diff --combined drivers/infiniband/core/cma.c
index ad1b1adcf6f01894f1fc6f3f35eed308d55e269c,0451307bea185578c6dc7221cd17eeddd83a9528..e6dfa1bd3defae37e1ee549e10886c2edeece697
@@@ -68,6 -68,7 +68,7 @@@ MODULE_DESCRIPTION("Generic RDMA CM Age
  MODULE_LICENSE("Dual BSD/GPL");
  
  #define CMA_CM_RESPONSE_TIMEOUT 20
+ #define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
  #define CMA_MAX_CM_RETRIES 15
  #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
  #define CMA_IBOE_PACKET_LIFETIME 18
@@@ -162,6 -163,14 +163,14 @@@ struct rdma_bind_list 
        unsigned short          port;
  };
  
+ struct class_port_info_context {
+       struct ib_class_port_info       *class_port_info;
+       struct ib_device                *device;
+       struct completion               done;
+       struct ib_sa_query              *sa_query;
+       u8                              port_num;
+ };
+ 
  static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
                        struct rdma_bind_list *bind_list, int snum)
  {
@@@ -306,6 -315,7 +315,7 @@@ struct cma_multicast 
        struct sockaddr_storage addr;
        struct kref             mcref;
        bool                    igmp_joined;
+       u8                      join_state;
  };
  
  struct cma_work {
@@@ -708,6 -718,17 +718,6 @@@ static void cma_deref_id(struct rdma_id
                complete(&id_priv->comp);
  }
  
 -static int cma_disable_callback(struct rdma_id_private *id_priv,
 -                              enum rdma_cm_state state)
 -{
 -      mutex_lock(&id_priv->handler_mutex);
 -      if (id_priv->state != state) {
 -              mutex_unlock(&id_priv->handler_mutex);
 -              return -EINVAL;
 -      }
 -      return 0;
 -}
 -
  struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
@@@ -1660,12 -1681,11 +1670,12 @@@ static int cma_ib_handler(struct ib_cm_
        struct rdma_cm_event event;
        int ret = 0;
  
 +      mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
 -              cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 +           id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
 -              cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 -              return 0;
 +           id_priv->state != RDMA_CM_DISCONNECT))
 +              goto out;
  
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@@ -1860,7 -1880,7 +1870,7 @@@ static int cma_check_req_qp_type(struc
  
  static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
  {
 -      struct rdma_id_private *listen_id, *conn_id;
 +      struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;
                goto net_dev_put;
        }
  
 -      if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
 +      mutex_lock(&listen_id->handler_mutex);
 +      if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
 -              goto net_dev_put;
 +              goto err1;
        }
  
        memset(&event, 0, sizeof event);
@@@ -1967,9 -1986,8 +1977,9 @@@ static int cma_iw_handler(struct iw_cm_
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  
 -      if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 -              return 0;
 +      mutex_lock(&id_priv->handler_mutex);
 +      if (id_priv->state != RDMA_CM_CONNECT)
 +              goto out;
  
        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
                return ret;
        }
  
 +out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
  }
@@@ -2032,15 -2049,13 +2042,15 @@@ static int iw_conn_req_handler(struct i
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
 -      int ret;
 +      int ret = -ECONNABORTED;
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
  
        listen_id = cm_id->context;
 -      if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 -              return -ECONNABORTED;
 +
 +      mutex_lock(&listen_id->handler_mutex);
 +      if (listen_id->state != RDMA_CM_LISTEN)
 +              goto out;
  
        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@@ -3211,9 -3226,8 +3221,9 @@@ static int cma_sidr_rep_handler(struct 
        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
        int ret = 0;
  
 -      if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 -              return 0;
 +      mutex_lock(&id_priv->handler_mutex);
 +      if (id_priv->state != RDMA_CM_CONNECT)
 +              goto out;
  
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@@ -3669,13 -3683,12 +3679,13 @@@ static int cma_ib_mc_handler(int status
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc = multicast->context;
        struct rdma_cm_event event;
 -      int ret;
 +      int ret = 0;
  
        id_priv = mc->id_priv;
 -      if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
 -          cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 -              return 0;
 +      mutex_lock(&id_priv->handler_mutex);
 +      if (id_priv->state != RDMA_CM_ADDR_BOUND &&
 +          id_priv->state != RDMA_CM_ADDR_RESOLVED)
 +              goto out;
  
        if (!status)
                status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
                return 0;
        }
  
 +out:
        mutex_unlock(&id_priv->handler_mutex);
        return 0;
  }
@@@ -3752,10 -3764,63 +3762,63 @@@ static void cma_set_mgid(struct rdma_id
        }
  }
  
+ static void cma_query_sa_classport_info_cb(int status,
+                                          struct ib_class_port_info *rec,
+                                          void *context)
+ {
+       struct class_port_info_context *cb_ctx = context;
+       WARN_ON(!context);
+       if (status || !rec) {
+               pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
+                        cb_ctx->device->name, cb_ctx->port_num, status);
+               goto out;
+       }
+       memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
+ out:
+       complete(&cb_ctx->done);
+ }
+ 
+ static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
+                                      struct ib_class_port_info *class_port_info)
+ {
+       struct class_port_info_context *cb_ctx;
+       int ret;
+       cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
+       if (!cb_ctx)
+               return -ENOMEM;
+       cb_ctx->device = device;
+       cb_ctx->class_port_info = class_port_info;
+       cb_ctx->port_num = port_num;
+       init_completion(&cb_ctx->done);
+       ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
+                                            CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
+                                            GFP_KERNEL, cma_query_sa_classport_info_cb,
+                                            cb_ctx, &cb_ctx->sa_query);
+       if (ret < 0) {
+               pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
+                      device->name, port_num, ret);
+               goto out;
+       }
+       wait_for_completion(&cb_ctx->done);
+ out:
+       kfree(cb_ctx);
+       return ret;
+ }
 
  static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
                                 struct cma_multicast *mc)
  {
        struct ib_sa_mcmember_rec rec;
+       struct ib_class_port_info class_port_info;
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        ib_sa_comp_mask comp_mask;
        int ret;
        rec.qkey = cpu_to_be32(id_priv->qkey);
        rdma_addr_get_sgid(dev_addr, &rec.port_gid);
        rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
-       rec.join_state = 1;
+       rec.join_state = mc->join_state;
+       if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
+               ret = cma_query_sa_classport_info(id_priv->id.device,
+                                                 id_priv->id.port_num,
+                                                 &class_port_info);
+               if (ret)
+                       return ret;
+               if (!(ib_get_cpi_capmask2(&class_port_info) &
+                     IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
+                       pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
+                               "RDMA CM: SM doesn't support Send Only Full Member option\n",
+                               id_priv->id.device->name, id_priv->id.port_num);
+                       return -EOPNOTSUPP;
+               }
+       }
  
        comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
                    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@@ -3843,6 -3925,9 +3923,9 @@@ static int cma_iboe_join_multicast(stru
        struct sockaddr *addr = (struct sockaddr *)&mc->addr;
        struct net_device *ndev = NULL;
        enum ib_gid_type gid_type;
+       bool send_only;
+       send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
  
        if (cma_zero_addr((struct sockaddr *)&mc->addr))
                return -EINVAL;
        if (addr->sa_family == AF_INET) {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
                        mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
-                       err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
-                                           true);
-                       if (!err)
-                               mc->igmp_joined = true;
+                       if (!send_only) {
+                               err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+                                                   true);
+                               if (!err)
+                                       mc->igmp_joined = true;
+                       }
                }
        } else {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@@ -3911,7 -3998,7 +3996,7 @@@ out1
  }
  
  int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
-                       void *context)
+                       u8 join_state, void *context)
  {
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc;
        mc->context = context;
        mc->id_priv = id_priv;
        mc->igmp_joined = false;
+       mc->join_state = join_state;
        spin_lock(&id_priv->lock);
        list_add(&mc->list, &id_priv->mc_list);
        spin_unlock(&id_priv->lock);
index 60df4f8e81bed10ac8e9b6149ec4c3ef5c6abf9f,0d1ab73f8186daa18f4b0f0113e84fec4721bcec..15defefecb4f7ac26c137ea9a0398d666fa9aae9
@@@ -38,6 -38,7 +38,7 @@@
  #include <linux/stat.h>
  #include <linux/string.h>
  #include <linux/netdevice.h>
+ #include <linux/ethtool.h>
  
  #include <rdma/ib_mad.h>
  #include <rdma/ib_pma.h>
@@@ -530,7 -531,6 +531,7 @@@ static PORT_PMA_ATTR(port_xmit_dat
  static PORT_PMA_ATTR(port_rcv_data                , 13, 32, 224);
  static PORT_PMA_ATTR(port_xmit_packets                    , 14, 32, 256);
  static PORT_PMA_ATTR(port_rcv_packets             , 15, 32, 288);
 +static PORT_PMA_ATTR(port_xmit_wait               ,  0, 32, 320);
  
  /*
   * Counters added by extended set
@@@ -561,7 -561,6 +562,7 @@@ static struct attribute *pma_attrs[] = 
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
 +      &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
  };
  
@@@ -581,7 -580,6 +582,7 @@@ static struct attribute *pma_attrs_ext[
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
 +      &port_pma_attr_port_xmit_wait.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
@@@ -607,7 -605,6 +608,7 @@@ static struct attribute *pma_attrs_noie
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
 +      &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
  };
  
@@@ -1200,16 -1197,28 +1201,28 @@@ static ssize_t set_node_desc(struct dev
        return count;
  }
  
+ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+                          char *buf)
+ {
+       struct ib_device *dev = container_of(device, struct ib_device, dev);
+       ib_get_device_fw_str(dev, buf, PAGE_SIZE);
+       strlcat(buf, "\n", PAGE_SIZE);
+       return strlen(buf);
+ }
+ 
  static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
  static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
  static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
  static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
+ static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
  
  static struct device_attribute *ib_class_attributes[] = {
        &dev_attr_node_type,
        &dev_attr_sys_image_guid,
        &dev_attr_node_guid,
-       &dev_attr_node_desc
+       &dev_attr_node_desc,
+       &dev_attr_fw_ver,
  };
  
  static void free_port_list_attributes(struct ib_device *device)
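
The generic fw_ver attribute added above means every RDMA device now exposes
its firmware version at one well-known sysfs path, backed by the per-driver
get_dev_fw_str() callbacks wired up later in this merge. A hedged userspace
sketch; the device name "mlx5_0" is only an example:

#include <stdio.h>

int main(void)
{
        char ver[64];
        FILE *f = fopen("/sys/class/infiniband/mlx5_0/fw_ver", "r");

        if (!f)
                return 1;
        if (fgets(ver, sizeof(ver), f))
                printf("firmware: %s", ver);
        fclose(f);
        return 0;
}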
index 6298f54b413756a5bf0f19891a080739ec6300ef,6916d5c5920b7e4b9d0575a150f9b36163319c60..2e813edcddabd9919849f881c6a4da61bc6382e6
@@@ -511,16 -511,12 +511,16 @@@ int ib_init_ah_from_wc(struct ib_devic
                ah_attr->grh.dgid = sgid;
  
                if (!rdma_cap_eth_ah(device, port_num)) {
 -                      ret = ib_find_cached_gid_by_port(device, &dgid,
 -                                                       IB_GID_TYPE_IB,
 -                                                       port_num, NULL,
 -                                                       &gid_index);
 -                      if (ret)
 -                              return ret;
 +                      if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
 +                              ret = ib_find_cached_gid_by_port(device, &dgid,
 +                                                               IB_GID_TYPE_IB,
 +                                                               port_num, NULL,
 +                                                               &gid_index);
 +                              if (ret)
 +                                      return ret;
 +                      } else {
 +                              gid_index = 0;
 +                      }
                }
  
                ah_attr->grh.sgid_index = (u8) gid_index;
@@@ -758,6 -754,12 +758,12 @@@ struct ib_qp *ib_create_qp(struct ib_p
        struct ib_qp *qp;
        int ret;
  
+       if (qp_init_attr->rwq_ind_tbl &&
+           (qp_init_attr->recv_cq ||
+           qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
+           qp_init_attr->cap.max_recv_sge))
+               return ERR_PTR(-EINVAL);
        /*
         * If the callers is using the RDMA API calculate the resources
         * needed for the RDMA READ/WRITE operations.
        qp->real_qp    = qp;
        qp->uobject    = NULL;
        qp->qp_type    = qp_init_attr->qp_type;
+       qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
  
        atomic_set(&qp->usecnt, 0);
        qp->mrs_used = 0;
                qp->srq = NULL;
        } else {
                qp->recv_cq = qp_init_attr->recv_cq;
-               atomic_inc(&qp_init_attr->recv_cq->usecnt);
+               if (qp_init_attr->recv_cq)
+                       atomic_inc(&qp_init_attr->recv_cq->usecnt);
                qp->srq = qp_init_attr->srq;
                if (qp->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        qp->xrcd    = NULL;
  
        atomic_inc(&pd->usecnt);
-       atomic_inc(&qp_init_attr->send_cq->usecnt);
+       if (qp_init_attr->send_cq)
+               atomic_inc(&qp_init_attr->send_cq->usecnt);
+       if (qp_init_attr->rwq_ind_tbl)
+               atomic_inc(&qp->rwq_ind_tbl->usecnt);
  
        if (qp_init_attr->cap.max_rdma_ctxs) {
                ret = rdma_rw_init_mrs(qp, qp_init_attr);
@@@ -1283,6 -1290,7 +1294,7 @@@ int ib_destroy_qp(struct ib_qp *qp
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
+       struct ib_rwq_ind_table *ind_tbl;
        int ret;
  
        WARN_ON_ONCE(qp->mrs_used > 0);
        scq  = qp->send_cq;
        rcq  = qp->recv_cq;
        srq  = qp->srq;
+       ind_tbl = qp->rwq_ind_tbl;
  
        if (!qp->uobject)
                rdma_rw_cleanup_mrs(qp);
                        atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
+               if (ind_tbl)
+                       atomic_dec(&ind_tbl->usecnt);
        }
  
        return ret;
@@@ -1558,6 -1569,150 +1573,150 @@@ int ib_dealloc_xrcd(struct ib_xrcd *xrc
  }
  EXPORT_SYMBOL(ib_dealloc_xrcd);
  
+ /**
+  * ib_create_wq - Creates a WQ associated with the specified protection
+  * domain.
+  * @pd: The protection domain associated with the WQ.
+  * @wq_attr: A list of initial attributes required to create the
+  * WQ. If WQ creation succeeds, then the attributes are updated to
+  * the actual capabilities of the created WQ.
+  *
+  * wq_attr->max_wr and wq_attr->max_sge determine
+  * the requested size of the WQ, and are set to the actual values
+  * allocated on return.
+  * If ib_create_wq() succeeds, then max_wr and max_sge will always be
+  * at least as large as the requested values.
+  */
+ struct ib_wq *ib_create_wq(struct ib_pd *pd,
+                          struct ib_wq_init_attr *wq_attr)
+ {
+       struct ib_wq *wq;
+       if (!pd->device->create_wq)
+               return ERR_PTR(-ENOSYS);
+       wq = pd->device->create_wq(pd, wq_attr, NULL);
+       if (!IS_ERR(wq)) {
+               wq->event_handler = wq_attr->event_handler;
+               wq->wq_context = wq_attr->wq_context;
+               wq->wq_type = wq_attr->wq_type;
+               wq->cq = wq_attr->cq;
+               wq->device = pd->device;
+               wq->pd = pd;
+               wq->uobject = NULL;
+               atomic_inc(&pd->usecnt);
+               atomic_inc(&wq_attr->cq->usecnt);
+               atomic_set(&wq->usecnt, 0);
+       }
+       return wq;
+ }
+ EXPORT_SYMBOL(ib_create_wq);
+ 
+ /**
+  * ib_destroy_wq - Destroys the specified WQ.
+  * @wq: The WQ to destroy.
+  */
+ int ib_destroy_wq(struct ib_wq *wq)
+ {
+       int err;
+       struct ib_cq *cq = wq->cq;
+       struct ib_pd *pd = wq->pd;
+       if (atomic_read(&wq->usecnt))
+               return -EBUSY;
+       err = wq->device->destroy_wq(wq);
+       if (!err) {
+               atomic_dec(&pd->usecnt);
+               atomic_dec(&cq->usecnt);
+       }
+       return err;
+ }
+ EXPORT_SYMBOL(ib_destroy_wq);
+ 
+ /**
+  * ib_modify_wq - Modifies the specified WQ.
+  * @wq: The WQ to modify.
+  * @wq_attr: On input, specifies the WQ attributes to modify.
+  * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
+  *   are being modified.
+  * On output, the current values of selected WQ attributes are returned.
+  */
+ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+                u32 wq_attr_mask)
+ {
+       int err;
+       if (!wq->device->modify_wq)
+               return -ENOSYS;
+       err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
+       return err;
+ }
+ EXPORT_SYMBOL(ib_modify_wq);
+ 
+ /**
+  * ib_create_rwq_ind_table - Creates an RQ Indirection Table.
+  * @device: The device on which to create the rwq indirection table.
+  * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
+  * create the Indirection Table.
+  *
+  * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl must not be
+  *    shorter than that of the created ib_rwq_ind_table object; the caller
+  *    is responsible for its memory allocation/free.
+  */
+ struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
+                                                struct ib_rwq_ind_table_init_attr *init_attr)
+ {
+       struct ib_rwq_ind_table *rwq_ind_table;
+       int i;
+       u32 table_size;
+       if (!device->create_rwq_ind_table)
+               return ERR_PTR(-ENOSYS);
+       table_size = (1 << init_attr->log_ind_tbl_size);
+       rwq_ind_table = device->create_rwq_ind_table(device,
+                               init_attr, NULL);
+       if (IS_ERR(rwq_ind_table))
+               return rwq_ind_table;
+       rwq_ind_table->ind_tbl = init_attr->ind_tbl;
+       rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
+       rwq_ind_table->device = device;
+       rwq_ind_table->uobject = NULL;
+       atomic_set(&rwq_ind_table->usecnt, 0);
+       for (i = 0; i < table_size; i++)
+               atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);
+       return rwq_ind_table;
+ }
+ EXPORT_SYMBOL(ib_create_rwq_ind_table);
+ 
+ /**
+  * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
+  * @rwq_ind_table: The Indirection Table to destroy.
+  */
+ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
+ {
+       int err, i;
+       u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
+       struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;
+       if (atomic_read(&rwq_ind_table->usecnt))
+               return -EBUSY;
+       err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
+       if (!err) {
+               for (i = 0; i < table_size; i++)
+                       atomic_dec(&ind_tbl[i]->usecnt);
+       }
+       return err;
+ }
+ EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
+ 
  struct ib_flow *ib_create_flow(struct ib_qp *qp,
                               struct ib_flow_attr *flow_attr,
                               int domain)
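
ib_create_wq() and ib_create_rwq_ind_table() above are the RSS building
blocks: create several receive WQs, group them in an indirection table, and
hand the table to ib_create_qp() through qp_init_attr->rwq_ind_tbl (see the
ib_create_qp() hunk earlier in this file). A condensed sketch under those
assumptions, with error handling trimmed and sizes chosen for illustration:

/* Hedged sketch: building a four-entry RSS indirection table with the
 * new verbs. Error handling is trimmed; sizes are illustrative. */
static struct ib_rwq_ind_table *example_rss_table(struct ib_pd *pd,
                                                  struct ib_cq *cq)
{
        struct ib_wq_init_attr wq_attr = {
                .wq_type = IB_WQT_RQ,
                .max_wr  = 128,
                .max_sge = 1,
                .cq      = cq,
        };
        struct ib_rwq_ind_table_init_attr ind_attr = {
                .log_ind_tbl_size = 2,          /* 1 << 2 = 4 WQs */
        };
        static struct ib_wq *wqs[4];    /* must outlive the table */
        int i;

        for (i = 0; i < 4; i++)
                wqs[i] = ib_create_wq(pd, &wq_attr);

        ind_attr.ind_tbl = wqs;
        return ib_create_rwq_ind_table(pd->device, &ind_attr);
}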
index c702a009608f27a62b06b7ad4eb2d54914fab03b,3b792c5023d0a96a6503352f8908cf71e2fd6a11..32c19fad12a4c4df5d074ac5540e3a2f8ee4973c
@@@ -203,9 -203,6 +203,9 @@@ static long hfi1_file_ioctl(struct fil
  
        switch (cmd) {
        case HFI1_IOCTL_ASSIGN_CTXT:
 +              if (uctxt)
 +                      return -EINVAL;
 +
                if (copy_from_user(&uinfo,
                                   (struct hfi1_user_info __user *)arg,
                                   sizeof(uinfo)))
                                    sizeof(struct hfi1_base_info));
                break;
        case HFI1_IOCTL_CREDIT_UPD:
-               if (uctxt && uctxt->sc)
+               if (uctxt)
                        sc_return_credits(uctxt->sc);
                break;
  
index 283b64c942eebfea6378ee8f4ad5206e6f549876,65ad249f90b3dc43e25ed3c3d4a36a409cbd685f..2360338877bf68ca4a809d153f83a64326fa467c
@@@ -79,7 -79,6 +79,7 @@@ static int i40iw_query_device(struct ib
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->max_map_per_fmr = 1;
 +      props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
        return 0;
  }
  
@@@ -529,7 -528,7 +529,7 @@@ static int i40iw_setup_kmode_qp(struct 
                status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
  
        if (status)
-               return -ENOSYS;
+               return -ENOMEM;
  
        sqdepth = sq_size << sqshift;
        rqdepth = rq_size << rqshift;
@@@ -671,7 -670,7 +671,7 @@@ static struct ib_qp *i40iw_create_qp(st
        iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
  
        if (init_attr->qp_type != IB_QPT_RC) {
-               err_code = -ENOSYS;
+               err_code = -EINVAL;
                goto error;
        }
        if (iwdev->push_mode)
@@@ -1474,7 -1473,6 +1474,7 @@@ static int i40iw_hw_alloc_stag(struct i
        info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        info->pd_id = iwpd->sc_pd.pd_id;
        info->total_len = iwmr->length;
 +      info->remote_access = true;
        cqp_info->cqp_cmd = OP_ALLOC_STAG;
        cqp_info->post_sq = 1;
        cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
@@@ -1529,7 -1527,7 +1529,7 @@@ static struct ib_mr *i40iw_alloc_mr(str
        mutex_lock(&iwdev->pbl_mutex);
        status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
        mutex_unlock(&iwdev->pbl_mutex);
 -      if (!status)
 +      if (status)
                goto err1;
  
        if (palloc->level != I40IW_LEVEL_1)
@@@ -1840,6 -1838,7 +1840,7 @@@ struct ib_mr *i40iw_reg_phys_mr(struct 
        iwmr->ibmr.lkey = stag;
        iwmr->page_cnt = 1;
        iwmr->pgaddrmem[0]  = addr;
+       iwmr->length = size;
        status = i40iw_hwreg_mr(iwdev, iwmr, access);
        if (status) {
                i40iw_free_stag(iwdev, stag);
@@@ -1863,7 -1862,7 +1864,7 @@@ static struct ib_mr *i40iw_get_dma_mr(s
  {
        u64 kva = 0;
  
-       return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+       return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
  }
  
  /**
@@@ -1974,18 -1973,6 +1975,6 @@@ static ssize_t i40iw_show_rev(struct de
        return sprintf(buf, "%x\n", hw_rev);
  }
  
- /**
-  * i40iw_show_fw_ver
-  */
- static ssize_t i40iw_show_fw_ver(struct device *dev,
-                                struct device_attribute *attr, char *buf)
- {
-       u32 firmware_version = I40IW_FW_VERSION;
-       return sprintf(buf, "%u.%u\n", firmware_version,
-                      (firmware_version & 0x000000ff));
- }
  /**
   * i40iw_show_hca
   */
@@@ -2006,13 -1993,11 +1995,11 @@@ static ssize_t i40iw_show_board(struct 
  }
  
  static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
- static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL);
  static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
  static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
  
  static struct device_attribute *i40iw_dev_attributes[] = {
        &dev_attr_hw_rev,
-       &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
  };
@@@ -2091,8 -2076,12 +2078,12 @@@ static int i40iw_post_send(struct ib_q
                                ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
                        }
  
-                       if (ret)
-                               err = -EIO;
+                       if (ret) {
+                               if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+                                       err = -ENOMEM;
+                               else
+                                       err = -EINVAL;
+                       }
                        break;
                case IB_WR_RDMA_WRITE:
                        info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
                                ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
                        }
  
-                       if (ret)
-                               err = -EIO;
+                       if (ret) {
+                               if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+                                       err = -ENOMEM;
+                               else
+                                       err = -EINVAL;
+                       }
                        break;
                case IB_WR_RDMA_READ_WITH_INV:
                        inv_stag = true;
                        info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
                        info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
                        ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
-                       if (ret)
-                               err = -EIO;
+                       if (ret) {
+                               if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+                                       err = -ENOMEM;
+                               else
+                                       err = -EINVAL;
+                       }
                        break;
                case IB_WR_LOCAL_INV:
                        info.op_type = I40IW_OP_TYPE_INV_STAG;
                        info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
                        ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
                        if (ret)
-                               err = -EIO;
+                               err = -ENOMEM;
                        break;
                case IB_WR_REG_MR:
                {
                        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
                        struct i40iw_fast_reg_stag_info info;
  
 +                      memset(&info, 0, sizeof(info));
                        info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
                        info.access_rights |= i40iw_get_user_access(flags);
                        info.stag_key = reg_wr(ib_wr)->key & 0xff;
                        info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
                        info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
                        info.total_len = iwmr->ibmr.length;
 +                      info.reg_addr_pa = *(u64 *)palloc->level1.addr;
                        info.first_pm_pbl_index = palloc->level1.idx;
                        info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
                        info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
  
 +                      if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
 +                              info.chunk_size = 1;
 +
                        if (page_shift == 21)
                                info.page_size = 1; /* 2M page */
  
                        ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
                        if (ret)
-                               err = -EIO;
+                               err = -ENOMEM;
                        break;
                }
                default:
@@@ -2214,6 -2206,7 +2213,7 @@@ static int i40iw_post_recv(struct ib_q
        struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
        enum i40iw_status_code ret = 0;
        unsigned long flags;
+       int err = 0;
  
        iwqp = (struct i40iw_qp *)ibqp;
        ukqp = &iwqp->sc_qp.qp_uk;
                ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
                if (ret) {
                        i40iw_pr_err(" post_recv err %d\n", ret);
+                       if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
+                               err = -ENOMEM;
+                       else
+                               err = -EINVAL;
                        *bad_wr = ib_wr;
                        goto out;
                }
        }
   out:
        spin_unlock_irqrestore(&iwqp->lock, flags);
-       if (ret)
-               return -ENOSYS;
-       return 0;
+       return err;
  }
  
  /**
@@@ -2264,7 -2259,7 +2266,7 @@@ static int i40iw_poll_cq(struct ib_cq *
  
        spin_lock_irqsave(&iwcq->lock, flags);
        while (cqe_count < num_entries) {
-               ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+               ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
                if (ret == I40IW_ERR_QUEUE_EMPTY) {
                        break;
                } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
@@@ -2334,16 -2329,13 +2336,16 @@@ static int i40iw_req_notify_cq(struct i
  {
        struct i40iw_cq *iwcq;
        struct i40iw_cq_uk *ukcq;
 -      enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
 +      unsigned long flags;
 +      enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
  
        iwcq = (struct i40iw_cq *)ibcq;
        ukcq = &iwcq->sc_cq.cq_uk;
 -      if (notify_flags == IB_CQ_NEXT_COMP)
 -              cq_notify = IW_CQ_COMPL_EVENT;
 +      if (notify_flags == IB_CQ_SOLICITED)
 +              cq_notify = IW_CQ_COMPL_SOLICITED;
 +      spin_lock_irqsave(&iwcq->lock, flags);
        ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
 +      spin_unlock_irqrestore(&iwcq->lock, flags);
        return 0;
  }
  
@@@ -2437,6 -2429,15 +2439,15 @@@ static const char * const i40iw_hw_stat
                "iwRdmaInv"
  };
  
+ static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str,
+                                size_t str_len)
+ {
+       u32 firmware_version = I40IW_FW_VERSION;
+       snprintf(str, str_len, "%u.%u", firmware_version,
+                      (firmware_version & 0x000000ff));
+ }
+ 
  /**
   * i40iw_alloc_hw_stats - Allocate a hw stats structure
   * @ibdev: device pointer from stack
@@@ -2528,7 -2529,7 +2539,7 @@@ static int i40iw_modify_port(struct ib_
                             int port_modify_mask,
                             struct ib_port_modify *props)
  {
-       return 0;
+       return -ENOSYS;
  }
  
  /**
@@@ -2660,6 -2661,7 +2671,7 @@@ static struct i40iw_ib_device *i40iw_in
        memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
               sizeof(iwibdev->ibdev.iwcm->ifname));
        iwibdev->ibdev.get_port_immutable   = i40iw_port_immutable;
+       iwibdev->ibdev.get_dev_fw_str       = i40iw_get_dev_fw_str;
        iwibdev->ibdev.poll_cq = i40iw_poll_cq;
        iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
        iwibdev->ibdev.post_send = i40iw_post_send;
@@@ -2723,7 -2725,7 +2735,7 @@@ int i40iw_register_rdma_device(struct i
  
        iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
        if (!iwdev->iwibdev)
-               return -ENOSYS;
+               return -ENOMEM;
        iwibdev = iwdev->iwibdev;
  
        ret = ib_register_device(&iwibdev->ibdev, NULL);
@@@ -2748,5 -2750,5 +2760,5 @@@ error
        kfree(iwdev->iwibdev->ibdev.iwcm);
        iwdev->iwibdev->ibdev.iwcm = NULL;
        ib_dealloc_device(&iwdev->iwibdev->ibdev);
-       return -ENOSYS;
+       return ret;
  }
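
The i40iw hunks above replace blanket -EIO/-ENOSYS returns with errno values
that distinguish a full queue (-ENOMEM) from an invalid request (-EINVAL).
Since the same mapping now appears in several switch arms, it could be
factored into a helper; a hypothetical sketch, not part of this merge:

/* Hedged sketch: one helper capturing the repeated mapping above
 * ("i40iw_status_to_errno" is a hypothetical name). */
static inline int i40iw_status_to_errno(enum i40iw_status_code ret)
{
        if (!ret)
                return 0;
        return (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED) ? -ENOMEM : -EINVAL;
}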
index 42a46078d7d52755109784e769f29c2e5532bbca,ee366948699656bdee766f3f1bb9433e320a4a19..2af44c2de2624a75d90a675727b32f914ec53e05
@@@ -1704,9 -1704,6 +1704,9 @@@ static struct ib_flow *mlx4_ib_create_f
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
  
 +      if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
 +              return ERR_PTR(-EINVAL);
 +
        if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
            (flow_attr->type != IB_FLOW_ATTR_NORMAL))
                return ERR_PTR(-EOPNOTSUPP);
@@@ -2025,16 -2022,6 +2025,6 @@@ static ssize_t show_hca(struct device *
        return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
  }
  
- static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
-                          char *buf)
- {
-       struct mlx4_ib_dev *dev =
-               container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
-                      (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
-                      (int) dev->dev->caps.fw_ver & 0xffff);
- }
  static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
  {
@@@ -2053,17 -2040,204 +2043,204 @@@ static ssize_t show_board(struct devic
  }
  
  static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
- static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
  static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
  static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
  
  static struct device_attribute *mlx4_class_attributes[] = {
        &dev_attr_hw_rev,
-       &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
  };
  
+ struct diag_counter {
+       const char *name;
+       u32 offset;
+ };
+ 
+ #define DIAG_COUNTER(_name, _offset)                  \
+       { .name = #_name, .offset = _offset }
+ 
+ static const struct diag_counter diag_basic[] = {
+       DIAG_COUNTER(rq_num_lle, 0x00),
+       DIAG_COUNTER(sq_num_lle, 0x04),
+       DIAG_COUNTER(rq_num_lqpoe, 0x08),
+       DIAG_COUNTER(sq_num_lqpoe, 0x0C),
+       DIAG_COUNTER(rq_num_lpe, 0x18),
+       DIAG_COUNTER(sq_num_lpe, 0x1C),
+       DIAG_COUNTER(rq_num_wrfe, 0x20),
+       DIAG_COUNTER(sq_num_wrfe, 0x24),
+       DIAG_COUNTER(sq_num_mwbe, 0x2C),
+       DIAG_COUNTER(sq_num_bre, 0x34),
+       DIAG_COUNTER(sq_num_rire, 0x44),
+       DIAG_COUNTER(rq_num_rire, 0x48),
+       DIAG_COUNTER(sq_num_rae, 0x4C),
+       DIAG_COUNTER(rq_num_rae, 0x50),
+       DIAG_COUNTER(sq_num_roe, 0x54),
+       DIAG_COUNTER(sq_num_tree, 0x5C),
+       DIAG_COUNTER(sq_num_rree, 0x64),
+       DIAG_COUNTER(rq_num_rnr, 0x68),
+       DIAG_COUNTER(sq_num_rnr, 0x6C),
+       DIAG_COUNTER(rq_num_oos, 0x100),
+       DIAG_COUNTER(sq_num_oos, 0x104),
+ };
+ 
+ static const struct diag_counter diag_ext[] = {
+       DIAG_COUNTER(rq_num_dup, 0x130),
+       DIAG_COUNTER(sq_num_to, 0x134),
+ };
+ 
+ static const struct diag_counter diag_device_only[] = {
+       DIAG_COUNTER(num_cqovf, 0x1A0),
+       DIAG_COUNTER(rq_num_udsdprd, 0x118),
+ };
+ 
+ static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
+                                                   u8 port_num)
+ {
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+       if (!diag[!!port_num].name)
+               return NULL;
+       return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
+                                         diag[!!port_num].num_counters,
+                                         RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ }
+ 
+ static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
+                               struct rdma_hw_stats *stats,
+                               u8 port, int index)
+ {
+       struct mlx4_ib_dev *dev = to_mdev(ibdev);
+       struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+       u32 hw_value[ARRAY_SIZE(diag_device_only) +
+               ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
+       int ret;
+       int i;
+       ret = mlx4_query_diag_counters(dev->dev,
+                                      MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
+                                      diag[!!port].offset, hw_value,
+                                      diag[!!port].num_counters, port);
+       if (ret)
+               return ret;
+       for (i = 0; i < diag[!!port].num_counters; i++)
+               stats->value[i] = hw_value[i];
+       return diag[!!port].num_counters;
+ }
+ 
+ static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
+                                        const char ***name,
+                                        u32 **offset,
+                                        u32 *num,
+                                        bool port)
+ {
+       u32 num_counters;
+       num_counters = ARRAY_SIZE(diag_basic);
+       if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
+               num_counters += ARRAY_SIZE(diag_ext);
+       if (!port)
+               num_counters += ARRAY_SIZE(diag_device_only);
+       *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
+       if (!*name)
+               return -ENOMEM;
+       *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
+       if (!*offset)
+               goto err_name;
+       *num = num_counters;
+       return 0;
+ err_name:
+       kfree(*name);
+       return -ENOMEM;
+ }
+ 
+ static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
+                                      const char **name,
+                                      u32 *offset,
+                                      bool port)
+ {
+       int i;
+       int j;
+       for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
+               name[i] = diag_basic[i].name;
+               offset[i] = diag_basic[i].offset;
+       }
+       if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
+               for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
+                       name[j] = diag_ext[i].name;
+                       offset[j] = diag_ext[i].offset;
+               }
+       }
+       if (!port) {
+               for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
+                       name[j] = diag_device_only[i].name;
+                       offset[j] = diag_device_only[i].offset;
+               }
+       }
+ }
+ 
+ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
+ {
+       struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
+       int i;
+       int ret;
+       bool per_port = !!(ibdev->dev->caps.flags2 &
+               MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+       for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+               /* i == 1 means we are building port counters */
+               if (i && !per_port)
+                       continue;
+               ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+                                                   &diag[i].offset,
+                                                   &diag[i].num_counters, i);
+               if (ret)
+                       goto err_alloc;
+               mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+                                          diag[i].offset, i);
+       }
+       ibdev->ib_dev.get_hw_stats      = mlx4_ib_get_hw_stats;
+       ibdev->ib_dev.alloc_hw_stats    = mlx4_ib_alloc_hw_stats;
+       return 0;
+ err_alloc:
+       if (i) {
+               kfree(diag[i - 1].name);
+               kfree(diag[i - 1].offset);
+       }
+       return ret;
+ }
+ 
+ static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
+ {
+       int i;
+       for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+               kfree(ibdev->diag_counters[i].offset);
+               kfree(ibdev->diag_counters[i].name);
+       }
+ }
+ 
  #define MLX4_IB_INVALID_MAC   ((u64)-1)
  static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
                               struct net_device *dev,
@@@ -2280,6 -2454,17 +2457,17 @@@ static int mlx4_port_immutable(struct i
        return 0;
  }
  
+ static void get_fw_ver_str(struct ib_device *device, char *str,
+                          size_t str_len)
+ {
+       struct mlx4_ib_dev *dev =
+               container_of(device, struct mlx4_ib_dev, ib_dev);
+       snprintf(str, str_len, "%d.%d.%d",
+                (int) (dev->dev->caps.fw_ver >> 32),
+                (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
+                (int) dev->dev->caps.fw_ver & 0xffff);
+ }
+ 
  static void *mlx4_ib_add(struct mlx4_dev *dev)
  {
        struct mlx4_ib_dev *ibdev;
        ibdev->ib_dev.detach_mcast      = mlx4_ib_mcg_detach;
        ibdev->ib_dev.process_mad       = mlx4_ib_process_mad;
        ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
+       ibdev->ib_dev.get_dev_fw_str    = get_fw_ver_str;
        ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext;
  
        if (!mlx4_is_slave(ibdev->dev)) {
        for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
                atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
  
-       if (ib_register_device(&ibdev->ib_dev, NULL))
+       if (mlx4_ib_alloc_diag_counters(ibdev))
                goto err_steer_free_bitmap;
  
+       if (ib_register_device(&ibdev->ib_dev, NULL))
+               goto err_diag_counters;
        if (mlx4_ib_mad_init(ibdev))
                goto err_reg;
  
@@@ -2623,6 -2812,9 +2815,9 @@@ err_mad
  err_reg:
        ib_unregister_device(&ibdev->ib_dev);
  
+ err_diag_counters:
+       mlx4_ib_diag_cleanup(ibdev);
  err_steer_free_bitmap:
        kfree(ibdev->ib_uc_qpns_bitmap);
  
@@@ -2726,6 -2918,7 +2921,7 @@@ static void mlx4_ib_remove(struct mlx4_
        mlx4_ib_close_sriov(ibdev);
        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
+       mlx4_ib_diag_cleanup(ibdev);
        if (ibdev->iboe.nb.notifier_call) {
                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                        pr_warn("failure unregistering notifier\n");
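
The mlx4 diagnostic counters above live in two slots: diag_counters[0] holds
the device-wide set (which also carries diag_device_only[]) and
diag_counters[1] the per-port set, so mlx4_ib_alloc_hw_stats() and
mlx4_ib_get_hw_stats() pick a slot with the !!port trick. A sketch of that
selection; the helper name is illustrative:

/* Hedged sketch: how !!port selects a counter set above. */
static const struct mlx4_ib_diag_counters *
example_pick_diag_set(struct mlx4_ib_dev *dev, u8 port_num)
{
        /* port_num == 0 -> slot 0 (device-wide counters),
         * port_num >= 1 -> slot 1 (per-port counters) */
        return &dev->diag_counters[!!port_num];
}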
index 29acda249612dd444ee42c37494674cc4e482790,43f0382ff4adffc7f028a9a9ba1768135fc9675c..7c5832ede4bd0cc213139900eeb3999532192858
@@@ -139,7 -139,7 +139,7 @@@ struct mlx4_ib_mr 
        u32                     max_pages;
        struct mlx4_mr          mmr;
        struct ib_umem         *umem;
 -      void                    *pages_alloc;
 +      size_t                  page_map_size;
  };
  
  struct mlx4_ib_mw {
@@@ -549,6 -549,14 +549,14 @@@ struct mlx4_ib_counters 
        u32                     default_counter;
  };
  
+ #define MLX4_DIAG_COUNTERS_TYPES 2
+ 
+ struct mlx4_ib_diag_counters {
+       const char **name;
+       u32 *offset;
+       u32 num_counters;
+ };
+ 
  struct mlx4_ib_dev {
        struct ib_device        ib_dev;
        struct mlx4_dev        *dev;
        /* protect resources needed as part of reset flow */
        spinlock_t              reset_flow_resource_lock;
        struct list_head                qp_list;
+       struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
  };
  
  struct ib_event_work {
index dad63f038bb86edd66f806b42e2a62828a6bb085,335fc541d62eca3cf248b038736b8933ee59d893..a84bb766fc62874bc45303268c25b961e45cd4f1
  #include <asm/pat.h>
  #endif
  #include <linux/sched.h>
+ #include <linux/delay.h>
  #include <rdma/ib_user_verbs.h>
  #include <rdma/ib_addr.h>
  #include <rdma/ib_cache.h>
  #include <linux/mlx5/port.h>
  #include <linux/mlx5/vport.h>
+ #include <linux/list.h>
  #include <rdma/ib_smi.h>
  #include <rdma/ib_umem.h>
  #include <linux/in.h>
@@@ -457,8 -459,17 +459,17 @@@ static int mlx5_ib_query_device(struct 
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+       struct mlx5_ib_query_device_resp resp = {};
+       size_t resp_len;
+       u64 max_tso;
  
-       if (uhw->inlen || uhw->outlen)
+       resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+       if (uhw->outlen && uhw->outlen < resp_len)
+               return -EINVAL;
+       else
+               resp.response_length = resp_len;
+       if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;
  
        memset(props, 0, sizeof(*props));
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
  
-       if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
-           (MLX5_CAP_ETH(dev->mdev, csum_cap)))
+       if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
+               if (MLX5_CAP_ETH(mdev, csum_cap))
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
  
+               if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
+                       max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+                       if (max_tso) {
+                               resp.tso_caps.max_tso = 1 << max_tso;
+                               resp.tso_caps.supported_qpts |=
+                                       1 << IB_QPT_RAW_PACKET;
+                               resp.response_length += sizeof(resp.tso_caps);
+                       }
+               }
+       }
        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
  
+       if (uhw->outlen) {
+               err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+               if (err)
+                       return err;
+       }
        return 0;
  }
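
mlx5_ib_query_device() now answers vendor-specific udata: the response always
carries the comp_mask/response_length prefix, and each optional field (such
as tso_caps above) is appended only when the user buffer can hold it, with
response_length growing to match. A self-contained toy of the pattern; the
struct and field names are illustrative, though field_avail() mirrors the
driver's macro:

/* Hedged sketch of the extensible-response pattern, reduced to a toy. */
#include <stddef.h>

struct toy_resp {
        unsigned int comp_mask;
        unsigned int response_length;
        unsigned long long tso_caps;    /* optional, added later */
};

/* mirrors mlx5's field_avail(): does the user buffer reach past the
 * end of this field? */
#define field_avail(typ, fld, sz) \
        (offsetof(typ, fld) + sizeof(((typ *)0)->fld) <= (sz))

static size_t fill_resp(struct toy_resp *resp, size_t user_outlen)
{
        resp->response_length = sizeof(resp->comp_mask) +
                                sizeof(resp->response_length);
        if (field_avail(struct toy_resp, tso_caps, user_outlen)) {
                resp->tso_caps = 1ULL << 16;
                resp->response_length += sizeof(resp->tso_caps);
        }
        return resp->response_length;   /* bytes to copy back to user */
}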
  
@@@ -983,6 -1012,7 +1012,7 @@@ static struct ib_ucontext *mlx5_ib_allo
                        goto out_uars;
        }
  
+       INIT_LIST_HEAD(&context->vma_private_list);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);
  
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);
  
+       if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
+               resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
+               resp.response_length += sizeof(resp.cmds_supp_uhw);
+       }
        /*
         * We don't want to expose information from the PCI bar that is located
         * after 4096 bytes, so if the arch only supports larger pages, let's
                        offsetof(struct mlx5_init_seg, internal_timer_h) %
                        PAGE_SIZE;
                resp.response_length += sizeof(resp.hca_core_clock_offset) +
-                                       sizeof(resp.reserved2) +
-                                       sizeof(resp.reserved3);
+                                       sizeof(resp.reserved2);
        }
  
        err = ib_copy_to_udata(udata, &resp, resp.response_length);
@@@ -1086,6 -1120,125 +1120,125 @@@ static int get_index(unsigned long offs
        return get_arg(offset);
  }
  
+ static void  mlx5_ib_vma_open(struct vm_area_struct *area)
+ {
+       /* vma_open is called when a new VMA is created on top of our VMA.  This
+        * is done through either mremap flow or split_vma (usually due to
+        * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
+        * as this VMA is strongly hardware related.  Therefore we set the
+        * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
+        * calling us again and trying to do incorrect actions.  We assume that
+        * the original VMA size is exactly a single page, and therefore no
+        * "splitting" operations will happen to it.
+        */
+       area->vm_ops = NULL;
+ }
+ 
+ static void  mlx5_ib_vma_close(struct vm_area_struct *area)
+ {
+       struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
+       /* It's guaranteed that all VMAs opened on a FD are closed before the
+        * file itself is closed, therefore no sync is needed with the regular
+        * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
+        * However, we do need to synchronize with accesses to the vma from
+        * mlx5_ib_disassociate_ucontext().
+        * The close operation is usually called under mm->mmap_sem except when
+        * process is exiting.
+        * The exiting case is handled explicitly as part of
+        * mlx5_ib_disassociate_ucontext.
+        */
+       mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
+       /* Set the vma context pointer to NULL in the mlx5_ib driver's
+        * private data to protect against a race condition in
+        * mlx5_ib_disassociate_ucontext().
+        */
+       mlx5_ib_vma_priv_data->vma = NULL;
+       list_del(&mlx5_ib_vma_priv_data->list);
+       kfree(mlx5_ib_vma_priv_data);
+ }
+ 
+ static const struct vm_operations_struct mlx5_ib_vm_ops = {
+       .open = mlx5_ib_vma_open,
+       .close = mlx5_ib_vma_close
+ };
+ 
+ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
+                               struct mlx5_ib_ucontext *ctx)
+ {
+       struct mlx5_ib_vma_private_data *vma_prv;
+       struct list_head *vma_head = &ctx->vma_private_list;
+       vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
+       if (!vma_prv)
+               return -ENOMEM;
+       vma_prv->vma = vma;
+       vma->vm_private_data = vma_prv;
+       vma->vm_ops =  &mlx5_ib_vm_ops;
+       list_add(&vma_prv->list, vma_head);
+       return 0;
+ }
+ 
+ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
+ {
+       int ret;
+       struct vm_area_struct *vma;
+       struct mlx5_ib_vma_private_data *vma_private, *n;
+       struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+       struct task_struct *owning_process  = NULL;
+       struct mm_struct   *owning_mm       = NULL;
+       owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
+       if (!owning_process)
+               return;
+       owning_mm = get_task_mm(owning_process);
+       if (!owning_mm) {
+               pr_info("no mm, disassociate ucontext is pending task termination\n");
+               while (1) {
+                       put_task_struct(owning_process);
+                       usleep_range(1000, 2000);
+                       owning_process = get_pid_task(ibcontext->tgid,
+                                                     PIDTYPE_PID);
+                       if (!owning_process ||
+                           owning_process->state == TASK_DEAD) {
+                               pr_info("disassociate ucontext done, task was terminated\n");
+                               /* If the task died we still need to release
+                                * its task struct.
+                                */
+                               if (owning_process)
+                                       put_task_struct(owning_process);
+                               return;
+                       }
+               }
+       }
+       /* need to protect from a race on closing the vma as part of
+        * mlx5_ib_vma_close.
+        */
+       down_read(&owning_mm->mmap_sem);
+       list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
+                                list) {
+               vma = vma_private->vma;
+               ret = zap_vma_ptes(vma, vma->vm_start,
+                                  PAGE_SIZE);
+               WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
+               /* context going to be destroyed, should
+                * not access ops any more.
+                */
+               vma->vm_ops = NULL;
+               list_del(&vma_private->list);
+               kfree(vma_private);
+       }
+       up_read(&owning_mm->mmap_sem);
+       mmput(owning_mm);
+       put_task_struct(owning_process);
+ }
+ 
  static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
  {
        switch (cmd) {
  }
  
  static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
-                   struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+                   struct vm_area_struct *vma,
+                   struct mlx5_ib_ucontext *context)
  {
+       struct mlx5_uuar_info *uuari = &context->uuari;
        int err;
        unsigned long idx;
        phys_addr_t pfn, pa;
        mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
                    vma->vm_start, &pa);
  
-       return 0;
+       return mlx5_ib_set_vma_data(vma, context);
  }
  
  static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
  {
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
-       struct mlx5_uuar_info *uuari = &context->uuari;
        unsigned long command;
        phys_addr_t pfn;
  
        case MLX5_IB_MMAP_WC_PAGE:
        case MLX5_IB_MMAP_NC_PAGE:
        case MLX5_IB_MMAP_REGULAR_PAGE:
-               return uar_mmap(dev, command, vma, uuari);
+               return uar_mmap(dev, command, vma, context);
  
        case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
                return -ENOSYS;
@@@ -1331,6 -1485,32 +1485,32 @@@ static int parse_flow_attr(u32 *match_c
                       &ib_spec->ipv4.val.dst_ip,
                       sizeof(ib_spec->ipv4.val.dst_ip));
                break;
+       case IB_FLOW_SPEC_IPV6:
+               if (ib_spec->size != sizeof(ib_spec->ipv6))
+                       return -EINVAL;
+               MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+                        ethertype, 0xffff);
+               MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+                        ethertype, ETH_P_IPV6);
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      &ib_spec->ipv6.mask.src_ip,
+                      sizeof(ib_spec->ipv6.mask.src_ip));
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      &ib_spec->ipv6.val.src_ip,
+                      sizeof(ib_spec->ipv6.val.src_ip));
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      &ib_spec->ipv6.mask.dst_ip,
+                      sizeof(ib_spec->ipv6.mask.dst_ip));
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      &ib_spec->ipv6.val.dst_ip,
+                      sizeof(ib_spec->ipv6.val.dst_ip));
+               break;
        case IB_FLOW_SPEC_TCP:
                if (ib_spec->size != sizeof(ib_spec->tcp_udp))
                        return -EINVAL;
@@@ -1528,18 -1708,21 +1708,18 @@@ static struct mlx5_ib_flow_handler *cre
  {
        struct mlx5_flow_table  *ft = ft_prio->flow_table;
        struct mlx5_ib_flow_handler *handler;
 +      struct mlx5_flow_spec *spec;
        void *ib_flow = flow_attr + 1;
 -      u8 match_criteria_enable = 0;
        unsigned int spec_index;
 -      u32 *match_c;
 -      u32 *match_v;
        u32 action;
        int err = 0;
  
        if (!is_valid_attr(flow_attr))
                return ERR_PTR(-EINVAL);
  
 -      match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
 -      match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
 +      spec = mlx5_vzalloc(sizeof(*spec));
        handler = kzalloc(sizeof(*handler), GFP_KERNEL);
 -      if (!handler || !match_c || !match_v) {
 +      if (!handler || !spec) {
                err = -ENOMEM;
                goto free;
        }
        INIT_LIST_HEAD(&handler->list);
  
        for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
 -              err = parse_flow_attr(match_c, match_v, ib_flow);
 +              err = parse_flow_attr(spec->match_criteria,
 +                                    spec->match_value, ib_flow);
                if (err < 0)
                        goto free;
  
        }
  
        /* Outer header support only */
 -      match_criteria_enable = (!outer_header_zero(match_c)) << 0;
 +      spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
 +              << 0;
        action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
                MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
 -      handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
 -                                         match_c, match_v,
 +      handler->rule = mlx5_add_flow_rule(ft, spec,
                                           action,
                                           MLX5_FS_DEFAULT_FLOW_TAG,
                                           dst);
  free:
        if (err)
                kfree(handler);
 -      kfree(match_c);
 -      kfree(match_v);
 +      kvfree(spec);
        return err ? ERR_PTR(err) : handler;
  }
  
@@@ -1801,15 -1984,6 +1981,6 @@@ static ssize_t show_hca(struct device *
        return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
  }
  
- static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
-                          char *buf)
- {
-       struct mlx5_ib_dev *dev =
-               container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
-                      fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
- }
  static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
  {
@@@ -1828,7 -2002,6 +1999,6 @@@ static ssize_t show_board(struct devic
  }
  
  static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
- static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
  static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
  static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
  static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
@@@ -1836,7 -2009,6 +2006,6 @@@ static DEVICE_ATTR(reg_pages, S_IRUGO, 
  
  static struct device_attribute *mlx5_class_attributes[] = {
        &dev_attr_hw_rev,
-       &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
        &dev_attr_fw_pages,
@@@ -1854,6 -2026,65 +2023,65 @@@ static void pkey_change_handler(struct 
        mutex_unlock(&ports->devr->mutex);
  }
  
+ static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
+ {
+       struct mlx5_ib_qp *mqp;
+       struct mlx5_ib_cq *send_mcq, *recv_mcq;
+       struct mlx5_core_cq *mcq;
+       struct list_head cq_armed_list;
+       unsigned long flags_qp;
+       unsigned long flags_cq;
+       unsigned long flags;
+       INIT_LIST_HEAD(&cq_armed_list);
+       /* Go over the qp list residing on that ibdev; sync with qp create/destroy. */
+       spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+       list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+               spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+               if (mqp->sq.tail != mqp->sq.head) {
+                       send_mcq = to_mcq(mqp->ibqp.send_cq);
+                       spin_lock_irqsave(&send_mcq->lock, flags_cq);
+                       if (send_mcq->mcq.comp &&
+                           mqp->ibqp.send_cq->comp_handler) {
+                               if (!send_mcq->mcq.reset_notify_added) {
+                                       send_mcq->mcq.reset_notify_added = 1;
+                                       list_add_tail(&send_mcq->mcq.reset_notify,
+                                                     &cq_armed_list);
+                               }
+                       }
+                       spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+               }
+               spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+               spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+               /* no handling is needed for SRQ */
+               if (!mqp->ibqp.srq) {
+                       if (mqp->rq.tail != mqp->rq.head) {
+                               recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+                               spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+                               if (recv_mcq->mcq.comp &&
+                                   mqp->ibqp.recv_cq->comp_handler) {
+                                       if (!recv_mcq->mcq.reset_notify_added) {
+                                               recv_mcq->mcq.reset_notify_added = 1;
+                                               list_add_tail(&recv_mcq->mcq.reset_notify,
+                                                             &cq_armed_list);
+                                       }
+                               }
+                               spin_unlock_irqrestore(&recv_mcq->lock,
+                                                      flags_cq);
+                       }
+               }
+               spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+       }
+       /* At this point all inflight post-send operations have been flushed
+        * by the lock/unlock of the locks above; now arm all involved CQs.
+        */
+       list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
+               mcq->comp(mcq);
+       }
+       spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+ }
+ 
  static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                          enum mlx5_dev_event event, unsigned long param)
  {
        case MLX5_DEV_EVENT_SYS_ERROR:
                ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
+               mlx5_ib_handle_internal_error(ibdev);
                break;
  
        case MLX5_DEV_EVENT_PORT_UP:
@@@ -2272,6 -2504,15 +2501,15 @@@ static int mlx5_port_immutable(struct i
        return 0;
  }
  
+ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
+                          size_t str_len)
+ {
+       struct mlx5_ib_dev *dev =
+               container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+       snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
+                      fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+ }
+ 
  static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
  {
        int err;
@@@ -2298,6 -2539,113 +2536,113 @@@ static void mlx5_disable_roce(struct ml
        unregister_netdevice_notifier(&dev->roce.nb);
  }
  
+ static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+ {
+       unsigned int i;
+       for (i = 0; i < dev->num_ports; i++)
+               mlx5_core_dealloc_q_counter(dev->mdev,
+                                           dev->port[i].q_cnt_id);
+ }
+ 
+ static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+ {
+       int i;
+       int ret;
+       for (i = 0; i < dev->num_ports; i++) {
+               ret = mlx5_core_alloc_q_counter(dev->mdev,
+                                               &dev->port[i].q_cnt_id);
+               if (ret) {
+                       mlx5_ib_warn(dev,
+                                    "couldn't allocate queue counter for port %d, err %d\n",
+                                    i + 1, ret);
+                       goto dealloc_counters;
+               }
+       }
+       return 0;
+ dealloc_counters:
+       while (--i >= 0)
+               mlx5_core_dealloc_q_counter(dev->mdev,
+                                           dev->port[i].q_cnt_id);
+       return ret;
+ }
+ 
+ static const char * const names[] = {
+       "rx_write_requests",
+       "rx_read_requests",
+       "rx_atomic_requests",
+       "out_of_buffer",
+       "out_of_sequence",
+       "duplicate_request",
+       "rnr_nak_retry_err",
+       "packet_seq_err",
+       "implied_nak_seq_err",
+       "local_ack_timeout_err",
+ };
+
+ static const size_t stats_offsets[] = {
+       MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
+       MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
+       MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
+       MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
+       MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
+       MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
+       MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
+       MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
+       MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
+       MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
+ };
+
+ static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
+                                                   u8 port_num)
+ {
+       BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));
+       /* We support only per port stats */
+       if (port_num == 0)
+               return NULL;
+       return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
+                                         RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ }
+
+ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+                               struct rdma_hw_stats *stats,
+                               u8 port, int index)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+       void *out;
+       __be32 val;
+       int ret;
+       int i;
+       if (!port || !stats)
+               return -ENOSYS;
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return -ENOMEM;
+       ret = mlx5_core_query_q_counter(dev->mdev,
+                                       dev->port[port - 1].q_cnt_id, 0,
+                                       out, outlen);
+       if (ret)
+               goto free;
+       for (i = 0; i < ARRAY_SIZE(names); i++) {
+               val = *(__be32 *)(out + stats_offsets[i]);
+               stats->value[i] = (u64)be32_to_cpu(val);
+       }
+ free:
+       kvfree(out);
+       return ret ? ret : ARRAY_SIZE(names);
+ }
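
These two callbacks feed the hw_stats sysfs machinery: alloc_hw_stats sizes the per-port counter array once, and get_hw_stats refreshes it when a counter file is read and the cached snapshot has outlived its lifespan. A rough sketch of the refresh path, with show_hw_stat standing in for the real handler in drivers/infiniband/core/sysfs.c:

    static ssize_t show_hw_stat(struct ib_device *dev, struct rdma_hw_stats *stats,
                                u8 port, int index, char *buf)
    {
            int ret;

            /* re-query hardware only when the cached value has expired */
            if (time_is_before_jiffies(stats->timestamp + stats->lifespan)) {
                    ret = dev->get_hw_stats(dev, stats, port, index);
                    if (ret < 0)
                            return ret;
                    stats->timestamp = jiffies;
            }
            return sprintf(buf, "%llu\n", stats->value[index]);
    }
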
  static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
  {
        struct mlx5_ib_dev *dev;
  
        dev->mdev = mdev;
  
+       dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+                           GFP_KERNEL);
+       if (!dev->port)
+               goto err_dealloc;
+
        rwlock_init(&dev->roce.netdev_lock);
        err = get_port_caps(dev);
        if (err)
-               goto err_dealloc;
+               goto err_free_port;
  
        if (mlx5_use_mad_ifc(dev))
                get_ext_port_caps(dev);
        dev->ib_dev.map_mr_sg           = mlx5_ib_map_mr_sg;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
        dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
+       dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
        if (mlx5_core_is_pf(mdev)) {
                dev->ib_dev.get_vf_config       = mlx5_ib_get_vf_config;
                dev->ib_dev.set_vf_link_state   = mlx5_ib_set_vf_link_state;
                dev->ib_dev.set_vf_guid         = mlx5_ib_set_vf_guid;
        }
  
+       dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
        mlx5_ib_internal_fill_odp_caps(dev);
  
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                        (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
        }
  
+       if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
+           MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+               dev->ib_dev.get_hw_stats        = mlx5_ib_get_hw_stats;
+               dev->ib_dev.alloc_hw_stats      = mlx5_ib_alloc_hw_stats;
+       }
+
        if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
            IB_LINK_LAYER_ETHERNET) {
                dev->ib_dev.create_flow = mlx5_ib_create_flow;
                dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
+               dev->ib_dev.create_wq    = mlx5_ib_create_wq;
+               dev->ib_dev.modify_wq    = mlx5_ib_modify_wq;
+               dev->ib_dev.destroy_wq   = mlx5_ib_destroy_wq;
+               dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+               dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
                dev->ib_dev.uverbs_ex_cmd_mask |=
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
-                       (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+                       (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
+                       (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+                       (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+                       (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+                       (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+                       (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
        }
        err = init_node_data(dev);
        if (err)
  
        mutex_init(&dev->flow_db.lock);
        mutex_init(&dev->cap_mask_mutex);
+       INIT_LIST_HEAD(&dev->qp_list);
+       spin_lock_init(&dev->reset_flow_resource_lock);
  
        if (ll == IB_LINK_LAYER_ETHERNET) {
                err = mlx5_enable_roce(dev);
        if (err)
                goto err_rsrc;
  
-       err = ib_register_device(&dev->ib_dev, NULL);
+       err = mlx5_ib_alloc_q_counters(dev);
        if (err)
                goto err_odp;
  
+       err = ib_register_device(&dev->ib_dev, NULL);
+       if (err)
+               goto err_q_cnt;
+
        err = create_umr_res(dev);
        if (err)
                goto err_dev;
@@@ -2497,6 -2875,9 +2872,9 @@@ err_umrc
  err_dev:
        ib_unregister_device(&dev->ib_dev);
  
+ err_q_cnt:
+       mlx5_ib_dealloc_q_counters(dev);
  err_odp:
        mlx5_ib_odp_remove_one(dev);
  
@@@ -2507,6 -2888,9 +2885,9 @@@ err_disable_roce
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_roce(dev);
  
+ err_free_port:
+       kfree(dev->port);
  err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);
  
@@@ -2519,11 -2903,13 +2900,13 @@@ static void mlx5_ib_remove(struct mlx5_
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
  
        ib_unregister_device(&dev->ib_dev);
+       mlx5_ib_dealloc_q_counters(dev);
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
        destroy_dev_resources(&dev->devr);
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_roce(dev);
+       kfree(dev->port);
        ib_dealloc_device(&dev->ib_dev);
  }
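
The init/teardown changes above follow the usual kernel unwind discipline: each resource acquired in mlx5_ib_add gets a label, a failure jumps to the label that releases everything acquired so far, and mlx5_ib_remove releases in exact reverse order. The pattern in miniature (alloc_a, alloc_b and free_a are hypothetical placeholders):

    static int setup_example(struct mlx5_ib_dev *dev)
    {
            int err;

            err = alloc_a(dev);     /* e.g. the dev->port array */
            if (err)
                    return err;

            err = alloc_b(dev);     /* e.g. the per-port Q counters */
            if (err)
                    goto err_a;     /* undo only what already succeeded */

            return 0;

    err_a:
            free_a(dev);
            return err;
    }
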
  
index ce0a7ab35a227c569deae7f6b322b4c84eb5dcec,40b204551e5735185be6b61d41b4c5506c4e1c1f..0dd7d93cac95b9e2bba86749b18f83846fa36245
@@@ -77,6 -77,10 +77,10 @@@ struct mlx5_wqe_eth_pad 
        u8 rsvd0[16];
  };
  
+ static void get_cqs(enum ib_qp_type qp_type,
+                   struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+                   struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
+
  static int is_qp0(enum ib_qp_type qp_type)
  {
        return qp_type == IB_QPT_SMI;
@@@ -609,6 -613,11 +613,11 @@@ static int to_mlx5_st(enum ib_qp_type t
        }
  }
  
+ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
+                            struct mlx5_ib_cq *recv_cq);
+ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
+                              struct mlx5_ib_cq *recv_cq);
+
  static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
  {
        return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
@@@ -649,6 -658,71 +658,71 @@@ err_umem
        return err;
  }
  
+ static void destroy_user_rq(struct ib_pd *pd, struct mlx5_ib_rwq *rwq)
+ {
+       struct mlx5_ib_ucontext *context;
+       context = to_mucontext(pd->uobject->context);
+       mlx5_ib_db_unmap_user(context, &rwq->db);
+       if (rwq->umem)
+               ib_umem_release(rwq->umem);
+ }
+
+ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+                         struct mlx5_ib_rwq *rwq,
+                         struct mlx5_ib_create_wq *ucmd)
+ {
+       struct mlx5_ib_ucontext *context;
+       int page_shift = 0;
+       int npages;
+       int ncont = 0;
+       int err;
+       if (!ucmd->buf_addr)
+               return -EINVAL;
+       context = to_mucontext(pd->uobject->context);
+       rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
+                              rwq->buf_size, 0, 0);
+       if (IS_ERR(rwq->umem)) {
+               mlx5_ib_dbg(dev, "umem_get failed\n");
+               err = PTR_ERR(rwq->umem);
+               return err;
+       }
+       mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+                          &ncont, NULL);
+       err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
+                                    &rwq->rq_page_offset);
+       if (err) {
+               mlx5_ib_warn(dev, "bad offset\n");
+               goto err_umem;
+       }
+       rwq->rq_num_pas = ncont;
+       rwq->page_shift = page_shift;
+       rwq->log_page_size =  page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+       rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
+       mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
+                   (unsigned long long)ucmd->buf_addr, rwq->buf_size,
+                   npages, page_shift, ncont, offset);
+       err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
+       if (err) {
+               mlx5_ib_dbg(dev, "map failed\n");
+               goto err_umem;
+       }
+       rwq->create_type = MLX5_WQ_USER;
+       return 0;
+ err_umem:
+       ib_umem_release(rwq->umem);
+       return err;
+ }
+
  static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_qp *qp, struct ib_udata *udata,
                          struct ib_qp_init_attr *attr,
@@@ -1201,6 -1275,187 +1275,187 @@@ static void raw_packet_qp_copy_info(str
        rq->doorbell = &qp->db;
  }
  
+ static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+ {
+       mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+ }
+
+ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+                                struct ib_pd *pd,
+                                struct ib_qp_init_attr *init_attr,
+                                struct ib_udata *udata)
+ {
+       struct ib_uobject *uobj = pd->uobject;
+       struct ib_ucontext *ucontext = uobj->context;
+       struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
+       struct mlx5_ib_create_qp_resp resp = {};
+       int inlen;
+       int err;
+       u32 *in;
+       void *tirc;
+       void *hfso;
+       u32 selected_fields = 0;
+       size_t min_resp_len;
+       u32 tdn = mucontext->tdn;
+       struct mlx5_ib_create_qp_rss ucmd = {};
+       size_t required_cmd_sz;
+       if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+               return -EOPNOTSUPP;
+       if (init_attr->create_flags || init_attr->send_cq)
+               return -EINVAL;
+       min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+       if (udata->outlen < min_resp_len)
+               return -EINVAL;
+       required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
+       if (udata->inlen < required_cmd_sz) {
+               mlx5_ib_dbg(dev, "invalid inlen\n");
+               return -EINVAL;
+       }
+       if (udata->inlen > sizeof(ucmd) &&
+           !ib_is_udata_cleared(udata, sizeof(ucmd),
+                                udata->inlen - sizeof(ucmd))) {
+               mlx5_ib_dbg(dev, "inlen is not supported\n");
+               return -EOPNOTSUPP;
+       }
+       if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+               mlx5_ib_dbg(dev, "copy failed\n");
+               return -EFAULT;
+       }
+       if (ucmd.comp_mask) {
+               mlx5_ib_dbg(dev, "invalid comp mask\n");
+               return -EOPNOTSUPP;
+       }
+       if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
+               mlx5_ib_dbg(dev, "invalid reserved\n");
+               return -EOPNOTSUPP;
+       }
+       err = ib_copy_to_udata(udata, &resp, min_resp_len);
+       if (err) {
+               mlx5_ib_dbg(dev, "copy failed\n");
+               return -EINVAL;
+       }
+       inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+       MLX5_SET(tirc, tirc, disp_type,
+                MLX5_TIRC_DISP_TYPE_INDIRECT);
+       MLX5_SET(tirc, tirc, indirect_table,
+                init_attr->rwq_ind_tbl->ind_tbl_num);
+       MLX5_SET(tirc, tirc, transport_domain, tdn);
+       hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+       switch (ucmd.rx_hash_function) {
+       case MLX5_RX_HASH_FUNC_TOEPLITZ:
+       {
+               void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+               size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+               if (len != ucmd.rx_key_len) {
+                       err = -EINVAL;
+                       goto err;
+               }
+               MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               memcpy(rss_key, ucmd.rx_hash_key, len);
+               break;
+       }
+       default:
+               err = -EOPNOTSUPP;
+               goto err;
+       }
+       if (!ucmd.rx_hash_fields_mask) {
+               /* special case when this TIR serves as steering entry without hashing */
+               if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
+                       goto create_tir;
+               err = -EINVAL;
+               goto err;
+       }
+       if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+            ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+               err = -EINVAL;
+               goto err;
+       }
+       /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
+       if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+           (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+       else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+                (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+       if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
+            ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+            (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
+               err = -EINVAL;
+               goto err;
+       }
+       /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
+       if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+           (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+       else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+                (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+       if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+           (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+               selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
+       if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+           (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+               selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
+       if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+           (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+               selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+       if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+           (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+               selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+       MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
+ create_tir:
+       err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+       if (err)
+               goto err;
+       kvfree(in);
+       /* qpn is reserved for that QP */
+       qp->trans_qp.base.mqp.qpn = 0;
+       return 0;
+ err:
+       kvfree(in);
+       return err;
+ }
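
The MLX5_SET-heavy mask translation above reduces to a simple rule: a selector bit is chosen whenever either address family (or either L4 protocol) requested that field. Restated as a standalone helper (hypothetical, for illustration only):

    static u32 rx_hash_fields_to_selector(u64 mask)
    {
            u32 sel = 0;

            if (mask & (MLX5_RX_HASH_SRC_IPV4 | MLX5_RX_HASH_SRC_IPV6))
                    sel |= MLX5_HASH_FIELD_SEL_SRC_IP;
            if (mask & (MLX5_RX_HASH_DST_IPV4 | MLX5_RX_HASH_DST_IPV6))
                    sel |= MLX5_HASH_FIELD_SEL_DST_IP;
            if (mask & (MLX5_RX_HASH_SRC_PORT_TCP | MLX5_RX_HASH_SRC_PORT_UDP))
                    sel |= MLX5_HASH_FIELD_SEL_L4_SPORT;
            if (mask & (MLX5_RX_HASH_DST_PORT_TCP | MLX5_RX_HASH_DST_PORT_UDP))
                    sel |= MLX5_HASH_FIELD_SEL_L4_DPORT;

            return sel;
    }
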
  static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
        struct mlx5_ib_create_qp ucmd;
+       struct mlx5_ib_cq *send_cq;
+       struct mlx5_ib_cq *recv_cq;
+       unsigned long flags;
        int inlen = sizeof(*in);
        int err;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
  
+       if (init_attr->rwq_ind_tbl) {
+               if (!udata)
+                       return -ENOSYS;
+               err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
+               return err;
+       }
+
        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
        base->container_mibqp = qp;
        base->mqp.event = mlx5_ib_qp_event;
  
+       get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+               &send_cq, &recv_cq);
+       spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+       mlx5_ib_lock_cqs(send_cq, recv_cq);
+       /* Keep a device-level list of QPs; the reset flow walks it to reach
+        * every QP on an internal error.
+        */
+       list_add_tail(&qp->qps_list, &dev->qp_list);
+       /* Likewise keep per-CQ lists of QPs for the reset flow. */
+       if (send_cq)
+               list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+       if (recv_cq)
+               list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+       mlx5_ib_unlock_cqs(send_cq, recv_cq);
+       spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
        return 0;
  
  err_create:
@@@ -1478,23 -1761,23 +1761,23 @@@ static void mlx5_ib_lock_cqs(struct mlx
        if (send_cq) {
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
-                               spin_lock_irq(&send_cq->lock);
+                               spin_lock(&send_cq->lock);
                                spin_lock_nested(&recv_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
-                               spin_lock_irq(&send_cq->lock);
+                               spin_lock(&send_cq->lock);
                                __acquire(&recv_cq->lock);
                        } else {
-                               spin_lock_irq(&recv_cq->lock);
+                               spin_lock(&recv_cq->lock);
                                spin_lock_nested(&send_cq->lock,
                                                 SINGLE_DEPTH_NESTING);
                        }
                } else {
-                       spin_lock_irq(&send_cq->lock);
+                       spin_lock(&send_cq->lock);
                        __acquire(&recv_cq->lock);
                }
        } else if (recv_cq) {
-               spin_lock_irq(&recv_cq->lock);
+               spin_lock(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else {
                __acquire(&send_cq->lock);
@@@ -1509,21 -1792,21 +1792,21 @@@ static void mlx5_ib_unlock_cqs(struct m
                if (recv_cq) {
                        if (send_cq->mcq.cqn < recv_cq->mcq.cqn)  {
                                spin_unlock(&recv_cq->lock);
-                               spin_unlock_irq(&send_cq->lock);
+                               spin_unlock(&send_cq->lock);
                        } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
                                __release(&recv_cq->lock);
-                               spin_unlock_irq(&send_cq->lock);
+                               spin_unlock(&send_cq->lock);
                        } else {
                                spin_unlock(&send_cq->lock);
-                               spin_unlock_irq(&recv_cq->lock);
+                               spin_unlock(&recv_cq->lock);
                        }
                } else {
                        __release(&recv_cq->lock);
-                       spin_unlock_irq(&send_cq->lock);
+                       spin_unlock(&send_cq->lock);
                }
        } else if (recv_cq) {
                __release(&send_cq->lock);
-               spin_unlock_irq(&recv_cq->lock);
+               spin_unlock(&recv_cq->lock);
        } else {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
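
The _irq variants can be dropped because both callers now take dev->reset_flow_resource_lock with spin_lock_irqsave() before calling mlx5_ib_lock_cqs(), so interrupts are already disabled here. The deadlock-avoidance rule itself is unchanged: always lock the CQ with the lower CQN first, so two paths sharing a CQ pair can never acquire the locks in opposite orders. Stripped of the NULL and shared-CQ cases, the rule is just:

    if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
            spin_lock(&send_cq->lock);
            spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
    } else {
            spin_lock(&recv_cq->lock);
            spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
    }
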
@@@ -1535,17 -1818,18 +1818,18 @@@ static struct mlx5_ib_pd *get_pd(struc
        return to_mpd(qp->ibqp.pd);
  }
  
- static void get_cqs(struct mlx5_ib_qp *qp,
+ static void get_cqs(enum ib_qp_type qp_type,
+                   struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
                    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
  {
-       switch (qp->ibqp.qp_type) {
+       switch (qp_type) {
        case IB_QPT_XRC_TGT:
                *send_cq = NULL;
                *recv_cq = NULL;
                break;
        case MLX5_IB_QPT_REG_UMR:
        case IB_QPT_XRC_INI:
-               *send_cq = to_mcq(qp->ibqp.send_cq);
+               *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
                *recv_cq = NULL;
                break;
  
        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        case IB_QPT_RAW_PACKET:
-               *send_cq = to_mcq(qp->ibqp.send_cq);
-               *recv_cq = to_mcq(qp->ibqp.recv_cq);
+               *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+               *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
                break;
  
        case IB_QPT_MAX:
@@@ -1577,8 -1861,14 +1861,14 @@@ static void destroy_qp_common(struct ml
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
        struct mlx5_modify_qp_mbox_in *in;
+       unsigned long flags;
        int err;
  
+       if (qp->ibqp.rwq_ind_tbl) {
+               destroy_rss_raw_qp_tir(dev, qp);
+               return;
+       }
+
        base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
               &qp->raw_packet_qp.rq.base :
               &qp->trans_qp.base;
                                     base->mqp.qpn);
        }
  
-       get_cqs(qp, &send_cq, &recv_cq);
+       get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+               &send_cq, &recv_cq);
+       spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+       mlx5_ib_lock_cqs(send_cq, recv_cq);
+       /* del from lists under both locks above to protect reset flow paths */
+       list_del(&qp->qps_list);
+       if (send_cq)
+               list_del(&qp->cq_send_list);
+       if (recv_cq)
+               list_del(&qp->cq_recv_list);
  
        if (qp->create_type == MLX5_QP_KERNEL) {
-               mlx5_ib_lock_cqs(send_cq, recv_cq);
                __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
                                   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
                        __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
                                           NULL);
-               mlx5_ib_unlock_cqs(send_cq, recv_cq);
        }
+       mlx5_ib_unlock_cqs(send_cq, recv_cq);
+       spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
  
        if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
                destroy_raw_packet_qp(dev, qp);
@@@ -2300,7 -2601,8 +2601,8 @@@ static int __mlx5_ib_modify_qp(struct i
        }
  
        pd = get_pd(qp);
-       get_cqs(qp, &send_cq, &recv_cq);
+       get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+               &send_cq, &recv_cq);
  
        context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
        context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
        else
                sqd_event = 0;
  
+       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+               u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+                              qp->port) - 1;
+               struct mlx5_ib_port *mibport = &dev->port[port_num];
+               context->qp_counter_set_usr_page |=
+                       cpu_to_be32((u32)(mibport->q_cnt_id) << 24);
+       }
        if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                context->sq_crq_size |= cpu_to_be16(1 << 4);
  
@@@ -2439,6 -2750,9 +2750,9 @@@ int mlx5_ib_modify_qp(struct ib_qp *ibq
        int port;
        enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
  
+       if (ibqp->rwq_ind_tbl)
+               return -ENOSYS;
        if (unlikely(ibqp->qp_type == IB_QPT_GSI))
                return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
  
@@@ -3332,11 -3646,10 +3646,11 @@@ static u8 get_fence(u8 fence, struct ib
                        return MLX5_FENCE_MODE_SMALL_AND_FENCE;
                else
                        return fence;
 -
 -      } else {
 -              return 0;
 +      } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
 +              return MLX5_FENCE_MODE_FENCE;
        }
 +
 +      return 0;
  }
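
With this fix, a work request that sets IB_SEND_FENCE now always gets at least MLX5_FENCE_MODE_FENCE instead of silently getting no fence when the QP has no stronger ordering in flight. Sketch of a consumer relying on that (qp, read_wr and bad_wr assumed to be set up elsewhere):

    /* fence the RDMA read against previously posted work requests */
    read_wr.wr.send_flags |= IB_SEND_FENCE;
    err = ib_post_send(qp, &read_wr.wr, &bad_wr);
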
  
  static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@@ -3397,6 -3710,7 +3711,7 @@@ int mlx5_ib_post_send(struct ib_qp *ibq
  {
        struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_qp *qp;
        struct mlx5_ib_mr *mr;
        struct mlx5_wqe_data_seg *dpseg;
  
        spin_lock_irqsave(&qp->sq.lock, flags);
  
+       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               err = -EIO;
+               *bad_wr = wr;
+               nreq = 0;
+               goto out;
+       }
+
        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
                        mlx5_ib_warn(dev, "\n");
@@@ -3725,6 -4046,8 +4047,8 @@@ int mlx5_ib_post_recv(struct ib_qp *ibq
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        struct mlx5_wqe_data_seg *scat;
        struct mlx5_rwqe_sig *sig;
+       struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+       struct mlx5_core_dev *mdev = dev->mdev;
        unsigned long flags;
        int err = 0;
        int nreq;
  
        spin_lock_irqsave(&qp->rq.lock, flags);
  
+       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               err = -EIO;
+               *bad_wr = wr;
+               nreq = 0;
+               goto out;
+       }
+
        ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
  
        for (nreq = 0; wr; nreq++, wr = wr->next) {
@@@ -4055,6 -4385,9 +4386,9 @@@ int mlx5_ib_query_qp(struct ib_qp *ibqp
        int err = 0;
        u8 raw_packet_qp_state;
  
+       if (ibqp->rwq_ind_tbl)
+               return -ENOSYS;
        if (unlikely(ibqp->qp_type == IB_QPT_GSI))
                return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
                                            qp_init_attr);
@@@ -4164,3 -4497,322 +4498,322 @@@ int mlx5_ib_dealloc_xrcd(struct ib_xrc
  
        return 0;
  }
+ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
+                    struct ib_wq_init_attr *init_attr)
+ {
+       struct mlx5_ib_dev *dev;
+       __be64 *rq_pas0;
+       void *in;
+       void *rqc;
+       void *wq;
+       int inlen;
+       int err;
+       dev = to_mdev(pd->device);
+       inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+       rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+       MLX5_SET(rqc,  rqc, mem_rq_type,
+                MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+       MLX5_SET(rqc, rqc, user_index, rwq->user_index);
+       MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
+       MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
+       MLX5_SET(rqc,  rqc, flush_in_error_en, 1);
+       wq = MLX5_ADDR_OF(rqc, rqc, wq);
+       MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+       MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+       MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+       MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
+       MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
+       MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
+       MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
+       MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
+       MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
+       rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+       mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
+       err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+       kvfree(in);
+       return err;
+ }
+
+ static int set_user_rq_size(struct mlx5_ib_dev *dev,
+                           struct ib_wq_init_attr *wq_init_attr,
+                           struct mlx5_ib_create_wq *ucmd,
+                           struct mlx5_ib_rwq *rwq)
+ {
+       /* Sanity check RQ size before proceeding */
+       if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
+               return -EINVAL;
+       if (!ucmd->rq_wqe_count)
+               return -EINVAL;
+       rwq->wqe_count = ucmd->rq_wqe_count;
+       rwq->wqe_shift = ucmd->rq_wqe_shift;
+       rwq->buf_size = (rwq->wqe_count << rwq->wqe_shift);
+       rwq->log_rq_stride = rwq->wqe_shift;
+       rwq->log_rq_size = ilog2(rwq->wqe_count);
+       return 0;
+ }
+
+ static int prepare_user_rq(struct ib_pd *pd,
+                          struct ib_wq_init_attr *init_attr,
+                          struct ib_udata *udata,
+                          struct mlx5_ib_rwq *rwq)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct mlx5_ib_create_wq ucmd = {};
+       int err;
+       size_t required_cmd_sz;
+       required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+       if (udata->inlen < required_cmd_sz) {
+               mlx5_ib_dbg(dev, "invalid inlen\n");
+               return -EINVAL;
+       }
+       if (udata->inlen > sizeof(ucmd) &&
+           !ib_is_udata_cleared(udata, sizeof(ucmd),
+                                udata->inlen - sizeof(ucmd))) {
+               mlx5_ib_dbg(dev, "inlen is not supported\n");
+               return -EOPNOTSUPP;
+       }
+       if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+               mlx5_ib_dbg(dev, "copy failed\n");
+               return -EFAULT;
+       }
+       if (ucmd.comp_mask) {
+               mlx5_ib_dbg(dev, "invalid comp mask\n");
+               return -EOPNOTSUPP;
+       }
+       if (ucmd.reserved) {
+               mlx5_ib_dbg(dev, "invalid reserved\n");
+               return -EOPNOTSUPP;
+       }
+       err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
+       if (err) {
+               mlx5_ib_dbg(dev, "err %d\n", err);
+               return err;
+       }
+       err = create_user_rq(dev, pd, rwq, &ucmd);
+       if (err) {
+               mlx5_ib_dbg(dev, "err %d\n", err);
+               return err;
+       }
+       rwq->user_index = ucmd.user_index;
+       return 0;
+ }
+
+ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+                               struct ib_wq_init_attr *init_attr,
+                               struct ib_udata *udata)
+ {
+       struct mlx5_ib_dev *dev;
+       struct mlx5_ib_rwq *rwq;
+       struct mlx5_ib_create_wq_resp resp = {};
+       size_t min_resp_len;
+       int err;
+       if (!udata)
+               return ERR_PTR(-ENOSYS);
+       min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+       if (udata->outlen && udata->outlen < min_resp_len)
+               return ERR_PTR(-EINVAL);
+       dev = to_mdev(pd->device);
+       switch (init_attr->wq_type) {
+       case IB_WQT_RQ:
+               rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
+               if (!rwq)
+                       return ERR_PTR(-ENOMEM);
+               err = prepare_user_rq(pd, init_attr, udata, rwq);
+               if (err)
+                       goto err;
+               err = create_rq(rwq, pd, init_attr);
+               if (err)
+                       goto err_user_rq;
+               break;
+       default:
+               mlx5_ib_dbg(dev, "unsupported wq type %d\n",
+                           init_attr->wq_type);
+               return ERR_PTR(-EINVAL);
+       }
+       rwq->ibwq.wq_num = rwq->rqn;
+       rwq->ibwq.state = IB_WQS_RESET;
+       if (udata->outlen) {
+               resp.response_length = offsetof(typeof(resp), response_length) +
+                               sizeof(resp.response_length);
+               err = ib_copy_to_udata(udata, &resp, resp.response_length);
+               if (err)
+                       goto err_copy;
+       }
+       return &rwq->ibwq;
+ err_copy:
+       mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+ err_user_rq:
+       destroy_user_rq(pd, rwq);
+ err:
+       kfree(rwq);
+       return ERR_PTR(err);
+ }
+
+ int mlx5_ib_destroy_wq(struct ib_wq *wq)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(wq->device);
+       struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+       mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+       destroy_user_rq(wq->pd, rwq);
+       kfree(rwq);
+       return 0;
+ }
+
+ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
+                                                     struct ib_rwq_ind_table_init_attr *init_attr,
+                                                     struct ib_udata *udata)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_ib_rwq_ind_table *rwq_ind_tbl;
+       int sz = 1 << init_attr->log_ind_tbl_size;
+       struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
+       size_t min_resp_len;
+       int inlen;
+       int err;
+       int i;
+       u32 *in;
+       void *rqtc;
+       if (udata->inlen > 0 &&
+           !ib_is_udata_cleared(udata, 0,
+                                udata->inlen))
+               return ERR_PTR(-EOPNOTSUPP);
+       min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+       if (udata->outlen && udata->outlen < min_resp_len)
+               return ERR_PTR(-EINVAL);
+       rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL);
+       if (!rwq_ind_tbl)
+               return ERR_PTR(-ENOMEM);
+       inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               err = -ENOMEM;
+               goto err;
+       }
+       rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+       MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+       MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+       for (i = 0; i < sz; i++)
+               MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+       err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
+       kvfree(in);
+       if (err)
+               goto err;
+       rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
+       if (udata->outlen) {
+               resp.response_length = offsetof(typeof(resp), response_length) +
+                                       sizeof(resp.response_length);
+               err = ib_copy_to_udata(udata, &resp, resp.response_length);
+               if (err)
+                       goto err_copy;
+       }
+       return &rwq_ind_tbl->ib_rwq_ind_tbl;
+ err_copy:
+       mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ err:
+       kfree(rwq_ind_tbl);
+       return ERR_PTR(err);
+ }
+
+ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+ {
+       struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
+       struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
+       mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+       kfree(rwq_ind_tbl);
+       return 0;
+ }
+
+ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+                     u32 wq_attr_mask, struct ib_udata *udata)
+ {
+       struct mlx5_ib_dev *dev = to_mdev(wq->device);
+       struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+       struct mlx5_ib_modify_wq ucmd = {};
+       size_t required_cmd_sz;
+       int curr_wq_state;
+       int wq_state;
+       int inlen;
+       int err;
+       void *rqc;
+       void *in;
+       required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved);
+       if (udata->inlen < required_cmd_sz)
+               return -EINVAL;
+       if (udata->inlen > sizeof(ucmd) &&
+           !ib_is_udata_cleared(udata, sizeof(ucmd),
+                                udata->inlen - sizeof(ucmd)))
+               return -EOPNOTSUPP;
+       if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+               return -EFAULT;
+       if (ucmd.comp_mask || ucmd.reserved)
+               return -EOPNOTSUPP;
+       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+       curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ?
+               wq_attr->curr_wq_state : wq->state;
+       wq_state = (wq_attr_mask & IB_WQ_STATE) ?
+               wq_attr->wq_state : curr_wq_state;
+       if (curr_wq_state == IB_WQS_ERR)
+               curr_wq_state = MLX5_RQC_STATE_ERR;
+       if (wq_state == IB_WQS_ERR)
+               wq_state = MLX5_RQC_STATE_ERR;
+       MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+       MLX5_SET(rqc, rqc, state, wq_state);
+       err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+       kvfree(in);
+       if (!err)
+               rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
+       return err;
+ }
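
A WQ created by mlx5_ib_create_wq starts in IB_WQS_RESET; a consumer moves it to ready through the modify verb before posting receives. Minimal kernel-side sketch (assumes wq came from a successful ib_create_wq() call):

    struct ib_wq_attr wq_attr = {
            .wq_state = IB_WQS_RDY,
    };
    int err;

    err = ib_modify_wq(wq, &wq_attr, IB_WQ_STATE);
    if (err)
            pr_err("failed to move WQ %u to RDY: %d\n", wq->wq_num, err);
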
index f4497cf4d06dcfda02c3ea565f82a4280efbe14c,0a525604659db6e8431e0affeaafdb1d12997ab6..d728704d0c7b523c6dc90d35c10390d122439222
@@@ -721,6 -721,7 +721,7 @@@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev 
  #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET                0x98
  #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET               0xa0
  #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET               0x9c
+ #define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT      0x9c
  #define QUERY_DEV_CAP_FW_REASSIGN_MAC         0x9d
  #define QUERY_DEV_CAP_VXLAN                   0x9e
  #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET                0xb0
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
        if (field32 & (1 << 7))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
+       MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
+       if (field32 & (1 << 17))
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
        if (field & 1<<6)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@@ -1128,7 -1132,6 +1132,7 @@@ int mlx4_QUERY_PORT(struct mlx4_dev *de
                port_cap->max_pkeys        = 1 << (field & 0xf);
                MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
                port_cap->max_vl           = field & 0xf;
 +              port_cap->max_tc_eth       = field >> 4;
                MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
                port_cap->log_max_macs  = field & 0xf;
                port_cap->log_max_vlans = field >> 4;
@@@ -2457,6 -2460,42 +2461,42 @@@ int mlx4_NOP(struct mlx4_dev *dev
                        MLX4_CMD_NATIVE);
  }
  
+ int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+                            const u32 offset[],
+                            u32 value[], size_t array_len, u8 port)
+ {
+       struct mlx4_cmd_mailbox *mailbox;
+       u32 *outbox;
+       size_t i;
+       int ret;
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       outbox = mailbox->buf;
+       ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
+                          MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
+                          MLX4_CMD_NATIVE);
+       if (ret)
+               goto out;
+       for (i = 0; i < array_len; i++) {
+               if (offset[i] > MLX4_MAILBOX_SIZE - sizeof(u32)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               MLX4_GET(value[i], outbox, offset[i]);
+       }
+ out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return ret;
+ }
+ EXPORT_SYMBOL(mlx4_query_diag_counters);
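
A caller hands in an array of byte offsets into the DIAG_RPRT mailbox and receives the corresponding 32-bit counters from a single firmware command. Illustrative sketch of a consumer (the offsets below are placeholders; the real values come from the mlx4_ib diagnostic counter tables):

    static const u32 diag_offsets[] = { 0x04, 0x08 };  /* hypothetical */
    u32 diag_values[ARRAY_SIZE(diag_offsets)];
    int err;

    err = mlx4_query_diag_counters(dev, MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
                                   diag_offsets, diag_values,
                                   ARRAY_SIZE(diag_offsets), port);
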
  int mlx4_get_phys_port_id(struct mlx4_dev *dev)
  {
        u8 port;
index e6f6910278f30a3fa734da4308175574eb1f0cac,d73d8e4d3e09f8e4b5844fce1bd7cb731f14b7bb..42da3552f7cbe00f00c9c01ab49651da421aa80a
@@@ -220,6 -220,7 +220,7 @@@ enum 
        MLX4_DEV_CAP_FLAG2_LB_SRC_CHK           = 1ULL << 32,
        MLX4_DEV_CAP_FLAG2_ROCE_V1_V2           = 1ULL <<  33,
        MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER   = 1ULL <<  34,
+       MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT        = 1ULL <<  35,
  };
  
  enum {
@@@ -466,7 -467,6 +467,7 @@@ enum 
  enum {
        MLX4_INTERFACE_STATE_UP         = 1 << 0,
        MLX4_INTERFACE_STATE_DELETION   = 1 << 1,
 +      MLX4_INTERFACE_STATE_SHUTDOWN   = 1 << 2,
  };
  
  #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
@@@ -536,7 -536,6 +537,7 @@@ struct mlx4_caps 
        int                     max_rq_desc_sz;
        int                     max_qp_init_rdma;
        int                     max_qp_dest_rdma;
 +      int                     max_tc_eth;
        u32                     *qp0_qkey;
        u32                     *qp0_proxy;
        u32                     *qp1_proxy;
@@@ -1342,6 -1341,9 +1343,9 @@@ enum 
        VXLAN_STEER_BY_INNER_VLAN       = 1 << 4,
  };
  
+ enum {
+       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2,
+ };
  
  int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
                                enum mlx4_net_trans_promisc_mode mode);
@@@ -1382,6 -1384,9 +1386,9 @@@ void mlx4_fmr_unmap(struct mlx4_dev *de
  int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
  int mlx4_SYNC_TPT(struct mlx4_dev *dev);
  int mlx4_test_interrupts(struct mlx4_dev *dev);
+ int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+                            const u32 offset[], u32 value[],
+                            size_t array_len, u8 port);
  u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
  bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
  struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
@@@ -1496,7 -1501,6 +1503,7 @@@ int mlx4_mr_rereg_mem_write(struct mlx4
  
  int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
                         u16 offset, u16 size, u8 *data);
 +int mlx4_max_tc(struct mlx4_dev *dev);
  
  /* Returns true if running in low memory profile (kdump kernel) */
  static inline bool mlx4_low_memory_profile(void)
index a041b99fceac5bf9d5c70fb637146ec60b3a8eac,ba933335772cfa19a13fececa254c98ec8173323..ccea6fb1648287fa25f9675df045eec867295528
@@@ -46,6 -46,7 +46,7 @@@
  
  #include <linux/mlx5/device.h>
  #include <linux/mlx5/doorbell.h>
+ #include <linux/mlx5/srq.h>
  
  enum {
        MLX5_RQ_BITMASK_VSD = 1 << 1,
@@@ -469,7 -470,7 +470,7 @@@ struct mlx5_irq_info 
  };
  
  struct mlx5_fc_stats {
 -      struct list_head list;
 +      struct rb_root counters;
        struct list_head addlist;
        /* protect addlist add/splice operations */
        spinlock_t addlist_lock;
  
  struct mlx5_eswitch;
  
 +struct mlx5_rl_entry {
 +      u32                     rate;
 +      u16                     index;
 +      u16                     refcount;
 +};
 +
 +struct mlx5_rl_table {
 +      /* protect rate limit table */
 +      struct mutex            rl_lock;
 +      u16                     max_size;
 +      u32                     max_rate;
 +      u32                     min_rate;
 +      struct mlx5_rl_entry   *rl_entry;
 +};
 +
  struct mlx5_priv {
        char                    name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table    eq_table;
        struct list_head        ctx_list;
        spinlock_t              ctx_lock;
  
 +      struct mlx5_flow_steering *steering;
        struct mlx5_eswitch     *eswitch;
        struct mlx5_core_sriov  sriov;
        unsigned long           pci_dev_data;
 -      struct mlx5_flow_root_namespace *root_ns;
 -      struct mlx5_flow_root_namespace *fdb_root_ns;
 -      struct mlx5_flow_root_namespace *esw_egress_root_ns;
 -      struct mlx5_flow_root_namespace *esw_ingress_root_ns;
 -
        struct mlx5_fc_stats            fc_stats;
 +      struct mlx5_rl_table            rl_table;
  };
  
  enum mlx5_device_state {
@@@ -574,18 -563,6 +575,18 @@@ enum mlx5_pci_status 
        MLX5_PCI_STATUS_ENABLED,
  };
  
 +struct mlx5_td {
 +      struct list_head tirs_list;
 +      u32              tdn;
 +};
 +
 +struct mlx5e_resources {
 +      struct mlx5_uar            cq_uar;
 +      u32                        pdn;
 +      struct mlx5_td             td;
 +      struct mlx5_core_mkey      mkey;
 +};
 +
  struct mlx5_core_dev {
        struct pci_dev         *pdev;
        /* sync pci state */
        struct mlx5_profile     *profile;
        atomic_t                num_qps;
        u32                     issi;
 +      struct mlx5e_resources  mlx5e_res;
  #ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap         *rmap;
  #endif
@@@ -654,7 -630,6 +655,7 @@@ struct mlx5_cmd_work_ent 
        void                   *uout;
        int                     uout_size;
        mlx5_cmd_cbk_t          callback;
 +      struct delayed_work     cb_timeout_work;
        void                   *context;
        int                     idx;
        struct completion       done;
@@@ -798,11 -773,10 +799,10 @@@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd
  void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                 struct mlx5_cmd_mailbox *head);
  int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-                        struct mlx5_create_srq_mbox_in *in, int inlen,
-                        int is_xrc);
+                        struct mlx5_srq_attr *in);
  int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
  int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-                       struct mlx5_query_srq_mbox_out *out);
+                       struct mlx5_srq_attr *out);
  int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
  void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
@@@ -887,12 -861,6 +887,12 @@@ int mlx5_query_odp_caps(struct mlx5_cor
  int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
                             u8 port_num, void *out, size_t sz);
  
 +int mlx5_init_rl_table(struct mlx5_core_dev *dev);
 +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
 +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
 +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
 +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
 +
  static inline int fw_initializing(struct mlx5_core_dev *dev)
  {
        return ioread32be(&dev->iseg->initializing) >> 31;
@@@ -970,11 -938,6 +970,11 @@@ static inline int mlx5_get_gid_table_le
        return 8 * (1 << param);
  }
  
 +static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
 +{
 +      return !!(dev->priv.rl_table.max_size);
 +}
 +
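
Typical use of the new rate-limit table, sketched for a hypothetical caller that programs a packet-pacing rate for a send queue: validate the rate, take a refcounted table entry, stamp its index into the queue context, and release it on teardown.

    u16 rl_index;
    int err;

    if (!mlx5_rl_is_supported(mdev) || !mlx5_rl_is_in_range(mdev, rate))
            return -EOPNOTSUPP;

    err = mlx5_rl_add_rate(mdev, rate, &rl_index);  /* takes a reference */
    if (err)
            return err;

    /* ... program rl_index into the SQ context ... */

    mlx5_rl_remove_rate(mdev, rate);                /* drops the reference */
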
  enum {
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
  };
diff --combined include/linux/mlx5/qp.h
index ab310819ac3605a426c35ae273d4a66926794c48,acb28c989deeab587ed1ee6e169b274e19950caf..7879bf41189105c816e6deec5bfdc4218d5e4c86
@@@ -172,7 -172,6 +172,7 @@@ enum 
  enum {
        MLX5_FENCE_MODE_NONE                    = 0 << 5,
        MLX5_FENCE_MODE_INITIATOR_SMALL         = 1 << 5,
 +      MLX5_FENCE_MODE_FENCE                   = 2 << 5,
        MLX5_FENCE_MODE_STRONG_ORDERING         = 3 << 5,
        MLX5_FENCE_MODE_SMALL_AND_FENCE         = 4 << 5,
  };
@@@ -556,9 -555,9 +556,9 @@@ struct mlx5_destroy_qp_mbox_out 
  struct mlx5_modify_qp_mbox_in {
        struct mlx5_inbox_hdr   hdr;
        __be32                  qpn;
-       u8                      rsvd1[4];
-       __be32                  optparam;
        u8                      rsvd0[4];
+       __be32                  optparam;
+       u8                      rsvd1[4];
        struct mlx5_qp_context  ctx;
        u8                      rsvd2[16];
  };
diff --combined include/rdma/ib_verbs.h
index a8137dcf5a0072f10468c326a2581674b2e16369,156673a807043c0b14609a67ee74abcde1449543..94a0bc5b5bdd47b16678148a04690793321b85c2
@@@ -562,6 -562,7 +562,7 @@@ enum ib_event_type 
        IB_EVENT_QP_LAST_WQE_REACHED,
        IB_EVENT_CLIENT_REREGISTER,
        IB_EVENT_GID_CHANGE,
+       IB_EVENT_WQ_FATAL,
  };
  
  const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
@@@ -572,6 -573,7 +573,7 @@@ struct ib_event 
                struct ib_cq    *cq;
                struct ib_qp    *qp;
                struct ib_srq   *srq;
+               struct ib_wq    *wq;
                u8              port_num;
        } element;
        enum ib_event_type      event;
@@@ -1015,6 -1017,7 +1017,7 @@@ struct ib_qp_init_attr 
         * Only needed for special QP types, or when using the RW API.
         */
        u8                      port_num;
+       struct ib_rwq_ind_table *rwq_ind_tbl;
  };
  
  struct ib_qp_open_attr {
@@@ -1323,6 -1326,8 +1326,8 @@@ struct ib_ucontext 
        struct list_head        ah_list;
        struct list_head        xrcd_list;
        struct list_head        rule_list;
+       struct list_head        wq_list;
+       struct list_head        rwq_ind_tbl_list;
        int                     closing;
  
        struct pid             *tgid;
@@@ -1428,6 -1433,63 +1433,63 @@@ struct ib_srq 
        } ext;
  };
  
+ enum ib_wq_type {
+       IB_WQT_RQ
+ };
+
+ enum ib_wq_state {
+       IB_WQS_RESET,
+       IB_WQS_RDY,
+       IB_WQS_ERR
+ };
+
+ struct ib_wq {
+       struct ib_device       *device;
+       struct ib_uobject      *uobject;
+       void                *wq_context;
+       void                (*event_handler)(struct ib_event *, void *);
+       struct ib_pd           *pd;
+       struct ib_cq           *cq;
+       u32             wq_num;
+       enum ib_wq_state       state;
+       enum ib_wq_type wq_type;
+       atomic_t                usecnt;
+ };
+
+ struct ib_wq_init_attr {
+       void                   *wq_context;
+       enum ib_wq_type wq_type;
+       u32             max_wr;
+       u32             max_sge;
+       struct  ib_cq          *cq;
+       void                (*event_handler)(struct ib_event *, void *);
+ };
+
+ enum ib_wq_attr_mask {
+       IB_WQ_STATE     = 1 << 0,
+       IB_WQ_CUR_STATE = 1 << 1,
+ };
+
+ struct ib_wq_attr {
+       enum    ib_wq_state     wq_state;
+       enum    ib_wq_state     curr_wq_state;
+ };
+
+ struct ib_rwq_ind_table {
+       struct ib_device        *device;
+       struct ib_uobject      *uobject;
+       atomic_t                usecnt;
+       u32             ind_tbl_num;
+       u32             log_ind_tbl_size;
+       struct ib_wq    **ind_tbl;
+ };
+
+ struct ib_rwq_ind_table_init_attr {
+       u32             log_ind_tbl_size;
+       /* Each entry is a pointer to Receive Work Queue */
+       struct ib_wq    **ind_tbl;
+ };
+
  struct ib_qp {
        struct ib_device       *device;
        struct ib_pd           *pd;
        void                   *qp_context;
        u32                     qp_num;
        enum ib_qp_type         qp_type;
+       struct ib_rwq_ind_table *rwq_ind_tbl;
  };
  
  struct ib_mr {
@@@ -1506,6 -1569,7 +1569,7 @@@ enum ib_flow_spec_type 
        IB_FLOW_SPEC_IB         = 0x22,
        /* L3 header*/
        IB_FLOW_SPEC_IPV4       = 0x30,
+       IB_FLOW_SPEC_IPV6       = 0x31,
        /* L4 headers*/
        IB_FLOW_SPEC_TCP        = 0x40,
        IB_FLOW_SPEC_UDP        = 0x41
@@@ -1567,6 -1631,18 +1631,18 @@@ struct ib_flow_spec_ipv4 
        struct ib_flow_ipv4_filter mask;
  };
  
+ struct ib_flow_ipv6_filter {
+       u8      src_ip[16];
+       u8      dst_ip[16];
+ };
+
+ struct ib_flow_spec_ipv6 {
+       enum ib_flow_spec_type     type;
+       u16                        size;
+       struct ib_flow_ipv6_filter val;
+       struct ib_flow_ipv6_filter mask;
+ };
+
  struct ib_flow_tcp_udp_filter {
        __be16  dst_port;
        __be16  src_port;
@@@ -1588,6 -1664,7 +1664,7 @@@ union ib_flow_spec 
        struct ib_flow_spec_ib          ib;
        struct ib_flow_spec_ipv4        ipv4;
        struct ib_flow_spec_tcp_udp     tcp_udp;
+       struct ib_flow_spec_ipv6        ipv6;
  };
  
  struct ib_flow_attr {
@@@ -1921,7 -1998,18 +1998,18 @@@ struct ib_device 
                                                   struct ifla_vf_stats *stats);
        int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
                                                  int type);
+       struct ib_wq *             (*create_wq)(struct ib_pd *pd,
+                                               struct ib_wq_init_attr *init_attr,
+                                               struct ib_udata *udata);
+       int                        (*destroy_wq)(struct ib_wq *wq);
+       int                        (*modify_wq)(struct ib_wq *wq,
+                                               struct ib_wq_attr *attr,
+                                               u32 wq_attr_mask,
+                                               struct ib_udata *udata);
+       struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
+                                                          struct ib_rwq_ind_table_init_attr *init_attr,
+                                                          struct ib_udata *udata);
+       int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
        struct ib_dma_mapping_ops   *dma_ops;
  
        struct module               *owner;
         * in fast paths.
         */
        int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
+       void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
  };
  
  struct ib_client {
  struct ib_device *ib_alloc_device(size_t size);
  void ib_dealloc_device(struct ib_device *device);
  
+ void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
  int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *));
@@@ -2819,19 -2910,19 +2910,19 @@@ static inline void ib_dma_unmap_single(
  static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
                                          void *cpu_addr, size_t size,
                                          enum dma_data_direction direction,
 -                                        struct dma_attrs *attrs)
 +                                        unsigned long dma_attrs)
  {
        return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
 -                                  direction, attrs);
 +                                  direction, dma_attrs);
  }
  
  static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
                                             u64 addr, size_t size,
                                             enum dma_data_direction direction,
 -                                           struct dma_attrs *attrs)
 +                                           unsigned long dma_attrs)
  {
        return dma_unmap_single_attrs(dev->dma_device, addr, size,
 -                                    direction, attrs);
 +                                    direction, dma_attrs);
  }
  
  /**
@@@ -2906,18 -2997,17 +2997,18 @@@ static inline void ib_dma_unmap_sg(stru
  static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
                                      struct scatterlist *sg, int nents,
                                      enum dma_data_direction direction,
 -                                    struct dma_attrs *attrs)
 +                                    unsigned long dma_attrs)
  {
 -      return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
 +      return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
 +                              dma_attrs);
  }
  
  static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
                                         struct scatterlist *sg, int nents,
                                         enum dma_data_direction direction,
 -                                       struct dma_attrs *attrs)
 +                                       unsigned long dma_attrs)
  {
 -      dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
 +      dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
  }
  /**
   * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@@ -3168,6 -3258,15 +3259,15 @@@ int ib_check_mr_status(struct ib_mr *mr
  struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
                                            u16 pkey, const union ib_gid *gid,
                                            const struct sockaddr *addr);
+ struct ib_wq *ib_create_wq(struct ib_pd *pd,
+                          struct ib_wq_init_attr *init_attr);
+ int ib_destroy_wq(struct ib_wq *wq);
+ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
+                u32 wq_attr_mask);
+ struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
+                                                struct ib_rwq_ind_table_init_attr*
+                                                wq_ind_table_init_attr);
+ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
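
Putting the new verbs together, an RSS receive pipeline is built bottom-up: WQs first, then an indirection table over them, then a RAW_PACKET QP that points at the table instead of owning its own receive queue. Condensed sketch (error handling elided; attribute values are illustrative, and the WQs must still be moved to IB_WQS_RDY before traffic flows):

    struct ib_wq_init_attr wq_attr = {
            .wq_type = IB_WQT_RQ,
            .max_wr  = 256,
            .max_sge = 1,
            .cq      = cq,
    };
    struct ib_rwq_ind_table_init_attr tbl_attr;
    struct ib_qp_init_attr qp_attr = {};
    struct ib_rwq_ind_table *ind_tbl;
    struct ib_wq *wqs[2];
    struct ib_qp *qp;

    wqs[0] = ib_create_wq(pd, &wq_attr);
    wqs[1] = ib_create_wq(pd, &wq_attr);

    tbl_attr.log_ind_tbl_size = 1;          /* 2^1 = 2 WQs */
    tbl_attr.ind_tbl = wqs;
    ind_tbl = ib_create_rwq_ind_table(pd->device, &tbl_attr);

    qp_attr.qp_type     = IB_QPT_RAW_PACKET;
    qp_attr.rwq_ind_tbl = ind_tbl;          /* no send_cq/recv_cq needed */
    qp = ib_create_qp(pd, &qp_attr);
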
  
  int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                 unsigned int *sg_offset, unsigned int page_size);