Merge tag 'arc-4.6-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 27 Apr 2016 16:46:21 +0000 (09:46 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 27 Apr 2016 16:46:21 +0000 (09:46 -0700)
Pull ARC fixes from Vineet Gupta:

 - lockdep now works for ARCv2 builds

 - enable DT reserved-memory binding (for forthcoming HDMI driver)
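
   For reference, a reserved-memory carve-out is described with the generic
   /reserved-memory device tree binding, roughly as sketched below. This is an
   illustrative sketch only: the node names, addresses, sizes and the consumer
   device are assumptions made for the example and are not taken from the ARC
   patches in this pull.

	/ {
		reserved-memory {
			#address-cells = <1>;
			#size-cells = <1>;
			ranges;

			/* hypothetical carve-out handed to one device */
			frame_buffer: framebuffer@9e000000 {
				compatible = "shared-dma-pool";
				reg = <0x9e000000 0x1000000>;	/* 16 MiB */
				no-map;
			};
		};

		/* hypothetical consumer; a real node carries its full binding */
		hdmi {
			memory-region = <&frame_buffer>;
		};
	};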

* tag 'arc-4.6-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: add support for reserved memory defined by device tree
  ARC: support generic per-device coherent dma mem
  Documentation: dt: arc: fix spelling mistakes
  ARCv2: Enable LOCKDEP

248 files changed:
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/phy/rockchip-dp-phy.txt
Documentation/devicetree/bindings/phy/rockchip-emmc-phy.txt
Documentation/devicetree/bindings/rtc/s3c-rtc.txt
Documentation/input/event-codes.txt
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/dm814x-clocks.dtsi
arch/arm/boot/dts/dra62x-clocks.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/qcom-msm8974.dtsi
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/r8a7791-porter.dts
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/include/asm/cputype.h
arch/arm/kernel/setup.c
arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
arch/arm/mach-omap2/clockdomains7xx_data.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-shmobile/timer.c
arch/arm/mm/dma-mapping.c
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
arch/arm64/kernel/head.S
arch/arm64/kernel/smp_spin_table.c
arch/nios2/lib/memset.c
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/kernel/prom.c
arch/s390/Kconfig
arch/s390/include/asm/pci.h
arch/s390/include/asm/seccomp.h
arch/s390/lib/spinlock.c
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/include/asm/hugetlb.h
arch/x86/kernel/cpu/mshyperv.c
crypto/rsa-pkcs1pad.c
drivers/bcma/main.c
drivers/clocksource/tango_xtal.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
drivers/crypto/ccp/ccp-crypto-sha.c
drivers/crypto/talitos.c
drivers/edac/sb_edac.c
drivers/firmware/psci.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/infiniband/hw/mlx5/main.c
drivers/input/joystick/xpad.c
drivers/input/misc/arizona-haptics.c
drivers/input/misc/pmic8xxx-pwrkey.c
drivers/input/misc/twl4030-vibra.c
drivers/input/misc/twl6040-vibra.c
drivers/input/tablet/gtco.c
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c
drivers/irqchip/irq-mips-gic.c
drivers/isdn/mISDN/socket.c
drivers/net/Kconfig
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/macsec.c
drivers/net/phy/spi_ks8995.c
drivers/net/usb/cdc_mbim.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vrf.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/pci/access.c
drivers/pci/host/pci-imx6.c
drivers/pci/pci.h
drivers/perf/arm_pmu.c
drivers/phy/phy-rockchip-dp.c
drivers/phy/phy-rockchip-emmc.c
drivers/pinctrl/freescale/Kconfig
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/pinctrl-single.c
drivers/platform/x86/hp_accel.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel_pmc_ipc.c
drivers/platform/x86/intel_punit_ipc.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/rtc/rtc-ds1307.c
drivers/s390/block/dcssblk.c
drivers/s390/block/scm_blk.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/soc/mediatek/mtk-scpsys.c
drivers/thermal/Kconfig
drivers/thermal/mtk_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/power_allocator.c
drivers/thermal/thermal_core.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/uartlite.c
drivers/tty/tty_io.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/video/fbdev/amba-clcd.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
fs/devpts/inode.c
include/asm-generic/futex.h
include/drm/drm_cache.h
include/linux/devpts_fs.h
include/linux/mlx4/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/port.h
include/linux/mlx5/vport.h
include/linux/pci.h
include/linux/rculist_nulls.h
include/linux/thermal.h
include/linux/tty_driver.h
include/net/cls_cgroup.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/route.h
include/net/sctp/structs.h
include/net/sock.h
include/net/switchdev.h
include/net/tcp.h
include/sound/hda_regmap.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/Kbuild
include/uapi/linux/if_macsec.h
kernel/bpf/verifier.c
kernel/cpu.c
kernel/futex.c
kernel/irq/ipi.c
kernel/locking/qspinlock_stat.h
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/bridge/netfilter/ebtables.c
net/core/skbuff.c
net/decnet/dn_route.c
net/ipv4/fib_frontend.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/route.c
net/ipv6/udp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/rds/cong.c
net/rds/ib_cm.c
net/sched/sch_generic.c
net/sctp/outqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/transport.c
net/switchdev/switchdev.c
net/tipc/core.c
net/tipc/core.h
net/tipc/name_distr.c
net/vmw_vsock/vmci_transport.c
net/wireless/nl80211.c
sound/hda/hdac_device.c
sound/hda/hdac_regmap.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/pcxhr/pcxhr_core.c
tools/objtool/Documentation/stack-validation.txt
tools/objtool/builtin-check.c
tools/perf/util/intel-pt.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/reuseport_dualstack.c [new file with mode: 0644]

index ccc62f1453066fc10ba23cc3f3fa037ee9ccac9c..3f0cbbb8395f84ef1bc1cba898854d386f236b53 100644 (file)
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
                          can be one of:
                            "allwinner,sun6i-a31"
                            "allwinner,sun8i-a23"
-                           "arm,psci"
                            "arm,realview-smp"
                            "brcm,bcm-nsp-smp"
                            "brcm,brahma-b15"
index 5ca79290eabf0fc65c1a37c03b10fa60ef9e71c4..32eaaca04d9bf501809b0e5719100463f614d37b 100644 (file)
@@ -9,7 +9,8 @@ have dual GMAC each represented by a child node..
 Required properties:
 - compatible: Should be "mediatek,mt7623-eth"
 - reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
+- interrupts: Should contain the three frame engines interrupts in numeric
+       order. These are fe_int0, fe_int1 and fe_int2.
 - clocks: the clock used by the core
 - clock-names: the names of the clock listed in the clocks property. These are
        "ethif", "esw", "gp2", "gp1"
@@ -42,7 +43,9 @@ eth: ethernet@1b100000 {
                 <&ethsys CLK_ETHSYS_GP2>,
                 <&ethsys CLK_ETHSYS_GP1>;
        clock-names = "ethif", "esw", "gp2", "gp1";
-       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW>;
+       interrupts = <GIC_SPI 200 IRQ_TYPE_LEVEL_LOW
+                     GIC_SPI 199 IRQ_TYPE_LEVEL_LOW
+                     GIC_SPI 198 IRQ_TYPE_LEVEL_LOW>;
        power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
        resets = <&ethsys MT2701_ETHSYS_ETH_RST>;
        reset-names = "eth";
index 50c4f9b00adff971d32cda52189cd29880cf694e..e3b4809fbe82c4798b7e5bef55e666505d6df527 100644 (file)
@@ -8,15 +8,19 @@ Required properties:
        of memory mapped region.
 - clock-names: from common clock binding:
        Required elements: "24m"
-- rockchip,grf: phandle to the syscon managing the "general register files"
 - #phy-cells : from the generic PHY bindings, must be 0;
 
 Example:
 
-edp_phy: edp-phy {
-       compatible = "rockchip,rk3288-dp-phy";
-       rockchip,grf = <&grf>;
-       clocks = <&cru SCLK_EDP_24M>;
-       clock-names = "24m";
-       #phy-cells = <0>;
+grf: syscon@ff770000 {
+       compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
+
+...
+
+       edp_phy: edp-phy {
+               compatible = "rockchip,rk3288-dp-phy";
+               clocks = <&cru SCLK_EDP_24M>;
+               clock-names = "24m";
+               #phy-cells = <0>;
+       };
 };
index 61916f15a949cdae63356e7f4c827b8a9e394841..555cb0f406908a0fc5ebfc995a8df97af8a2dcba 100644 (file)
@@ -3,17 +3,23 @@ Rockchip EMMC PHY
 
 Required properties:
  - compatible: rockchip,rk3399-emmc-phy
- - rockchip,grf : phandle to the syscon managing the "general
-   register files"
  - #phy-cells: must be 0
- - reg: PHY configure reg address offset in "general
+ - reg: PHY register address offset and length in "general
    register files"
 
 Example:
 
-emmcphy: phy {
-       compatible = "rockchip,rk3399-emmc-phy";
-       rockchip,grf = <&grf>;
-       reg = <0xf780>;
-       #phy-cells = <0>;
+
+grf: syscon@ff770000 {
+       compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+...
+
+       emmcphy: phy@f780 {
+               compatible = "rockchip,rk3399-emmc-phy";
+               reg = <0xf780 0x20>;
+               #phy-cells = <0>;
+       };
 };
index 1068ffce9f9125559e301f60f9c0ce6ca40d2374..fdde63a5419cb1942ea1480a761beb963db08479 100644 (file)
@@ -15,9 +15,10 @@ Required properties:
   is the rtc tick interrupt. The number of cells representing a interrupt
   depends on the parent interrupt controller.
 - clocks: Must contain a list of phandle and clock specifier for the rtc
-          and source clocks.
-- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
-               same order as the clocks property.
+          clock and in the case of a s3c6410 compatible controller, also
+          a source clock.
+- clock-names: Must contain "rtc" and for a s3c6410 compatible controller,
+               a "rtc_src" sorted in the same order as the clocks property.
 
 Example:
 
index 3f0f5ce3338b63a8153410c9b08edba739989e22..36ea940e5bb91fe9711544e5d0bbe2e2293c8ca8 100644 (file)
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
     proximity of the device and while the value of the BTN_TOUCH code is 0. If
     the input device may be used freely in three dimensions, consider ABS_Z
     instead.
+  - BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
+    proximity and set to 0 when the tool leaves detectable proximity.
+    BTN_TOOL_<name> signals the type of tool that is currently detected by the
+    hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.
 
 * ABS_MT_<name>:
   - Used to describe multitouch input events. Please see
index c518dce7da4d62da22b192cbc3595ebf62ae3037..5aa738346062887731679d1376747f2e99db93e9 100644 (file)
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
 
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
 the processes using the page fault handler, with init_level4_pgt as
 reference.
 
-Current X86-64 implementations only support 40 bits of address space,
-but we support up to 46 bits. This expands into MBZ space in the page tables.
+Current X86-64 implementations support up to 46 bits of address space (64 TB),
+which is our current limit. This expands into MBZ space in the page tables.
 
 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
index 1d5b4becab6f9a1d3272dc950a16665590e8b6a0..849133608da722c0a92147d1e2ac6d61fd0e2e69 100644 (file)
@@ -11071,6 +11071,15 @@ S:     Maintained
 F:     drivers/clk/ti/
 F:     include/linux/clk/ti.h
 
+TI ETHERNET SWITCH DRIVER (CPSW)
+M:     Mugunthan V N <mugunthanvnm@ti.com>
+R:     Grygorii Strashko <grygorii.strashko@ti.com>
+L:     linux-omap@vger.kernel.org
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/ti/cpsw*
+F:     drivers/net/ethernet/ti/davinci*
+
 TI FLASH MEDIA INTERFACE DRIVER
 M:     Alex Dubov <oakad@yahoo.com>
 S:     Maintained
index 873411873c036622755caf194037d5e6b2ba17bc..9496df81b9a89508e5138058ecf61c3409d76728 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
 prepare: prepare0 prepare-objtool
 
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0)
+  has_libelf := $(call try-run,\
+               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
index 55ca9c7dcf6aacf6f7a6702a266dc8725be3a4c7..0467846b4cc33cd0e5bf22c1973407fdc38248a9 100644 (file)
                        ti,no-idle-on-init;
                        reg = <0x50000000 0x2000>;
                        interrupts = <100>;
-                       dmas = <&edma 52>;
+                       dmas = <&edma 52 0>;
                        dma-names = "rxtx";
                        gpmc,num-cs = <7>;
                        gpmc,num-waitpins = <2>;
index 344b861a55a5bc6f9652ec1dce8fcb22ad256042..ba580a9da390fb3dc1a723b7b77da08e6600e96e 100644 (file)
                gpmc: gpmc@50000000 {
                        compatible = "ti,am3352-gpmc";
                        ti,hwmods = "gpmc";
-                       dmas = <&edma 52>;
+                       dmas = <&edma 52 0>;
                        dma-names = "rxtx";
                        clocks = <&l3s_gclk>;
                        clock-names = "fck";
index 0a5fc5d02ce2ba9893ac30b6f9997df08d04eb65..4168eb9dd3698c827e8707007ecca0d0f918349a 100644 (file)
                #cooling-cells = <2>;
        };
 
-       extcon_usb1: extcon_usb1 {
-               compatible = "linux,extcon-usb-gpio";
-               id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
-               pinctrl-names = "default";
-               pinctrl-0 = <&extcon_usb1_pins>;
-       };
-
        hdmi0: connector {
                compatible = "hdmi-connector";
                label = "hdmi";
                >;
        };
 
-       extcon_usb1_pins: extcon_usb1_pins {
-               pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
-               >;
-       };
-
        tpd12s015_pins: pinmux_tpd12s015_pins {
                pinctrl-single,pins = <
                        DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14)              /* gpio7_10 CT_CP_HPD */
        pinctrl-0 = <&usb1_pins>;
 };
 
-&omap_dwc3_1 {
-       extcon = <&extcon_usb1>;
-};
-
 &omap_dwc3_2 {
        extcon = <&extcon_usb2>;
 };
index e0ea6a93a22ed722392295828c21802ac54dae2d..792a64ee0df7fac5d201863ecffb80d7381fec17 100644 (file)
@@ -4,6 +4,157 @@
  * published by the Free Software Foundation.
  */
 
+&pllss {
+       /*
+        * See TRM "2.6.10 Connected outputso DPLLS" and
+        * "2.6.11 Connected Outputs of DPLLJ". Only clkout is
+        * connected except for hdmi and usb.
+        */
+       adpll_mpu_ck: adpll@40 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-s-clock";
+               reg = <0x40 0x40>;
+               clocks = <&devosc_ck &devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow", "clkinphif";
+               clock-output-names = "481c5040.adpll.dcoclkldo",
+                                    "481c5040.adpll.clkout",
+                                    "481c5040.adpll.clkoutx2",
+                                    "481c5040.adpll.clkouthif";
+       };
+
+       adpll_dsp_ck: adpll@80 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x80 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5080.adpll.dcoclkldo",
+                                    "481c5080.adpll.clkout",
+                                    "481c5080.adpll.clkoutldo";
+       };
+
+       adpll_sgx_ck: adpll@b0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0xb0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c50b0.adpll.dcoclkldo",
+                                    "481c50b0.adpll.clkout",
+                                    "481c50b0.adpll.clkoutldo";
+       };
+
+       adpll_hdvic_ck: adpll@e0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0xe0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c50e0.adpll.dcoclkldo",
+                                    "481c50e0.adpll.clkout",
+                                    "481c50e0.adpll.clkoutldo";
+       };
+
+       adpll_l3_ck: adpll@110 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x110 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5110.adpll.dcoclkldo",
+                                    "481c5110.adpll.clkout",
+                                    "481c5110.adpll.clkoutldo";
+       };
+
+       adpll_isp_ck: adpll@140 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x140 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5140.adpll.dcoclkldo",
+                                    "481c5140.adpll.clkout",
+                                    "481c5140.adpll.clkoutldo";
+       };
+
+       adpll_dss_ck: adpll@170 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x170 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5170.adpll.dcoclkldo",
+                                    "481c5170.adpll.clkout",
+                                    "481c5170.adpll.clkoutldo";
+       };
+
+       adpll_video0_ck: adpll@1a0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x1a0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c51a0.adpll.dcoclkldo",
+                                    "481c51a0.adpll.clkout",
+                                    "481c51a0.adpll.clkoutldo";
+       };
+
+       adpll_video1_ck: adpll@1d0 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x1d0 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c51d0.adpll.dcoclkldo",
+                                    "481c51d0.adpll.clkout",
+                                    "481c51d0.adpll.clkoutldo";
+       };
+
+       adpll_hdmi_ck: adpll@200 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x200 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5200.adpll.dcoclkldo",
+                                    "481c5200.adpll.clkout",
+                                    "481c5200.adpll.clkoutldo";
+       };
+
+       adpll_audio_ck: adpll@230 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x230 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5230.adpll.dcoclkldo",
+                                    "481c5230.adpll.clkout",
+                                    "481c5230.adpll.clkoutldo";
+       };
+
+       adpll_usb_ck: adpll@260 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x260 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5260.adpll.dcoclkldo",
+                                    "481c5260.adpll.clkout",
+                                    "481c5260.adpll.clkoutldo";
+       };
+
+       adpll_ddr_ck: adpll@290 {
+               #clock-cells = <1>;
+               compatible = "ti,dm814-adpll-lj-clock";
+               reg = <0x290 0x30>;
+               clocks = <&devosc_ck &devosc_ck>;
+               clock-names = "clkinp", "clkinpulow";
+               clock-output-names = "481c5290.adpll.dcoclkldo",
+                                    "481c5290.adpll.clkout",
+                                    "481c5290.adpll.clkoutldo";
+       };
+};
+
 &pllss_clocks {
        timer1_fck: timer1_fck {
                #clock-cells = <0>;
                reg = <0x2e0>;
        };
 
+       /* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from auiod */
+       cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&adpll_video0_ck 1
+                         &adpll_video1_ck 1
+                         &adpll_audio_ck 1>;
+               ti,bit-shift = <1>;
+               reg = <0x2e8>;
+       };
+
+       /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
+       cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <125000000>;
+       };
+
        sysclk18_ck: sysclk18_ck {
                #clock-cells = <0>;
                compatible = "ti,mux-clock";
                compatible = "fixed-clock";
                clock-frequency = <1000000000>;
        };
-
-       sysclk4_ck: sysclk4_ck {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <222000000>;
-       };
-
-       sysclk6_ck: sysclk6_ck {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <100000000>;
-       };
-
-       sysclk10_ck: sysclk10_ck {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <48000000>;
-       };
-
-        cpsw_125mhz_gclk: cpsw_125mhz_gclk {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <125000000>;
-       };
-
-       cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
-               #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <250000000>;
-       };
-
 };
 
 &prcm_clocks {
                clock-div = <78125>;
        };
 
+       /* L4_HS 220 MHz*/
+       sysclk4_ck: sysclk4_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_l3_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <1>;
+       };
+
+       /* L4_FWCFG */
+       sysclk5_ck: sysclk5_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_l3_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <2>;
+       };
+
+       /* L4_LS 110 MHz */
+       sysclk6_ck: sysclk6_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_l3_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <2>;
+       };
+
+       sysclk8_ck: sysclk8_ck {
+               #clock-cells = <0>;
+               compatible = "ti,fixed-factor-clock";
+               clocks = <&adpll_usb_ck 1>;
+               ti,clock-mult = <1>;
+               ti,clock-div = <1>;
+       };
+
+       sysclk10_ck: sysclk10_ck {
+               compatible = "ti,divider-clock";
+               reg = <0x324>;
+               ti,max-div = <7>;
+               #clock-cells = <0>;
+               clocks = <&adpll_usb_ck 1>;
+       };
+
        aud_clkin0_ck: aud_clkin0_ck {
                #clock-cells = <0>;
                compatible = "fixed-clock";
index 6f98dc8df9dd1fc430adb66e562d4e8bdd1afc9a..0e49741747efb80ffe4a87764713f6398dec58fd 100644 (file)
@@ -6,6 +6,32 @@
 
 #include "dm814x-clocks.dtsi"
 
+/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
+&adpll_hdvic_ck {
+       status = "disabled";
+};
+
+&adpll_l3_ck {
+       status = "disabled";
+};
+
+&adpll_dss_ck {
+       status = "disabled";
+};
+
+/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
+&sysclk4_ck {
+       clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk5_ck {
+       clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk6_ck {
+       clocks = <&adpll_isp_ck 1>;
+};
+
 /*
  * Compared to dm814x, dra62x has different shifts and more mux options.
  * Please add the extra options for ysclk_14 and 16 if really needed.
index d0bae06b7eb7e8600f75213a576b778704ecacff..ef2164a99d0f0202a8f2c021c626dc55ec3f1390 100644 (file)
                clock-frequency = <32768>;
        };
 
-       sys_32k_ck: sys_32k_ck {
+       sys_clk32_crystal_ck: sys_clk32_crystal_ck {
                #clock-cells = <0>;
                compatible = "fixed-clock";
                clock-frequency = <32768>;
        };
 
+       sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
+               #clock-cells = <0>;
+               compatible = "fixed-factor-clock";
+               clocks = <&sys_clkin1>;
+               clock-mult = <1>;
+               clock-div = <610>;
+       };
+
        virt_12000000_ck: virt_12000000_ck {
                #clock-cells = <0>;
                compatible = "fixed-clock";
                ti,bit-shift = <22>;
                reg = <0x0558>;
        };
+
+       sys_32k_ck: sys_32k_ck {
+               #clock-cells = <0>;
+               compatible = "ti,mux-clock";
+               clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
+               ti,bit-shift = <8>;
+               reg = <0x6c4>;
+       };
 };
index ef53305784318a40d692fa7053a631abac28e552..8193139d0d8706037a29b5c03516f8d8dd15d12c 100644 (file)
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/clock/qcom,gcc-msm8974.h>
 #include "skeleton.dtsi"
 
                        clock-names = "core", "iface";
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
-                       dma-names = "tx", "rx";
                };
 
                spmi_bus: spmi@fc4cf000 {
                        interrupt-controller;
                        #interrupt-cells = <4>;
                };
-
-               blsp2_dma: dma-controller@f9944000 {
-                       compatible = "qcom,bam-v1.4.0";
-                       reg = <0xf9944000 0x19000>;
-                       interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&gcc GCC_BLSP2_AHB_CLK>;
-                       clock-names = "bam_clk";
-                       #dma-cells = <1>;
-                       qcom,ee = <0>;
-               };
        };
 
        smd {
index 0ad71b81d3a25f59aeaa3e329de0ac9f55c0cdad..cc6e28f81fe4f3721352deb9450f6c36a731b30c 100644 (file)
 };
 
 &pcie_bus_clk {
+       clock-frequency = <100000000>;
        status = "okay";
 };
 
index 6c08314427d6aab2e54d016319d8cc532bacdbdc..a9285d9a57cdf914b448d000f7db66f5f8846620 100644 (file)
 };
 
 &pfc {
-       pinctrl-0 = <&scif_clk_pins>;
-       pinctrl-names = "default";
-
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_d";
                renesas,function = "scif0";
        };
 
-       scif_clk_pins: scif_clk {
-               renesas,groups = "scif_clk";
-               renesas,function = "scif_clk";
-       };
-
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
-&scif_clk {
-       clock-frequency = <14745600>;
-       status = "okay";
-};
-
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
 };
 
 &pcie_bus_clk {
+       clock-frequency = <100000000>;
        status = "okay";
 };
 
index 6439f0569fe2c578bc26e66b97947fb7cf220dd9..1cd1b6a3a72a5c27f7aa43e930347bf7fb53158a 100644 (file)
                pcie_bus_clk: pcie_bus_clk {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <100000000>;
+                       clock-frequency = <0>;
                        clock-output-names = "pcie_bus";
-                       status = "disabled";
                };
 
                /* External SCIF clock */
                        #clock-cells = <0>;
                        /* This value must be overridden by the board. */
                        clock-frequency = <0>;
-                       status = "disabled";
                };
 
                /* External USB clock - can be overridden by the board */
                        /* This value must be overridden by the board. */
                        clock-frequency = <0>;
                        clock-output-names = "can_clk";
-                       status = "disabled";
                };
 
                /* Special CPG clocks */
index b23c6c81c9ad88df191354d926be53cf4810e8f8..1ee94c716a7f87f45c00436fb9b2fa640014c811 100644 (file)
@@ -276,7 +276,7 @@ static inline int __attribute_const__ cpuid_feature_extract_field(u32 features,
        int feature = (features >> field) & 15;
 
        /* feature registers are signed values */
-       if (feature > 8)
+       if (feature > 7)
                feature -= 16;
 
        return feature;
index a28fce0bdbbe11b7a950a3a0943f8362d7124b87..2c4bea39cf224f8368cca2b4dba61a5b807a3dde 100644 (file)
@@ -512,7 +512,7 @@ static void __init elf_hwcap_fixup(void)
         */
        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
-            cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
+            cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
                elf_hwcap &= ~HWCAP_SWP;
 }
 
index a5edd7d60266985472ac9de11e6fb9801f8eadeb..3d039ef021e0f0f4945040a01a8d0e79888bfa8f 100644 (file)
@@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx(
        if (!pdata)
                pdata = &default_esdhc_pdata;
 
-       return imx_add_platform_device(data->devid, data->id, res,
-                       ARRAY_SIZE(res), pdata, sizeof(*pdata));
+       return imx_add_platform_device_dmamask(data->devid, data->id, res,
+                       ARRAY_SIZE(res), pdata, sizeof(*pdata),
+                       DMA_BIT_MASK(32));
 }
index 7581e036bda62e5b58c01140929d08705f7bbb8e..ef9ed36e8a612154c5e3adbff9439b9a5871f85f 100644 (file)
@@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = {
        .cm_inst          = DRA7XX_CM_CORE_AON_IPU_INST,
        .clkdm_offs       = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
        .dep_bit          = DRA7XX_IPU_STATDEP_SHIFT,
-       .flags            = CLKDM_CAN_HWSUP_SWSUP,
+       .flags            = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain mpu1_7xx_clkdm = {
index 9821be6dfd5ed95afcf996ac02d3a9da1257564a..49de4dd227be4b7513b2aa7c414016464a86ce38 100644 (file)
@@ -737,7 +737,8 @@ void __init omap5_init_late(void)
 #ifdef CONFIG_SOC_DRA7XX
 void __init dra7xx_init_early(void)
 {
-       omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
+       omap2_set_globals_tap(DRA7XX_CLASS,
+                             OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
        omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
        omap2_control_base_init();
        omap4_pm_init_early();
index f397bd6bd6e30149c525e270853701a916500e96..2c04f274147602c8a9131e14dbf0625a23b0f918 100644 (file)
@@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void)
  */
 static void irq_save_context(void)
 {
+       /* DRA7 has no SAR to save */
+       if (soc_is_dra7xx())
+               return;
+
        if (!sar_base)
                sar_base = omap4_get_sar_ram_base();
 
@@ -290,6 +294,9 @@ static void irq_sar_clear(void)
 {
        u32 val;
        u32 offset = SAR_BACKUP_STATUS_OFFSET;
+       /* DRA7 has no SAR to save */
+       if (soc_is_dra7xx())
+               return;
 
        if (soc_is_omap54xx())
                offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
index 2dbd3785ee6f0d00b4c3d88bada28d83c0203b2a..d44e0e2f11063134e9dd031c0a55b523dd76befa 100644 (file)
@@ -198,7 +198,6 @@ void omap_sram_idle(void)
        int per_next_state = PWRDM_POWER_ON;
        int core_next_state = PWRDM_POWER_ON;
        int per_going_off;
-       int core_prev_state;
        u32 sdrc_pwr = 0;
 
        mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -278,16 +277,20 @@ void omap_sram_idle(void)
                sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
        /* CORE */
-       if (core_next_state < PWRDM_POWER_ON) {
-               core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
-               if (core_prev_state == PWRDM_POWER_OFF) {
-                       omap3_core_restore_context();
-                       omap3_cm_restore_context();
-                       omap3_sram_restore_context();
-                       omap2_sms_restore_context();
-               }
+       if (core_next_state < PWRDM_POWER_ON &&
+           pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
+               omap3_core_restore_context();
+               omap3_cm_restore_context();
+               omap3_sram_restore_context();
+               omap2_sms_restore_context();
+       } else {
+               /*
+                * In off-mode resume path above, omap3_core_restore_context
+                * also handles the INTC autoidle restore done here so limit
+                * this to non-off mode resume paths so we don't do it twice.
+                */
+               omap3_intc_resume_idle();
        }
-       omap3_intc_resume_idle();
 
        pwrdm_post_transition(NULL);
 
index ad008e4b0c49a4aa1be23992be444198d67e41d5..67d79f9c6bad5be8be0418a331d90f4be42e94f7 100644 (file)
@@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
 void __init shmobile_init_delay(void)
 {
        struct device_node *np, *cpus;
-       bool is_a7_a8_a9 = false;
-       bool is_a15 = false;
+       unsigned int div = 0;
        bool has_arch_timer = false;
        u32 max_freq = 0;
 
@@ -55,27 +54,22 @@ void __init shmobile_init_delay(void)
                if (!of_property_read_u32(np, "clock-frequency", &freq))
                        max_freq = max(max_freq, freq);
 
-               if (of_device_is_compatible(np, "arm,cortex-a8") ||
-                   of_device_is_compatible(np, "arm,cortex-a9")) {
-                       is_a7_a8_a9 = true;
-               } else if (of_device_is_compatible(np, "arm,cortex-a7")) {
-                       is_a7_a8_a9 = true;
-                       has_arch_timer = true;
-               } else if (of_device_is_compatible(np, "arm,cortex-a15")) {
-                       is_a15 = true;
+               if (of_device_is_compatible(np, "arm,cortex-a8")) {
+                       div = 2;
+               } else if (of_device_is_compatible(np, "arm,cortex-a9")) {
+                       div = 1;
+               } else if (of_device_is_compatible(np, "arm,cortex-a7") ||
+                        of_device_is_compatible(np, "arm,cortex-a15")) {
+                       div = 1;
                        has_arch_timer = true;
                }
        }
 
        of_node_put(cpus);
 
-       if (!max_freq)
+       if (!max_freq || !div)
                return;
 
-       if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
-               if (is_a7_a8_a9)
-                       shmobile_setup_delay_hz(max_freq, 1, 3);
-               else if (is_a15)
-                       shmobile_setup_delay_hz(max_freq, 2, 4);
-       }
+       if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+               shmobile_setup_delay_hz(max_freq, 1, div);
 }
index deac58d5f1f7cf053ca215ec718c6902961a7908..c941e93048ad4d2ba09dabc2bb9451eaabcf8760 100644 (file)
@@ -762,7 +762,8 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        if (!mask)
                return NULL;
 
-       buf = kzalloc(sizeof(*buf), gfp);
+       buf = kzalloc(sizeof(*buf),
+                     gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
        if (!buf)
                return NULL;
 
index 727ae5f8c4e716178a4cda1e484b5118a16646c5..b0ed44313a5bc134c4919f5b82b02eedfa01a590 100644 (file)
@@ -70,7 +70,6 @@
                i2c3 = &i2c3;
                i2c4 = &i2c4;
                i2c5 = &i2c5;
-               i2c6 = &i2c6;
        };
 };
 
index e682a3f52791b38caf45defec637e8247e64f6a3..651c9d9d2d54658b7fa4eb73b1e0ed5f57d02995 100644 (file)
 
                i2c2: i2c@58782000 {
                        compatible = "socionext,uniphier-fi2c";
-                       status = "disabled";
                        reg = <0x58782000 0x80>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        interrupts = <0 43 4>;
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_i2c2>;
                        clocks = <&i2c_clk>;
-                       clock-frequency = <100000>;
+                       clock-frequency = <400000>;
                };
 
                i2c3: i2c@58783000 {
 
                i2c4: i2c@58784000 {
                        compatible = "socionext,uniphier-fi2c";
+                       status = "disabled";
                        reg = <0x58784000 0x80>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        interrupts = <0 45 4>;
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&pinctrl_i2c4>;
                        clocks = <&i2c_clk>;
-                       clock-frequency = <400000>;
+                       clock-frequency = <100000>;
                };
 
                i2c5: i2c@58785000 {
                        clock-frequency = <400000>;
                };
 
-               i2c6: i2c@58786000 {
-                       compatible = "socionext,uniphier-fi2c";
-                       reg = <0x58786000 0x80>;
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       interrupts = <0 26 4>;
-                       clocks = <&i2c_clk>;
-                       clock-frequency = <400000>;
-               };
-
                system_bus: system-bus@58c00000 {
                        compatible = "socionext,uniphier-system-bus";
                        status = "disabled";
index 4203d5f257bcf396f8cdadb4f613d7048b486f59..85da0f599cd6ac174c52eb1a8b376d171686c718 100644 (file)
@@ -588,6 +588,15 @@ set_hcr:
        msr     vpidr_el2, x0
        msr     vmpidr_el2, x1
 
+       /*
+        * When VHE is not in use, early init of EL2 and EL1 needs to be
+        * done here.
+        * When VHE _is_ in use, EL1 will not be used in the host and
+        * requires no configuration, and all non-hyp-specific EL2 setup
+        * will be done via the _EL1 system register aliases in __cpu_setup.
+        */
+       cbnz    x2, 1f
+
        /* sctlr_el1 */
        mov     x0, #0x0800                     // Set/clear RES{1,0} bits
 CPU_BE(        movk    x0, #0x33d0, lsl #16    )       // Set EE and E0E on BE systems
@@ -597,6 +606,7 @@ CPU_LE(     movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
        /* Coprocessor traps. */
        mov     x0, #0x33ff
        msr     cptr_el2, x0                    // Disable copro. traps to EL2
+1:
 
 #ifdef CONFIG_COMPAT
        msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
@@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
 
        .macro  update_early_cpu_boot_status status, tmp1, tmp2
        mov     \tmp2, #\status
-       str_l   \tmp2, __early_cpu_boot_status, \tmp1
+       adr_l   \tmp1, __early_cpu_boot_status
+       str     \tmp2, [\tmp1]
        dmb     sy
        dc      ivac, \tmp1                     // Invalidate potentially stale cache line
        .endm
index aef3605a8c47845ce2ce2d32f75e0fd59a8e4405..18a71bcd26ee4a15ea63e9f85102713f644495d5 100644 (file)
@@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
 static int smp_spin_table_cpu_init(unsigned int cpu)
 {
        struct device_node *dn;
+       int ret;
 
        dn = of_get_cpu_node(cpu, NULL);
        if (!dn)
@@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
        /*
         * Determine the address from which the CPU is polling.
         */
-       if (of_property_read_u64(dn, "cpu-release-addr",
-                                &cpu_release_addr[cpu])) {
+       ret = of_property_read_u64(dn, "cpu-release-addr",
+                                  &cpu_release_addr[cpu]);
+       if (ret)
                pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
                       cpu);
 
-               return -1;
-       }
+       of_node_put(dn);
 
-       return 0;
+       return ret;
 }
 
 static int smp_spin_table_cpu_prepare(unsigned int cpu)
index c2cfcb121e346590f0de5d805d9952ac69b42d96..2fcefe720283001789483f9be39b8924099c647d 100644 (file)
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
                  "=r" (charcnt),       /* %1  Output */
                  "=r" (dwordcnt),      /* %2  Output */
                  "=r" (fill8reg),      /* %3  Output */
-                 "=r" (wrkrega)        /* %4  Output */
+                 "=&r" (wrkrega)       /* %4  Output only */
                : "r" (c),              /* %5  Input */
                  "0" (s),              /* %0  Input/Output */
                  "1" (count)           /* %1  Input/Output */
index 8dde19962a5b49fbce13685656911c8f47622f3f..f63c96cd360863ac312aca929475de7629695169 100644 (file)
@@ -31,6 +31,7 @@
 #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
                                        0x00000040
 
+/* Reserved - do not use               0x00000004 */
 #define PPC_FEATURE_TRUE_LE            0x00000002
 #define PPC_FEATURE_PPC_LE             0x00000001
 
index 7030b035905dbf85b03bd36fa4bdc2a4a14982de..a15fe1d4e84aec9955622b603823fe44633da98d 100644 (file)
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
        unsigned long   cpu_features;   /* CPU_FTR_xxx bit */
        unsigned long   mmu_features;   /* MMU_FTR_xxx bit */
        unsigned int    cpu_user_ftrs;  /* PPC_FEATURE_xxx bit */
+       unsigned int    cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
        unsigned char   pabyte;         /* byte number in ibm,pa-features */
        unsigned char   pabit;          /* bit number (big-endian) */
        unsigned char   invert;         /* if 1, pa bit set => clear feature */
 } ibm_pa_features[] __initdata = {
-       {0, 0, PPC_FEATURE_HAS_MMU,     0, 0, 0},
-       {0, 0, PPC_FEATURE_HAS_FPU,     0, 1, 0},
-       {CPU_FTR_CTRL, 0, 0,            0, 3, 0},
-       {CPU_FTR_NOEXECUTE, 0, 0,       0, 6, 0},
-       {CPU_FTR_NODSISRALIGN, 0, 0,    1, 1, 1},
-       {0, MMU_FTR_CI_LARGE_PAGE, 0,   1, 2, 0},
-       {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+       {0, 0, PPC_FEATURE_HAS_MMU, 0,          0, 0, 0},
+       {0, 0, PPC_FEATURE_HAS_FPU, 0,          0, 1, 0},
+       {CPU_FTR_CTRL, 0, 0, 0,                 0, 3, 0},
+       {CPU_FTR_NOEXECUTE, 0, 0, 0,            0, 6, 0},
+       {CPU_FTR_NODSISRALIGN, 0, 0, 0,         1, 1, 1},
+       {0, MMU_FTR_CI_LARGE_PAGE, 0, 0,                1, 2, 0},
+       {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
        /*
-        * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
-        * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
-        * which is 0 if the kernel doesn't support TM.
+        * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+        * we don't want to turn on TM here, so we use the *_COMP versions
+        * which are 0 if the kernel doesn't support TM.
         */
-       {CPU_FTR_TM_COMP, 0, 0,         22, 0, 0},
+       {CPU_FTR_TM_COMP, 0, 0,
+        PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
 };
 
 static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
                if (bit ^ fp->invert) {
                        cur_cpu_spec->cpu_features |= fp->cpu_features;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+                       cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
                        cur_cpu_spec->mmu_features |= fp->mmu_features;
                } else {
                        cur_cpu_spec->cpu_features &= ~fp->cpu_features;
                        cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+                       cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
                        cur_cpu_spec->mmu_features &= ~fp->mmu_features;
                }
        }
index aad23e3dff2c17b0bba5a7d2740ea47c21839480..bf24ab1889215abed3b8759dae2d45177ab960ab 100644 (file)
@@ -4,6 +4,9 @@ config MMU
 config ZONE_DMA
        def_bool y
 
+config CPU_BIG_ENDIAN
+       def_bool y
+
 config LOCKDEP_SUPPORT
        def_bool y
 
index b6bfa169a002c051649a2909281fe8c203a00ee7..535a46d46d2894e486284b4a8eb74df38738305f 100644 (file)
@@ -44,7 +44,8 @@ struct zpci_fmb {
        u64 rpcit_ops;
        u64 dma_rbytes;
        u64 dma_wbytes;
-} __packed __aligned(64);
+       u64 pad[2];
+} __packed __aligned(128);
 
 enum zpci_state {
        ZPCI_FN_STATE_RESERVED,
index 781a9cf9b002930429697dd973b214b844a5c244..e10f8337367b31c3de87b045937070e5dd975f94 100644 (file)
@@ -13,4 +13,6 @@
 #define __NR_seccomp_exit_32   __NR_exit
 #define __NR_seccomp_sigreturn_32 __NR_sigreturn
 
+#include <asm-generic/seccomp.h>
+
 #endif /* _ASM_S390_SECCOMP_H */
index d4549c9645892aef684c60ac73a2c8d22886e58e..e5f50a7d2f4eb07fd2037c605e4c7e87160455b2 100644 (file)
@@ -105,6 +105,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
+                       continue;
                }
                /* Check if the lock owner is running. */
                if (first_diag && cpu_is_preempted(~owner)) {
index a8a0224fa0f8a4682f76281034a3172001f50200..081255cea1ee5d442a75529172e097afce7f396c 100644 (file)
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
                        req = cast_mcryptd_ctx_to_req(req_ctx);
                        if (irqs_disabled())
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                        else {
                                local_bh_disable();
-                               rctx->complete(&req->base, ret);
+                               req_ctx->complete(&req->base, ret);
                                local_bh_enable();
                        }
                }
index f8a29d2c97b012febc3728809a5daa53de22cf0f..e6a8613fbfb0ea19f8d507c4ce6ae1f0d4a35be6 100644 (file)
@@ -4,6 +4,7 @@
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>
 
+#define hugepages_supported() cpu_has_pse
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
index 4e7c6933691cc8de42aa4a82473482127bd0faab..10c11b4da31df0ded6a7a788b8647f22067e95be 100644 (file)
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static unsigned char hv_get_nmi_reason(void)
+{
+       return 0;
+}
+
 static void __init ms_hyperv_init_platform(void)
 {
        /*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
        machine_ops.crash_shutdown = hv_machine_crash_shutdown;
 #endif
        mark_tsc_unstable("running on Hyper-V");
+
+       /*
+        * Generation 2 instances don't support reading the NMI status from
+        * 0x61 port.
+        */
+       if (efi_enabled(EFI_BOOT))
+               x86_platform.get_nmi_reason = hv_get_nmi_reason;
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
index 1cea67d43e1db25c55db64b2f805f035428faa29..ead8dc0d084e749e35733abc164f1e209aca3f6f 100644 (file)
@@ -387,16 +387,16 @@ static int pkcs1pad_decrypt(struct akcipher_request *req)
        req_ctx->child_req.src = req->src;
        req_ctx->child_req.src_len = req->src_len;
        req_ctx->child_req.dst = req_ctx->out_sg;
-       req_ctx->child_req.dst_len = ctx->key_size - 1;
+       req_ctx->child_req.dst_len = ctx->key_size ;
 
-       req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+       req_ctx->out_buf = kmalloc(ctx->key_size,
                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC);
        if (!req_ctx->out_buf)
                return -ENOMEM;
 
        pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-                       ctx->key_size - 1, NULL);
+                           ctx->key_size, NULL);
 
        akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
        akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
@@ -595,16 +595,16 @@ static int pkcs1pad_verify(struct akcipher_request *req)
        req_ctx->child_req.src = req->src;
        req_ctx->child_req.src_len = req->src_len;
        req_ctx->child_req.dst = req_ctx->out_sg;
-       req_ctx->child_req.dst_len = ctx->key_size - 1;
+       req_ctx->child_req.dst_len = ctx->key_size;
 
-       req_ctx->out_buf = kmalloc(ctx->key_size - 1,
+       req_ctx->out_buf = kmalloc(ctx->key_size,
                        (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC);
        if (!req_ctx->out_buf)
                return -ENOMEM;
 
        pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
-                       ctx->key_size - 1, NULL);
+                           ctx->key_size, NULL);
 
        akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
        akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
index 786be8fed39e980add0b9e1cddc9e0682171dc9a..1f635471f318cb9d9395e26466ea9abebf6ddb7f 100644 (file)
@@ -136,7 +136,6 @@ static bool bcma_is_core_needed_early(u16 core_id)
        return false;
 }
 
-#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
 static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
                                                     struct bcma_device *core)
 {
@@ -184,7 +183,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
        struct of_phandle_args out_irq;
        int ret;
 
-       if (!parent || !parent->dev.of_node)
+       if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
                return 0;
 
        ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -202,23 +201,15 @@ static void bcma_of_fill_device(struct platform_device *parent,
 {
        struct device_node *node;
 
+       if (!IS_ENABLED(CONFIG_OF_IRQ))
+               return;
+
        node = bcma_of_find_child_device(parent, core);
        if (node)
                core->dev.of_node = node;
 
        core->irq = bcma_of_get_irq(parent, core, 0);
 }
-#else
-static void bcma_of_fill_device(struct platform_device *parent,
-                               struct bcma_device *core)
-{
-}
-static inline unsigned int bcma_of_get_irq(struct platform_device *parent,
-                                          struct bcma_device *core, int num)
-{
-       return 0;
-}
-#endif /* CONFIG_OF */
 
 unsigned int bcma_core_irq(struct bcma_device *core, int num)
 {
index 2bcecafdeaeac76b2e4decf3dda66d8d7a6f027b..c407c47a32326a568f11b2f586c8e29aa3588572 100644 (file)
@@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
 
        ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
                                    32, clocksource_mmio_readl_up);
-       if (!ret) {
+       if (ret) {
                pr_err("%s: registration failed\n", np->full_name);
                return;
        }
index b87596b591b3ca9fdf332884700e5a3cc96ebf10..e93405f0eac46565d18c25873d1113c3a1aa65fd 100644 (file)
@@ -1491,6 +1491,9 @@ static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
 {
        unsigned int new_freq;
 
+       if (cpufreq_suspended)
+               return 0;
+
        new_freq = cpufreq_driver->get(policy->cpu);
        if (!new_freq)
                return 0;
index 8b5a415ee14a53d21e9358e2b6707df852e827b8..30fe323c4551b4628de4c5878500d29eab8d57ca 100644 (file)
@@ -1130,6 +1130,10 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
                sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
                                      int_tofp(duration_ns));
                core_busy = mul_fp(core_busy, sample_ratio);
+       } else {
+               sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+               if (sample_ratio < int_tofp(1))
+                       core_busy = 0;
        }
 
        cpu->sample.busy_scaled = core_busy;
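
The new else branch in intel_pstate above clamps core_busy to zero when the sampled mperf/tsc ratio is below one percent, i.e. the core was essentially idle during the sample window. A rough standalone re-check of that arithmetic, assuming the driver's 8.8 fixed-point convention (FRAC_BITS = 8); the helpers and sample counts below are simplified stand-ins, not the driver's own.

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
static int32_t int_tofp(int32_t x) { return x << FRAC_BITS; }
static int32_t div_fp(int32_t x, int32_t y)
{
        return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

int main(void)
{
        /* Pretend the core ran unhalted (mperf) for 0.4% of the tsc window */
        int64_t mperf = 4000, tsc = 1000000;
        int32_t core_busy = int_tofp(97);       /* whatever the PID last saw */

        int32_t sample_ratio = div_fp((int32_t)(100 * mperf), (int32_t)tsc);
        if (sample_ratio < int_tofp(1))
                core_busy = 0;                  /* core was essentially idle */

        printf("sample_ratio=%d/256 core_busy=%d\n", sample_ratio, core_busy);
        return 0;
}
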
index 3d9acc53d2473818d1e37c4af432a4afcbdf4ec4..60fc0fa26fd3b19ab557888d9bd6e61c208fe0f6 100644 (file)
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
        struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_aes_cmac_exp_ctx state;
 
+       /* Don't let anything leak to 'out' */
+       memset(&state, 0, sizeof(state));
+
        state.null_msg = rctx->null_msg;
        memcpy(state.iv, rctx->iv, sizeof(state.iv));
        state.buf_count = rctx->buf_count;
index b5ad72897dc25b8d11642315f3e087af32d1c845..8f36af62fe951032d22dfd9f7ea0150e43d6360b 100644 (file)
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_sha_exp_ctx state;
 
+       /* Don't let anything leak to 'out' */
+       memset(&state, 0, sizeof(state));
+
        state.type = rctx->type;
        state.msg_bits = rctx->msg_bits;
        state.first = rctx->first;
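
Both ccp export hunks above zero the on-stack state structure before filling in its members, so that structure padding (or any member the code forgets to set) cannot be copied out to the caller as stale stack data. A tiny standalone illustration of the pattern; struct demo_exp_ctx and demo_export() are invented for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented export-state struct; the compiler will usually insert padding
 * after 'first', and that padding is what the memset() keeps from being
 * copied out. */
struct demo_exp_ctx {
        uint8_t  first;
        uint32_t msg_bits;
        uint8_t  buf[8];
};

static void demo_export(const struct demo_exp_ctx *rctx, void *out)
{
        struct demo_exp_ctx state;

        /* Don't let anything leak to 'out' (same idea as the ccp hunks) */
        memset(&state, 0, sizeof(state));

        state.first = rctx->first;
        state.msg_bits = rctx->msg_bits;
        memcpy(state.buf, rctx->buf, sizeof(state.buf));

        memcpy(out, &state, sizeof(state));
}

int main(void)
{
        struct demo_exp_ctx rctx = { 1, 512, "payload" };
        uint8_t out[sizeof(struct demo_exp_ctx)];

        demo_export(&rctx, out);
        printf("exported %zu bytes\n", sizeof(out));
        return 0;
}
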
index a0d4a08313ae895d1595428a6e70f4c1c528b6d4..aae05547b924bf86b518245ced599e02c787bf35 100644 (file)
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
                ptr->eptr = upper_32_bits(dma_addr);
 }
 
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+                            struct talitos_ptr *src_ptr, bool is_sec1)
+{
+       dst_ptr->ptr = src_ptr->ptr;
+       if (!is_sec1)
+               dst_ptr->eptr = src_ptr->eptr;
+}
+
 static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
                               bool is_sec1)
 {
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
                              (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
                                                           : DMA_TO_DEVICE);
-
        /* hmac data */
        desc->ptr[1].len = cpu_to_be16(areq->assoclen);
        if (sg_count > 1 &&
            (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
                                         areq->assoclen,
                                         &edesc->link_tbl[tbl_off])) > 1) {
-               tbl_off += ret;
-
                to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
                               sizeof(struct talitos_ptr), 0);
                desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
 
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
+
+               tbl_off += ret;
        } else {
                to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
                desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
        if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
                sg_link_tbl_len += authsize;
 
-       if (sg_count > 1 &&
-           (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
-                                        sg_link_tbl_len,
-                                        &edesc->link_tbl[tbl_off])) > 1) {
-               tbl_off += ret;
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+                              areq->assoclen, 0);
+       } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+                                               areq->assoclen, sg_link_tbl_len,
+                                               &edesc->link_tbl[tbl_off])) >
+                  1) {
                desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
                to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
                                              tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len,
                                           DMA_BIDIRECTIONAL);
-       } else
-               to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+               tbl_off += ret;
+       } else {
+               copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+       }
 
        /* cipher out */
        desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
 
        edesc->icv_ool = false;
 
-       if (sg_count > 1 &&
-           (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+       if (sg_count == 1) {
+               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+                              areq->assoclen, 0);
+       } else if ((sg_count =
+                       sg_to_link_tbl_offset(areq->dst, sg_count,
                                              areq->assoclen, cryptlen,
-                                             &edesc->link_tbl[tbl_off])) >
-           1) {
+                                             &edesc->link_tbl[tbl_off])) > 1) {
                struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
 
                to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);
 
                edesc->icv_ool = true;
-       } else
-               to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+       } else {
+               copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+       }
 
        /* iv out */
        map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
        struct talitos_alg_template algt;
 };
 
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+                              struct talitos_crypto_alg *talitos_alg)
 {
-       struct crypto_alg *alg = tfm->__crt_alg;
-       struct talitos_crypto_alg *talitos_alg;
-       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
        struct talitos_private *priv;
 
-       if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
-               talitos_alg = container_of(__crypto_ahash_alg(alg),
-                                          struct talitos_crypto_alg,
-                                          algt.alg.hash);
-       else
-               talitos_alg = container_of(alg, struct talitos_crypto_alg,
-                                          algt.alg.crypto);
-
        /* update context with ptr to dev */
        ctx->dev = talitos_alg->dev;
 
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+       struct talitos_crypto_alg *talitos_alg;
+       struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+               talitos_alg = container_of(__crypto_ahash_alg(alg),
+                                          struct talitos_crypto_alg,
+                                          algt.alg.hash);
+       else
+               talitos_alg = container_of(alg, struct talitos_crypto_alg,
+                                          algt.alg.crypto);
+
+       return talitos_init_common(ctx, talitos_alg);
+}
+
 static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-       talitos_cra_init(crypto_aead_tfm(tfm));
-       return 0;
+       struct aead_alg *alg = crypto_aead_alg(tfm);
+       struct talitos_crypto_alg *talitos_alg;
+       struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+       talitos_alg = container_of(alg, struct talitos_crypto_alg,
+                                  algt.alg.aead);
+
+       return talitos_init_common(ctx, talitos_alg);
 }
 
 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
index 93f0d4120289fa92d01a6873fac6d2efd37337e7..468447aff8ebd2009b565a6525a259ec813ee80a 100644 (file)
@@ -362,6 +362,7 @@ struct sbridge_pvt {
 
        /* Memory type detection */
        bool                    is_mirrored, is_lockstep, is_close_pg;
+       bool                    is_chan_hash;
 
        /* Fifo double buffers */
        struct mce              mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
        return (pkg >> 2) & 0x1;
 }
 
+static int haswell_chan_hash(int idx, u64 addr)
+{
+       int i;
+
+       /*
+        * XOR even bits from 12:26 to bit0 of idx,
+        *     odd bits from 13:27 to bit1
+        */
+       for (i = 12; i < 28; i += 2)
+               idx ^= (addr >> i) & 3;
+
+       return idx;
+}
+
 /****************************************************************************
                        Memory check routines
  ****************************************************************************/
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                KNL_MAX_CHANNELS : NUM_CHANNELS;
        u64 knl_mc_sizes[KNL_MAX_CHANNELS];
 
+       if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+               pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+               pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+       }
        if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
                        pvt->info.type == KNIGHTS_LANDING)
                pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        }
 
        ch_way = TAD_CH(reg) + 1;
-       sck_way = 1 << TAD_SOCK(reg);
+       sck_way = TAD_SOCK(reg);
 
        if (ch_way == 3)
                idx = addr >> 6;
-       else
+       else {
                idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+               if (pvt->is_chan_hash)
+                       idx = haswell_chan_hash(idx, addr);
+       }
        idx = idx % ch_way;
 
        /*
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
                switch(ch_way) {
                case 2:
                case 4:
-                       sck_xch = 1 << sck_way * (ch_way >> 1);
+                       sck_xch = (1 << sck_way) * (ch_way >> 1);
                        break;
                default:
                        sprintf(msg, "Invalid mirror set. Can't decode addr");
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
 
        ch_addr = addr - offset;
        ch_addr >>= (6 + shiftup);
-       ch_addr /= ch_way * sck_way;
+       ch_addr /= sck_xch;
        ch_addr <<= (6 + shiftup);
        ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
 
index 11bfee8b79a9f65418bcbe53dfc708edb220e69d..b5d05807e6ecd509058fcb85a3f4810ce17a8adc 100644 (file)
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
        .init = psci_dt_cpu_init_idle,
 };
 
-CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
 #endif
 #endif
 
index b77489dec6e8ce86cc8c283191fee6a49069f781..1bcbade479dc2e384d4abf0481f5571c95699a16 100644 (file)
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
+       unsigned                fw_version;
        void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
index d6b0bff510aaa719dbb0f9cce269645b06716127..b7b583c42ea82410b2c80825185da124e385219c 100644 (file)
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
        struct acp_pm_domain *apd;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /* return early if no ACP */
+       if (!adev->acp.acp_genpd)
+               return 0;
+
        /* SMU block will power on ACP irrespective of ACP runtime status.
         * Power off explicitly based on genpd ACP runtime status so that ACP
         * hw and ACP-genpd status are in sync.
index aef70db168329fc7c5db57a1c730fc48c35f1b6e..b04337de65d193884ba823f511df802b08aadec6 100644 (file)
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        fw_info.feature = adev->vce.fb_version;
                        break;
                case AMDGPU_INFO_FW_UVD:
-                       fw_info.ver = 0;
+                       fw_info.ver = adev->uvd.fw_version;
                        fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_GMC:
index 8d432e6901af50d2267089757685f2e6f16f4f40..81bd964d3dfc2f12cba2bf0e64b63c677e964d37 100644 (file)
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
        RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
        struct atom_context *atom_context;
        struct card_info *atom_card_info;
        bool mode_config_initialized;
-       struct amdgpu_crtc *crtcs[6];
-       struct amdgpu_afmt *afmt[7];
+       struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+       struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
        /* DVI-I properties */
        struct drm_property *coherent_mode_property;
        /* DAC enable load detect */
index 6f3369de232fe5f545382a6cc2ca528bdf8c1026..11af4492b4bee2e3bf29d7910e9d25b3adb5f843 100644 (file)
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+       if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+               return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
index 338da80006b66ced7671b5ed63fabb02750355ba..871018c634e0af896aa5a864bd87ee0653e35396 100644 (file)
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
                version_major, version_minor, family_id);
 
+       adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+                               (family_id << 8));
+
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
        r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_UVD_HANDLES)
                return 0;
 
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
        size = amdgpu_bo_size(adev->uvd.vcpu_bo);
        ptr = adev->uvd.cpu_addr;
 
index 4bec0c108cea9887ed612b252fc6b990547f029a..481a64fa9b470bfa7b2c2290bf6f85b882b4066a 100644 (file)
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
index 27fbd79d0daf0003be2014cee00c9dcdb49fdb43..e17fbdaf874bd7b7a74e3af2b7e8ee60e4ed8046 100644 (file)
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
+       port = drm_dp_get_validated_port_ref(mgr, port);
+       if (!port)
+               return -EINVAL;
+
        port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
        if (!mstb) {
                mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-               if (!mstb)
+               if (!mstb) {
+                       drm_dp_put_port(port);
                        return -EINVAL;
+               }
        }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        kfree(txmsg);
 fail_put:
        drm_dp_put_mst_branch_device(mstb);
+       drm_dp_put_port(port);
        return ret;
 }
 
index 10480939159c24435768a535b9f234fe00cbf387..daba7ebb969903d11dbf6d20a0e932bfc922f128 100644 (file)
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-                                                ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-                                                 IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+                                                IS_SKL_GT3(dev) || \
+                                                IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
index 18ba8139e922898e89d11423899bc0b4176b73fe..4d30b60defda44adcbc24eea0be77d1e9278e804 100644 (file)
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
 
-               down_read(&mm->mmap_sem);
-               while (pinned < npages) {
-                       ret = get_user_pages_remote(work->task, mm,
-                                       obj->userptr.ptr + pinned * PAGE_SIZE,
-                                       npages - pinned,
-                                       !obj->userptr.read_only, 0,
-                                       pvec + pinned, NULL);
-                       if (ret < 0)
-                               break;
-
-                       pinned += ret;
+               ret = -EFAULT;
+               if (atomic_inc_not_zero(&mm->mm_users)) {
+                       down_read(&mm->mmap_sem);
+                       while (pinned < npages) {
+                               ret = get_user_pages_remote
+                                       (work->task, mm,
+                                        obj->userptr.ptr + pinned * PAGE_SIZE,
+                                        npages - pinned,
+                                        !obj->userptr.read_only, 0,
+                                        pvec + pinned, NULL);
+                               if (ret < 0)
+                                       break;
+
+                               pinned += ret;
+                       }
+                       up_read(&mm->mmap_sem);
+                       mmput(mm);
                }
-               up_read(&mm->mmap_sem);
        }
 
        mutex_lock(&dev->struct_mutex);
index 6a978ce8024436251009979cf1b1b47fa14ff549..5c6080fd09688aee82c24841fa9e4c114164f384 100644 (file)
@@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So we don't need an immediate
+                        * wrap; we only need to wait for the reserved size
+                        * from the start of the ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        int ret;
 
-       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
+       /* We're using qword write, seqno should be aligned to 8 bytes. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
        /* w/a for post sync ops following a GPGPU operation we
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
        intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       /* We're thrashing one dword of HWS. */
+       intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
 
index 347d4df49a9bf37cc751d9bfdedde6701709b2f8..8ed3cf34f82d31bbefa98847d1413f625e2b2f74 100644 (file)
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
                             const struct drm_plane_state *pstate,
                             int y)
 {
-       struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+       struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
        struct drm_framebuffer *fb = pstate->fb;
+       uint32_t width = 0, height = 0;
+
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(pstate->rotation))
+               swap(width, height);
 
        /* for planar format */
        if (fb->pixel_format == DRM_FORMAT_NV12) {
                if (y)  /* y-plane data rate */
-                       return intel_crtc->config->pipe_src_w *
-                               intel_crtc->config->pipe_src_h *
+                       return width * height *
                                drm_format_plane_cpp(fb->pixel_format, 0);
                else    /* uv-plane data rate */
-                       return (intel_crtc->config->pipe_src_w/2) *
-                               (intel_crtc->config->pipe_src_h/2) *
+                       return (width / 2) * (height / 2) *
                                drm_format_plane_cpp(fb->pixel_format, 1);
        }
 
        /* for packed formats */
-       return intel_crtc->config->pipe_src_w *
-               intel_crtc->config->pipe_src_h *
-               drm_format_plane_cpp(fb->pixel_format, 0);
+       return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                struct drm_framebuffer *fb = plane->state->fb;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (fb == NULL)
+               if (!to_intel_plane_state(plane->state)->visible)
                        continue;
+
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
 
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
                uint16_t plane_blocks, y_plane_blocks = 0;
                int id = skl_wm_plane_id(intel_plane);
 
-               if (pstate->fb == NULL)
+               if (!to_intel_plane_state(pstate)->visible)
                        continue;
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
        struct drm_plane *plane = &intel_plane->base;
        struct drm_framebuffer *fb = plane->state->fb;
+       struct intel_plane_state *intel_pstate =
+                                       to_intel_plane_state(plane->state);
        uint32_t latency = dev_priv->wm.skl_latency[level];
        uint32_t method1, method2;
        uint32_t plane_bytes_per_line, plane_blocks_per_line;
        uint32_t res_blocks, res_lines;
        uint32_t selected_result;
        uint8_t cpp;
+       uint32_t width = 0, height = 0;
 
-       if (latency == 0 || !cstate->base.active || !fb)
+       if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
                return false;
 
+       width = drm_rect_width(&intel_pstate->src) >> 16;
+       height = drm_rect_height(&intel_pstate->src) >> 16;
+
+       if (intel_rotation_90_or_270(plane->state->rotation))
+               swap(width, height);
+
        cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
                                 cpp, latency);
        method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
-                                cstate->pipe_src_w,
-                                cpp, fb->modifier[0],
+                                width,
+                                cpp,
+                                fb->modifier[0],
                                 latency);
 
-       plane_bytes_per_line = cstate->pipe_src_w * cpp;
+       plane_bytes_per_line = width * cpp;
        plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
        if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
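
The skl watermark hunks above switch from the pipe source size to the plane's own source rectangle, which i915 stores in 16.16 fixed point (hence the >> 16), and swap width and height when the plane is rotated 90/270 degrees. A minimal standalone sketch of just that bookkeeping; the rect helpers and numbers stand in for drm_rect_width()/drm_rect_height() and real plane state:

#include <stdint.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };
static int rect_width(const struct rect *r)  { return r->x2 - r->x1; }
static int rect_height(const struct rect *r) { return r->y2 - r->y1; }

int main(void)
{
        /* 1920x1080 plane source expressed in 16.16 fixed point */
        struct rect src = { 0, 0, 1920 << 16, 1080 << 16 };
        int rotated_90_or_270 = 1;       /* pretend the plane is rotated */
        uint32_t cpp = 4;                /* bytes per pixel, e.g. XRGB8888 */

        uint32_t width  = rect_width(&src) >> 16;
        uint32_t height = rect_height(&src) >> 16;

        if (rotated_90_or_270) {         /* swap(width, height) in the patch */
                uint32_t tmp = width;
                width = height;
                height = tmp;
        }

        printf("data rate = %u bytes/frame\n", width * height * cpp);
        return 0;
}
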
index 45ce45a5e122046047b2fadc75f5f6f89f34c379..9121646d7c4dd98c4c32ed74343948e662059d06 100644 (file)
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
        /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
        tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
            IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
                tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
        WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+       /* This is tied to WaForceContextSaveRestoreNonCoherent */
+       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
                /*
                 *Use Force Non-Coherent whenever executing a 3D context. This
                 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = ringbuf->obj;
+       /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+       unsigned flags = PIN_OFFSET_BIAS | 4096;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
                if (ret)
                        return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                        return -ENOMEM;
                }
        } else {
-               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+               ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+                                           flags | PIN_MAPPABLE);
                if (ret)
                        return ret;
 
@@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
                if (unlikely(total_bytes > remain_usable)) {
                        /*
                         * The base request will fit but the reserved space
-                        * falls off the end. So only need to to wait for the
-                        * reserved size after flushing out the remainder.
+                        * falls off the end. So we don't need an immediate
+                        * wrap; we only need to wait for the reserved size
+                        * from the start of the ringbuffer.
                         */
                        wait_bytes = remain_actual + ringbuf->reserved_size;
-                       need_wrap = true;
                } else if (total_bytes > ringbuf->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
index 436d8f2b86823d30bd7f33bc2247d7d905a787ab..68b6f69aa6820d88952173e4aca801ef56370a7c 100644 (file)
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
        } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
                dev_priv->uncore.funcs.force_wake_get =
                        fw_domains_get_with_thread_status;
-               dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+               if (IS_HASWELL(dev))
+                       dev_priv->uncore.funcs.force_wake_put =
+                               fw_domains_put_with_fifo;
+               else
+                       dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
                fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
                               FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
        } else if (IS_IVYBRIDGE(dev)) {
index ae96ebc490fb2b80cbed26bacca113325c52fc82..e81aefe5ffa7c3c278cdda8253cdd228f228369e 100644 (file)
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
                break;
        default:
                if (disp->dithering_mode) {
+                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
                        drm_object_attach_property(&connector->base,
                                                   disp->dithering_mode,
                                                   nv_connector->
                                                   dithering_mode);
-                       nv_connector->dithering_mode = DITHERING_MODE_AUTO;
                }
                if (disp->dithering_depth) {
+                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
                        drm_object_attach_property(&connector->base,
                                                   disp->dithering_depth,
                                                   nv_connector->
                                                   dithering_depth);
-                       nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
                }
                break;
        }
index c56a886229f196e86340a726e5b64431ba30dac2..b2de290da16feedf3834f31a32879ab315a586b5 100644 (file)
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
        gf100_gr_mmio(gr, gr->func->mmio);
 
+       nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
        memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
        for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
                do {
index fd8c4d317e60c12c164b49b4142a6c6e3d7dabf4..95f4fea893021cb8f233244404fed6878742dd3b 100644 (file)
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
        return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-       return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
  *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+       /* make sure required functions are enabled */
+       /* dGPU power control is required */
+       if (atpx->functions.power_cntl == false) {
+               printk("ATPX dGPU power cntl not present, forcing\n");
+               atpx->functions.power_cntl = true;
+       }
+
        if (atpx->functions.px_params) {
                union acpi_object *info;
                struct atpx_px_params output;
index cfcc099c537d0475da7eb6d6ce4576cc7a116b98..81a63d7f5cd9df6e41ef7748d4635a2cddf5b508 100644 (file)
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                   rdev->mode_info.dither_property,
                                                   RADEON_FMT_DITHER_DISABLE);
 
-                       if (radeon_audio != 0)
+                       if (radeon_audio != 0) {
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
+                       }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
                                drm_object_attach_property(&radeon_connector->base.base,
                                                           rdev->mode_info.audio_property,
                                                           RADEON_AUDIO_AUTO);
+                               radeon_connector->audio = RADEON_AUDIO_AUTO;
                        }
                        if (ASIC_IS_DCE5(rdev))
                                drm_object_attach_property(&radeon_connector->base.base,
index 4fd1a961012d8ff314386765edf72d0662660fe8..d0826fb0434c45f35647c5a41288c17634c3c95f 100644 (file)
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
        "LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
        }
        rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
-       DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-               radeon_family_name[rdev->family], pdev->vendor, pdev->device,
-               pdev->subsystem_vendor, pdev->subsystem_device);
+       DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+                radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+                pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
         * ignore it */
        vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-       if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+       if (rdev->flags & RADEON_IS_PX)
                runtime = true;
        vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
        if (runtime)
index 7dddfdce85e6be56f5d6bab8787fad82aa120809..90f739478a1b5257b488d120245d77d4cf561a70 100644 (file)
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
+       if (radeon_ttm_tt_has_userptr(bo->ttm))
+               return -EPERM;
        return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
index af4df81c4e0c79c721dca5dc610da235d53651f9..e6abc09b67e3e63e82fea3bbda2b1c91234c525c 100644 (file)
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
        { 0, 0, 0, 0 },
 };
 
index 5acf346e048e3bb45fc33d97eeecd1321bf2ef9d..99eb1c1a3b7ba5905584bc9afea020b101806dcb 100644 (file)
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
-       int max_mtu;
-       int oper_mtu;
+       u16 max_mtu;
+       u16 oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;
index e8a84d12b7fffe812cd329a88da26f6922c219af..1142a93dd90b0ad8d25b6dbbd15cf8f9b4ee11b1 100644 (file)
@@ -153,6 +153,7 @@ static const struct xpad_device {
        { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+       { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
        { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
        { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
@@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x046d),            /* Logitech X-Box 360 style controllers */
        XPAD_XBOX360_VENDOR(0x0738),            /* Mad Catz X-Box 360 controllers */
        { USB_DEVICE(0x0738, 0x4540) },         /* Mad Catz Beat Pad */
+       XPAD_XBOXONE_VENDOR(0x0738),            /* Mad Catz FightStick TE 2 */
        XPAD_XBOX360_VENDOR(0x0e6f),            /* 0x0e6f X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* X-Box 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
index d5994a745ffa5b5f4d1b411fec76f0a447a60f94..9829363345372add0e3a5fd6812ff49a6d527aba 100644 (file)
@@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
        input_set_drvdata(haptics->input_dev, haptics);
 
        haptics->input_dev->name = "arizona:haptics";
-       haptics->input_dev->dev.parent = pdev->dev.parent;
        haptics->input_dev->close = arizona_haptics_close;
        __set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
 
index 3f02e0e03d128afaee7439e509f00d22f23e9e59..67aab86048ad73faa1f1af16c1ae1663cf4500f1 100644 (file)
@@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
        if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
                kpd_delay = 15625;
 
-       if (kpd_delay > 62500 || kpd_delay == 0) {
+       /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
+       if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
                dev_err(&pdev->dev, "invalid power key trigger delay\n");
                return -EINVAL;
        }
@@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
        pwr->name = "pmic8xxx_pwrkey";
        pwr->phys = "pmic8xxx_pwrkey/input0";
 
-       delay = (kpd_delay << 10) / USEC_PER_SEC;
-       delay = 1 + ilog2(delay);
+       delay = (kpd_delay << 6) / USEC_PER_SEC;
+       delay = ilog2(delay);
 
        err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
        if (err < 0) {
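
The pwrkey hunk above encodes the debounce delay as a power of two in units of 1/64 second: the register value becomes ilog2(delay_us * 64 / USEC_PER_SEC), which matches the new 1/64 s .. 2 s range check. A standalone re-check of the arithmetic at both endpoints; pon_delay_bits() and the local ilog2() are stand-ins for the driver code and the kernel helper:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

/* floor(log2(n)) for n >= 1 */
static unsigned int ilog2(unsigned long n)
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

static unsigned int pon_delay_bits(unsigned long kpd_delay_us)
{
        /* Same math as the patch: delay in 1/64 s units, then log2 */
        unsigned long delay = (kpd_delay_us << 6) / USEC_PER_SEC;

        return ilog2(delay);
}

int main(void)
{
        printf("15625 us -> %u\n", pon_delay_bits(15625));             /* 0 */
        printf("2 s      -> %u\n", pon_delay_bits(2 * USEC_PER_SEC));  /* 7 */
        return 0;
}
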
index 10c4e3d462f112f15ec9843093c5f988d44780b9..caa5a62c42fbe0b55e0230931d7d833922fdaab9 100644 (file)
@@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
 
        info->input_dev->name = "twl4030:vibrator";
        info->input_dev->id.version = 1;
-       info->input_dev->dev.parent = pdev->dev.parent;
        info->input_dev->close = twl4030_vibra_close;
        __set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
index ea63fad48de643be287648820c52bb7911771957..53e33fab3f7afadef16e97a76271752d9d6d891c 100644 (file)
@@ -45,7 +45,6 @@
 struct vibra_info {
        struct device *dev;
        struct input_dev *input_dev;
-       struct workqueue_struct *workqueue;
        struct work_struct play_work;
        struct mutex mutex;
        int irq;
@@ -213,11 +212,7 @@ static int vibra_play(struct input_dev *input, void *data,
        info->strong_speed = effect->u.rumble.strong_magnitude;
        info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
 
-       ret = queue_work(info->workqueue, &info->play_work);
-       if (!ret) {
-               dev_info(&input->dev, "work is already on queue\n");
-               return ret;
-       }
+       schedule_work(&info->play_work);
 
        return 0;
 }
@@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
 
        info->input_dev->name = "twl6040:vibrator";
        info->input_dev->id.version = 1;
-       info->input_dev->dev.parent = pdev->dev.parent;
        info->input_dev->close = twl6040_vibra_close;
        __set_bit(FF_RUMBLE, info->input_dev->ffbit);
 
index 3a7f3a4a439635064a4e8ca78e1e7860b9072a47..7c18249d6c8e820e3fe86ffa66196164dbef2d3f 100644 (file)
@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
                goto err_free_buf;
        }
 
+       /* Sanity check that a device has an endpoint */
+       if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
+               dev_err(&usbinterface->dev,
+                       "Invalid number of endpoints\n");
+               error = -EINVAL;
+               goto err_free_urb;
+       }
+
        /*
         * The endpoint is always altsetting 0, we know this since we know
         * this device only has one interrupt endpoint
@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
         * HID report descriptor
         */
        if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
-                                    HID_DEVICE_TYPE, &hid_desc) != 0){
+                                    HID_DEVICE_TYPE, &hid_desc) != 0) {
                dev_err(&usbinterface->dev,
                        "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
                error = -EIO;
index 374c129219ef0c48c8ff3004662c413d78ad2b4c..5efadad4615bf6d68c215897b15adbf9eab80b53 100644 (file)
@@ -92,6 +92,7 @@ struct iommu_dev_data {
        struct list_head dev_data_list;   /* For global dev_data_list */
        struct protection_domain *domain; /* Domain the device is bound to */
        u16 devid;                        /* PCI Device ID */
+       u16 alias;                        /* Alias Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
        bool passthrough;                 /* Device is identity mapped */
        struct {
@@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
        return container_of(dom, struct protection_domain, domain);
 }
 
+static inline u16 get_device_id(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+
+       return PCI_DEVID(pdev->bus->number, pdev->devfn);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
@@ -203,6 +211,68 @@ out_unlock:
        return dev_data;
 }
 
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+       *(u16 *)data = alias;
+       return 0;
+}
+
+static u16 get_alias(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       u16 devid, ivrs_alias, pci_alias;
+
+       devid = get_device_id(dev);
+       ivrs_alias = amd_iommu_alias_table[devid];
+       pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+       if (ivrs_alias == pci_alias)
+               return ivrs_alias;
+
+       /*
+        * DMA alias showdown
+        *
+        * The IVRS is fairly reliable in telling us about aliases, but it
+        * can't know about every screwy device.  If we don't have an IVRS
+        * reported alias, use the PCI reported alias.  In that case we may
+        * still need to initialize the rlookup and dev_table entries if the
+        * alias is to a non-existent device.
+        */
+       if (ivrs_alias == devid) {
+               if (!amd_iommu_rlookup_table[pci_alias]) {
+                       amd_iommu_rlookup_table[pci_alias] =
+                               amd_iommu_rlookup_table[devid];
+                       memcpy(amd_iommu_dev_table[pci_alias].data,
+                              amd_iommu_dev_table[devid].data,
+                              sizeof(amd_iommu_dev_table[pci_alias].data));
+               }
+
+               return pci_alias;
+       }
+
+       pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+               "for device %s[%04x:%04x], kernel reported alias "
+               "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+               PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+               PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+               PCI_FUNC(pci_alias));
+
+       /*
+        * If we don't have a PCI DMA alias and the IVRS alias is on the same
+        * bus, then the IVRS table may know about a quirk that we don't.
+        */
+       if (pci_alias == devid &&
+           PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+               pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+               pdev->dma_alias_devfn = ivrs_alias & 0xff;
+               pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+                       PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+                       dev_name(dev));
+       }
+
+       return ivrs_alias;
+}
+
 static struct iommu_dev_data *find_dev_data(u16 devid)
 {
        struct iommu_dev_data *dev_data;
@@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
        return dev_data;
 }
 
-static inline u16 get_device_id(struct device *dev)
-{
-       struct pci_dev *pdev = to_pci_dev(dev);
-
-       return PCI_DEVID(pdev->bus->number, pdev->devfn);
-}
-
 static struct iommu_dev_data *get_dev_data(struct device *dev)
 {
        return dev->archdata.iommu;
@@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
        if (!dev_data)
                return -ENOMEM;
 
+       dev_data->alias = get_alias(dev);
+
        if (pci_iommuv2_capable(pdev)) {
                struct amd_iommu *iommu;
 
@@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
        u16 devid, alias;
 
        devid = get_device_id(dev);
-       alias = amd_iommu_alias_table[devid];
+       alias = get_alias(dev);
 
        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
@@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
        int ret;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
 
        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (!ret && alias != dev_data->devid)
@@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        bool ats;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
        ats   = dev_data->ats.enabled;
 
        /* Update data structures */
@@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
                return;
 
        iommu = amd_iommu_rlookup_table[dev_data->devid];
-       alias = amd_iommu_alias_table[dev_data->devid];
+       alias = dev_data->alias;
 
        /* decrease reference counters */
        dev_data->domain->dev_iommu[iommu->index] -= 1;
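
The new get_alias() above reconciles the alias the IVRS table reports with the alias the PCI core reports. A distilled standalone sketch of just its decision order (the rlookup/dev_table fix-ups and the dma_alias_devfn quirk are omitted); pick_alias() and the IDs are invented:

#include <stdint.h>
#include <stdio.h>

static uint16_t pick_alias(uint16_t devid, uint16_t ivrs_alias,
                           uint16_t pci_alias)
{
        if (ivrs_alias == pci_alias)    /* both sources agree */
                return ivrs_alias;

        if (ivrs_alias == devid)        /* IVRS reported no alias */
                return pci_alias;

        return ivrs_alias;              /* IVRS knows a quirk PCI doesn't */
}

int main(void)
{
        /* devid 0x00a0 with a PCI DMA alias of 0x00a8 and no IVRS alias */
        printf("alias = 0x%04x\n", pick_alias(0x00a0, 0x00a0, 0x00a8));
        return 0;
}
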
index 2409e3bd3df21e973db1231360b7a50fba66efa1..7c39ac4b9c537df09128a0c106d3bfbd009f7e7f 100644 (file)
@@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        if (smmu_domain->smmu)
                goto out_unlock;
 
+       /* We're bypassing these SIDs, so don't allocate an actual context */
+       if (domain->type == IOMMU_DOMAIN_DMA) {
+               smmu_domain->smmu = smmu;
+               goto out_unlock;
+       }
+
        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
        void __iomem *cb_base;
        int irq;
 
-       if (!smmu)
+       if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
                return;
 
        /*
@@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
-       /* Devices in an IOMMU group may already be configured */
-       ret = arm_smmu_master_configure_smrs(smmu, cfg);
-       if (ret)
-               return ret == -EEXIST ? 0 : ret;
-
        /*
         * FIXME: This won't be needed once we have IOMMU-backed DMA ops
-        * for all devices behind the SMMU.
+        * for all devices behind the SMMU. Note that we need to take
+        * care configuring SMRs for devices both a platform_device and
+        * a PCI device (i.e. a PCI host controller)
         */
        if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
                return 0;
 
+       /* Devices in an IOMMU group may already be configured */
+       ret = arm_smmu_master_configure_smrs(smmu, cfg);
+       if (ret)
+               return ret == -EEXIST ? 0 : ret;
+
        for (i = 0; i < cfg->num_streamids; ++i) {
                u32 idx, s2cr;
 
index 94a30da0cfacac7d48e7e157b7ae7cd803708b50..4dffccf532a2173ce17f289aeeb0f69bdfac4dc3 100644 (file)
@@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
        gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
        /* Update the pcpu_masks */
-       for (i = 0; i < gic_vpes; i++)
+       for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
@@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        gic_map_to_vpe(intr, vpe);
-       for (i = 0; i < gic_vpes; i++)
+       for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[vpe].pcpu_mask);
        spin_unlock_irqrestore(&gic_lock, flags);
index 0d29b5a6356d729a6ceb987bccc8c2dbe6779823..99e5f9751e8b1746835b28c4bd4e2a5d1b53fd14 100644 (file)
@@ -715,6 +715,9 @@ base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        if (!maddr || maddr->family != AF_ISDN)
                return -EINVAL;
 
+       if (addr_len < sizeof(struct sockaddr_mISDN))
+               return -EINVAL;
+
        lock_sock(sk);
 
        if (_pms(sk)->dev) {
index 2a1ba62b7da20ff921ec36005a8b4d40ec17756b..befd67df08e1f26410e0817cab1506183464b522 100644 (file)
@@ -62,9 +62,8 @@ config DUMMY
          this device is consigned into oblivion) with a configurable IP
          address. It is most commonly used in order to make your currently
          inactive SLIP address seem like a real address for local programs.
-         If you use SLIP or PPP, you might want to say Y here. Since this
-         thing often comes in handy, the default is Y. It won't enlarge your
-         kernel either. What a deal. Read about it in the Network
+         If you use SLIP or PPP, you might want to say Y here. It won't
+         enlarge your kernel. What a deal. Read about it in the Network
          Administrator's Guide, available from
          <http://www.tldp.org/docs.html#guide>.
 
@@ -195,6 +194,7 @@ config GENEVE
 
 config MACSEC
        tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
+       select CRYPTO
        select CRYPTO_AES
        select CRYPTO_GCM
        ---help---
index 50454be86570d61c40863ca8cad899c2034a316b..a2904029cccc2949b90d66cc8fb7d3fcbc11f103 100644 (file)
@@ -2181,27 +2181,10 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
                               struct net_device *bridge)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       u16 fid;
        int i, err;
 
        mutex_lock(&ps->smi_mutex);
 
-       /* Get or create the bridge FID and assign it to the port */
-       for (i = 0; i < ps->num_ports; ++i)
-               if (ps->ports[i].bridge_dev == bridge)
-                       break;
-
-       if (i < ps->num_ports)
-               err = _mv88e6xxx_port_fid_get(ds, i, &fid);
-       else
-               err = _mv88e6xxx_fid_new(ds, &fid);
-       if (err)
-               goto unlock;
-
-       err = _mv88e6xxx_port_fid_set(ds, port, fid);
-       if (err)
-               goto unlock;
-
        /* Assign the bridge and remap each port's VLANTable */
        ps->ports[port].bridge_dev = bridge;
 
@@ -2213,7 +2196,6 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
                }
        }
 
-unlock:
        mutex_unlock(&ps->smi_mutex);
 
        return err;
@@ -2223,16 +2205,10 @@ void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        struct net_device *bridge = ps->ports[port].bridge_dev;
-       u16 fid;
        int i;
 
        mutex_lock(&ps->smi_mutex);
 
-       /* Give the port a fresh Filtering Information Database */
-       if (_mv88e6xxx_fid_new(ds, &fid) ||
-           _mv88e6xxx_port_fid_set(ds, port, fid))
-               netdev_warn(ds->ports[port], "failed to assign a new FID\n");
-
        /* Unassign the bridge and remap each port's VLANTable */
        ps->ports[port].bridge_dev = NULL;
 
@@ -2476,9 +2452,9 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * the other bits clear.
         */
        reg = 1 << port;
-       /* Disable learning for DSA and CPU ports */
-       if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
-               reg = PORT_ASSOC_VECTOR_LOCKED_PORT;
+       /* Disable learning for CPU port */
+       if (dsa_is_cpu_port(ds, port))
+               reg = 0;
 
        ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
        if (ret)
@@ -2558,11 +2534,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (ret)
                goto abort;
 
-       /* Port based VLAN map: give each port its own address
+       /* Port based VLAN map: give each port the same default address
         * database, and allow bidirectional communication between the
         * CPU and DSA port(s), and the other ports.
         */
-       ret = _mv88e6xxx_port_fid_set(ds, port, port + 1);
+       ret = _mv88e6xxx_port_fid_set(ds, port, 0);
        if (ret)
                goto abort;
 
index 8f76f4558a88c15b0a14bb6ec3d9fb769fea9d0a..2ff465848b6553ecc0a646421e30cded0e8fb053 100644 (file)
@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = -EIO;
 
-       netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
 
        /* Init PHY as early as possible due to power saving issue  */
index 99b30a952b38736a825efc5149aa9027f6fae64d..38db2e4d7d540d27748ca05748c412b4a4d0215b 100644 (file)
@@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core)
                dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
        }
 
+       /* This (reset &) enable is not present in specs or reference driver but
+        * Broadcom does it in arch PCI code when enabling fake PCI device.
+        */
+       bcma_core_enable(core, 0);
+
        /* Allocation and references */
        net_dev = alloc_etherdev(sizeof(*bgmac));
        if (!net_dev)
index 4fbb093e0d844031dd86cc3d7253eb44d10686d6..9a03c142b742502051b7e98a0a30c600684d8f80 100644 (file)
 #define  BGMAC_CMDCFG_TAI                      0x00000200
 #define  BGMAC_CMDCFG_HD                       0x00000400      /* Set if in half duplex mode */
 #define  BGMAC_CMDCFG_HD_SHIFT                 10
-#define  BGMAC_CMDCFG_SR_REV0                  0x00000800      /* Set to reset mode, for other revs */
-#define  BGMAC_CMDCFG_SR_REV4                  0x00002000      /* Set to reset mode, only for core rev 4 */
-#define  BGMAC_CMDCFG_SR(rev)  ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
+#define  BGMAC_CMDCFG_SR_REV0                  0x00000800      /* Set to reset mode, for core rev 0-3 */
+#define  BGMAC_CMDCFG_SR_REV4                  0x00002000      /* Set to reset mode, for core rev >= 4 */
+#define  BGMAC_CMDCFG_SR(rev)  ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
 #define  BGMAC_CMDCFG_ML                       0x00008000      /* Set to activate mac loopback mode */
 #define  BGMAC_CMDCFG_AE                       0x00400000
 #define  BGMAC_CMDCFG_CFE                      0x00800000
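For reference, the corrected revision selection as a tiny helper (constants copied from the defines above; the function name is illustrative, the driver uses the macro directly):

/* core revs 0-3 carry the soft-reset bit at 0x0800, revs 4 and newer at
 * 0x2000; the old "rev == 4" test silently fell back to the rev-0 bit
 * for any core newer than rev 4. */
static unsigned int bgmac_cmdcfg_sr_bit(unsigned int core_rev)
{
	return (core_rev >= 4) ? 0x00002000u : 0x00000800u;
}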
index cf6445d148ca5098c5ed4a7ab443022fe212ae27..44ad1490b4726175ea8f5697b3a759e5cc7befcb 100644 (file)
@@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(u32 *)p;
+               if (sizeof(unsigned long) != sizeof(u32) &&
+                   s->stat_sizeof == sizeof(unsigned long))
+                       data[i] = *(unsigned long *)p;
+               else
+                       data[i] = *(u32 *)p;
        }
 }
 
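A minimal userspace illustration of why the blind u32 read was wrong for unsigned-long-sized counters on LP64 targets (a sketch of the aliasing problem only, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long stat = 0x100000001UL;	/* a counter that overflowed 32 bits */
	uint32_t truncated = *(uint32_t *)&stat;	/* what the old code always did */

	/* On 64-bit little-endian this drops the upper 32 bits; on 64-bit
	 * big-endian it returns the wrong half entirely.  Hence the new
	 * stat_sizeof check before choosing the pointer type. */
	printf("full=%#lx u32-read=%#x\n", stat, truncated);
	return 0;
}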
index 967951582e033d8a34a3463d5047f15bb7c26219..d20539a6d162bd5f0dbbd9c79d89e6784e37b969 100644 (file)
@@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx)
                }
 
                lmac++;
-               if (lmac == MAX_LMAC_PER_BGX)
+               if (lmac == MAX_LMAC_PER_BGX) {
+                       of_node_put(node);
                        break;
+               }
        }
-       of_node_put(node);
        return 0;
 
 defer:
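The relocated of_node_put() follows the usual rule for reference-counted device-tree iteration, assuming a for_each_child_of_node()-style loop (a sketch of the convention only, not the exact bgx loop; done_early and parent are placeholders):

struct device_node *child;

for_each_child_of_node(parent, child) {
	if (done_early) {
		of_node_put(child);	/* drop the reference held for the current child */
		break;
	}
}
/* No put after the loop: on normal termination the cursor is already NULL,
 * so an unconditional put there would drop a reference the loop never left us. */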
index 984a3cc26f86ddb7a194b4738a3dd7e018b0bc33..326d4009525e1abf5cae7674c29b65a7d76af66a 100644 (file)
@@ -1451,6 +1451,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int iqtype, unsigned int iqid,
+              unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
               unsigned int vf, unsigned int iqtype, unsigned int iqid,
               unsigned int fl0id, unsigned int fl1id);
index 13b144bcf725ec29d42dbdc1b33fc0c39c1fe919..6278e5a74b74d3f538f60e327e635d9ba9b9c0cd 100644 (file)
@@ -2981,14 +2981,28 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
 void t4_free_sge_resources(struct adapter *adap)
 {
        int i;
-       struct sge_eth_rxq *eq = adap->sge.ethrxq;
-       struct sge_eth_txq *etq = adap->sge.ethtxq;
+       struct sge_eth_rxq *eq;
+       struct sge_eth_txq *etq;
+
+       /* stop all Rx queues in order to start them draining */
+       for (i = 0; i < adap->sge.ethqsets; i++) {
+               eq = &adap->sge.ethrxq[i];
+               if (eq->rspq.desc)
+                       t4_iq_stop(adap, adap->mbox, adap->pf, 0,
+                                  FW_IQ_TYPE_FL_INT_CAP,
+                                  eq->rspq.cntxt_id,
+                                  eq->fl.size ? eq->fl.cntxt_id : 0xffff,
+                                  0xffff);
+       }
 
        /* clean up Ethernet Tx/Rx queues */
-       for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+       for (i = 0; i < adap->sge.ethqsets; i++) {
+               eq = &adap->sge.ethrxq[i];
                if (eq->rspq.desc)
                        free_rspq_fl(adap, &eq->rspq,
                                     eq->fl.size ? &eq->fl : NULL);
+
+               etq = &adap->sge.ethtxq[i];
                if (etq->q.desc) {
                        t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
                                       etq->q.cntxt_id);
index cc1736bece0faabe22f04a828f7d1740bf9462de..71586a3e0f613a83118281e5eb80f369d4c567c9 100644 (file)
@@ -2557,6 +2557,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
+#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
@@ -2594,6 +2595,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
        if (!vpd)
                return -ENOMEM;
 
+       /* We have two VPD data structures stored in the adapter VPD area.
+        * By default, Linux calculates the size of the VPD area by traversing
+        * the first VPD area at offset 0x0, so we need to tell the OS what
+        * our real VPD size is.
+        */
+       ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
+       if (ret < 0)
+               goto out;
+
        /* Card information normally starts at VPD_BASE but early cards had
         * it at 0.
         */
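A quick size check against the defines above: the copy the driver reads starts at VPD_BASE = 0x400 and spans VPD_LEN = 1024 = 0x400 bytes, ending at 0x800, so reporting VPD_SIZE = 0x800 via pci_set_vpd_size() covers it, whereas the size the PCI core derives by walking the copy at offset 0x0 stops short of it (the two-copies layout is as described in the new comment; the contents of the first copy are not shown in this hunk).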
@@ -6939,6 +6949,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_iq_stop - stop an ingress queue and its FLs
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *     @pf: the PF owning the queues
+ *     @vf: the VF owning the queues
+ *     @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ *     @iqid: ingress queue id
+ *     @fl0id: FL0 queue id or 0xffff if no attached FL0
+ *     @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *     Stops an ingress queue and its associated FLs, if any.  This causes
+ *     any current or future data/messages destined for these queues to be
+ *     tossed.
+ */
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int iqtype, unsigned int iqid,
+              unsigned int fl0id, unsigned int fl1id)
+{
+       struct fw_iq_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+                                 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+                                 FW_IQ_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
+       c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+       c.iqid = cpu_to_be16(iqid);
+       c.fl0id = cpu_to_be16(fl0id);
+       c.fl1id = cpu_to_be16(fl1id);
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_iq_free - free an ingress queue and its FLs
  *     @adap: the adapter
index 62ccebc5f7288ceb79262b7c01f6162fd675ac20..8cf943db56627db406b7a5c76b701d72fbc540f5 100644 (file)
@@ -1223,18 +1223,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                if (err)
                        return err;
 
-               /* verify upper 16 bits are zero */
-               if (vid >> 16)
-                       return FM10K_ERR_PARAM;
-
                set = !(vid & FM10K_VLAN_CLEAR);
                vid &= ~FM10K_VLAN_CLEAR;
 
-               err = fm10k_iov_select_vid(vf_info, (u16)vid);
-               if (err < 0)
-                       return err;
+               /* if the length field has been set, this is a multi-bit
+                * update request. For multi-bit requests, simply disallow
+                * them when the pf_vid has been set. In this case, the PF
+                * should have already cleared the VLAN_TABLE, and if we
+                * allowed them, it could allow a rogue VF to receive traffic
+                * on a VLAN it was not assigned. In the single-bit case, we
+                * need to modify requests for VLAN 0 to use the default PF or
+                * SW vid when assigned.
+                */
 
-               vid = err;
+               if (vid >> 16) {
+                       /* prevent multi-bit requests when PF has
+                        * administratively set the VLAN for this VF
+                        */
+                       if (vf_info->pf_vid)
+                               return FM10K_ERR_PARAM;
+               } else {
+                       err = fm10k_iov_select_vid(vf_info, (u16)vid);
+                       if (err < 0)
+                               return err;
+
+                       vid = err;
+               }
 
                /* update VSI info for VF in regards to VLAN table */
                err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
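A minimal sketch of the message decode the comment describes, assuming the upper 16 bits of vid carry the multi-bit length and the lower bits the VLAN id (the bit position used for the set/clear flag below is assumed for illustration; the real driver flag is FM10K_VLAN_CLEAR):

#include <stdbool.h>
#include <stdint.h>

#define VLAN_CLEAR_FLAG	(1u << 15)	/* stand-in for FM10K_VLAN_CLEAR */

/* a non-zero length field means the request touches a range of VLANs,
 * which the hunk above now rejects whenever the PF pinned a VLAN */
static bool is_multibit_update(uint32_t vid)
{
	return (vid >> 16) != 0;
}

static uint16_t single_vid(uint32_t vid)
{
	return (uint16_t)(vid & ~VLAN_CLEAR_FLAG);	/* strip the set/clear flag */
}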
index 084d0ab316b796b5d5af2ab87b7a5bee193b5a84..6a49b7ae511c1d0440326ba2cd43d477d6738a7c 100644 (file)
@@ -2594,35 +2594,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 }
 
 /**
- * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40e_chk_linearize(struct sk_buff *skb)
 {
        const struct skb_frag_struct *frag, *stale;
-       int gso_size, nr_frags, sum;
-
-       /* check to see if TSO is enabled, if so we may get a repreive */
-       gso_size = skb_shinfo(skb)->gso_size;
-       if (unlikely(!gso_size))
-               return true;
+       int nr_frags, sum;
 
-       /* no need to check if number of frags is less than 8 */
+       /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nr_frags < I40E_MAX_BUFFER_TXD)
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;
 
        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.  However we don't need
-        * to perform such validation on the first or last 6 since the first
-        * 6 cannot inherit any data from a descriptor before them, and the
-        * last 6 cannot inherit any data from a descriptor after them.
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
         */
-       nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];
 
        /* Initialize size to the negative value of gso_size minus 1.  We
@@ -2631,21 +2630,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
-       sum = 1 - gso_size;
+       sum = 1 - skb_shinfo(skb)->gso_size;
 
-       /* Add size of frags 1 through 5 to create our initial sum */
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
 
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        stale = &skb_shinfo(skb)->frags[0];
        for (;;) {
-               sum += skb_frag_size(++frag);
+               sum += skb_frag_size(frag++);
 
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
@@ -2655,7 +2654,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
                if (!--nr_frags)
                        break;
 
-               sum -= skb_frag_size(++stale);
+               sum -= skb_frag_size(stale++);
        }
 
        return false;
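A worked descriptor count for the TSO case spelled out in the comment above (I40E_MAX_BUFFER_TXD is the 8-buffer hardware limit referenced throughout these hunks):

	1 (TSO header) + 1 (segment payload in the first descriptor) + 7 (remaining fragments) = 9

Nine exceeds the 8 descriptors the hardware can chain for one packet, which is why the early-out now has to trip already at 7 fragments, i.e. at nr_frags >= I40E_MAX_BUFFER_TXD - 1.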
index cdd5dc00aec519972dd3af6166666a0d3c65766f..a9bd70537d6547b550a591b7775a8fcf4c76f186 100644 (file)
@@ -413,10 +413,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-       /* we can only support up to 8 data buffers for a single send */
-       if (likely(count <= I40E_MAX_BUFFER_TXD))
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;
 
-       return __i40e_chk_linearize(skb);
+       if (skb_is_gso(skb))
+               return __i40e_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
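For the non-GSO branch added above, the outcome per buffer count works out as follows (again taking I40E_MAX_BUFFER_TXD as 8):

	count < 8  -> return false early, no linearization needed
	count == 8 -> count != I40E_MAX_BUFFER_TXD is false, the send still fits
	count > 8  -> true, the skb must be linearized before transmit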
index ebcc25c05796d7d42372395ba9f8d3bf838d547a..cea97daa844c9a362417d253d10273b364b0c7a9 100644 (file)
@@ -1796,35 +1796,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 }
 
 /**
- * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
+ * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
  * @skb:      send buffer
  *
- * Note: Our HW can't scatter-gather more than 8 fragments to build
- * a packet on the wire and so we need to figure out the cases where we
- * need to linearize the skb.
+ * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
+ * and so we need to figure out the cases where we need to linearize the skb.
+ *
+ * For TSO we need to count the TSO header and segment payload separately.
+ * As such we need to check cases where we have 7 fragments or more as we
+ * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
+ * the segment payload in the first descriptor, and another 7 for the
+ * fragments.
  **/
 bool __i40evf_chk_linearize(struct sk_buff *skb)
 {
        const struct skb_frag_struct *frag, *stale;
-       int gso_size, nr_frags, sum;
-
-       /* check to see if TSO is enabled, if so we may get a repreive */
-       gso_size = skb_shinfo(skb)->gso_size;
-       if (unlikely(!gso_size))
-               return true;
+       int nr_frags, sum;
 
-       /* no need to check if number of frags is less than 8 */
+       /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
-       if (nr_frags < I40E_MAX_BUFFER_TXD)
+       if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;
 
        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.  However we don't need
-        * to perform such validation on the first or last 6 since the first
-        * 6 cannot inherit any data from a descriptor before them, and the
-        * last 6 cannot inherit any data from a descriptor after them.
+        * to perform such validation on the last 6 since the last 6 cannot
+        * inherit any data from a descriptor after them.
         */
-       nr_frags -= I40E_MAX_BUFFER_TXD - 1;
+       nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];
 
        /* Initialize size to the negative value of gso_size minus 1.  We
@@ -1833,21 +1832,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
-       sum = 1 - gso_size;
+       sum = 1 - skb_shinfo(skb)->gso_size;
 
-       /* Add size of frags 1 through 5 to create our initial sum */
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
-       sum += skb_frag_size(++frag);
+       /* Add size of frags 0 through 4 to create our initial sum */
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
+       sum += skb_frag_size(frag++);
 
        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        stale = &skb_shinfo(skb)->frags[0];
        for (;;) {
-               sum += skb_frag_size(++frag);
+               sum += skb_frag_size(frag++);
 
                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
@@ -1857,7 +1856,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
                if (!--nr_frags)
                        break;
 
-               sum -= skb_frag_size(++stale);
+               sum -= skb_frag_size(stale++);
        }
 
        return false;
index c1dd8c5c966697bc79ae47babbab8a598d20ef18..0429553fe8870cb7f38ae07ab13bb0c94d07dbe0 100644 (file)
@@ -395,10 +395,14 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  **/
 static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
 {
-       /* we can only support up to 8 data buffers for a single send */
-       if (likely(count <= I40E_MAX_BUFFER_TXD))
+       /* Both TSO and single send will work if count is less than 8 */
+       if (likely(count < I40E_MAX_BUFFER_TXD))
                return false;
 
-       return __i40evf_chk_linearize(skb);
+       if (skb_is_gso(skb))
+               return __i40evf_chk_linearize(skb);
+
+       /* we can support up to 8 data buffers for a single send */
+       return count != I40E_MAX_BUFFER_TXD;
 }
 #endif /* _I40E_TXRX_H_ */
index f69584a9b47fbef008a1ec138feba9f10504d997..c761194bb32352be84c2610464a9778d1a365c34 100644 (file)
@@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_STATS:
                return bitmap_iterator_count(&it) +
                        (priv->tx_ring_num * 2) +
-                       (priv->rx_ring_num * 2);
+                       (priv->rx_ring_num * 3);
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
@@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
        for (i = 0; i < priv->rx_ring_num; i++) {
                data[index++] = priv->rx_ring[i]->packets;
                data[index++] = priv->rx_ring[i]->bytes;
+               data[index++] = priv->rx_ring[i]->dropped;
        }
        spin_unlock_bh(&priv->stats_lock);
 
@@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev,
                                "rx%d_packets", i);
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "rx%d_bytes", i);
+                       sprintf(data + (index++) * ETH_GSTRING_LEN,
+                               "rx%d_dropped", i);
                }
                break;
        case ETH_SS_PRIV_FLAGS:
index 3904b5fc0b7c904548763ffbb2d31fbcccfe5b6c..20b6c2e678b8879525ae41c995d5bc7692bfdec5 100644 (file)
@@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        u64 in_mod = reset << 8 | port;
        int err;
        int i, counter_index;
+       unsigned long sw_rx_dropped = 0;
 
        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
@@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
        for (i = 0; i < priv->rx_ring_num; i++) {
                stats->rx_packets += priv->rx_ring[i]->packets;
                stats->rx_bytes += priv->rx_ring[i]->bytes;
+               sw_rx_dropped += priv->rx_ring[i]->dropped;
                priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
                priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
                priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete;
@@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
                                          &mlx4_en_stats->MCAST_prio_1,
                                          NUM_PRIORITIES);
        stats->collisions = 0;
-       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+       stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
+                           sw_rx_dropped;
        stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
        stats->rx_over_errors = 0;
        stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
index 86bcfe510e4e5b930b94de462f5ff9a76d4183c4..b723e3bcab39e83653be5367da514bd876b9d0cd 100644 (file)
@@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                gfp_t gfp = _gfp;
 
                if (order)
-                       gfp |= __GFP_COMP | __GFP_NOWARN;
+                       gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC;
                page = alloc_pages(gfp, order);
                if (likely(page))
                        break;
@@ -126,7 +126,9 @@ out:
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
                                page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
-                       set_page_count(page, 1);
+                       /* Revert changes done by mlx4_alloc_pages */
+                       page_ref_sub(page, page_alloc[i].page_size /
+                                          priv->frag_info[i].frag_stride - 1);
                        put_page(page);
                }
        }
@@ -176,7 +178,9 @@ out:
                dma_unmap_page(priv->ddev, page_alloc->dma,
                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
-               set_page_count(page, 1);
+               /* Revert changes done by mlx4_alloc_pages */
+               page_ref_sub(page, page_alloc->page_size /
+                                  priv->frag_info[i].frag_stride - 1);
                put_page(page);
                page_alloc->page = NULL;
        }
@@ -939,7 +943,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                /* GRO not possible, complete processing here */
                skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
                if (!skb) {
-                       priv->stats.rx_dropped++;
+                       ring->dropped++;
                        goto next;
                }
 
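The revert arithmetic in the two error paths above, assuming mlx4_alloc_pages() had raised the page reference count to page_size / frag_stride (one reference per RX fragment carved out of the page):

	held after allocation        = page_size / frag_stride
	page_ref_sub() removes         page_size / frag_stride - 1
	put_page() removes the last -> the page is actually freed

The previous set_page_count(page, 1) forced the count instead of reverting it, which can clobber references taken elsewhere on the same page.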
index c0d7b729623636e241f76213596c664b1e70c047..a386f047c1afb24530e88a48a7183041e29fdb06 100644 (file)
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
        u32 packets = 0;
        u32 bytes = 0;
        int factor = priv->cqe_factor;
-       u64 timestamp = 0;
        int done = 0;
        int budget = priv->tx_work_limit;
        u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
                new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
 
                do {
+                       u64 timestamp = 0;
+
                        txbbs_skipped += last_nr_txbb;
                        ring_index = (ring_index + last_nr_txbb) & size_mask;
-                       if (ring->tx_info[ring_index].ts_requested)
+
+                       if (unlikely(ring->tx_info[ring_index].ts_requested))
                                timestamp = mlx4_en_get_cqe_ts(cqe);
 
                        /* free next descriptor */
index 358f7230da589fc6e0b8b605255f126ae546d7db..12c77a70abdb451c475680c05bb2b7cf86436cdf 100644 (file)
@@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap
        return 0;
 }
 
+static int mlx4_pci_enable_device(struct mlx4_dev *dev)
+{
+       struct pci_dev *pdev = dev->persist->pdev;
+       int err = 0;
+
+       mutex_lock(&dev->persist->pci_status_mutex);
+       if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
+               err = pci_enable_device(pdev);
+               if (!err)
+                       dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
+       }
+       mutex_unlock(&dev->persist->pci_status_mutex);
+
+       return err;
+}
+
+static void mlx4_pci_disable_device(struct mlx4_dev *dev)
+{
+       struct pci_dev *pdev = dev->persist->pdev;
+
+       mutex_lock(&dev->persist->pci_status_mutex);
+       if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
+               pci_disable_device(pdev);
+               dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
+       }
+       mutex_unlock(&dev->persist->pci_status_mutex);
+}
+
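A minimal standalone sketch of the guarded enable/disable idea used above, so the normal probe/remove path and the PCI error-recovery path cannot double-enable or double-disable the device (the names and pthread locking here are illustrative; the driver uses pci_status_mutex and the MLX4_PCI_STATUS_* states):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static bool device_enabled;

static int do_enable(void) { puts("enable"); return 0; }
static void do_disable(void) { puts("disable"); }

static int guarded_enable(void)
{
	int err = 0;

	pthread_mutex_lock(&status_lock);
	if (!device_enabled) {
		err = do_enable();
		if (!err)
			device_enabled = true;
	}
	pthread_mutex_unlock(&status_lock);
	return err;
}

static void guarded_disable(void)
{
	pthread_mutex_lock(&status_lock);
	if (device_enabled) {
		do_disable();
		device_enabled = false;
	}
	pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	guarded_enable();
	guarded_enable();	/* no-op: state is already enabled */
	guarded_disable();
	guarded_disable();	/* likewise a no-op */
	return 0;
}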
 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
                         int total_vfs, int *nvfs, struct mlx4_priv *priv,
                         int reset_flow)
@@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
 
        pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
 
-       err = pci_enable_device(pdev);
+       err = mlx4_pci_enable_device(&priv->dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
@@ -3715,7 +3743,7 @@ err_release_regions:
        pci_release_regions(pdev);
 
 err_disable_pdev:
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(&priv->dev);
        pci_set_drvdata(pdev, NULL);
        return err;
 }
@@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        priv->pci_dev_data = id->driver_data;
        mutex_init(&dev->persist->device_state_mutex);
        mutex_init(&dev->persist->interface_state_mutex);
+       mutex_init(&dev->persist->pci_status_mutex);
 
        ret = devlink_register(devlink, &pdev->dev);
        if (ret)
@@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
        }
 
        pci_release_regions(pdev);
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(dev);
        devlink_unregister(devlink);
        kfree(dev->persist);
        devlink_free(devlink);
@@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;
 
-       pci_disable_device(pdev);
+       mlx4_pci_disable_device(persist->dev);
        return PCI_ERS_RESULT_NEED_RESET;
 }
 
@@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev  *dev  = persist->dev;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int               ret;
-       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
-       int total_vfs;
+       int err;
 
        mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
+       err = mlx4_pci_enable_device(dev);
+       if (err) {
+               mlx4_err(dev, "Can not re-enable device, err=%d\n", err);
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
+       return PCI_ERS_RESULT_RECOVERED;
+}
 
+static void mlx4_pci_resume(struct pci_dev *pdev)
+{
+       struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+       struct mlx4_dev  *dev  = persist->dev;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+       int total_vfs;
+       int err;
+
+       mlx4_err(dev, "%s was called\n", __func__);
        total_vfs = dev->persist->num_vfs;
        memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
 
        mutex_lock(&persist->interface_state_mutex);
        if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
-               ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
+               err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
                                    priv, 1);
-               if (ret) {
-                       mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
-                                __func__,  ret);
+               if (err) {
+                       mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
+                                __func__,  err);
                        goto end;
                }
 
-               ret = restore_current_port_types(dev, dev->persist->
+               err = restore_current_port_types(dev, dev->persist->
                                                 curr_port_type, dev->persist->
                                                 curr_port_poss_type);
-               if (ret)
-                       mlx4_err(dev, "could not restore original port types (%d)\n", ret);
+               if (err)
+                       mlx4_err(dev, "could not restore original port types (%d)\n", err);
        }
 end:
        mutex_unlock(&persist->interface_state_mutex);
 
-       return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
 
 static void mlx4_shutdown(struct pci_dev *pdev)
@@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev)
 static const struct pci_error_handlers mlx4_err_handler = {
        .error_detected = mlx4_pci_err_detected,
        .slot_reset     = mlx4_pci_slot_reset,
+       .resume         = mlx4_pci_resume,
 };
 
 static struct pci_driver mlx4_driver = {
index ef9683101eada7081163b7f6bf1473c18bd63da2..c9d7fc5159f2fb84e62496d810828e65324e7403 100644 (file)
@@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx {
        struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
        int                     init_port_ref[MLX4_MAX_PORTS + 1];
        u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       u8                      pptx;
+       u8                      pprx;
        int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
        struct mlx4_resource_tracker res_tracker;
        struct workqueue_struct *comm_wq;
index d12ab6a733446c52ba2afb037768bb2dd43f3515..63b1aeae2c037cd585b1473db97134644e188e03 100644 (file)
@@ -323,6 +323,7 @@ struct mlx4_en_rx_ring {
        unsigned long csum_ok;
        unsigned long csum_none;
        unsigned long csum_complete;
+       unsigned long dropped;
        int hwtstamp_rx_filter;
        cpumask_var_t affinity_mask;
 };
index 211c65087997dd5a92cc53a13f9fd3869927d09d..087b23b320cb44ff88dce362f47437de725dfeb5 100644 (file)
@@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                        }
 
                        gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+                       /* Slave cannot change Global Pause configuration */
+                       if (slave != mlx4_master_func_num(dev) &&
+                           ((gen_context->pptx != master->pptx) ||
+                            (gen_context->pprx != master->pprx))) {
+                               gen_context->pptx = master->pptx;
+                               gen_context->pprx = master->pprx;
+                               mlx4_warn(dev,
+                                         "denying Global Pause change for slave:%d\n",
+                                         slave);
+                       } else {
+                               master->pptx = gen_context->pptx;
+                               master->pprx = gen_context->pprx;
+                       }
                        break;
                case MLX4_SET_PORT_GID_TABLE:
                        /* change to MULTIPLE entries: number of guest's gids
index 879e6276c4736cb6f14b49db220e102c66636092..e80ce94b5dcf0e139b629732da3658892ca8590d 100644 (file)
@@ -609,7 +609,7 @@ enum mlx5e_link_mode {
        MLX5E_100GBASE_KR4       = 22,
        MLX5E_100GBASE_LR4       = 23,
        MLX5E_100BASE_TX         = 24,
-       MLX5E_100BASE_T          = 25,
+       MLX5E_1000BASE_T         = 25,
        MLX5E_10GBASE_T          = 26,
        MLX5E_25GBASE_CR         = 27,
        MLX5E_25GBASE_KR         = 28,
index 68834b715f6c114c89d6df339cb9cc81a72b8f75..3476ab844634bd5f4b46f74a484f24c2c62d228d 100644 (file)
@@ -138,10 +138,10 @@ static const struct {
        [MLX5E_100BASE_TX]   = {
                .speed      = 100,
        },
-       [MLX5E_100BASE_T]    = {
-               .supported  = SUPPORTED_100baseT_Full,
-               .advertised = ADVERTISED_100baseT_Full,
-               .speed      = 100,
+       [MLX5E_1000BASE_T]    = {
+               .supported  = SUPPORTED_1000baseT_Full,
+               .advertised = ADVERTISED_1000baseT_Full,
+               .speed      = 1000,
        },
        [MLX5E_10GBASE_T]    = {
                .supported  = SUPPORTED_10000baseT_Full,
index e0adb604f461e00e64952f70a661b62e1af787cc..67d548b70e142a376d0fc802a4576ee965f92520 100644 (file)
@@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
        return 0;
 }
 
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       int hw_mtu;
+       u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
        int err;
 
-       err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+       err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
        if (err)
                return err;
 
-       mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+       /* Update vport context MTU */
+       mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+       return 0;
+}
 
-       if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
-               netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
-                           __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u16 hw_mtu = 0;
+       int err;
 
-       netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+       err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+       if (err || !hw_mtu) /* fallback to port oper mtu */
+               mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+       *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       u16 mtu;
+       int err;
+
+       err = mlx5e_set_mtu(priv, netdev->mtu);
+       if (err)
+               return err;
+
+       mlx5e_query_mtu(priv, &mtu);
+       if (mtu != netdev->mtu)
+               netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+                           __func__, mtu, netdev->mtu);
+
+       netdev->mtu = mtu;
        return 0;
 }
 
@@ -1999,22 +2025,27 @@ static int mlx5e_set_features(struct net_device *netdev,
        return err;
 }
 
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool was_opened;
-       int max_mtu;
+       u16 max_mtu;
+       u16 min_mtu;
        int err = 0;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
        max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+       min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
 
-       if (new_mtu > max_mtu) {
+       if (new_mtu > max_mtu || new_mtu < min_mtu) {
                netdev_err(netdev,
-                          "%s: Bad MTU (%d) > (%d) Max\n",
-                          __func__, new_mtu, max_mtu);
+                          "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+                          __func__, new_mtu, min_mtu, max_mtu);
                return -EINVAL;
        }
 
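Worked minimum from the new defines (ETH_FCS_LEN is 4): MXL5E_MIN_MTU = MXL5_HW_MIN_MTU + ETH_FCS_LEN = 64 + 4 = 68 on the hardware side, and min_mtu is that value passed back through MLX5E_HW2SW_MTU() so the comparison against new_mtu happens on the same software scale as max_mtu.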
@@ -2602,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        schedule_work(&priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
        flush_scheduled_work();
-       unregister_netdev(netdev);
+       if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+               netif_device_detach(netdev);
+               mutex_lock(&priv->state_lock);
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_close_locked(netdev);
+               mutex_unlock(&priv->state_lock);
+       } else {
+               unregister_netdev(netdev);
+       }
+
        mlx5e_tc_cleanup(priv);
        mlx5e_vxlan_cleanup(priv);
        mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-       free_netdev(netdev);
+
+       if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+               free_netdev(netdev);
 }
 
 static void *mlx5e_get_netdev(void *vpriv)
index 5121be4675d14de3ea8f7722434945caeb307eb9..89cce97d46c606301135d6b4f3fb994a4744b684 100644 (file)
@@ -1065,33 +1065,6 @@ unlock_fg:
        return rule;
 }
 
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
-                                                 u8 match_criteria_enable,
-                                                 u32 *match_criteria,
-                                                 u32 *match_value,
-                                                 u8 action,
-                                                 u32 flow_tag,
-                                                 struct mlx5_flow_destination *dest)
-{
-       struct mlx5_flow_rule *rule;
-       struct mlx5_flow_group *g;
-
-       g = create_autogroup(ft, match_criteria_enable, match_criteria);
-       if (IS_ERR(g))
-               return (void *)g;
-
-       rule = add_rule_fg(g, match_value,
-                          action, flow_tag, dest);
-       if (IS_ERR(rule)) {
-               /* Remove assumes refcount > 0 and autogroup creates a group
-                * with a refcount = 0.
-                */
-               tree_get_node(&g->node);
-               tree_remove_node(&g->node);
-       }
-       return rule;
-}
-
 static struct mlx5_flow_rule *
 _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
                    u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
                                goto unlock;
                }
 
-       rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
-                                  match_value, action, flow_tag, dest);
+       g = create_autogroup(ft, match_criteria_enable, match_criteria);
+       if (IS_ERR(g)) {
+               rule = (void *)g;
+               goto unlock;
+       }
+
+       rule = add_rule_fg(g, match_value,
+                          action, flow_tag, dest);
+       if (IS_ERR(rule)) {
+               /* Remove assumes refcount > 0 and autogroup creates a group
+                * with a refcount = 0.
+                */
+               unlock_ref_node(&ft->node);
+               tree_get_node(&g->node);
+               tree_remove_node(&g->node);
+               return rule;
+       }
 unlock:
        unlock_ref_node(&ft->node);
        return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 {
        struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
        int prio;
-       static struct fs_prio *fs_prio;
+       struct fs_prio *fs_prio;
        struct mlx5_flow_namespace *ns;
 
        if (!root_ns)
index 3f3b2fae4991025a1018f4e4e4d87c88b6a30f1a..6892746fd10de9b5d355ef422f3de3c4934cb5b7 100644 (file)
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        int err;
 
        mutex_lock(&dev->intf_state_mutex);
-       if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+       if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
                         __func__);
                goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        if (err)
                pr_info("failed request module on %s\n", MLX5_IB_MOD);
 
-       dev->interface_state = MLX5_INTERFACE_STATE_UP;
+       clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+       set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
        mutex_unlock(&dev->intf_state_mutex);
 
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        }
 
        mutex_lock(&dev->intf_state_mutex);
-       if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+       if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
                         __func__);
                goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        mlx5_cmd_cleanup(dev);
 
 out:
-       dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+       clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+       set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
        mutex_unlock(&dev->intf_state_mutex);
        return err;
 }
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
        .resume         = mlx5_pci_resume
 };
 
+static void shutdown(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+       struct mlx5_priv *priv = &dev->priv;
+
+       dev_info(&pdev->dev, "Shutdown was called\n");
+       /* Notify mlx5 clients that the kernel is being shut down */
+       set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+       mlx5_unload_one(dev, priv);
+       mlx5_pci_disable_device(dev);
+}
+
 static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1011) },                      /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},   /* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 0x1015) },                      /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4LX VF */
+       { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5 */
+       { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 VF */
        { 0, }
 };
 
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
        .remove         = remove_one,
+       .shutdown       = shutdown,
        .err_handler    = &mlx5_err_handler,
        .sriov_configure   = mlx5_core_sriov_configure,
 };
index ae378c575deb28073063b7d216c39664df4c4ae5..53cc1e2c693b1ab3b015a52a0e34a7045f5ffd7c 100644 (file)
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
-                               int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+                               u16 *max_mtu, u16 *oper_mtu, u8 port)
 {
        u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
        u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
                *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
 {
        u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
        u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
                             u8 port)
 {
        mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
                              u8 port)
 {
        mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
index bd518405859ed3974f7049196503353c7ee647fa..b69dadcfb897a25a51b89a9caed4d1454d603d8e 100644 (file)
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
 
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u32 *out;
+       int err;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+       if (!err)
+               *mtu = MLX5_GET(query_nic_vport_context_out, out,
+                               nic_vport_context.mtu);
+
+       kvfree(out);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+       int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+       void *in;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+       MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+       err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+       kvfree(in);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
                                  u32 vport,
                                  enum mlx5_list_type list_type,
index 518af329502ddff20c8fc322ebed3303b7b0b9a9..7869465435fa81e9c9ebd9e5ae780b01efb20087 100644 (file)
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
        return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
        curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+                                   struct qede_dev *edev, u8 count)
+{
+       struct sw_rx_data *curr_cons;
+
+       for (; count > 0; count--) {
+               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+               qede_reuse_page(edev, rxq, curr_cons);
+               qede_rx_bd_ring_consume(rxq);
+       }
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
                                         struct qede_rx_queue *rxq,
                                         struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
        curr_cons->page_offset += rxq->rx_buf_seg_size;
 
        if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                       /* Since we failed to allocate new buffer
+                        * current buffer can be used again.
+                        */
+                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
                        return -ENOMEM;
+               }
 
                dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
                               PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
                           len_on_bd);
 
        if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+               /* Incr page ref count to reuse on allocation failure
+                * so that it doesn't get freed while freeing SKB.
+                */
+               atomic_inc(&current_bd->data->_count);
                goto out;
        }
 
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
        return 0;
 
 out:
+       tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, edev, 1);
        return -ENOMEM;
 }
 
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
        tpa_info->skb = netdev_alloc_skb(edev->ndev,
                                         le16_to_cpu(cqe->len_on_first_bd));
        if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
                tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-               return;
+               goto cons_buf;
        }
 
        skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
        /* This is needed in order to enable forwarding support */
        qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
        if (likely(cqe->ext_bd_len_list[0]))
                qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
                                   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1007,7 +1041,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th;
 
-       skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, sizeof(struct iphdr));
        th = tcp_hdr(skb);
 
@@ -1022,7 +1055,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb)
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct tcphdr *th;
 
-       skb_set_network_header(skb, 0);
        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
        th = tcp_hdr(skb);
 
@@ -1037,8 +1069,21 @@ static void qede_gro_receive(struct qede_dev *edev,
                             struct sk_buff *skb,
                             u16 vlan_tag)
 {
+       /* FW can send a single MTU sized packet from gro flow
+        * due to aggregation timeout/last segment etc. which
+        * is not expected to be a gro packet. If a skb has zero
+        * frags then simply push it in the stack as non gso skb.
+        */
+       if (unlikely(!skb->data_len)) {
+               skb_shinfo(skb)->gso_type = 0;
+               skb_shinfo(skb)->gso_size = 0;
+               goto send_skb;
+       }
+
 #ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
+               skb_set_network_header(skb, 0);
+
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        qede_gro_ip_csum(skb);
@@ -1053,6 +1098,8 @@ static void qede_gro_receive(struct qede_dev *edev,
                }
        }
 #endif
+
+send_skb:
        skb_record_rx_queue(skb, fp->rss_id);
        qede_skb_receive(edev, fp, skb, vlan_tag);
 }
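For the zero-frags check above, the relevant relation is skb->len = linear bytes + skb->data_len, where data_len counts only the paged fragment bytes; data_len == 0 therefore means the whole packet already sits in the linear area, there is nothing for GRO to aggregate or GSO to resegment, and clearing gso_type/gso_size before handing the skb to the stack as a plain packet is the safe fallback.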
@@ -1244,17 +1291,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
                                  sw_comp_cons, parse_flag);
                        rxq->rx_hw_errors++;
-                       qede_reuse_page(edev, rxq, sw_rx_data);
-                       goto next_rx;
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+                       goto next_cqe;
                }
 
                skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        DP_NOTICE(edev,
                                  "Build_skb failed, dropping incoming packet\n");
-                       qede_reuse_page(edev, rxq, sw_rx_data);
+                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
                        rxq->rx_alloc_errors++;
-                       goto next_rx;
+                       goto next_cqe;
                }
 
                /* Copy data into SKB */
@@ -1288,11 +1335,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                        if (unlikely(qede_realloc_rx_buffer(edev, rxq,
                                                            sw_rx_data))) {
                                DP_ERR(edev, "Failed to allocate rx buffer\n");
+                               /* Increment the page ref count so the buffer
+                                * can be reused after the allocation failure
+                                * and doesn't get freed while freeing the SKB.
+                                */
+
+                               atomic_inc(&sw_rx_data->data->_count);
                                rxq->rx_alloc_errors++;
+                               qede_recycle_rx_bd_ring(rxq, edev,
+                                                       fp_cqe->bd_num);
+                               dev_kfree_skb_any(skb);
                                goto next_cqe;
                        }
                }
 
+               qede_rx_bd_ring_consume(rxq);
+
                if (fp_cqe->bd_num != 1) {
                        u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
                        u8 num_frags;
@@ -1303,18 +1361,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                             num_frags--) {
                                u16 cur_size = pkt_len > rxq->rx_buf_size ?
                                                rxq->rx_buf_size : pkt_len;
+                               if (unlikely(!cur_size)) {
+                                       DP_ERR(edev,
+                                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                                              num_frags);
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
+                                       goto next_cqe;
+                               }
 
-                               WARN_ONCE(!cur_size,
-                                         "Still got %d BDs for mapping jumbo, but length became 0\n",
-                                         num_frags);
-
-                               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+                               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+                                       qede_recycle_rx_bd_ring(rxq, edev,
+                                                               num_frags);
+                                       dev_kfree_skb_any(skb);
                                        goto next_cqe;
+                               }
 
-                               rxq->sw_rx_cons++;
                                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
                                sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-                               qed_chain_consume(&rxq->rx_bd_ring);
+                               qede_rx_bd_ring_consume(rxq);
+
                                dma_unmap_page(&edev->pdev->dev,
                                               sw_rx_data->mapping,
                                               PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1397,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                                pkt_len -= cur_size;
                        }
 
-                       if (pkt_len)
+                       if (unlikely(pkt_len))
                                DP_ERR(edev,
                                       "Mapped all BDs of jumbo, but still have %d bytes\n",
                                       pkt_len);
@@ -1349,10 +1416,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                skb_record_rx_queue(skb, fp->rss_id);
 
                qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-               qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-               rxq->sw_rx_cons++;
 next_rx_only:
                rx_pkt++;
 
@@ -2257,7 +2320,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
                struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-               if (replace_buf) {
+               if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
                                       dma_unmap_addr(replace_buf, mapping),
                                       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2440,7 @@ err:
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
                              struct qede_rx_queue *rxq)
 {
-       int i, rc, size, num_allocated;
+       int i, rc, size;
 
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2457,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
        rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
        if (!rxq->sw_rx_ring) {
                DP_ERR(edev, "Rx buffers ring allocation failed\n");
+               rc = -ENOMEM;
                goto err;
        }
 
@@ -2421,26 +2485,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
        /* Allocate buffers for the Rx ring */
        for (i = 0; i < rxq->num_rx_buffers; i++) {
                rc = qede_alloc_rx_buffer(edev, rxq);
-               if (rc)
-                       break;
-       }
-       num_allocated = i;
-       if (!num_allocated) {
-               DP_ERR(edev, "Rx buffers allocation failed\n");
-               goto err;
-       } else if (num_allocated < rxq->num_rx_buffers) {
-               DP_NOTICE(edev,
-                         "Allocated less buffers than desired (%d allocated)\n",
-                         num_allocated);
+               if (rc) {
+                       DP_ERR(edev,
+                              "Rx buffers allocation failed at index %d\n", i);
+                       goto err;
+               }
        }
 
-       qede_alloc_sge_mem(edev, rxq);
-
-       return 0;
-
+       rc = qede_alloc_sge_mem(edev, rxq);
 err:
-       qede_free_mem_rxq(edev, rxq);
-       return -ENOMEM;
+       return rc;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
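
The reworked allocators above stop at the first failure and simply bubble rc up; the hunks that follow centralize the rollback in one caller (qede_free_mem_load()). A condensed sketch of that convention, with made-up names:

    #include <linux/slab.h>

    struct demo_ring { void *buf; };            /* illustrative type */

    static void demo_free_rings(struct demo_ring *r, int n)
    {
            while (n--)
                    kfree(r[n].buf);
    }

    static int demo_alloc_rings(struct demo_ring *r, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    r[i].buf = kzalloc(4096, GFP_KERNEL);
                    if (!r[i].buf) {
                            demo_free_rings(r, i);  /* single rollback point */
                            return -ENOMEM;
                    }
            }
            return 0;
    }
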
@@ -2523,10 +2577,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
        }
 
        return 0;
-
 err:
-       qede_free_mem_fp(edev, fp);
-       return -ENOMEM;
+       return rc;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2601,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
                struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
                rc = qede_alloc_mem_fp(edev, fp);
-               if (rc)
-                       break;
-       }
-
-       if (rss_id != QEDE_RSS_CNT(edev)) {
-               /* Failed allocating memory for all the queues */
-               if (!rss_id) {
+               if (rc) {
                        DP_ERR(edev,
-                              "Failed to allocate memory for the leading queue\n");
-                       rc = -ENOMEM;
-               } else {
-                       DP_NOTICE(edev,
-                                 "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-                                 QEDE_RSS_CNT(edev), rss_id);
+                              "Failed to allocate memory for fastpath - rss id = %d\n",
+                              rss_id);
+                       qede_free_mem_load(edev);
+                       return rc;
                }
-               edev->num_rss = rss_id;
        }
 
        return 0;
index 55007f1e6bbcc77981414e9db1e73d8db8502988..caf6ddb7ea76f89f0a5ea1fd87e8363b8f89878a 100644 (file)
@@ -37,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 63
-#define QLCNIC_LINUX_VERSIONID  "5.3.63"
+#define _QLCNIC_LINUX_SUBVERSION 64
+#define QLCNIC_LINUX_VERSIONID  "5.3.64"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
index 087e14a3fba726cd0637cbd12f2138b7dbe8adbf..9e2a0bd8f5a88803b4d1e260e18ffff9f119d541 100644 (file)
@@ -1691,6 +1691,9 @@ static int ravb_set_gti(struct net_device *ndev)
        rate = clk_get_rate(clk);
        clk_put(clk);
 
+       if (!rate)
+               return -EINVAL;
+
        inc = 1000000000ULL << 20;
        do_div(inc, rate);
 
index 004e2d7560fd86e289980a2526a8e6e9f4fb77c3..ceea74cc22293f6dc26141fc9a0d33ed718520ce 100644 (file)
@@ -2194,17 +2194,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
                                   __func__);
                        return ret;
                }
-               ret = sh_eth_dev_init(ndev, false);
+               ret = sh_eth_dev_init(ndev, true);
                if (ret < 0) {
                        netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
                                   __func__);
                        return ret;
                }
 
-               mdp->irq_enabled = true;
-               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
-               /* Setting the Rx mode will start the Rx process. */
-               sh_eth_write(ndev, EDRRR_R, EDRRR);
                netif_device_attach(ndev);
        }
 
index f0d797ab74d8f2ea927c6956525b728b9c20ddca..afb90d129cb6059d3fc6e5fe579520a9954ace0c 100644 (file)
@@ -34,6 +34,9 @@
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
 
+#define SYSMGR_FPGAGRP_MODULE_REG  0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+
 #define EMAC_SPLITTER_CTRL_REG                 0x0
 #define EMAC_SPLITTER_CTRL_SPEED_MASK          0x3
 #define EMAC_SPLITTER_CTRL_SPEED_10            0x2
@@ -46,7 +49,6 @@ struct socfpga_dwmac {
        u32     reg_shift;
        struct  device *dev;
        struct regmap *sys_mgr_base_addr;
-       struct reset_control *stmmac_rst;
        void __iomem *splitter_base;
        bool f2h_ptp_ref_clk;
 };
@@ -89,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
        struct device_node *np_splitter;
        struct resource res_splitter;
 
-       dwmac->stmmac_rst = devm_reset_control_get(dev,
-                                                 STMMAC_RESOURCE_NAME);
-       if (IS_ERR(dwmac->stmmac_rst)) {
-               dev_info(dev, "Could not get reset control!\n");
-               if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
-                       return -EPROBE_DEFER;
-               dwmac->stmmac_rst = NULL;
-       }
-
        dwmac->interface = of_get_phy_mode(np);
 
        sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -148,7 +141,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        int phymode = dwmac->interface;
        u32 reg_offset = dwmac->reg_offset;
        u32 reg_shift = dwmac->reg_shift;
-       u32 ctrl, val;
+       u32 ctrl, val, module;
 
        switch (phymode) {
        case PHY_INTERFACE_MODE_RGMII:
@@ -175,39 +168,39 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
        ctrl |= val << reg_shift;
 
-       if (dwmac->f2h_ptp_ref_clk)
+       if (dwmac->f2h_ptp_ref_clk) {
                ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
-       else
+               regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                           &module);
+               module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+               regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+                            module);
+       } else {
                ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2));
+       }
 
        regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
-       return 0;
-}
-
-static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
-{
-       struct socfpga_dwmac    *dwmac = priv;
 
-       /* On socfpga platform exit, assert and hold reset to the
-        * enet controller - the default state after a hard reset.
-        */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
+       return 0;
 }
 
 static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
 {
-       struct socfpga_dwmac    *dwmac = priv;
+       struct socfpga_dwmac *dwmac = priv;
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *stpriv = NULL;
        int ret = 0;
 
-       if (ndev)
-               stpriv = netdev_priv(ndev);
+       if (!ndev)
+               return -EINVAL;
+
+       stpriv = netdev_priv(ndev);
+       if (!stpriv)
+               return -EINVAL;
 
        /* Assert reset to the enet controller before changing the phy mode */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
+       if (stpriv->stmmac_rst)
+               reset_control_assert(stpriv->stmmac_rst);
 
        /* Setup the phy mode in the system manager registers according to
         * devicetree configuration
@@ -217,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
        /* Deassert reset for the phy configuration to be sampled by
         * the enet controller, and operation to start in requested mode
         */
-       if (dwmac->stmmac_rst)
-               reset_control_deassert(dwmac->stmmac_rst);
+       if (stpriv->stmmac_rst)
+               reset_control_deassert(stpriv->stmmac_rst);
 
        /* Before the enet controller is suspended, the phy is suspended.
         * This causes the phy clock to be gated. The enet controller is
@@ -235,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
         * control register 0, and can be modified by the phy driver
         * framework.
         */
-       if (stpriv && stpriv->phydev)
+       if (stpriv->phydev)
                phy_resume(stpriv->phydev);
 
        return ret;
@@ -275,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
 
        plat_dat->bsp_priv = dwmac;
        plat_dat->init = socfpga_dwmac_init;
-       plat_dat->exit = socfpga_dwmac_exit;
        plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
 
-       ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
-       if (ret)
-               return ret;
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (!ret)
+               ret = socfpga_dwmac_init(pdev, dwmac);
 
-       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       return ret;
 }
 
 static const struct of_device_id socfpga_dwmac_match[] = {
index 42fdfd4d9d4f9873c8b26f1518aa56b3c5e719b0..bbb77cd8ad6776df47d01549cceb7165735c15f0 100644 (file)
@@ -1251,12 +1251,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
        int i, ret;
        u32 reg;
 
+       pm_runtime_get_sync(&priv->pdev->dev);
+
        if (!cpsw_common_res_usage_state(priv))
                cpsw_intr_disable(priv);
        netif_carrier_off(ndev);
 
-       pm_runtime_get_sync(&priv->pdev->dev);
-
        reg = priv->version;
 
        dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
index 5d9abedd6b757ca65000e5dea8155acf0eac9412..58d58f002559f627040a8bfb3fa452f671c62e44 100644 (file)
@@ -1878,8 +1878,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
                pdata->hw_ram_addr = auxdata->hw_ram_addr;
        }
 
-       pdev->dev.platform_data = pdata;
-
        return  pdata;
 }
 
@@ -2101,6 +2099,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
        cpdma_ctlr_destroy(priv->dma);
 
        unregister_netdev(ndev);
+       pm_runtime_disable(&pdev->dev);
        free_netdev(ndev);
 
        return 0;
index 84d3e5ca8817197a15165d2c61f8a0f58550c5b6..c6385617bfb29a38565e9708953c3024ae3af994 100644 (file)
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        macsec_skb_cb(skb)->valid = false;
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
        if (!req) {
                kfree_skb(skb);
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb) {
                        aead_request_free(req);
-                       return NULL;
+                       return ERR_PTR(-ENOMEM);
                }
        } else {
                /* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
        dev_hold(dev);
        ret = crypto_aead_decrypt(req);
        if (ret == -EINPROGRESS) {
-               return NULL;
+               return ERR_PTR(ret);
        } else if (ret != 0) {
                /* decryption/authentication failed
                 * 10.6 if validateFrames is disabled, deliver anyway
                 */
                if (ret != -EBADMSG) {
                        kfree_skb(skb);
-                       skb = NULL;
+                       skb = ERR_PTR(ret);
                }
        } else {
                macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
            secy->validate_frames != MACSEC_VALIDATE_DISABLED)
                skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
 
-       if (!skb) {
-               macsec_rxsa_put(rx_sa);
+       if (IS_ERR(skb)) {
+               /* the decrypt callback needs the reference */
+               if (PTR_ERR(skb) != -EINPROGRESS)
+                       macsec_rxsa_put(rx_sa);
                rcu_read_unlock();
                *pskb = NULL;
                return RX_HANDLER_CONSUMED;
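
Returning ERR_PTR() values instead of NULL lets the caller tell why decryption produced no skb; -EINPROGRESS means the async crypto completion still owns it and the reference must not be dropped. A minimal caller-side sketch of that convention (hypothetical names, not macsec code):

    #include <linux/err.h>
    #include <linux/skbuff.h>

    static void demo_handle_decrypt_result(struct sk_buff *skb)
    {
            if (IS_ERR(skb)) {
                    if (PTR_ERR(skb) == -EINPROGRESS)
                            return;         /* async: the callback finishes and drops refs */
                    return;                 /* hard failure: nothing to deliver */
            }
            /* plain pointer: decryption finished synchronously, deliver skb */
    }
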
@@ -1161,7 +1163,8 @@ deliver:
                            macsec_extra_len(macsec_skb_cb(skb)->has_sci));
        macsec_reset_skb(skb, secy->netdev);
 
-       macsec_rxsa_put(rx_sa);
+       if (rx_sa)
+               macsec_rxsa_put(rx_sa);
        count_rx(dev, skb->len);
 
        rcu_read_unlock();
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        }
 
        rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
-       if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
-                      secy->icv_len)) {
+       if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+                                secy->key_len, secy->icv_len)) {
+               kfree(rx_sa);
                rtnl_unlock();
                return -ENOMEM;
        }
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
        tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
        if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
                                 secy->key_len, secy->icv_len)) {
+               kfree(tx_sa);
                rtnl_unlock();
                return -ENOMEM;
        }
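
Both SA hunks lean on the fact that kfree(NULL) is a no-op, so a missed allocation and a failed init can share one error branch. Reduced to its essence (illustrative names):

    #include <linux/slab.h>

    static void *demo_alloc_and_init(size_t sz, int (*init)(void *))
    {
            void *obj = kmalloc(sz, GFP_KERNEL);

            /* short-circuit: init() is never called with a NULL object */
            if (!obj || init(obj)) {
                    kfree(obj);     /* safe even when obj is NULL */
                    return NULL;
            }
            return obj;
    }
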
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
                return 1;
 
        if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
-           nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+           nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+                       MACSEC_DEFAULT_CIPHER_ID) ||
            nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
            nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
            nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
        if (!hdr)
                return -EMSGSIZE;
 
-       rtnl_lock();
+       genl_dump_check_consistent(cb, hdr, &macsec_fam);
 
        if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
                goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
        nla_nest_end(skb, rxsc_list);
 
-       rtnl_unlock();
-
        genlmsg_end(skb, hdr);
 
        return 0;
 
 nla_put_failure:
-       rtnl_unlock();
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
 }
 
+static int macsec_generation = 1; /* protected by RTNL */
+
 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
        dev_idx = cb->args[0];
 
        d = 0;
+       rtnl_lock();
+
+       cb->seq = macsec_generation;
+
        for_each_netdev(net, dev) {
                struct macsec_secy *secy;
 
@@ -2467,6 +2476,7 @@ next:
        }
 
 done:
+       rtnl_unlock();
        cb->args[0] = d;
        return skb->len;
 }
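
The generation counter introduced here pairs with cb->seq so netlink can flag a dump as interrupted when the set of devices changes mid-dump; genl_dump_check_consistent() in the earlier dump_secy() hunk is what raises NLM_F_DUMP_INTR. A compressed sketch of the writer side (illustrative names):

    #include <linux/rtnetlink.h>

    static int demo_generation = 1;     /* protected by RTNL */

    /* every add/remove of a dumped object bumps the generation ... */
    static void demo_set_changed(void)
    {
            ASSERT_RTNL();
            demo_generation++;
    }

    /* ... and the dump callback copies it into cb->seq under the same
     * lock, so a concurrent change is detectable across dump passes.
     */
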
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
        struct net_device *real_dev = macsec->real_dev;
        struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
 
+       macsec_generation++;
+
        unregister_netdevice_queue(dev, head);
        list_del_rcu(&macsec->secys);
-       if (list_empty(&rxd->secys))
+       if (list_empty(&rxd->secys)) {
                netdev_rx_handler_unregister(real_dev);
+               kfree(rxd);
+       }
 
        macsec_del_dev(macsec);
 }
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
 
                err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
                                                 rxd);
-               if (err < 0)
+               if (err < 0) {
+                       kfree(rxd);
                        return err;
+               }
        }
 
        list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        if (err < 0)
                goto del_dev;
 
+       macsec_generation++;
+
        dev_hold(real_dev);
 
        return 0;
@@ -3079,7 +3097,7 @@ unregister:
 
 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
 {
-       u64 csid = DEFAULT_CIPHER_ID;
+       u64 csid = MACSEC_DEFAULT_CIPHER_ID;
        u8 icv_len = DEFAULT_ICV_LEN;
        int flag;
        bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
                icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
 
        switch (csid) {
-       case DEFAULT_CIPHER_ID:
-       case DEFAULT_CIPHER_ALT:
+       case MACSEC_DEFAULT_CIPHER_ID:
+       case MACSEC_DEFAULT_CIPHER_ALT:
                if (icv_len < MACSEC_MIN_ICV_LEN ||
                    icv_len > MACSEC_MAX_ICV_LEN)
                        return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
            nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
                return -EINVAL;
 
-       if ((data[IFLA_MACSEC_PROTECT] &&
-            nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
+       if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+            nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
            !data[IFLA_MACSEC_WINDOW])
                return -EINVAL;
 
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
 
        if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
            nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
-           nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+           nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
+                       MACSEC_DEFAULT_CIPHER_ID) ||
            nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
            nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
            nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
index b5d50d45872893ffcbdef86f8317d0512b2e2a32..93ffedfa299412f78af2c72fedc991101a52a451 100644 (file)
@@ -441,7 +441,7 @@ static int ks8995_probe(struct spi_device *spi)
                return -ENOMEM;
 
        mutex_init(&ks->lock);
-       ks->spi = spi_dev_get(spi);
+       ks->spi = spi;
        ks->chip = &ks8995_chip[variant];
 
        if (ks->spi->dev.of_node) {
index bdd83d95ec0aa677b8443476ef2ee66018aff701..96a5028621c8b320c2d6feca5911f40cde10771e 100644 (file)
@@ -617,8 +617,13 @@ static const struct usb_device_id mbim_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info,
        },
-       /* Huawei E3372 fails unless NDP comes after the IP packets */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+
+       /* Some Huawei devices, ME906s-158 (12d1:15c1) and E3372
+        * (12d1:157d), are known to fail unless the NDP is placed
+        * after the IP packets.  Applying the quirk to all Huawei
+        * devices is broader than necessary, but harmless.
+        */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
          .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
        },
        /* default entry */
index b2348f67b00a7044189f6650000eea470a2ae386..db8022ae415bd234c19c8348dc6f90697d6ad840 100644 (file)
@@ -1152,12 +1152,16 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                union Vmxnet3_GenericDesc *gdesc)
 {
        if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
-               /* typical case: TCP/UDP over IP and both csums are correct */
-               if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
-                                                       VMXNET3_RCD_CSUM_OK) {
+               if (gdesc->rcd.v4 &&
+                   (le32_to_cpu(gdesc->dword[3]) &
+                    VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
+                       BUG_ON(gdesc->rcd.frg);
+               } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
+                                            (1 << VMXNET3_RCD_TUC_SHIFT))) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-                       BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
                        BUG_ON(gdesc->rcd.frg);
                } else {
                        if (gdesc->rcd.csum) {
index 729c344e677499b6f737aa622d6925c07d647a17..c4825392d64b6c8de17822532d113689f88d8bb7 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.6.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.7.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040600
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040700
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index 9a9fabb900c19e5f7dc483a63f1b71fcd1a96e5a..8a8f1e58b4155808e2b28abb91d85ea772c7fd63 100644 (file)
@@ -60,41 +60,6 @@ struct pcpu_dstats {
        struct u64_stats_sync   syncp;
 };
 
-static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
-{
-       return dst;
-}
-
-static int vrf_ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
-{
-       return ip_local_out(net, sk, skb);
-}
-
-static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
-{
-       /* TO-DO: return max ethernet size? */
-       return dst->dev->mtu;
-}
-
-static void vrf_dst_destroy(struct dst_entry *dst)
-{
-       /* our dst lives forever - or until the device is closed */
-}
-
-static unsigned int vrf_default_advmss(const struct dst_entry *dst)
-{
-       return 65535 - 40;
-}
-
-static struct dst_ops vrf_dst_ops = {
-       .family         = AF_INET,
-       .local_out      = vrf_ip_local_out,
-       .check          = vrf_ip_check,
-       .mtu            = vrf_v4_mtu,
-       .destroy        = vrf_dst_destroy,
-       .default_advmss = vrf_default_advmss,
-};
-
 /* neighbor handling is done with actual device; do not want
  * to flip skb->dev for those ndisc packets. This really fails
  * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
@@ -349,46 +314,6 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_ip6_check(struct dst_entry *dst, u32 cookie)
-{
-       return dst;
-}
-
-static struct dst_ops vrf_dst_ops6 = {
-       .family         = AF_INET6,
-       .local_out      = ip6_local_out,
-       .check          = vrf_ip6_check,
-       .mtu            = vrf_v4_mtu,
-       .destroy        = vrf_dst_destroy,
-       .default_advmss = vrf_default_advmss,
-};
-
-static int init_dst_ops6_kmem_cachep(void)
-{
-       vrf_dst_ops6.kmem_cachep = kmem_cache_create("vrf_ip6_dst_cache",
-                                                    sizeof(struct rt6_info),
-                                                    0,
-                                                    SLAB_HWCACHE_ALIGN,
-                                                    NULL);
-
-       if (!vrf_dst_ops6.kmem_cachep)
-               return -ENOMEM;
-
-       return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-       kmem_cache_destroy(vrf_dst_ops6.kmem_cachep);
-}
-
-static int vrf_input6(struct sk_buff *skb)
-{
-       skb->dev->stats.rx_errors++;
-       kfree_skb(skb);
-       return 0;
-}
-
 /* modelled after ip6_finish_output2 */
 static int vrf_finish_output6(struct net *net, struct sock *sk,
                              struct sk_buff *skb)
@@ -429,67 +354,34 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 }
 
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
-       dst_destroy(&vrf->rt6->dst);
-       free_percpu(vrf->rt6->rt6i_pcpu);
+       dst_release(&vrf->rt6->dst);
        vrf->rt6 = NULL;
 }
 
 static int vrf_rt6_create(struct net_device *dev)
 {
        struct net_vrf *vrf = netdev_priv(dev);
-       struct dst_entry *dst;
+       struct net *net = dev_net(dev);
        struct rt6_info *rt6;
-       int cpu;
        int rc = -ENOMEM;
 
-       rt6 = dst_alloc(&vrf_dst_ops6, dev, 0,
-                       DST_OBSOLETE_NONE,
-                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       rt6 = ip6_dst_alloc(net, dev,
+                           DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
        if (!rt6)
                goto out;
 
-       dst = &rt6->dst;
-
-       rt6->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_KERNEL);
-       if (!rt6->rt6i_pcpu) {
-               dst_destroy(dst);
-               goto out;
-       }
-       for_each_possible_cpu(cpu) {
-               struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu);
-               *p =  NULL;
-       }
-
-       memset(dst + 1, 0, sizeof(*rt6) - sizeof(*dst));
-
-       INIT_LIST_HEAD(&rt6->rt6i_siblings);
-       INIT_LIST_HEAD(&rt6->rt6i_uncached);
-
-       rt6->dst.input  = vrf_input6;
        rt6->dst.output = vrf_output6;
-
-       rt6->rt6i_table = fib6_get_table(dev_net(dev), vrf->tb_id);
-
-       atomic_set(&rt6->dst.__refcnt, 2);
-
+       rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
+       dst_hold(&rt6->dst);
        vrf->rt6 = rt6;
        rc = 0;
 out:
        return rc;
 }
 #else
-static int init_dst_ops6_kmem_cachep(void)
-{
-       return 0;
-}
-
-static void free_dst_ops6_kmem_cachep(void)
-{
-}
-
-static void vrf_rt6_destroy(struct net_vrf *vrf)
+static void vrf_rt6_release(struct net_vrf *vrf)
 {
 }
 
@@ -557,11 +449,11 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-static void vrf_rtable_destroy(struct net_vrf *vrf)
+static void vrf_rtable_release(struct net_vrf *vrf)
 {
        struct dst_entry *dst = (struct dst_entry *)vrf->rth;
 
-       dst_destroy(dst);
+       dst_release(dst);
        vrf->rth = NULL;
 }
 
@@ -570,22 +462,10 @@ static struct rtable *vrf_rtable_create(struct net_device *dev)
        struct net_vrf *vrf = netdev_priv(dev);
        struct rtable *rth;
 
-       rth = dst_alloc(&vrf_dst_ops, dev, 2,
-                       DST_OBSOLETE_NONE,
-                       (DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+       rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
        if (rth) {
                rth->dst.output = vrf_output;
-               rth->rt_genid   = rt_genid_ipv4(dev_net(dev));
-               rth->rt_flags   = 0;
-               rth->rt_type    = RTN_UNICAST;
-               rth->rt_is_input = 0;
-               rth->rt_iif     = 0;
-               rth->rt_pmtu    = 0;
-               rth->rt_gateway = 0;
-               rth->rt_uses_gateway = 0;
                rth->rt_table_id = vrf->tb_id;
-               INIT_LIST_HEAD(&rth->rt_uncached);
-               rth->rt_uncached_list = NULL;
        }
 
        return rth;
@@ -673,8 +553,8 @@ static void vrf_dev_uninit(struct net_device *dev)
        struct net_device *port_dev;
        struct list_head *iter;
 
-       vrf_rtable_destroy(vrf);
-       vrf_rt6_destroy(vrf);
+       vrf_rtable_release(vrf);
+       vrf_rt6_release(vrf);
 
        netdev_for_each_lower_dev(dev, port_dev, iter)
                vrf_del_slave(dev, port_dev);
@@ -704,7 +584,7 @@ static int vrf_dev_init(struct net_device *dev)
        return 0;
 
 out_rth:
-       vrf_rtable_destroy(vrf);
+       vrf_rtable_release(vrf);
 out_stats:
        free_percpu(dev->dstats);
        dev->dstats = NULL;
@@ -737,7 +617,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
                struct net_vrf *vrf = netdev_priv(dev);
 
                rth = vrf->rth;
-               atomic_inc(&rth->dst.__refcnt);
+               dst_hold(&rth->dst);
        }
 
        return rth;
@@ -788,7 +668,7 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
                struct net_vrf *vrf = netdev_priv(dev);
 
                rt = vrf->rt6;
-               atomic_inc(&rt->dst.__refcnt);
+               dst_hold(&rt->dst);
        }
 
        return (struct dst_entry *)rt;
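
Switching from open-coded __refcnt manipulation and dst_destroy() to dst_hold()/dst_release() lets the dst core decide when the cached entry actually goes away. The pairing in isolation (illustrative, not vrf code):

    #include <net/dst.h>

    static struct dst_entry *demo_get_cached_dst(struct dst_entry *cached)
    {
            dst_hold(cached);       /* take a reference for the caller */
            return cached;
    }

    static void demo_put_cached_dst(struct dst_entry *dst)
    {
            dst_release(dst);       /* freed by the dst core on the last put */
    }
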
@@ -946,19 +826,6 @@ static int __init vrf_init_module(void)
 {
        int rc;
 
-       vrf_dst_ops.kmem_cachep =
-               kmem_cache_create("vrf_ip_dst_cache",
-                                 sizeof(struct rtable), 0,
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
-
-       if (!vrf_dst_ops.kmem_cachep)
-               return -ENOMEM;
-
-       rc = init_dst_ops6_kmem_cachep();
-       if (rc != 0)
-               goto error2;
-
        register_netdevice_notifier(&vrf_notifier_block);
 
        rc = rtnl_link_register(&vrf_link_ops);
@@ -969,22 +836,10 @@ static int __init vrf_init_module(void)
 
 error:
        unregister_netdevice_notifier(&vrf_notifier_block);
-       free_dst_ops6_kmem_cachep();
-error2:
-       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
        return rc;
 }
 
-static void __exit vrf_cleanup_module(void)
-{
-       rtnl_link_unregister(&vrf_link_ops);
-       unregister_netdevice_notifier(&vrf_notifier_block);
-       kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
-       free_dst_ops6_kmem_cachep();
-}
-
 module_init(vrf_init_module);
-module_exit(vrf_cleanup_module);
 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
 MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
 MODULE_LICENSE("GPL");
index 72380af9dc523e56055eecccd5402de62972412e..b0603e796ad8d0d766e79c7deaf5de4c24c06ec1 100644 (file)
@@ -5680,11 +5680,12 @@ static int b43_bcma_probe(struct bcma_device *core)
        INIT_WORK(&wl->firmware_load, b43_request_firmware);
        schedule_work(&wl->firmware_load);
 
-bcma_out:
        return err;
 
 bcma_err_wireless_exit:
        ieee80211_free_hw(wl->hw);
+bcma_out:
+       kfree(dev);
        return err;
 }
 
@@ -5712,8 +5713,8 @@ static void b43_bcma_remove(struct bcma_device *core)
        b43_rng_exit(wl);
 
        b43_leds_unregister(wl);
-
        ieee80211_free_hw(wl->hw);
+       kfree(wldev->dev);
 }
 
 static struct bcma_driver b43_bcma_driver = {
@@ -5796,6 +5797,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
 
        b43_leds_unregister(wl);
        b43_wireless_exit(dev, wl);
+       kfree(dev);
 }
 
 static struct ssb_driver b43_ssb_driver = {
index 76e649c680a16bb5930f7c6d137aa429b779360d..a50f4df7eae78d1e3682f7b1429c9b3bc42b05fb 100644 (file)
@@ -1147,6 +1147,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /* the fw is stopped, the aux sta is dead: clean up driver state */
        iwl_mvm_del_aux_sta(mvm);
 
+       iwl_free_fw_paging(mvm);
+
        /*
         * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
         * won't be called in this case).
index 5e8ab796d5bc06b86fcbdab0f48edb2698074579..d278399097dc5bedb72ae8bed05da5db263cd6f2 100644 (file)
@@ -761,8 +761,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
-       iwl_free_fw_paging(mvm);
-
        iwl_mvm_tof_clean(mvm);
 
        ieee80211_free_hw(mvm->hw);
index eb39c7e09781e9dd04a159f04449a86bc93cc1b5..b2b79354d5c02be2b172b508a80af29ffec5a880 100644 (file)
@@ -732,8 +732,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
         */
        val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
        if (val & (BIT(1) | BIT(17))) {
-               IWL_INFO(trans,
-                        "can't access the RSA semaphore it is write protected\n");
+               IWL_DEBUG_INFO(trans,
+                              "can't access the RSA semaphore it is write protected\n");
                return 0;
        }
 
index 95dcbff4673b1490c72fec5ee9bc55bcfa5f7c20..6a8245c4ea48c4c6cc463647f1229726bea6a5d9 100644 (file)
@@ -2488,9 +2488,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
                for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
                        rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
 
-                       RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
-                                "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
-                                rtldm->thermalvalue, thermal_value);
+               RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+                        "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+                        rtldm->thermalvalue, thermal_value);
                /*Record last Power Tracking Thermal Value*/
                rtldm->thermalvalue = thermal_value;
        }
index 01b9d0a00abcb06186bdb80d44c5aa9e5f05838c..d11cdbb8fba3edab6d0bfc69490c72b0c40394dd 100644 (file)
@@ -275,6 +275,19 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
 }
 EXPORT_SYMBOL(pci_write_vpd);
 
+/**
+ * pci_set_vpd_size - Set size of Vital Product Data space
+ * @dev:       pci device struct
+ * @len:       size of vpd space
+ */
+int pci_set_vpd_size(struct pci_dev *dev, size_t len)
+{
+       if (!dev->vpd || !dev->vpd->ops)
+               return -ENODEV;
+       return dev->vpd->ops->set_size(dev, len);
+}
+EXPORT_SYMBOL(pci_set_vpd_size);
+
 #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)
 
 /**
@@ -498,9 +511,23 @@ out:
        return ret ? ret : count;
 }
 
+static int pci_vpd_set_size(struct pci_dev *dev, size_t len)
+{
+       struct pci_vpd *vpd = dev->vpd;
+
+       if (len == 0 || len > PCI_VPD_MAX_SIZE)
+               return -EIO;
+
+       vpd->valid = 1;
+       vpd->len = len;
+
+       return 0;
+}
+
 static const struct pci_vpd_ops pci_vpd_ops = {
        .read = pci_vpd_read,
        .write = pci_vpd_write,
+       .set_size = pci_vpd_set_size,
 };
 
 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
@@ -533,9 +560,24 @@ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
        return ret;
 }
 
+static int pci_vpd_f0_set_size(struct pci_dev *dev, size_t len)
+{
+       struct pci_dev *tdev = pci_get_slot(dev->bus,
+                                           PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+       int ret;
+
+       if (!tdev)
+               return -ENODEV;
+
+       ret = pci_set_vpd_size(tdev, len);
+       pci_dev_put(tdev);
+       return ret;
+}
+
 static const struct pci_vpd_ops pci_vpd_f0_ops = {
        .read = pci_vpd_f0_read,
        .write = pci_vpd_f0_write,
+       .set_size = pci_vpd_f0_set_size,
 };
 
 int pci_vpd_init(struct pci_dev *dev)
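
pci_set_vpd_size() gives drivers that know their device's real VPD length a way to override the size the core inferred; a hypothetical caller (the 1 KiB figure is only an example):

    #include <linux/pci.h>

    static int demo_declare_vpd_size(struct pci_dev *pdev)
    {
            int rc = pci_set_vpd_size(pdev, 1024);

            if (rc)
                    return rc;
            /* subsequent pci_read_vpd()/pci_write_vpd() calls are now
             * bounded by the declared length instead of the parsed one.
             */
            return 0;
    }
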
index eb5a2755a1646649a7e1aaea263a1cf118a45af4..2f817fa4c661e72274e873c38f460cefe04faa79 100644 (file)
@@ -32,7 +32,7 @@
 #define to_imx6_pcie(x)        container_of(x, struct imx6_pcie, pp)
 
 struct imx6_pcie {
-       struct gpio_desc        *reset_gpio;
+       int                     reset_gpio;
        struct clk              *pcie_bus;
        struct clk              *pcie_phy;
        struct clk              *pcie;
@@ -309,10 +309,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
        usleep_range(200, 500);
 
        /* Some boards don't have PCIe reset GPIO. */
-       if (imx6_pcie->reset_gpio) {
-               gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+       if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+               gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
                msleep(100);
-               gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+               gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
        }
        return 0;
 
@@ -523,6 +523,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
 {
        struct imx6_pcie *imx6_pcie;
        struct pcie_port *pp;
+       struct device_node *np = pdev->dev.of_node;
        struct resource *dbi_base;
        struct device_node *node = pdev->dev.of_node;
        int ret;
@@ -544,8 +545,15 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
                return PTR_ERR(pp->dbi_base);
 
        /* Fetch GPIOs */
-       imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
-                                                       GPIOD_OUT_LOW);
+       imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+                                           GPIOF_OUT_INIT_LOW, "PCIe reset");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get reset gpio\n");
+                       return ret;
+               }
+       }
 
        /* Fetch clocks */
        imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
index d0fb93481573ad3282e917e09d4852c723ecbd15..a814bbb80fcb3d1ddbf35508b7dcf5808f3d0e5d 100644 (file)
@@ -97,6 +97,7 @@ static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
 struct pci_vpd_ops {
        ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
        ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+       int (*set_size)(struct pci_dev *dev, size_t len);
 };
 
 struct pci_vpd {
index 32346b5a8a119789c1857c90ec13902616ace318..f70090897fdf19c9777c332401dd01de7b1efc52 100644 (file)
@@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
-                        /* Restore and enable the counter */
-                       armpmu_start(event, PERF_EF_RELOAD);
+                        /*
+                         * Restore and enable the counter.
+                         *
+                         * armpmu_start() indirectly calls
+                         * perf_event_update_userpage(), which requires RCU
+                         * read locking to be functional.  Wrap the call in
+                         * RCU_NONIDLE() so the RCU subsystem knows this CPU
+                         * is not idle, from an RCU perspective, for the
+                         * duration of the armpmu_start() call.
+                         */
+                       RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
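
RCU_NONIDLE() is the stock wrapper for running a short RCU-using statement from a context RCU otherwise treats as idle, such as a CPU-PM notifier; a minimal illustration with a hypothetical callback:

    #include <linux/rcupdate.h>

    static void demo_cpu_pm_exit(void (*restart_counter)(void))
    {
            /* RCU must watch this CPU while restart_counter() runs,
             * e.g. a counter-restart path that ends up touching
             * perf_event_update_userpage().
             */
            RCU_NONIDLE(restart_counter());
    }
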
index 77e2d02e6bee228ac033a28cccea90047439ed86..793ecb6d87bcaa2a56a914b2745bea6453929637 100644 (file)
@@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
        if (!np)
                return -ENODEV;
 
+       if (!dev->parent || !dev->parent->of_node)
+               return -ENODEV;
+
        dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
        if (IS_ERR(dp))
                return -ENOMEM;
@@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
                return ret;
        }
 
-       dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+       dp->grf = syscon_node_to_regmap(dev->parent->of_node);
        if (IS_ERR(dp->grf)) {
-               dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
+               dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
                return PTR_ERR(dp->grf);
        }
 
index 887b4c27195f430766030e088b33ea2b037a7967..6ebcf3e41c467233c7d198cc72a7b93a4f7fe4b1 100644 (file)
@@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
        struct regmap *grf;
        unsigned int reg_offset;
 
-       grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
+       if (!dev->parent || !dev->parent->of_node)
+               return -ENODEV;
+
+       grf = syscon_node_to_regmap(dev->parent->of_node);
        if (IS_ERR(grf)) {
                dev_err(dev, "Missing rockchip,grf property\n");
                return PTR_ERR(grf);
index debe1219d76d61b1c3908bf4820de8644dd65d9c..fc8cbf6117234ba31541b3f0153d0d23e8bc644b 100644 (file)
@@ -2,6 +2,7 @@ config PINCTRL_IMX
        bool
        select PINMUX
        select PINCONF
+       select REGMAP
 
 config PINCTRL_IMX1_CORE
        bool
index 2bbe6f7964a79d9099bd512f2ac430f0b2d96861..6ab8c3ccdeea4cd8b82504f707382f4a6e3c65ce 100644 (file)
@@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
        struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
        int eint_num, virq, eint_offset;
        unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
-       static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
+       static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
+                                               128000, 256000};
        const struct mtk_desc_pin *pin;
        struct irq_data *d;
 
@@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
        if (!mtk_eint_can_en_debounce(pctl, eint_num))
                return -ENOSYS;
 
-       dbnc = ARRAY_SIZE(dbnc_arr);
-       for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
-               if (debounce <= dbnc_arr[i]) {
+       dbnc = ARRAY_SIZE(debounce_time);
+       for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
+               if (debounce <= debounce_time[i]) {
                        dbnc = i;
                        break;
                }
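
The fix above renames the table and stores the supported debounce times in microseconds; the lookup then picks the smallest supported value that covers the request. A standalone sketch of that lookup (illustrative names):

    #include <linux/kernel.h>

    static const unsigned int demo_debounce_us[] = {
            500, 1000, 16000, 32000, 64000, 128000, 256000,
    };

    static int demo_debounce_index(unsigned int debounce_us)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(demo_debounce_us); i++)
                    if (debounce_us <= demo_debounce_us[i])
                            return i;       /* register setting */
            return -EINVAL;                 /* longer than the hardware supports */
    }
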
index fb126d56ad40d4c381230978f4da49baca049d40..cf9bafa10acfb51ad42af96cf6580d6de48c1b1d 100644 (file)
@@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
 
                /* Parse pins in each row from LSB */
                while (mask) {
-                       bit_pos = ffs(mask);
+                       bit_pos = __ffs(mask);
                        pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
-                       mask_pos = ((pcs->fmask) << (bit_pos - 1));
+                       mask_pos = ((pcs->fmask) << bit_pos);
                        val_pos = val & mask_pos;
                        submask = mask & mask_pos;
 
@@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
        ret = of_property_read_u32(np, "pinctrl-single,function-mask",
                                   &pcs->fmask);
        if (!ret) {
-               pcs->fshift = ffs(pcs->fmask) - 1;
+               pcs->fshift = __ffs(pcs->fmask);
                pcs->fmax = pcs->fmask >> pcs->fshift;
        } else {
                /* If mask property doesn't exist, function mux is invalid. */
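
The switch from ffs() to __ffs() matters because ffs() is 1-based (ffs(0x10) == 5, ffs(0) == 0) while __ffs() is 0-based and undefined for zero (__ffs(0x10) == 4), so using ffs(mask) directly as a shift over-shifts by one bit. A small helper capturing the corrected idiom (illustrative only):

    #include <linux/bitops.h>

    /* shift needed to move a field mask down to bit 0; the caller must
     * guarantee mask != 0, since __ffs(0) is undefined.
     */
    static inline unsigned int demo_field_shift(unsigned long mask)
    {
            return __ffs(mask);
    }
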
index 10ce6cba4455c206e8ab23906db520b880d4a95a..09356684c32f6a78f03dbc5ef1dd7addc828bf08 100644 (file)
@@ -127,8 +127,10 @@ static int lis3lv02d_acpi_read(struct lis3lv02d *lis3, int reg, u8 *ret)
        arg0.integer.value = reg;
 
        status = acpi_evaluate_integer(dev->handle, "ALRD", &args, &lret);
+       if (ACPI_FAILURE(status))
+               return -EINVAL;
        *ret = lret;
-       return (status != AE_OK) ? -EINVAL : 0;
+       return 0;
 }
 
 /**
@@ -173,6 +175,7 @@ static int lis3lv02d_dmi_matched(const struct dmi_system_id *dmi)
 DEFINE_CONV(normal, 1, 2, 3);
 DEFINE_CONV(y_inverted, 1, -2, 3);
 DEFINE_CONV(x_inverted, -1, 2, 3);
+DEFINE_CONV(x_inverted_usd, -1, 2, -3);
 DEFINE_CONV(z_inverted, 1, 2, -3);
 DEFINE_CONV(xy_swap, 2, 1, 3);
 DEFINE_CONV(xy_rotated_left, -2, 1, 3);
@@ -236,6 +239,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted),
        AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted),
        AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left),
+       AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd),
        AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left),
        AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted),
        AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap),
index f93abc8c1424ad956f62b8dbb5a6dadffd6b4c5b..a818db6aa08f22dfaf8eca3a5b02049a31363cbd 100644 (file)
@@ -91,6 +91,8 @@ static int intel_hid_pl_resume_handler(struct device *device)
 }
 
 static const struct dev_pm_ops intel_hid_pl_pm_ops = {
+       .freeze  = intel_hid_pl_suspend_handler,
+       .restore  = intel_hid_pl_resume_handler,
        .suspend  = intel_hid_pl_suspend_handler,
        .resume  = intel_hid_pl_resume_handler,
 };
index 3fb1d85c70a89d8d89ab700627691f70a63d03c1..6f497e80c9df220df88563737f33ff701569f31b 100644 (file)
@@ -687,8 +687,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        ipcdev.acpi_io_size = size;
        dev_info(&pdev->dev, "io res: %pR\n", res);
 
-       /* This is index 0 to cover BIOS data register */
        punit_res = punit_res_array;
+       /* This is index 0 to cover BIOS data register */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_BIOS_DATA_INDEX);
        if (!res) {
@@ -698,55 +698,51 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        *punit_res = *res;
        dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
 
+       /* This is index 1 to cover BIOS interface register */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_BIOS_IFACE_INDEX);
        if (!res) {
                dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
                return -ENXIO;
        }
-       /* This is index 1 to cover BIOS interface register */
        *++punit_res = *res;
        dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
 
+       /* This is index 2 to cover ISP data register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_ISP_DATA_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit ISP data\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
        }
-       /* This is index 2 to cover ISP data register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
 
+       /* This is index 3 to cover ISP interface register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_ISP_IFACE_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit ISP iface\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
        }
-       /* This is index 3 to cover ISP interface register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
 
+       /* This is index 4 to cover GTD data register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_GTD_DATA_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit GTD data\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
        }
-       /* This is index 4 to cover GTD data register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
 
+       /* This is index 5 to cover GTD interface register, optional */
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_GTD_IFACE_INDEX);
-       if (!res) {
-               dev_err(&pdev->dev, "Failed to get res of punit GTD iface\n");
-               return -ENXIO;
+       ++punit_res;
+       if (res) {
+               *punit_res = *res;
+               dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
        }
-       /* This is index 5 to cover GTD interface register */
-       *++punit_res = *res;
-       dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM,
                                    PLAT_RESOURCE_IPC_INDEX);
index bd875409a02dd3fbc13f8b61d372604e62f8abf1..a47a41fc10ad77c427158811857da4bc21cd747f 100644 (file)
@@ -227,6 +227,11 @@ static int intel_punit_get_bars(struct platform_device *pdev)
        struct resource *res;
        void __iomem *addr;
 
+       /*
+        * The following resources are required
+        * - BIOS_IPC BASE_DATA
+        * - BIOS_IPC BASE_IFACE
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        addr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(addr))
@@ -239,29 +244,40 @@ static int intel_punit_get_bars(struct platform_device *pdev)
                return PTR_ERR(addr);
        punit_ipcdev->base[BIOS_IPC][BASE_IFACE] = addr;
 
+       /*
+        * The following resources are optional
+        * - ISPDRIVER_IPC BASE_DATA
+        * - ISPDRIVER_IPC BASE_IFACE
+        * - GTDRIVER_IPC BASE_DATA
+        * - GTDRIVER_IPC BASE_IFACE
+        */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
-       addr = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
-       punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+       if (res) {
+               addr = devm_ioremap_resource(&pdev->dev, res);
+               if (!IS_ERR(addr))
+                       punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
+       }
 
        return 0;
 }
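
Both hunks above apply the same rule: the ISP and GT IPC windows are optional, so the probe path only stores or maps a BAR when platform_get_resource() actually returned one, instead of aborting the whole device with -ENXIO or PTR_ERR(). A minimal probe-context sketch of that optional-MMIO pattern follows; mydev_map_optional_bar() and its index argument are invented names, not part of the driver.

    /* Sketch only: map an MMIO window if it exists, succeed quietly if not. */
    static int mydev_map_optional_bar(struct platform_device *pdev,
                                      int index, void __iomem **base)
    {
            struct resource *res;
            void __iomem *addr;

            *base = NULL;

            res = platform_get_resource(pdev, IORESOURCE_MEM, index);
            if (!res)
                    return 0;               /* absent: not an error */

            addr = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(addr))
                    return PTR_ERR(addr);   /* present but unusable: real error */

            *base = addr;
            return 0;
    }
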
index 397119f83e82384913a305065a70f1ca7ae815d8..781bd10ca7ac5ba147e4e718d1d54577a7626e33 100644 (file)
@@ -659,7 +659,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig,
 static int telemetry_plt_set_sampling_period(u8 pss_period, u8 ioss_period)
 {
        u32 telem_ctrl = 0;
-       int ret;
+       int ret = 0;
 
        mutex_lock(&(telm_conf->telem_lock));
        if (ioss_period) {
index e305ab541a2227e1f94ff3e80a0da369ed385946..9255ff3ee81ac74e5fc269f256aed955132e730d 100644 (file)
@@ -7972,10 +7972,12 @@ static int fan_get_status_safe(u8 *status)
                fan_update_desired_level(s);
        mutex_unlock(&fan_mutex);
 
+       if (rc)
+               return rc;
        if (status)
                *status = s;
 
-       return rc;
+       return 0;
 }
 
 static int fan_get_speed(unsigned int *speed)
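
The reordering above makes fan_get_status_safe() bail out as soon as the underlying read fails, so the caller's *status is only ever written on success. A small standalone sketch of that "outputs only on success" convention, with a stubbed-out low-level read:

    #include <stdio.h>

    static int hw_read(int *raw)            /* stand-in for the real hardware read */
    {
            *raw = 0x55;
            return 0;                       /* return non-zero here to simulate failure */
    }

    static int read_status(int *status)
    {
            int raw, rc;

            rc = hw_read(&raw);
            if (rc)
                    return rc;              /* on error, *status stays untouched */

            if (status)
                    *status = raw;
            return 0;
    }

    int main(void)
    {
            int status = -1;

            if (!read_status(&status))
                    printf("status = %#x\n", status);
            return 0;
    }
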
index df1f1a76a8629fdf4c1b709ff519040ffad18552..01e12d221a8b9b819b2ff396f2231a0a51492b76 100644 (file)
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
 /* Field definitions */
 #define HCI_ACCEL_MASK                 0x7fff
 #define HCI_HOTKEY_DISABLE             0x0b
-#define HCI_HOTKEY_ENABLE              0x01
+#define HCI_HOTKEY_ENABLE              0x09
 #define HCI_HOTKEY_SPECIAL_FUNCTIONS   0x10
 #define HCI_LCD_BRIGHTNESS_BITS                3
 #define HCI_LCD_BRIGHTNESS_SHIFT       (16-HCI_LCD_BRIGHTNESS_BITS)
index b2156ee5bae1a50f72b8cbf69d68741ef0082671..ecb7dbae9be9daa0ee96ede34a446a78e8e7debd 100644 (file)
@@ -863,7 +863,7 @@ out:
  * A user-initiated temperature conversion is not started by this function,
  * so the temperature is updated once every 64 seconds.
  */
-static int ds3231_hwmon_read_temp(struct device *dev, s16 *mC)
+static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC)
 {
        struct ds1307 *ds1307 = dev_get_drvdata(dev);
        u8 temp_buf[2];
@@ -892,7 +892,7 @@ static ssize_t ds3231_hwmon_show_temp(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
        int ret;
-       s16 temp;
+       s32 temp;
 
        ret = ds3231_hwmon_read_temp(dev, &temp);
        if (ret)
@@ -1531,7 +1531,7 @@ read_rtc:
                return PTR_ERR(ds1307->rtc);
        }
 
-       if (ds1307_can_wakeup_device) {
+       if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) {
                /* Disable request for an IRQ */
                want_irq = false;
                dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n");
index 1bce9cf51b1e917632fe97bcbf636293e5442adc..b83908670a9ab0a229029353b669cbd82956eca2 100644 (file)
@@ -756,15 +756,16 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
        blk_cleanup_queue(dev_info->dcssblk_queue);
        dev_info->gd->queue = NULL;
        put_disk(dev_info->gd);
-       device_unregister(&dev_info->dev);
 
        /* unload all related segments */
        list_for_each_entry(entry, &dev_info->seg_list, lh)
                segment_unload(entry->segment_name);
 
-       put_device(&dev_info->dev);
        up_write(&dcssblk_devices_sem);
 
+       device_unregister(&dev_info->dev);
+       put_device(&dev_info->dev);
+
        rc = count;
 out_buf:
        kfree(local_buf);
index 75d9896deccb96da19432dcf7b96d339651d625e..e6f54d3b89690613e94d388f1e325adfd32b25ad 100644 (file)
@@ -303,7 +303,7 @@ static void scm_blk_request(struct request_queue *rq)
                if (req->cmd_type != REQ_TYPE_FS) {
                        blk_start_request(req);
                        blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
-                       blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, -EIO);
                        continue;
                }
 
index f3bb7af4e984ed50471aa75de673c4287d297503..ead83a24bcd1ea000b89e3fb6e6b3b0531c588d3 100644 (file)
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
 {
        struct flowi6 fl;
 
+       memset(&fl, 0, sizeof(fl));
        if (saddr)
                memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
        if (daddr)
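
The added memset() matters because the flow key is an on-stack structure of which only saddr and daddr are conditionally filled in; without the clear, the route lookup would key off indeterminate values in the remaining members. A standalone toy showing the difference (flow_key is a made-up stand-in, not the real struct flowi6):

    #include <stdio.h>
    #include <string.h>

    struct flow_key {
            unsigned int daddr;
            unsigned int mark;              /* member the caller never sets */
    };

    int main(void)
    {
            struct flow_key bad;            /* bad.mark is indeterminate */
            struct flow_key good;

            memset(&good, 0, sizeof(good)); /* the fix: clear before a partial fill */
            bad.daddr = good.daddr = 1;

            printf("good.mark = %u\n", good.mark);  /* always 0 */
            /* reading bad.mark here would be using an indeterminate value */
            return 0;
    }
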
index 57e781c71e6776223b7f13fecaa3243572302cf7..837effe199071bcb319a525e4c9498b4cf2225ba 100644 (file)
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
                genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
 
                /*
-                * With CONFIG_PM disabled turn on all domains to make the
-                * hardware usable.
+                * Initially turn on all domains to make the domains usable
+                * with !CONFIG_PM and to get the hardware in sync with the
+                * software.  The unused domains will be switched off during
+                * late_init time.
                 */
-               if (!IS_ENABLED(CONFIG_PM))
-                       genpd->power_on(genpd);
+               genpd->power_on(genpd);
 
-               pm_genpd_init(genpd, NULL, true);
+               pm_genpd_init(genpd, NULL, false);
        }
 
        /*
index c37eedc35a240d0d3fe4387698f50e7884e52cfc..3c3dc4a3d52cfb4685bdcbd2c62b55d33447804d 100644 (file)
@@ -376,6 +376,8 @@ config MTK_THERMAL
        tristate "Temperature sensor driver for mediatek SoCs"
        depends on ARCH_MEDIATEK || COMPILE_TEST
        depends on HAS_IOMEM
+       depends on NVMEM || NVMEM=n
+       depends on RESET_CONTROLLER
        default y
        help
          Enable this option if you want to have support for thermal management
index 3d93b1c07ceef54d533515e9b6b47f4c39ddd8b1..507632b9648e3e193942ca3cc190c2cfab705d66 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/thermal.h>
 #include <linux/reset.h>
 #include <linux/types.h>
-#include <linux/nvmem-consumer.h>
 
 /* AUXADC Registers */
 #define AUXADC_CON0_V          0x000
@@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
 
 module_platform_driver(mtk_thermal_driver);
 
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
 MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
 MODULE_DESCRIPTION("Mediatek thermal driver");
 MODULE_LICENSE("GPL v2");
index 49ac23d3e776c8c98b8884fde954fee4d0b3af02..d8ec44b194d64a012c1dff1472c233f83f1119b0 100644 (file)
@@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
  * otherwise, it returns a corresponding ERR_PTR(). Caller must
  * check the return value with help of IS_ERR() helper.
  */
-static struct __thermal_zone *
-thermal_of_build_thermal_zone(struct device_node *np)
+static struct __thermal_zone
+__init *thermal_of_build_thermal_zone(struct device_node *np)
 {
        struct device_node *child = NULL, *gchild;
        struct __thermal_zone *tz;
index 1246aa6fcab0caeda03ace6806c4df2d46c1f164..2f1a863a8e15bc834e007fea264d262d180a04cf 100644 (file)
@@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
        capped_extra_power = 0;
        extra_power = 0;
        for (i = 0; i < num_actors; i++) {
-               u64 req_range = req_power[i] * power_range;
+               u64 req_range = (u64)req_power[i] * power_range;
 
                granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
                                                         total_req_power);
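
The (u64) cast above is needed because req_power[i] and power_range are both 32-bit: C performs the multiplication in 32-bit arithmetic and only widens the already-wrapped result for the u64 assignment. A standalone illustration with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t req_power = 2000000;   /* hypothetical milliwatt figures */
            uint32_t power_range = 3000000;

            uint64_t wrapped = req_power * power_range;             /* 32-bit multiply, high bits lost */
            uint64_t correct = (uint64_t)req_power * power_range;   /* widened first, full product kept */

            printf("wrapped=%llu correct=%llu\n",
                   (unsigned long long)wrapped, (unsigned long long)correct);
            return 0;
    }
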
index d4b54653ecf8d7f489a6200a8491b46b52f3ba14..f1db496255556c53750578ed25fae73779a882de 100644 (file)
@@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
 {
        struct thermal_zone_device *tz = to_thermal_zone(dev);
        int trip, ret;
-       unsigned long temperature;
+       int temperature;
 
        if (!tz->ops->set_trip_temp)
                return -EPERM;
@@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
        if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
                return -EINVAL;
 
-       if (kstrtoul(buf, 10, &temperature))
+       if (kstrtoint(buf, 10, &temperature))
                return -EINVAL;
 
        ret = tz->ops->set_trip_temp(tz, trip, temperature);
@@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
 {
        struct thermal_zone_device *tz = to_thermal_zone(dev);
        int ret = 0;
-       unsigned long temperature;
+       int temperature;
 
-       if (kstrtoul(buf, 10, &temperature))
+       if (kstrtoint(buf, 10, &temperature))
                return -EINVAL;
 
        if (!tz->ops->set_emul_temp) {
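
Both hunks switch the sysfs parsers to the signed kstrtoint() because trip and emulated temperatures are millidegree values that may legitimately be negative; the unsigned kstrtoul() does not accept a leading minus sign, so a write such as "-5000" could never reach set_trip_temp(). A two-line sketch of the difference (return values elided):

    int temp;
    unsigned long utemp;

    kstrtoint("-5000", 10, &temp);  /* succeeds, temp == -5000 */
    kstrtoul("-5000", 10, &utemp);  /* fails: unsigned parse, '-' rejected */
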
index e16a49b507efbd57734b0b8f522d0e2a5dfe03ce..cf0dc51a2690bac16c108952f93dda309fa89b7e 100644 (file)
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
  */
 
 static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
-               struct inode *ptm_inode, int idx)
+               struct file *file, int idx)
 {
        /* Master must be open via /dev/ptmx */
        return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
  */
 
 static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
-               struct inode *pts_inode, int idx)
+               struct file *file, int idx)
 {
        struct tty_struct *tty;
 
        mutex_lock(&devpts_mutex);
-       tty = devpts_get_priv(pts_inode);
+       tty = devpts_get_priv(file->f_path.dentry);
        mutex_unlock(&devpts_mutex);
        /* Master must be open before slave */
        if (!tty)
@@ -663,14 +663,14 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
 /* this is called once with whichever end is closed last */
 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-       struct inode *ptmx_inode;
+       struct pts_fs_info *fsi;
 
        if (tty->driver->subtype == PTY_TYPE_MASTER)
-               ptmx_inode = tty->driver_data;
+               fsi = tty->driver_data;
        else
-               ptmx_inode = tty->link->driver_data;
-       devpts_kill_index(ptmx_inode, tty->index);
-       devpts_del_ref(ptmx_inode);
+               fsi = tty->link->driver_data;
+       devpts_kill_index(fsi, tty->index);
+       devpts_put_ref(fsi);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -720,8 +720,9 @@ static const struct tty_operations pty_unix98_ops = {
 
 static int ptmx_open(struct inode *inode, struct file *filp)
 {
+       struct pts_fs_info *fsi;
        struct tty_struct *tty;
-       struct inode *slave_inode;
+       struct dentry *dentry;
        int retval;
        int index;
 
@@ -734,54 +735,46 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        if (retval)
                return retval;
 
+       fsi = devpts_get_ref(inode, filp);
+       retval = -ENODEV;
+       if (!fsi)
+               goto out_free_file;
+
        /* find a device that is not in use. */
        mutex_lock(&devpts_mutex);
-       index = devpts_new_index(inode);
-       if (index < 0) {
-               retval = index;
-               mutex_unlock(&devpts_mutex);
-               goto err_file;
-       }
-
+       index = devpts_new_index(fsi);
        mutex_unlock(&devpts_mutex);
 
-       mutex_lock(&tty_mutex);
-       tty = tty_init_dev(ptm_driver, index);
+       retval = index;
+       if (index < 0)
+               goto out_put_ref;
 
-       if (IS_ERR(tty)) {
-               retval = PTR_ERR(tty);
-               goto out;
-       }
 
+       mutex_lock(&tty_mutex);
+       tty = tty_init_dev(ptm_driver, index);
        /* The tty returned here is locked so we can safely
           drop the mutex */
        mutex_unlock(&tty_mutex);
 
-       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
-       tty->driver_data = inode;
+       retval = PTR_ERR(tty);
+       if (IS_ERR(tty))
+               goto out;
 
        /*
-        * In the case where all references to ptmx inode are dropped and we
-        * still have /dev/tty opened pointing to the master/slave pair (ptmx
-        * is closed/released before /dev/tty), we must make sure that the inode
-        * is still valid when we call the final pty_unix98_shutdown, thus we
-        * hold an additional reference to the ptmx inode. For the same /dev/tty
-        * last close case, we also need to make sure the super_block isn't
-        * destroyed (devpts instance unmounted), before /dev/tty is closed and
-        * on its release devpts_kill_index is called.
+        * From here on out, the tty is "live", and the index and
+        * fsi will be killed/put by the tty_release()
         */
-       devpts_add_ref(inode);
+       set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+       tty->driver_data = fsi;
 
        tty_add_file(tty, filp);
 
-       slave_inode = devpts_pty_new(inode,
-                       MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
-                       tty->link);
-       if (IS_ERR(slave_inode)) {
-               retval = PTR_ERR(slave_inode);
+       dentry = devpts_pty_new(fsi, index, tty->link);
+       if (IS_ERR(dentry)) {
+               retval = PTR_ERR(dentry);
                goto err_release;
        }
-       tty->link->driver_data = slave_inode;
+       tty->link->driver_data = dentry;
 
        retval = ptm_driver->ops->open(tty, filp);
        if (retval)
@@ -793,12 +786,14 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        return 0;
 err_release:
        tty_unlock(tty);
+       // This will also put-ref the fsi
        tty_release(inode, filp);
        return retval;
 out:
-       mutex_unlock(&tty_mutex);
-       devpts_kill_index(inode, index);
-err_file:
+       devpts_kill_index(fsi, index);
+out_put_ref:
+       devpts_put_ref(fsi);
+out_free_file:
        tty_free_file(filp);
        return retval;
 }
index e213da01a3d71b5fa6317a4990d30944ac2cd2c0..00ad2637b08c082f523e3a28c3e3472b91b82438 100644 (file)
@@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
        /*
         * Empty the RX FIFO, we are not interested in anything
         * received during the half-duplex transmission.
+        * Enable previously disabled RX interrupts.
         */
-       if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+       if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
                serial8250_clear_fifos(p);
+
+               serial8250_rpm_get(p);
+
+               p->ier |= UART_IER_RLSI | UART_IER_RDI;
+               serial_port_out(&p->port, UART_IER, p->ier);
+
+               serial8250_rpm_put(p);
+       }
 }
 
 static void serial8250_em485_handle_stop_tx(unsigned long arg)
index 64742a086ae3385f3fb1680b912e48dcfb1770de..4d7cb9c04fce73b93db59fdaed5832310aaaff44 100644 (file)
@@ -324,7 +324,6 @@ config SERIAL_8250_EM
 config SERIAL_8250_RT288X
        bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
        depends on SERIAL_8250
-       depends on MIPS || COMPILE_TEST
        default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
        help
          Selecting this option will add support for the alternate register
index c9fdfc8bf47f85b3a8e74596f5bea496ca707691..d08baa668d5de9feeaae9617710f5466360f270a 100644 (file)
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
        iowrite32be(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
        .in = uartlite_inbe32,
        .out = uartlite_outbe32,
 };
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
        iowrite32(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
        .in = uartlite_inle32,
        .out = uartlite_outle32,
 };
 
 static inline u32 uart_in32(u32 offset, struct uart_port *port)
 {
-       const struct uartlite_reg_ops *reg_ops = port->private_data;
+       struct uartlite_reg_ops *reg_ops = port->private_data;
 
        return reg_ops->in(port->membase + offset);
 }
 
 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
 {
-       const struct uartlite_reg_ops *reg_ops = port->private_data;
+       struct uartlite_reg_ops *reg_ops = port->private_data;
 
        reg_ops->out(val, port->membase + offset);
 }
index 9b04d72e752e5e398aef0122eabf0d9ba2d2e1c1..24d5491ef0da7c4b1c8354ac0feab068d0bfa059 100644 (file)
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
  *     Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
  */
 static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
-               struct inode *inode, int idx)
+               struct file *file, int idx)
 {
        struct tty_struct *tty;
 
        if (driver->ops->lookup)
-               tty = driver->ops->lookup(driver, inode, idx);
+               tty = driver->ops->lookup(driver, file, idx);
        else
                tty = driver->ttys[idx];
 
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
        }
 
        /* check whether we're reopening an existing tty */
-       tty = tty_driver_lookup_tty(driver, inode, index);
+       tty = tty_driver_lookup_tty(driver, filp, index);
        if (IS_ERR(tty)) {
                mutex_unlock(&tty_mutex);
                goto out;
index fa20f5a99d125aad27d28717683b5447a419fedf..34277ced26bd27e6b66c3ffda544054383d6b0ef 100644 (file)
@@ -1150,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
        phy_exit(dwc->usb2_generic_phy);
        phy_exit(dwc->usb3_generic_phy);
 
+       usb_phy_set_suspend(dwc->usb2_phy, 1);
+       usb_phy_set_suspend(dwc->usb3_phy, 1);
+       WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
+       WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+
        pinctrl_pm_select_sleep_state(dev);
 
        return 0;
@@ -1163,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
 
        pinctrl_pm_select_default_state(dev);
 
+       usb_phy_set_suspend(dwc->usb2_phy, 0);
+       usb_phy_set_suspend(dwc->usb3_phy, 0);
+       ret = phy_power_on(dwc->usb2_generic_phy);
+       if (ret < 0)
+               return ret;
+
+       ret = phy_power_on(dwc->usb3_generic_phy);
+       if (ret < 0)
+               goto err_usb2phy_power;
+
        usb_phy_init(dwc->usb3_phy);
        usb_phy_init(dwc->usb2_phy);
        ret = phy_init(dwc->usb2_generic_phy);
        if (ret < 0)
-               return ret;
+               goto err_usb3phy_power;
 
        ret = phy_init(dwc->usb3_generic_phy);
        if (ret < 0)
@@ -1200,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
 err_usb2phy_init:
        phy_exit(dwc->usb2_generic_phy);
 
+err_usb3phy_power:
+       phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+       phy_power_off(dwc->usb2_generic_phy);
+
        return ret;
 }
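
dwc3_resume() now powers the PHYs back on first and, when a later init step fails, unwinds through the new err_usb3phy_power/err_usb2phy_power labels in reverse order. A standalone sketch of that goto-unwind idiom, with invented step names:

    #include <stdio.h>

    static int step_a_on(void)   { return 0; }
    static void step_a_off(void) { }
    static int step_b_on(void)   { return 0; }
    static void step_b_off(void) { }
    static int step_c_on(void)   { return -1; }     /* pretend the last step fails */

    static int bring_up(void)
    {
            int ret;

            ret = step_a_on();
            if (ret)
                    return ret;

            ret = step_b_on();
            if (ret)
                    goto err_a;

            ret = step_c_on();
            if (ret)
                    goto err_b;

            return 0;

    err_b:
            step_b_off();           /* undo in reverse order of bring-up */
    err_a:
            step_a_off();
            return ret;
    }

    int main(void)
    {
            printf("bring_up() = %d\n", bring_up());
            return 0;
    }
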
 
index 9ac37fe1b6a77956d530642db517e57eb3481fe8..cebf9e38b60acac5cf3361703cabef7b8e5563a2 100644 (file)
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
        file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
        if (!file) {
                ret = -ENOMEM;
-               goto err1;
+               goto err2;
        }
 
        if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
                                dwc, &dwc3_mode_fops);
                if (!file) {
                        ret = -ENOMEM;
-                       goto err1;
+                       goto err2;
                }
        }
 
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
                                dwc, &dwc3_testmode_fops);
                if (!file) {
                        ret = -ENOMEM;
-                       goto err1;
+                       goto err2;
                }
 
                file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
                                dwc, &dwc3_link_state_fops);
                if (!file) {
                        ret = -ENOMEM;
-                       goto err1;
+                       goto err2;
                }
        }
 
        return 0;
 
+err2:
+       kfree(dwc->regset);
+
 err1:
        debugfs_remove_recursive(root);
 
@@ -686,5 +689,5 @@ err0:
 void dwc3_debugfs_exit(struct dwc3 *dwc)
 {
        debugfs_remove_recursive(dwc->root);
-       dwc->root = NULL;
+       kfree(dwc->regset);
 }
index 22e9606d8e081c3ece06c3cf52f0e735ab44207c..55da2c7f727f95e678f37462cdba5c532d659936 100644 (file)
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "get_sync failed with err %d\n", ret);
-               goto err0;
+               goto err1;
        }
 
        dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 
        ret = dwc3_omap_extcon_register(omap);
        if (ret < 0)
-               goto err2;
+               goto err1;
 
        ret = of_platform_populate(node, NULL, NULL, dev);
        if (ret) {
                dev_err(&pdev->dev, "failed to create dwc3 core\n");
-               goto err3;
+               goto err2;
        }
 
        dwc3_omap_enable_irqs(omap);
 
        return 0;
 
-err3:
+err2:
        extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
        extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
-err2:
-       dwc3_omap_disable_irqs(omap);
 
 err1:
        pm_runtime_put_sync(dev);
-
-err0:
        pm_runtime_disable(dev);
 
        return ret;
index d54a028cdfebaf69920f964bd7280427088b5b38..8e4a1b195e9bd4d2646210f970e56244adc1833c 100644 (file)
@@ -2936,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
+       if (!dwc->gadget_driver)
+               return 0;
+
        if (dwc->pullups_connected) {
                dwc3_gadget_disable_irq(dwc);
                dwc3_gadget_run_stop(dwc, true, true);
@@ -2954,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
        struct dwc3_ep          *dep;
        int                     ret;
 
+       if (!dwc->gadget_driver)
+               return 0;
+
        /* Start with SuperSpeed Default */
        dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
index de9ffd60fcfa84b59291e755edc4ca0fb9945f2f..524e233d48def51b3e957af120fea769cb54ead0 100644 (file)
@@ -651,6 +651,8 @@ static int bos_desc(struct usb_composite_dev *cdev)
                ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
                ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
                ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
+               ssp_cap->bReserved = 0;
+               ssp_cap->wReserved = 0;
 
                /* SSAC = 1 (2 attributes) */
                ssp_cap->bmAttributes = cpu_to_le32(1);
index e21ca2bd6839eb6a8b73f6d1e2061dd36939bc65..15b648cbc75c4141bb5c51bd03c2c6e5b5f6c821 100644 (file)
@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
                                                   work);
        int ret = io_data->req->status ? io_data->req->status :
                                         io_data->req->actual;
+       bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
        if (io_data->read && ret > 0) {
                use_mm(io_data->mm);
@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
 
        io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
 
-       if (io_data->ffs->ffs_eventfd &&
-           !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
+       if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
                eventfd_signal(io_data->ffs->ffs_eventfd, 1);
 
        usb_ep_free_request(io_data->ep, io_data->req);
 
-       io_data->kiocb->private = NULL;
        if (io_data->read)
                kfree(io_data->to_free);
        kfree(io_data->buf);
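
The new local kiocb_has_eventfd is read before ki_complete() runs because completing the iocb can free it; afterwards only the saved copy is consulted, and the later write to kiocb->private is dropped along with it. A standalone sketch of that snapshot-then-complete pattern, with invented types:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct request {
            bool notify;                    /* flag still needed after completion */
    };

    static void complete_request(struct request *req)
    {
            free(req);                      /* completing may free the object */
    }

    static void finish(struct request *req)
    {
            bool notify = req->notify;      /* snapshot before it can vanish */

            complete_request(req);          /* req must not be touched past here */

            if (notify)
                    printf("raising the deferred notification\n");
    }

    int main(void)
    {
            struct request *req = malloc(sizeof(*req));

            if (!req)
                    return 1;
            req->notify = true;
            finish(req);
            return 0;
    }
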
index fe274b5851c77ff5e59b7c88f205cbf09ec61114..93e66a9148b90de9a3308d3b4b49a359414c7363 100644 (file)
@@ -440,13 +440,14 @@ static int clcdfb_register(struct clcd_fb *fb)
                fb->off_ienb = CLCD_PL111_IENB;
                fb->off_cntl = CLCD_PL111_CNTL;
        } else {
-#ifdef CONFIG_ARCH_VERSATILE
-               fb->off_ienb = CLCD_PL111_IENB;
-               fb->off_cntl = CLCD_PL111_CNTL;
-#else
-               fb->off_ienb = CLCD_PL110_IENB;
-               fb->off_cntl = CLCD_PL110_CNTL;
-#endif
+               if (of_machine_is_compatible("arm,versatile-ab") ||
+                   of_machine_is_compatible("arm,versatile-pb")) {
+                       fb->off_ienb = CLCD_PL111_IENB;
+                       fb->off_cntl = CLCD_PL111_CNTL;
+               } else {
+                       fb->off_ienb = CLCD_PL110_IENB;
+                       fb->off_cntl = CLCD_PL110_CNTL;
+               }
        }
 
        fb->clk = clk_get(&fb->dev->dev, NULL);
index abfd1f6e33275b6d3b192df0c504669ab771c5e7..1954ec913ce5d188e47bc90f634c3a2909beb40a 100644 (file)
@@ -200,20 +200,16 @@ static struct omap_dss_driver sharp_ls_ops = {
 static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
                  char *desc, struct gpio_desc **gpiod)
 {
-       struct gpio_desc *gd;
        int r;
 
-       *gpiod = NULL;
-
        r = devm_gpio_request_one(dev, gpio, flags, desc);
-       if (r)
+       if (r) {
+               *gpiod = NULL;
                return r == -ENOENT ? 0 : r;
+       }
 
-       gd = gpio_to_desc(gpio);
-       if (IS_ERR(gd))
-               return PTR_ERR(gd) == -ENOENT ? 0 : PTR_ERR(gd);
+       *gpiod = gpio_to_desc(gpio);
 
-       *gpiod = gd;
        return 0;
 }
 
index 655f21f991606b5bf2bef600a54b814994dbed98..0b2954d7172d78e78ac98238dfb7b50c5dccb558 100644 (file)
@@ -128,6 +128,7 @@ static const match_table_t tokens = {
 struct pts_fs_info {
        struct ida allocated_ptys;
        struct pts_mount_opts mount_opts;
+       struct super_block *sb;
        struct dentry *ptmx_dentry;
 };
 
@@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = {
        .show_options   = devpts_show_options,
 };
 
-static void *new_pts_fs_info(void)
+static void *new_pts_fs_info(struct super_block *sb)
 {
        struct pts_fs_info *fsi;
 
@@ -369,6 +370,7 @@ static void *new_pts_fs_info(void)
        ida_init(&fsi->allocated_ptys);
        fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE;
        fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE;
+       fsi->sb = sb;
 
        return fsi;
 }
@@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
        s->s_op = &devpts_sops;
        s->s_time_gran = 1;
 
-       s->s_fs_info = new_pts_fs_info();
+       s->s_fs_info = new_pts_fs_info(s);
        if (!s->s_fs_info)
                goto fail;
 
@@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = {
  * to the System V naming convention
  */
 
-int devpts_new_index(struct inode *ptmx_inode)
+int devpts_new_index(struct pts_fs_info *fsi)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi;
        int index;
        int ida_ret;
 
-       if (!sb)
+       if (!fsi)
                return -ENODEV;
 
-       fsi = DEVPTS_SB(sb);
 retry:
        if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
                return -ENOMEM;
@@ -564,11 +563,8 @@ retry:
        return index;
 }
 
-void devpts_kill_index(struct inode *ptmx_inode, int idx)
+void devpts_kill_index(struct pts_fs_info *fsi, int idx)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-       struct pts_fs_info *fsi = DEVPTS_SB(sb);
-
        mutex_lock(&allocated_ptys_lock);
        ida_remove(&fsi->allocated_ptys, idx);
        pty_count--;
@@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
 /*
  * pty code needs to hold extra references in case of last /dev/tty close
  */
-
-void devpts_add_ref(struct inode *ptmx_inode)
+struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+       struct super_block *sb;
+       struct pts_fs_info *fsi;
+
+       sb = pts_sb_from_inode(ptmx_inode);
+       if (!sb)
+               return NULL;
+       fsi = DEVPTS_SB(sb);
+       if (!fsi)
+               return NULL;
 
        atomic_inc(&sb->s_active);
-       ihold(ptmx_inode);
+       return fsi;
 }
 
-void devpts_del_ref(struct inode *ptmx_inode)
+void devpts_put_ref(struct pts_fs_info *fsi)
 {
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-
-       iput(ptmx_inode);
-       deactivate_super(sb);
+       deactivate_super(fsi->sb);
 }
 
 /**
@@ -604,22 +604,20 @@ void devpts_del_ref(struct inode *ptmx_inode)
  *
  * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
  */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-               void *priv)
+struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
 {
        struct dentry *dentry;
-       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+       struct super_block *sb;
        struct inode *inode;
        struct dentry *root;
-       struct pts_fs_info *fsi;
        struct pts_mount_opts *opts;
        char s[12];
 
-       if (!sb)
+       if (!fsi)
                return ERR_PTR(-ENODEV);
 
+       sb = fsi->sb;
        root = sb->s_root;
-       fsi = DEVPTS_SB(sb);
        opts = &fsi->mount_opts;
 
        inode = new_inode(sb);
@@ -630,25 +628,21 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
        inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
        inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-       init_special_inode(inode, S_IFCHR|opts->mode, device);
-       inode->i_private = priv;
+       init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
 
        sprintf(s, "%d", index);
 
-       inode_lock(d_inode(root));
-
        dentry = d_alloc_name(root, s);
        if (dentry) {
+               dentry->d_fsdata = priv;
                d_add(dentry, inode);
                fsnotify_create(d_inode(root), dentry);
        } else {
                iput(inode);
-               inode = ERR_PTR(-ENOMEM);
+               dentry = ERR_PTR(-ENOMEM);
        }
 
-       inode_unlock(d_inode(root));
-
-       return inode;
+       return dentry;
 }
 
 /**
@@ -657,24 +651,10 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
  *
  * Returns whatever was passed as priv in devpts_pty_new for a given inode.
  */
-void *devpts_get_priv(struct inode *pts_inode)
+void *devpts_get_priv(struct dentry *dentry)
 {
-       struct dentry *dentry;
-       void *priv = NULL;
-
-       BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-       /* Ensure dentry has not been deleted by devpts_pty_kill() */
-       dentry = d_find_alias(pts_inode);
-       if (!dentry)
-               return NULL;
-
-       if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
-               priv = pts_inode->i_private;
-
-       dput(dentry);
-
-       return priv;
+       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+       return dentry->d_fsdata;
 }
 
 /**
@@ -683,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode)
  *
  * This is an inverse operation of devpts_pty_new.
  */
-void devpts_pty_kill(struct inode *inode)
+void devpts_pty_kill(struct dentry *dentry)
 {
-       struct super_block *sb = pts_sb_from_inode(inode);
-       struct dentry *root = sb->s_root;
-       struct dentry *dentry;
+       WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
 
-       BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
-       inode_lock(d_inode(root));
-
-       dentry = d_find_alias(inode);
-
-       drop_nlink(inode);
+       dentry->d_fsdata = NULL;
+       drop_nlink(dentry->d_inode);
        d_delete(dentry);
        dput(dentry);   /* d_alloc_name() in devpts_pty_new() */
-       dput(dentry);           /* d_find_alias above */
-
-       inode_unlock(d_inode(root));
 }
 
 static int __init init_devpts_fs(void)
index e56272c919b5a688e1739cdefaa08c121e26f5a8..bf2d34c9d804334cd0c634bf4d8ed921c08cfd7a 100644 (file)
@@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        u32 val;
 
        preempt_disable();
-       if (unlikely(get_user(val, uaddr) != 0))
+       if (unlikely(get_user(val, uaddr) != 0)) {
+               preempt_enable();
                return -EFAULT;
+       }
 
-       if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
+       if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
+               preempt_enable();
                return -EFAULT;
+       }
 
        *uval = val;
        preempt_enable();
index 461a0558bca4d8d16e81b88e0286e3fa6251ab79..cebecff536a3a6aec1b06c69e32d8bb4ec2f4f29 100644 (file)
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
 {
 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
        return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+       return false;
 #else
        return true;
 #endif
index e0ee0b3000b2da107c975137165fc989777d8a58..5871f292b596fb58349c1f4c6e0202aede55b7f8 100644 (file)
 
 #include <linux/errno.h>
 
+struct pts_fs_info;
+
 #ifdef CONFIG_UNIX98_PTYS
 
-int devpts_new_index(struct inode *ptmx_inode);
-void devpts_kill_index(struct inode *ptmx_inode, int idx);
-void devpts_add_ref(struct inode *ptmx_inode);
-void devpts_del_ref(struct inode *ptmx_inode);
+/* Look up a pts fs info and get a ref to it */
+struct pts_fs_info *devpts_get_ref(struct inode *, struct file *);
+void devpts_put_ref(struct pts_fs_info *);
+
+int devpts_new_index(struct pts_fs_info *);
+void devpts_kill_index(struct pts_fs_info *, int);
+
 /* mknod in devpts */
-struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
-               void *priv);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
 /* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
 /* unlink */
-void devpts_pty_kill(struct inode *inode);
-
-#else
-
-/* Dummy stubs in the no-pty case */
-static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
-static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
-static inline void devpts_add_ref(struct inode *ptmx_inode) { }
-static inline void devpts_del_ref(struct inode *ptmx_inode) { }
-static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
-               dev_t device, int index, void *priv)
-{
-       return ERR_PTR(-EINVAL);
-}
-static inline void *devpts_get_priv(struct inode *pts_inode)
-{
-       return NULL;
-}
-static inline void devpts_pty_kill(struct inode *inode) { }
+void devpts_pty_kill(struct dentry *);
 
 #endif
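
Together with the pty.c changes earlier in this series, the devpts interface is now expressed in terms of struct pts_fs_info and dentries rather than the ptmx inode. Condensed from the ptmx_open() hunk above, a typical caller of the new API looks roughly like this (locking and error handling omitted):

    struct pts_fs_info *fsi;
    struct dentry *dentry;
    int index;

    fsi = devpts_get_ref(inode, filp);              /* pin the devpts instance */
    index = devpts_new_index(fsi);                  /* allocate a pty number */
    dentry = devpts_pty_new(fsi, index, tty->link); /* create /dev/pts/<index> */
    /* ... pty in use ... */
    devpts_pty_kill(dentry);                        /* teardown: remove the node */
    devpts_kill_index(fsi, index);                  /* release the number */
    devpts_put_ref(fsi);                            /* drop the instance reference */
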
 
index 8541a913f6a36effd78ef24bb0bc3cd69ce20541..d1f904c8b2cb48ef7df75141134f0b8c2f1d4057 100644 (file)
@@ -828,6 +828,11 @@ struct mlx4_vf_dev {
        u8                      n_ports;
 };
 
+enum mlx4_pci_status {
+       MLX4_PCI_STATUS_DISABLED,
+       MLX4_PCI_STATUS_ENABLED,
+};
+
 struct mlx4_dev_persistent {
        struct pci_dev         *pdev;
        struct mlx4_dev        *dev;
@@ -841,6 +846,8 @@ struct mlx4_dev_persistent {
        u8              state;
        struct mutex    interface_state_mutex; /* protect SW state */
        u8      interface_state;
+       struct mutex            pci_status_mutex; /* sync pci state */
+       enum mlx4_pci_status    pci_status;
 };
 
 struct mlx4_dev {
index dcd5ac8d3b1403875bce11aeddaf106acc0cd218..369c837d40f566827cac1fd9c0427386bcd66761 100644 (file)
@@ -519,8 +519,9 @@ enum mlx5_device_state {
 };
 
 enum mlx5_interface_state {
-       MLX5_INTERFACE_STATE_DOWN,
-       MLX5_INTERFACE_STATE_UP,
+       MLX5_INTERFACE_STATE_DOWN = BIT(0),
+       MLX5_INTERFACE_STATE_UP = BIT(1),
+       MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
 };
 
 enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
        enum mlx5_device_state  state;
        /* sync interface state */
        struct mutex            intf_state_mutex;
-       enum mlx5_interface_state interface_state;
+       unsigned long           intf_state;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
                                          unsigned long param);
index a1d145abd4eb55975f5f51e75febb3a70646fa87..b30250ab7604e4b5144220c367493e8524f4c569 100644 (file)
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
                                 enum mlx5_port_status *status);
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
                              u8 port);
 
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
index bd93e63236036b7086206714ec3e61cb052358c6..301da4a5e6bfa340363799310a21251c685935f0 100644 (file)
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 *addr);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
                                      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
index 004b8133417dc9dd54315b1ca5f15a9cc28e9a2a..932ec74909c6fc79530f262c06eb2ff6ad0e9b21 100644 (file)
@@ -1111,6 +1111,7 @@ void pci_unlock_rescan_remove(void);
 /* Vital product data routines */
 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
index 1c33dd7da4a7d860004264e00bd406645df1ed96..4ae95f7e8597b0b43575d04aaf524cf252761e6e 100644 (file)
@@ -98,6 +98,45 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
        if (!is_a_nulls(first))
                first->pprev = &n->next;
 }
+
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the end of the specified hlist_nulls,
+ * while permitting racing traversals.  NOTE: tail insertion requires
+ * list traversal.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+                                       struct hlist_nulls_head *h)
+{
+       struct hlist_nulls_node *i, *last = NULL;
+
+       for (i = hlist_nulls_first_rcu(h); !is_a_nulls(i);
+            i = hlist_nulls_next_rcu(i))
+               last = i;
+
+       if (last) {
+               n->next = last->next;
+               n->pprev = &last->next;
+               rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
+       } else {
+               hlist_nulls_add_head_rcu(n, h);
+       }
+}
+
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos:      the type * to use as a loop cursor.
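
As the new helper's own comment warns, tail insertion requires walking the whole chain before the node can be linked, unlike the O(1) head insertion next to it. Stripped of the RCU and nulls machinery, the core of that walk is just the usual "find last, then link" loop:

    #include <stdio.h>

    struct node {
            int val;
            struct node *next;
    };

    static void add_tail(struct node **head, struct node *n)
    {
            struct node *i, *last = NULL;

            n->next = NULL;
            for (i = *head; i; i = i->next) /* the traversal the comment refers to */
                    last = i;

            if (last)
                    last->next = n;
            else
                    *head = n;              /* empty list: behaves like head insertion */
    }

    int main(void)
    {
            struct node a = { 1, NULL }, b = { 2, NULL };
            struct node *head = NULL;

            add_tail(&head, &a);
            add_tail(&head, &b);

            for (struct node *i = head; i; i = i->next)
                    printf("%d\n", i->val);
            return 0;
    }
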
index a55d0523f75d63bfbe43355a56dd0f08e835cdc9..1b8a5a7876ce67abbc0d6cac4004f816c31bf221 100644 (file)
@@ -352,8 +352,8 @@ struct thermal_zone_of_device_ops {
 
 struct thermal_trip {
        struct device_node *np;
-       unsigned long int temperature;
-       unsigned long int hysteresis;
+       int temperature;
+       int hysteresis;
        enum thermal_trip_type type;
 };
 
index 161052477f77009e59744339ac5377e4a9b03356..b742b5e47cc209ac60e441cc42fcf8f431239fab 100644 (file)
@@ -7,7 +7,7 @@
  * defined; unless noted otherwise, they are optional, and can be
  * filled in with a null pointer.
  *
- * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
  *
  *     Return the tty device corresponding to idx, NULL if there is not
  *     one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
 
 struct tty_operations {
        struct tty_struct * (*lookup)(struct tty_driver *driver,
-                       struct inode *inode, int idx);
+                       struct file *filp, int idx);
        int  (*install)(struct tty_driver *driver, struct tty_struct *tty);
        void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
        int  (*open)(struct tty_struct * tty, struct file * filp);
index c0a92e2c286d6cc0942591dfc02d9b485ae6810e..74c9693d4941dd0f4a08526968a9783727249fb5 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
 #include <net/sock.h>
+#include <net/inet_sock.h>
 
 #ifdef CONFIG_CGROUP_NET_CLASSID
 struct cgroup_cls_state {
@@ -63,11 +64,13 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
         * softirqs always disables bh.
         */
        if (in_serving_softirq()) {
+               struct sock *sk = skb_to_full_sk(skb);
+
                /* If there is an sock_cgroup_classid we'll use that. */
-               if (!skb->sk)
+               if (!sk || !sk_fullsock(sk))
                        return 0;
 
-               classid = sock_cgroup_classid(&skb->sk->sk_cgrp_data);
+               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
        }
 
        return classid;
index 295d291269e2c88ed4930041597a28f7c9a7a2f7..54c779416eec654b31d278a3bd77f07b0a3e4ea8 100644 (file)
@@ -101,6 +101,9 @@ void fib6_force_start_gc(struct net *net);
 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr, bool anycast);
 
+struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
+                              int flags);
+
 /*
  *     support functions for ND
  *
index d0aeb97aec5d2b2c40eee41049fa6f64f5e0d503..1be050ada8c5d6bd35cd762d1ac61d9240aaebce 100644 (file)
@@ -959,6 +959,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
 int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
                                 int addr_len);
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
+void ip6_datagram_release_cb(struct sock *sk);
 
 int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
                    int *addr_len);
index 9b0a523bb4280023e4429fcc5379cb5d2d2d419a..6de665bf175041d0b11e5841fe8012196e9b9c98 100644 (file)
@@ -209,6 +209,9 @@ unsigned int inet_addr_type_dev_table(struct net *net,
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+struct rtable *rt_dst_alloc(struct net_device *dev,
+                            unsigned int flags, u16 type,
+                            bool nopolicy, bool noxfrm, bool will_cache);
 
 struct in_ifaddr;
 void fib_add_ifaddr(struct in_ifaddr *);
index 6df1ce7a411c548bda4163840a90578b6e1b4cfe..5a404c354f4c787e57a6ab6ca68b3ffc537eb1cc 100644 (file)
@@ -847,6 +847,11 @@ struct sctp_transport {
         */
        ktime_t last_time_heard;
 
+       /* When was the last time that we sent a chunk using this
+        * transport? We use this to check for idle transports
+        */
+       unsigned long last_time_sent;
+
        /* Last time(in jiffies) when cwnd is reduced due to the congestion
         * indication based on ECNE chunk.
         */
@@ -952,7 +957,8 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
                          struct sctp_sock *);
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
-void sctp_transport_reset_timers(struct sctp_transport *);
+void sctp_transport_reset_t3_rtx(struct sctp_transport *);
+void sctp_transport_reset_hb_timer(struct sctp_transport *);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
index 255d3e03727b737091141c1eb8b2f796afff9d9b..121ffc115c4f722750b5a1cc44e45000d15dd2ef 100644 (file)
@@ -630,7 +630,11 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
 
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
-       hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+       if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+           sk->sk_family == AF_INET6)
+               hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+       else
+               hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
index d451122e8404af871aca0ef71db2a6af37d1c2b3..51d77b2ce2b291d6ffa3c3f467ddf8c564892e48 100644 (file)
@@ -54,6 +54,8 @@ struct switchdev_attr {
        struct net_device *orig_dev;
        enum switchdev_attr_id id;
        u32 flags;
+       void *complete_priv;
+       void (*complete)(struct net_device *dev, int err, void *priv);
        union {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
        struct net_device *orig_dev;
        enum switchdev_obj_id id;
        u32 flags;
+       void *complete_priv;
+       void (*complete)(struct net_device *dev, int err, void *priv);
 };
 
 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
index b91370f61be64a9ef2bcff2d8f20701f5ced09b1..6db10228113fc0cd2ce876035110dd43c6f0c794 100644 (file)
@@ -552,6 +552,8 @@ void tcp_send_ack(struct sock *sk);
 void tcp_send_delayed_ack(struct sock *sk);
 void tcp_send_loss_probe(struct sock *sk);
 bool tcp_schedule_loss_probe(struct sock *sk);
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+                            const struct sk_buff *next_skb);
 
 /* tcp_input.c */
 void tcp_resume_early_retransmit(struct sock *sk);
index 2767c55a641edd64b7018a0d7693826ae60dbad9..ca64f0f50b4533df080a2daa6b1cba2fdf5f0a12 100644 (file)
@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
                                    unsigned int verb);
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
                             unsigned int *val);
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val);
 int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
                              unsigned int val);
 int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
index 2622b33fb2ec7d0e37e437198ac5d1637eed224d..6e0f5f01734cb893bfec236bd39bb012d333e510 100644 (file)
@@ -717,9 +717,13 @@ __SYSCALL(__NR_membarrier, sys_membarrier)
 __SYSCALL(__NR_mlock2, sys_mlock2)
 #define __NR_copy_file_range 285
 __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
+#define __NR_preadv2 286
+__SYSCALL(__NR_preadv2, sys_preadv2)
+#define __NR_pwritev2 287
+__SYSCALL(__NR_pwritev2, sys_pwritev2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 286
+#define __NR_syscalls 288
 
 /*
  * All syscalls below here should go away really,
index b71fd0b5cbad7d375f3188114be65431def88907..813ffb2e22c9a5a9d9049aadb2aa467d734b734c 100644 (file)
@@ -96,6 +96,7 @@ header-y += cyclades.h
 header-y += cycx_cfm.h
 header-y += dcbnl.h
 header-y += dccp.h
+header-y += devlink.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm.h
index 26b0d1e3e3e7c199c852130fb16008858c5b20a3..4c58d9917aa4ef75f0e0a8f1531864f66ea682bb 100644 (file)
@@ -19,8 +19,8 @@
 
 #define MACSEC_MAX_KEY_LEN 128
 
-#define DEFAULT_CIPHER_ID   0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
 
 #define MACSEC_MIN_ICV_LEN 8
 #define MACSEC_MAX_ICV_LEN 32
index 2e08f8e9b771f032a17e0305c60fcd6f0403e6f5..db2574e7b8b008395bb8fee53baf513fdc56c392 100644 (file)
@@ -1374,6 +1374,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
        }
 
        if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+           BPF_SIZE(insn->code) == BPF_DW ||
            (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
                verbose("BPF_LD_ABS uses reserved fields\n");
                return -EINVAL;
@@ -2029,7 +2030,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                        if (IS_ERR(map)) {
                                verbose("fd %d is not pointing to valid bpf_map\n",
                                        insn->imm);
-                               fdput(f);
                                return PTR_ERR(map);
                        }
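
The removed fdput() duplicated a release that the failing lookup helper performs internally, so keeping it would drop the file reference twice. A standalone sketch of that ownership rule, with an invented helper that consumes its argument on failure (this is not the BPF code):

    #include <stdio.h>
    #include <stdlib.h>

    /* On failure this helper frees the buffer itself and returns NULL,
     * so the caller must not free it a second time. */
    static int *lookup(int *handle)
    {
            if (*handle < 0) {
                    free(handle);
                    return NULL;
            }
            return handle;
    }

    int main(void)
    {
            int *handle = malloc(sizeof(*handle));

            if (!handle)
                    return 1;
            *handle = -1;

            if (!lookup(handle)) {
                    /* calling free(handle) here would be the double release */
                    fprintf(stderr, "lookup failed\n");
                    return 1;
            }

            free(handle);
            return 0;
    }
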
 
index 6ea42e8da861b05077d01a23e15bb140afffa605..3e3f6e49eabbc0dc62eefc02a201f95508032dab 100644 (file)
@@ -36,6 +36,7 @@
  * @target:    The target state
  * @thread:    Pointer to the hotplug thread
  * @should_run:        Thread should execute
+ * @rollback:  Perform a rollback
  * @cb_stat:   The state for a single callback (install/uninstall)
  * @cb:                Single callback function (install/uninstall)
  * @result:    Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
 #ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
+       bool                    rollback;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
        return __cpu_notify(val, cpu, -1, NULL);
 }
 
+static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
+{
+       BUG_ON(cpu_notify(val, cpu));
+}
+
 /* Notifier wrappers for transitioning to state machine */
 static int notify_prepare(unsigned int cpu)
 {
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
+       } else if (st->rollback) {
+               BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
+
+               undo_cpu_down(cpu, st, cpuhp_ap_states);
+               /*
+                * This is a momentary workaround to keep the notifier users
+                * happy. It will go away once we get rid of the notifiers.
+                */
+               cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+               st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
        read_unlock(&tasklist_lock);
 }
 
-static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
-{
-       BUG_ON(cpu_notify(val, cpu));
-}
-
 static int notify_down_prepare(unsigned int cpu)
 {
        int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
-               /* CPU didn't die: tell everyone.  Can't complain. */
-               cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
+               /* CPU refused to die */
                irq_unlock_sparse();
+               /* Unpark the hotplug thread so we can rollback there */
+               kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
+       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+               st->target = prev_state;
+               st->rollback = true;
+               cpuhp_kick_ap_work(cpu);
+       }
 
        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
                .name                   = "notify:online",
                .startup                = notify_online,
                .teardown               = notify_down_prepare,
+               .skip_onerr             = true,
        },
 #endif
        /*
index a5d2e74c89e0b217df98326e5febf3caf687687c..c20f06f38ef35a4776a61c3579a6de813e466ce0 100644 (file)
@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        if (unlikely(should_fail_futex(true)))
                ret = -EFAULT;
 
-       if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+       if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
                ret = -EFAULT;
-       else if (curval != uval)
-               ret = -EINVAL;
+       } else if (curval != uval) {
+               /*
+                * If an unconditional UNLOCK_PI operation (user space did not
+                * try the TID->0 transition) raced with a waiter setting the
+                * FUTEX_WAITERS flag between get_user() and locking the hash
+                * bucket lock, retry the operation.
+                */
+               if ((FUTEX_TID_MASK & curval) == uval)
+                       ret = -EAGAIN;
+               else
+                       ret = -EINVAL;
+       }
        if (ret) {
                raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                return ret;
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
        if (likely(&hb1->chain != &hb2->chain)) {
                plist_del(&q->list, &hb1->chain);
                hb_waiters_dec(hb1);
-               plist_add(&q->list, &hb2->chain);
                hb_waiters_inc(hb2);
+               plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
        }
        get_futex_key_refs(key2);
@@ -2622,6 +2632,15 @@ retry:
                 */
                if (ret == -EFAULT)
                        goto pi_faulted;
+               /*
+                * An unconditional UNLOCK_PI op raced against a waiter
+                * setting the FUTEX_WAITERS bit. Try again.
+                */
+               if (ret == -EAGAIN) {
+                       spin_unlock(&hb->lock);
+                       put_futex_key(&key);
+                       goto retry;
+               }
                /*
                 * wake_futex_pi has detected invalid state. Tell user
                 * space.
index c37f34b00a115e5fd1d69a5268f031fc7cde6862..14777af8e0977aed249e724601a789c7e21ffd0f 100644 (file)
@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
                data = irq_get_irq_data(virq + i);
                cpumask_copy(data->common->affinity, dest);
                data->common->ipi_offset = offset;
+               irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
        }
        return virq;
 
index eb2a2c9bc3fc15d181c9e5981648974dc7aec65c..d734b750200180afc91d9f5d2015f1cb27aa6032 100644 (file)
@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
        }
 
        if (counter == qstat_pv_hash_hops) {
-               u64 frac;
+               u64 frac = 0;
 
-               frac = 100ULL * do_div(stat, kicks);
-               frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+               if (kicks) {
+                       frac = 100ULL * do_div(stat, kicks);
+                       frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
+               }
 
                /*
                 * Return a X.XX decimal number
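
Illustrative sketch, not part of the patch: the X.XX average that qstat_read() prints, redone as plain userspace arithmetic with the same guard against kicks == 0 that the hunk above adds (without it, reading the hash-hops statistic before any pv kick has been recorded divides by zero). print_avg() is an illustrative name.

#include <inttypes.h>
#include <stdio.h>

static void print_avg(uint64_t total, uint64_t kicks)
{
        uint64_t whole = 0, frac = 0;

        if (kicks) {                    /* the added guard */
                whole = total / kicks;
                /* two decimals, rounded to nearest, like DIV_ROUND_CLOSEST_ULL() */
                frac = (100 * (total % kicks) + kicks / 2) / kicks;
                if (frac == 100) {      /* rounding carried into the integer part */
                        whole++;
                        frac = 0;
                }
        }
        printf("%" PRIu64 ".%02" PRIu64 "\n", whole, frac);
}

int main(void)
{
        print_avg(0, 0);        /* prints 0.00 instead of dividing by zero */
        print_avg(7, 3);        /* prints 2.33 */
        return 0;
}
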
index 253bc77eda3bd1106021a7d93ba6854964f0a935..7dbc80d01eb00ab69fc06a0d613167651a9b15f2 100644 (file)
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
                e->flags |= MDB_FLAGS_OFFLOAD;
 }
 
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+{
+       memset(ip, 0, sizeof(struct br_ip));
+       ip->vid = entry->vid;
+       ip->proto = entry->addr.proto;
+       if (ip->proto == htons(ETH_P_IP))
+               ip->u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+       else
+               ip->u.ip6 = entry->addr.u.ip6;
+#endif
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev)
 {
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
                + nla_total_size(sizeof(struct br_mdb_entry));
 }
 
-static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-                           int type, struct net_bridge_port_group *pg)
+struct br_mdb_complete_info {
+       struct net_bridge_port *port;
+       struct br_ip ip;
+};
+
+static void br_mdb_complete(struct net_device *dev, int err, void *priv)
 {
+       struct br_mdb_complete_info *data = priv;
+       struct net_bridge_port_group __rcu **pp;
+       struct net_bridge_port_group *p;
+       struct net_bridge_mdb_htable *mdb;
+       struct net_bridge_mdb_entry *mp;
+       struct net_bridge_port *port = data->port;
+       struct net_bridge *br = port->br;
+
+       if (err)
+               goto err;
+
+       spin_lock_bh(&br->multicast_lock);
+       mdb = mlock_dereference(br->mdb, br);
+       mp = br_mdb_ip_get(mdb, &data->ip);
+       if (!mp)
+               goto out;
+       for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+            pp = &p->next) {
+               if (p->port != port)
+                       continue;
+               p->flags |= MDB_PG_FLAGS_OFFLOAD;
+       }
+out:
+       spin_unlock_bh(&br->multicast_lock);
+err:
+       kfree(priv);
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
+                           struct br_mdb_entry *entry, int type)
+{
+       struct br_mdb_complete_info *complete_info;
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
                        .id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
 
        mdb.obj.orig_dev = port_dev;
        if (port_dev && type == RTM_NEWMDB) {
-               err = switchdev_port_obj_add(port_dev, &mdb.obj);
-               if (!err && pg)
-                       pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+               complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+               if (complete_info) {
+                       complete_info->port = p;
+                       __mdb_entry_to_br_ip(entry, &complete_info->ip);
+                       mdb.obj.complete_priv = complete_info;
+                       mdb.obj.complete = br_mdb_complete;
+                       switchdev_port_obj_add(port_dev, &mdb.obj);
+               }
        } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
        }
@@ -291,21 +345,21 @@ errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-                  int type)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+                  struct br_ip *group, int type, u8 flags)
 {
        struct br_mdb_entry entry;
 
        memset(&entry, 0, sizeof(entry));
-       entry.ifindex = pg->port->dev->ifindex;
-       entry.addr.proto = pg->addr.proto;
-       entry.addr.u.ip4 = pg->addr.u.ip4;
+       entry.ifindex = port->dev->ifindex;
+       entry.addr.proto = group->proto;
+       entry.addr.u.ip4 = group->u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-       entry.addr.u.ip6 = pg->addr.u.ip6;
+       entry.addr.u.ip6 = group->u.ip6;
 #endif
-       entry.vid = pg->addr.vid;
-       __mdb_entry_fill_flags(&entry, pg->flags);
-       __br_mdb_notify(dev, &entry, type, pg);
+       entry.vid = group->vid;
+       __mdb_entry_fill_flags(&entry, flags);
+       __br_mdb_notify(dev, port, &entry, type);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-                           struct br_ip *group, unsigned char state,
-                           struct net_bridge_port_group **pg)
+                           struct br_ip *group, unsigned char state)
 {
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
-       *pg = p;
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-                       struct br_mdb_entry *entry,
-                       struct net_bridge_port_group **pg)
+                       struct br_mdb_entry *entry)
 {
        struct br_ip ip;
        struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
                return -EINVAL;
 
-       memset(&ip, 0, sizeof(ip));
-       ip.vid = entry->vid;
-       ip.proto = entry->addr.proto;
-       if (ip.proto == htons(ETH_P_IP))
-               ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-       else
-               ip.u.ip6 = entry->addr.u.ip6;
-#endif
+       __mdb_entry_to_br_ip(entry, &ip);
 
        spin_lock_bh(&br->multicast_lock);
-       ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
+       ret = br_mdb_add_group(br, p, &ip, entry->state);
        spin_unlock_bh(&br->multicast_lock);
        return ret;
 }
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
-       struct net_bridge_port_group *pg;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_vlan_enabled(br) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
-                       err = __br_mdb_add(net, br, entry, &pg);
+                       err = __br_mdb_add(net, br, entry);
                        if (err)
                                break;
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+                       __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
                }
        } else {
-               err = __br_mdb_add(net, br, entry, &pg);
+               err = __br_mdb_add(net, br, entry);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+                       __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
        }
 
        return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
        if (!netif_running(br->dev) || br->multicast_disabled)
                return -EINVAL;
 
-       memset(&ip, 0, sizeof(ip));
-       ip.vid = entry->vid;
-       ip.proto = entry->addr.proto;
-       if (ip.proto == htons(ETH_P_IP))
-               ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
-       else
-               ip.u.ip6 = entry->addr.u.ip6;
-#endif
+       __mdb_entry_to_br_ip(entry, &ip);
 
        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
                        if (!err)
-                               __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+                               __br_mdb_notify(dev, p, entry, RTM_DELMDB);
                }
        } else {
                err = __br_mdb_del(br, entry);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+                       __br_mdb_notify(dev, p, entry, RTM_DELMDB);
        }
 
        return err;
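
Illustrative sketch, not part of the patch: the deferred-completion shape the br_mdb hunks above adopt. switchdev object adds run from deferred context, so MDB_PG_FLAGS_OFFLOAD can no longer be set at call time; instead a small heap-allocated context rides along in obj.complete_priv and a completion callback (invoked by the switchdev.c hunks further down) applies the result and frees it. The code below shows that shape with ordinary function pointers; every name in it is illustrative, not a kernel API.

#include <stdio.h>
#include <stdlib.h>

struct deferred_obj {
        int (*run)(void *arg);                  /* the actual hardware operation */
        void (*complete)(int err, void *priv);  /* plays the role of obj->complete */
        void *arg;
        void *complete_priv;                    /* plays the role of obj->complete_priv */
};

static void process_deferred(struct deferred_obj *obj)
{
        int err = obj->run(obj->arg);

        if (obj->complete)                      /* mirrors switchdev_port_obj_add_deferred() */
                obj->complete(err, obj->complete_priv);
}

struct mdb_ctx {
        const char *group;                      /* stands in for struct br_ip */
};

static int fake_hw_mdb_add(void *arg)
{
        (void)arg;
        return 0;                               /* pretend the offload succeeded */
}

static void mdb_complete(int err, void *priv)
{
        struct mdb_ctx *ctx = priv;

        if (!err)
                printf("group %s: offload flag set\n", ctx->group);
        free(ctx);                              /* the completion callback owns the context */
}

int main(void)
{
        struct mdb_ctx *ctx = malloc(sizeof(*ctx));
        struct deferred_obj obj;

        if (!ctx)
                return 1;
        ctx->group = "239.10.10.10";
        obj = (struct deferred_obj){
                .run = fake_hw_mdb_add,
                .complete = mdb_complete,
                .complete_priv = ctx,
        };
        process_deferred(&obj);                 /* in the kernel this runs from a workqueue */
        return 0;
}
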
index a4c15df2b7920301f7d12918dc21a1d6f2474352..191ea66e4d929c8b1237f0d13e783e8ea415ed93 100644 (file)
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
-               br_mdb_notify(br->dev, p, RTM_DELMDB);
+               br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+                             p->flags);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
-       br_mdb_notify(br->dev, p, RTM_NEWMDB);
+       br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
 
 found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br,
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
-                       br_mdb_notify(br->dev, p, RTM_DELMDB);
+                       br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+                                     p->flags);
 
                        if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
index 1b5d145dfcbf28beb5d7dfd3a28fcdaf919e26aa..d9da857182efb5ee390241c90c6a58113f378221 100644 (file)
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
                            unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
-                  int type);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+                  struct br_ip *group, int type, u8 flags);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type);
 
index 8570bc7744c25cc92e2301f8686cd21a1f26baab..5a61f35412a0595845da4feb2cbd0bc09a1cdbeb 100644 (file)
@@ -370,7 +370,11 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
            left - sizeof(struct ebt_entry_match) < m->match_size)
                return -EINVAL;
 
-       match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+               request_module("ebt_%s", m->u.name);
+               match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
+       }
        if (IS_ERR(match))
                return PTR_ERR(match);
        m->u.match = match;
index d04c2d1c8c87d79e89fa9f2a4de97ea9262a7787..e561f9f07d6daa3a13de919334699cc9b82773a7 100644 (file)
@@ -4502,13 +4502,16 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
                __skb_push(skb, offset);
                err = __vlan_insert_tag(skb, skb->vlan_proto,
                                        skb_vlan_tag_get(skb));
-               if (err)
+               if (err) {
+                       __skb_pull(skb, offset);
                        return err;
+               }
+
                skb->protocol = skb->vlan_proto;
                skb->mac_len += VLAN_HLEN;
-               __skb_pull(skb, offset);
 
                skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
+               __skb_pull(skb, offset);
        }
        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
        return 0;
index 607a14f20d88011e6de8540b21a69e6527d49df0..b1dc096d22f8c83e771b1df68d7815661ac51bae 100644 (file)
@@ -1034,10 +1034,13 @@ source_ok:
        if (!fld.daddr) {
                fld.daddr = fld.saddr;
 
-               err = -EADDRNOTAVAIL;
                if (dev_out)
                        dev_put(dev_out);
+               err = -EINVAL;
                dev_out = init_net.loopback_dev;
+               if (!dev_out->dn_ptr)
+                       goto out;
+               err = -EADDRNOTAVAIL;
                dev_hold(dev_out);
                if (!fld.daddr) {
                        fld.daddr =
@@ -1110,6 +1113,8 @@ source_ok:
                if (dev_out == NULL)
                        goto out;
                dn_db = rcu_dereference_raw(dev_out->dn_ptr);
+               if (!dn_db)
+                       goto e_inval;
                /* Possible improvement - check all devices for local addr */
                if (dn_dev_islocal(dev_out, fld.daddr)) {
                        dev_put(dev_out);
@@ -1151,6 +1156,8 @@ select_source:
                        dev_put(dev_out);
                dev_out = init_net.loopback_dev;
                dev_hold(dev_out);
+               if (!dev_out->dn_ptr)
+                       goto e_inval;
                fld.flowidn_oif = dev_out->ifindex;
                if (res.fi)
                        dn_fib_info_put(res.fi);
index 8a9246deccfeb0c7ed6a4272c60a907750e907e8..63566ec54794db8213c2c935d75c2ba32dd71d61 100644 (file)
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
        if (ifa->ifa_flags & IFA_F_SECONDARY) {
                prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
                if (!prim) {
-                       pr_warn("%s: bug: prim == NULL\n", __func__);
+                       /* if the device has been deleted, we don't perform
+                        * address promotion
+                        */
+                       if (!in_dev->dead)
+                               pr_warn("%s: bug: prim == NULL\n", __func__);
                        return;
                }
                if (iprim && iprim != prim) {
index dd8c80dc32a2216d3705b75cc761bdb156806dc9..8f8713b4388fbfa9a0d36298603995b02718d21d 100644 (file)
@@ -81,6 +81,12 @@ static int __init arptable_filter_init(void)
                return ret;
        }
 
+       ret = arptable_filter_table_init(&init_net);
+       if (ret) {
+               unregister_pernet_subsys(&arptable_filter_net_ops);
+               kfree(arpfilter_ops);
+       }
+
        return ret;
 }
 
index 02c62299d717b9f6c38a5227e3b3ae376e0015b6..60398a9370e7ec45110fc59bc573d83e35df7351 100644 (file)
@@ -1438,9 +1438,9 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #endif
 }
 
-static struct rtable *rt_dst_alloc(struct net_device *dev,
-                                  unsigned int flags, u16 type,
-                                  bool nopolicy, bool noxfrm, bool will_cache)
+struct rtable *rt_dst_alloc(struct net_device *dev,
+                           unsigned int flags, u16 type,
+                           bool nopolicy, bool noxfrm, bool will_cache)
 {
        struct rtable *rt;
 
@@ -1468,6 +1468,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
 
        return rt;
 }
+EXPORT_SYMBOL(rt_dst_alloc);
 
 /* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2045,6 +2046,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                 */
                if (fi && res->prefixlen < 4)
                        fi = NULL;
+       } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
+                  (orig_oif != dev_out->ifindex)) {
+               /* For local routes that require a particular output interface
+                * we do not want to cache the result.  Caching the result
+                * causes incorrect behaviour when there are multiple source
+                * addresses on the interface, the end result being that if the
+                * intended recipient is waiting on that interface for the
+                * packet, it won't receive it, because it will be delivered on
+                * the loopback interface and the IP_PKTINFO ipi_ifindex will
+                * be set to the loopback interface as well.
+                */
+               fi = NULL;
        }
 
        fnhe = NULL;
index e6e65f79ade82132e1a2e5578ba01f43aed15140..c124c3c12f7c5c848d11e2a59d9e17ba93597411 100644 (file)
@@ -1309,6 +1309,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
+       tcp_skb_collapse_tstamp(prev, skb);
        tcp_unlink_write_queue(skb, sk);
        sk_wmem_free_skb(sk, skb);
 
@@ -3098,7 +3099,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
 
        shinfo = skb_shinfo(skb);
        if ((shinfo->tx_flags & SKBTX_ACK_TSTAMP) &&
-           between(shinfo->tskey, prior_snd_una, tcp_sk(sk)->snd_una - 1))
+           !before(shinfo->tskey, prior_snd_una) &&
+           before(shinfo->tskey, tcp_sk(sk)->snd_una))
                __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
 }
 
index 7d2dc015cd19a64505c968df43c544adabb65e0e..441ae9da3a233fd96385853197fd3938959449e3 100644 (file)
@@ -2441,6 +2441,20 @@ u32 __tcp_select_window(struct sock *sk)
        return window;
 }
 
+void tcp_skb_collapse_tstamp(struct sk_buff *skb,
+                            const struct sk_buff *next_skb)
+{
+       const struct skb_shared_info *next_shinfo = skb_shinfo(next_skb);
+       u8 tsflags = next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
+
+       if (unlikely(tsflags)) {
+               struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+               shinfo->tx_flags |= tsflags;
+               shinfo->tskey = next_shinfo->tskey;
+       }
+}
+
 /* Collapses two adjacent SKB's during retransmission. */
 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 {
@@ -2484,6 +2498,8 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 
        tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
 
+       tcp_skb_collapse_tstamp(skb, next_skb);
+
        sk_wmem_free_skb(sk, next_skb);
 }
 
index 08eed5e16df0d26340bbfabd96ac6dad77895465..a2e7f55a1f6103e7a84d8ca2dd821841fb51f9bc 100644 (file)
@@ -339,8 +339,13 @@ found:
 
                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                spin_lock(&hslot2->lock);
-               hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
-                                        &hslot2->head);
+               if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
+                       sk->sk_family == AF_INET6)
+                       hlist_nulls_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                                &hslot2->head);
+               else
+                       hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+                                                &hslot2->head);
                hslot2->count++;
                spin_unlock(&hslot2->lock);
        }
index 27aed1afcf81c0a516bb6768e20b27a6794fb1cc..8ec4b3089e206e10dd60d36b2643911e2e23daba 100644 (file)
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-                                 struct inet6_ifaddr *ifp)
-{
-       if (ifp->rt) {
-               u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
-               if (tb_id != ifp->rt->rt6i_table->tb6_id) {
-                       ip6_del_rt(ifp->rt);
-                       ifp->rt = NULL;
-               }
-       }
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
-                                 struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
 static int fixup_permanent_addr(struct inet6_dev *idev,
                                struct inet6_ifaddr *ifp)
 {
-       l3mdev_check_host_rt(idev, ifp);
-
        if (!ifp->rt) {
                struct rt6_info *rt;
 
@@ -3255,6 +3229,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
        int run_pending = 0;
        int err;
@@ -3303,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        break;
 
                if (event == NETDEV_UP) {
+                       /* restore routes for permanent addresses */
+                       addrconf_permanent_addr(dev);
+
                        if (!addrconf_qdisc_ok(dev)) {
                                /* device is not ready yet. */
                                pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3336,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        run_pending = 1;
                }
 
-               /* restore routes for permanent addresses */
-               addrconf_permanent_addr(dev);
-
                switch (dev->type) {
 #if IS_ENABLED(CONFIG_IPV6_SIT)
                case ARPHRD_SIT:
@@ -3413,6 +3388,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                if (idev)
                        addrconf_type_change(dev, event);
                break;
+
+       case NETDEV_CHANGEUPPER:
+               info = ptr;
+
+               /* flush all routes if dev is linked to or unlinked from
+                * an L3 master device (e.g., VRF)
+                */
+               if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+                       addrconf_ifdown(dev, 0);
        }
 
        return NOTIFY_OK;
@@ -3438,6 +3422,12 @@ static void addrconf_type_change(struct net_device *dev, unsigned long event)
                ipv6_mc_unmap(idev);
 }
 
+static bool addr_is_local(const struct in6_addr *addr)
+{
+       return ipv6_addr_type(addr) &
+               (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
+}
+
 static int addrconf_ifdown(struct net_device *dev, int how)
 {
        struct net *net = dev_net(dev);
@@ -3495,7 +3485,8 @@ restart:
                                 * address is retained on a down event
                                 */
                                if (!keep_addr ||
-                                   !(ifa->flags & IFA_F_PERMANENT)) {
+                                   !(ifa->flags & IFA_F_PERMANENT) ||
+                                   addr_is_local(&ifa->addr)) {
                                        hlist_del_init_rcu(&ifa->addr_lst);
                                        goto restart;
                                }
@@ -3539,17 +3530,23 @@ restart:
 
        INIT_LIST_HEAD(&del_list);
        list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+               struct rt6_info *rt = NULL;
+
                addrconf_del_dad_work(ifa);
 
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);
 
-               if (keep_addr && (ifa->flags & IFA_F_PERMANENT)) {
+               if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+                   !addr_is_local(&ifa->addr)) {
                        /* set state to skip the notifier below */
                        state = INET6_IFADDR_STATE_DEAD;
                        ifa->state = 0;
                        if (!(ifa->flags & IFA_F_NODAD))
                                ifa->flags |= IFA_F_TENTATIVE;
+
+                       rt = ifa->rt;
+                       ifa->rt = NULL;
                } else {
                        state = ifa->state;
                        ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3560,6 +3557,9 @@ restart:
 
                spin_unlock_bh(&ifa->lock);
 
+               if (rt)
+                       ip6_del_rt(rt);
+
                if (state != INET6_IFADDR_STATE_DEAD) {
                        __ipv6_ifa_notify(RTM_DELADDR, ifa);
                        inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -5325,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                        if (rt)
                                ip6_del_rt(rt);
                }
-               dst_hold(&ifp->rt->dst);
-
-               ip6_del_rt(ifp->rt);
-
+               if (ifp->rt) {
+                       dst_hold(&ifp->rt->dst);
+                       ip6_del_rt(ifp->rt);
+               }
                rt_genid_bump_ipv6(net);
                break;
        }
index 428162155280ca2af782ea9fd9fa26e0d1666d89..9dd3882fe6bf27f9db783a5bac69a5ddbe841b50 100644 (file)
@@ -40,18 +40,114 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
        return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
+static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       memset(fl6, 0, sizeof(*fl6));
+       fl6->flowi6_proto = sk->sk_protocol;
+       fl6->daddr = sk->sk_v6_daddr;
+       fl6->saddr = np->saddr;
+       fl6->flowi6_oif = sk->sk_bound_dev_if;
+       fl6->flowi6_mark = sk->sk_mark;
+       fl6->fl6_dport = inet->inet_dport;
+       fl6->fl6_sport = inet->inet_sport;
+       fl6->flowlabel = np->flow_label;
+
+       if (!fl6->flowi6_oif)
+               fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
+       if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr))
+               fl6->flowi6_oif = np->mcast_oif;
+
+       security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+}
+
+int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
+{
+       struct ip6_flowlabel *flowlabel = NULL;
+       struct in6_addr *final_p, final;
+       struct ipv6_txoptions *opt;
+       struct dst_entry *dst;
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flowi6 fl6;
+       int err = 0;
+
+       if (np->sndflow && (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+               flowlabel = fl6_sock_lookup(sk, np->flow_label);
+               if (!flowlabel)
+                       return -EINVAL;
+       }
+       ip6_datagram_flow_key_init(&fl6, sk);
+
+       rcu_read_lock();
+       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+       final_p = fl6_update_dst(&fl6, opt, &final);
+       rcu_read_unlock();
+
+       dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+       if (IS_ERR(dst)) {
+               err = PTR_ERR(dst);
+               goto out;
+       }
+
+       if (fix_sk_saddr) {
+               if (ipv6_addr_any(&np->saddr))
+                       np->saddr = fl6.saddr;
+
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       sk->sk_v6_rcv_saddr = fl6.saddr;
+                       inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+                       if (sk->sk_prot->rehash)
+                               sk->sk_prot->rehash(sk);
+               }
+       }
+
+       ip6_dst_store(sk, dst,
+                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                     &sk->sk_v6_daddr : NULL,
+#ifdef CONFIG_IPV6_SUBTREES
+                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
+                     &np->saddr :
+#endif
+                     NULL);
+
+out:
+       fl6_sock_release(flowlabel);
+       return err;
+}
+
+void ip6_datagram_release_cb(struct sock *sk)
+{
+       struct dst_entry *dst;
+
+       if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               return;
+
+       rcu_read_lock();
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete ||
+           dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
+       ip6_datagram_dst_update(sk, false);
+}
+EXPORT_SYMBOL_GPL(ip6_datagram_release_cb);
+
 static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock        *inet = inet_sk(sk);
        struct ipv6_pinfo       *np = inet6_sk(sk);
-       struct in6_addr *daddr, *final_p, final;
-       struct dst_entry        *dst;
-       struct flowi6           fl6;
-       struct ip6_flowlabel    *flowlabel = NULL;
-       struct ipv6_txoptions   *opt;
+       struct in6_addr         *daddr;
        int                     addr_type;
        int                     err;
+       __be32                  fl6_flowlabel = 0;
 
        if (usin->sin6_family == AF_INET) {
                if (__ipv6_only_sock(sk))
@@ -66,15 +162,8 @@ static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int a
        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;
 
-       memset(&fl6, 0, sizeof(fl6));
-       if (np->sndflow) {
-               fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
-               if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
-                       flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
-                       if (!flowlabel)
-                               return -EINVAL;
-               }
-       }
+       if (np->sndflow)
+               fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
 
        addr_type = ipv6_addr_type(&usin->sin6_addr);
 
@@ -145,7 +234,7 @@ ipv4_connected:
        }
 
        sk->sk_v6_daddr = *daddr;
-       np->flow_label = fl6.flowlabel;
+       np->flow_label = fl6_flowlabel;
 
        inet->inet_dport = usin->sin6_port;
 
@@ -154,59 +243,13 @@ ipv4_connected:
         *      destination cache for it.
         */
 
-       fl6.flowi6_proto = sk->sk_protocol;
-       fl6.daddr = sk->sk_v6_daddr;
-       fl6.saddr = np->saddr;
-       fl6.flowi6_oif = sk->sk_bound_dev_if;
-       fl6.flowi6_mark = sk->sk_mark;
-       fl6.fl6_dport = inet->inet_dport;
-       fl6.fl6_sport = inet->inet_sport;
-
-       if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
-
-       if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
-               fl6.flowi6_oif = np->mcast_oif;
-
-       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
-       rcu_read_lock();
-       opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
-       final_p = fl6_update_dst(&fl6, opt, &final);
-       rcu_read_unlock();
-
-       dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
-       err = 0;
-       if (IS_ERR(dst)) {
-               err = PTR_ERR(dst);
+       err = ip6_datagram_dst_update(sk, true);
+       if (err)
                goto out;
-       }
-
-       /* source address lookup done in ip6_dst_lookup */
-
-       if (ipv6_addr_any(&np->saddr))
-               np->saddr = fl6.saddr;
-
-       if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-               sk->sk_v6_rcv_saddr = fl6.saddr;
-               inet->inet_rcv_saddr = LOOPBACK4_IPV6;
-               if (sk->sk_prot->rehash)
-                       sk->sk_prot->rehash(sk);
-       }
-
-       ip6_dst_store(sk, dst,
-                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
-                     &sk->sk_v6_daddr : NULL,
-#ifdef CONFIG_IPV6_SUBTREES
-                     ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
-                     &np->saddr :
-#endif
-                     NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
 out:
-       fl6_sock_release(flowlabel);
        return err;
 }
 
index ed446639219c3ae8832a19a1f944e4dd6ddc6302..d916d6ab9ad29aeaeb4bfc57276549f3ae839f40 100644 (file)
@@ -338,9 +338,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
        return rt;
 }
 
-static struct rt6_info *ip6_dst_alloc(struct net *net,
-                                     struct net_device *dev,
-                                     int flags)
+struct rt6_info *ip6_dst_alloc(struct net *net,
+                              struct net_device *dev,
+                              int flags)
 {
        struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
@@ -364,6 +364,7 @@ static struct rt6_info *ip6_dst_alloc(struct net *net,
 
        return rt;
 }
+EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
@@ -1417,8 +1418,20 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+       struct dst_entry *dst;
+
        ip6_update_pmtu(skb, sock_net(sk), mtu,
                        sk->sk_bound_dev_if, sk->sk_mark);
+
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete ||
+           dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
+               return;
+
+       bh_lock_sock(sk);
+       if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
+               ip6_datagram_dst_update(sk, false);
+       bh_unlock_sock(sk);
 }
 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
 
index 8125931106be670b13e186c577141bc3d1fb574b..6bc5c664fa46c048709db40d1a2a7cc5d4df63da 100644 (file)
@@ -1539,6 +1539,7 @@ struct proto udpv6_prot = {
        .sendmsg           = udpv6_sendmsg,
        .recvmsg           = udpv6_recvmsg,
        .backlog_rcv       = __udpv6_queue_rcv_skb,
+       .release_cb        = ip6_datagram_release_cb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .rehash            = udp_v6_rehash,
index 278f3b9356efdcd37ca3d1aacb62f03e57db7e3c..7cc1d9c22a9fa757d1f752dfcb4c8e928c4c9e11 100644 (file)
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize=*ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
                        length--;
                        continue;
                default:
+                       if (length < 2)
+                               return;
                        opsize = *ptr++;
                        if (opsize < 2) /* "silly options" */
                                return;
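
Illustrative sketch, not part of the patch: the bounds check both conntrack hunks above add, shown on a generic kind/length option walker. Without the length < 2 test, a lone option kind sitting at the very end of the header would make the parser read its length byte past the end of the buffer. parse_options() and the sample bytes are illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void parse_options(const uint8_t *ptr, size_t length)
{
        while (length > 0) {
                uint8_t kind = *ptr++;
                uint8_t opsize;

                if (kind == 0)                  /* end-of-options */
                        return;
                if (kind == 1) {                /* no-op: single byte, no length field */
                        length--;
                        continue;
                }
                if (length < 2)                 /* the added check: no room for a length byte */
                        return;
                opsize = ptr[0];
                if (opsize < 2 || opsize > length)      /* "silly" or truncated option */
                        return;
                printf("option %u, %u bytes\n", kind, opsize);
                ptr += opsize - 1;
                length -= opsize;
        }
}

int main(void)
{
        /* NOP, MSS(4)=1460, then a trailing kind with no length byte */
        const uint8_t opts[] = { 1, 2, 4, 0x05, 0xb4, 8 };

        parse_options(opts, sizeof(opts));
        return 0;
}
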
index 215fc08c02ab5508374b08ba9dcefc44e8d0875b..330ebd600f254d53e23a174273a504e395c0e7cf 100644 (file)
@@ -688,7 +688,7 @@ static int netlink_release(struct socket *sock)
 
        skb_queue_purge(&sk->sk_write_queue);
 
-       if (nlk->portid) {
+       if (nlk->portid && nlk->bound) {
                struct netlink_notify n = {
                                                .net = sock_net(sk),
                                                .protocol = sk->sk_protocol,
index e9dd47b2a85b9e7b65795335b5722e89ac5c3ed8..879185fe183fd0ffa2bf037725faf0a8eb5f166a 100644 (file)
@@ -461,7 +461,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
 
                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
-                       set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+                       set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
@@ -483,7 +483,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);
 
-                       set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+                       set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
index 1b9d286756be7ccf3a9b6f9eaad6fe07c385f8fb..b5fea1101faaa52162d438a8cb5e91d3a83ec8bd 100644 (file)
@@ -367,6 +367,7 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
 
+               skb_orphan(skb);
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err)
index f12c17f355d932d9b6a396654a66fceb5aaf0c73..18d0becbc46d0dd069dde0bcce47c8b4ee2186f9 100644 (file)
@@ -3521,6 +3521,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
        i->ifindex = mreq->mr_ifindex;
        i->alen = mreq->mr_alen;
        memcpy(i->addr, mreq->mr_address, i->alen);
+       memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
        i->count = 1;
        i->next = po->mclist;
        po->mclist = i;
index e6144b8246fd27fe49bffd228b44a44c3e7cbd81..6641bcf7c18505f1d653a32c9c5dc0455926dfd8 100644 (file)
@@ -299,7 +299,7 @@ void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-       __set_bit_le(off, (void *)map->m_page_addrs[i]);
+       set_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@ void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
-       __clear_bit_le(off, (void *)map->m_page_addrs[i]);
+       clear_bit_le(off, (void *)map->m_page_addrs[i]);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
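
Illustrative sketch, not part of the patch: the property the two rds hunks above rely on. The double-underscore bit helpers are plain read-modify-write and can lose a concurrent update to another bit of the same word; the atomic variants cannot. Shown with C11 atomics; the little-endian bit numbering of the kernel helpers is irrelevant to the race itself, and both function names below are illustrative.

#include <stdatomic.h>
#include <stdio.h>

static void nonatomic_set_bit(int nr, unsigned long *word)
{
        *word |= 1UL << nr;                     /* like __set_bit_le(): plain RMW, racy */
}

static void atomic_set_bit(int nr, _Atomic unsigned long *word)
{
        atomic_fetch_or(word, 1UL << nr);       /* like set_bit_le(): one atomic RMW */
}

int main(void)
{
        unsigned long plain = 0;
        _Atomic unsigned long shared = 0;

        nonatomic_set_bit(3, &plain);           /* fine only if nobody else touches the word */
        atomic_set_bit(3, &shared);             /* safe against concurrent setters/clearers */
        printf("%lx %lx\n", plain, atomic_load(&shared));
        return 0;
}
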
index 8764970f0c24179b8470a27c1ab0d45567a0d90f..310cabce23111cfaa45af97adef28f4d41d1bbda 100644 (file)
@@ -194,7 +194,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
                dp->dp_protocol_major = RDS_PROTOCOL_MAJOR(protocol_version);
                dp->dp_protocol_minor = RDS_PROTOCOL_MINOR(protocol_version);
                dp->dp_protocol_minor_mask = cpu_to_be16(RDS_IB_SUPPORTED_PROTOCOLS);
-               dp->dp_ack_seq = rds_ib_piggyb_ack(ic);
+               dp->dp_ack_seq = cpu_to_be64(rds_ib_piggyb_ack(ic));
 
                /* Advertise flow control */
                if (ic->i_flowctl) {
index f18c3502420730e87254a6b01f0a6c1e4fb96941..80742edea96fe8a237573c69d3a6349342e6db00 100644 (file)
@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);
 
-       if (skb) {
+       if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);
 
                HARD_TX_UNLOCK(dev, txq);
+       } else {
+               spin_lock(root_lock);
+               return qdisc_qlen(q);
        }
        spin_lock(root_lock);
 
index 8d3d3625130ee0fd294998554a9290d57eae56e7..084718f9b3dad09e21e41e34b989e25627058c98 100644 (file)
@@ -866,8 +866,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                                 * sender MUST assure that at least one T3-rtx
                                 * timer is running.
                                 */
-                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
-                                       sctp_transport_reset_timers(transport);
+                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+                                       sctp_transport_reset_t3_rtx(transport);
+                                       transport->last_time_sent = jiffies;
+                               }
                        }
                        break;
 
@@ -924,8 +926,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        error = sctp_outq_flush_rtx(q, packet,
                                                    rtx_timeout, &start_timer);
 
-                       if (start_timer)
-                               sctp_transport_reset_timers(transport);
+                       if (start_timer) {
+                               sctp_transport_reset_t3_rtx(transport);
+                               transport->last_time_sent = jiffies;
+                       }
 
                        /* This can happen on COOKIE-ECHO resend.  Only
                         * one chunk can get bundled with a COOKIE-ECHO.
@@ -1062,7 +1066,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        list_add_tail(&chunk->transmitted_list,
                                      &transport->transmitted);
 
-                       sctp_transport_reset_timers(transport);
+                       sctp_transport_reset_t3_rtx(transport);
+                       transport->last_time_sent = jiffies;
 
                        /* Only let one DATA chunk get bundled with a
                         * COOKIE-ECHO chunk.
index 7f0bf798205ba3c5311bf57ace66aaaa57cb0367..56f364d8f93270f31867333585fb28317f9d87ad 100644 (file)
@@ -3080,8 +3080,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
                        return SCTP_ERROR_RSRC_LOW;
 
                /* Start the heartbeat timer. */
-               if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
-                       sctp_transport_hold(peer);
+               sctp_transport_reset_hb_timer(peer);
                asoc->new_transport = peer;
                break;
        case SCTP_PARAM_DEL_IP:
index 7fe56d0acabf66cfd8fe29dfdb45f7620b470ac7..41b081a64752da3e4de5dafe6d3afac84bf4923d 100644 (file)
@@ -69,8 +69,6 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
                             sctp_cmd_seq_t *commands,
                             gfp_t gfp);
 
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-                                    struct sctp_transport *t);
 /********************************************************************
  * Helper functions
  ********************************************************************/
@@ -367,6 +365,7 @@ void sctp_generate_heartbeat_event(unsigned long data)
        struct sctp_association *asoc = transport->asoc;
        struct sock *sk = asoc->base.sk;
        struct net *net = sock_net(sk);
+       u32 elapsed, timeout;
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
@@ -378,6 +377,16 @@ void sctp_generate_heartbeat_event(unsigned long data)
                goto out_unlock;
        }
 
+       /* Check if we should still send the heartbeat or reschedule */
+       elapsed = jiffies - transport->last_time_sent;
+       timeout = sctp_transport_timeout(transport);
+       if (elapsed < timeout) {
+               elapsed = timeout - elapsed;
+               if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
+                       sctp_transport_hold(transport);
+               goto out_unlock;
+       }
+
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
@@ -507,7 +516,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
                                             0);
 
                /* Update the hb timer to resend a heartbeat every rto */
-               sctp_cmd_hb_timer_update(commands, transport);
+               sctp_transport_reset_hb_timer(transport);
        }
 
        if (transport->state != SCTP_INACTIVE &&
@@ -634,11 +643,8 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
         * hold a reference on the transport to make sure none of
         * the needed data structures go away.
         */
-       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
-
-               if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-                       sctp_transport_hold(t);
-       }
+       list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+               sctp_transport_reset_hb_timer(t);
 }
 
 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
@@ -669,15 +675,6 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
 }
 
 
-/* Helper function to update the heartbeat timer. */
-static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
-                                    struct sctp_transport *t)
-{
-       /* Update the heartbeat timer.  */
-       if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-               sctp_transport_hold(t);
-}
-
 /* Helper function to handle the reception of a HEARTBEAT ACK.  */
 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
                                  struct sctp_association *asoc,
@@ -742,8 +739,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
        sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
 
        /* Update the heartbeat timer.  */
-       if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
-               sctp_transport_hold(t);
+       sctp_transport_reset_hb_timer(t);
 
        if (was_unconfirmed && asoc->peer.transport_count == 1)
                sctp_transport_immediate_rtx(t);
@@ -1614,7 +1610,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
 
                case SCTP_CMD_HB_TIMER_UPDATE:
                        t = cmd->obj.transport;
-                       sctp_cmd_hb_timer_update(commands, t);
+                       sctp_transport_reset_hb_timer(t);
                        break;
 
                case SCTP_CMD_HB_TIMERS_STOP:
index 9b6b48c7524e4b441a151b80f0babec81f539d49..81b86678be4d6fccc527d3c3e509c12576b2194c 100644 (file)
@@ -183,7 +183,7 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
 /* Start T3_rtx timer if it is not already running and update the heartbeat
  * timer.  This routine is called every time a DATA chunk is sent.
  */
-void sctp_transport_reset_timers(struct sctp_transport *transport)
+void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
 {
        /* RFC 2960 6.3.2 Retransmission Timer Rules
         *
@@ -197,11 +197,18 @@ void sctp_transport_reset_timers(struct sctp_transport *transport)
                if (!mod_timer(&transport->T3_rtx_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);
+}
+
+void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
+{
+       unsigned long expires;
 
        /* When a data chunk is sent, reset the heartbeat interval.  */
-       if (!mod_timer(&transport->hb_timer,
-                      sctp_transport_timeout(transport)))
-           sctp_transport_hold(transport);
+       expires = jiffies + sctp_transport_timeout(transport);
+       if (time_before(transport->hb_timer.expires, expires) &&
+           !mod_timer(&transport->hb_timer,
+                      expires + prandom_u32_max(transport->rto)))
+               sctp_transport_hold(transport);
 }
 
 /* This transport has been assigned to an association.
@@ -595,13 +602,13 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
        /* RTO + timer slack +/- 50% of RTO */
-       unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+       unsigned long timeout = trans->rto >> 1;
 
        if (trans->state != SCTP_UNCONFIRMED &&
            trans->state != SCTP_PF)
                timeout += trans->hbinterval;
 
-       return timeout + jiffies;
+       return timeout;
 }
 
 /* Reset transport variables to their initial values */
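
Illustrative sketch, not part of the patch: the new arming rule in userspace form. sctp_transport_timeout() now returns a relative value (rto/2, plus the heartbeat interval unless the transport is UNCONFIRMED/PF), and sctp_transport_reset_hb_timer() only ever pushes the timer further out, adding up to one RTO of random slack at arm time. The fake jiffies counter and all names below are illustrative.

#include <stdio.h>
#include <stdlib.h>

static unsigned long jiffies = 1000;            /* stand-in for the kernel tick counter */

static unsigned long transport_timeout(unsigned long rto, unsigned long hbinterval,
                                       int confirmed)
{
        unsigned long timeout = rto >> 1;       /* RTO / 2 ... */

        if (confirmed)
                timeout += hbinterval;          /* ... plus the heartbeat interval */
        return timeout;                         /* relative now, no jiffies baked in */
}

static unsigned long reset_hb_timer(unsigned long cur_expiry, unsigned long rto,
                                    unsigned long hbinterval, int confirmed)
{
        unsigned long expires = jiffies + transport_timeout(rto, hbinterval, confirmed);

        /* only extend the timer; add up to one RTO of slack, like prandom_u32_max(rto) */
        if (cur_expiry < expires)
                return expires + (unsigned long)(rand() % (int)rto);
        return cur_expiry;
}

int main(void)
{
        unsigned long expiry = 0;

        expiry = reset_hb_timer(expiry, 200, 3000, 1);
        printf("heartbeat timer armed for jiffy %lu\n", expiry);
        return 0;
}
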
index 2b9b98f1c2ff2a9092e0d84af5731ce5a458fd35..b7e01d88bdc5f74c85813b3fb9fa1bbf358abaaf 100644 (file)
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev,
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
                           err, attr->id);
+       if (attr->complete)
+               attr->complete(dev, err, attr->complete_priv);
 }
 
 static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
                           err, obj->id);
+       if (obj->complete)
+               obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
        if (err && err != -EOPNOTSUPP)
                netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
                           err, obj->id);
+       if (obj->complete)
+               obj->complete(dev, err, obj->complete_priv);
 }
 
 static int switchdev_port_obj_del_defer(struct net_device *dev,
index 03a842870c52d22ca5c8e72ce063f78ba8391521..e2bdb07a49a2d6d95e55a051b048082934db9daf 100644 (file)
@@ -69,6 +69,7 @@ static int __net_init tipc_init_net(struct net *net)
        if (err)
                goto out_nametbl;
 
+       INIT_LIST_HEAD(&tn->dist_queue);
        err = tipc_topsrv_start(net);
        if (err)
                goto out_subscr;
index 5504d63503df406f7dd1bb1245b85132fcd193ed..eff58dc53aa12b5d3644bd364b08f60f6d6cd7be 100644 (file)
@@ -103,6 +103,9 @@ struct tipc_net {
        spinlock_t nametbl_lock;
        struct name_table *nametbl;
 
+       /* Name dist queue */
+       struct list_head dist_queue;
+
        /* Topology subscription server */
        struct tipc_server *topsrv;
        atomic_t subscription_count;
index ebe9d0ff6e9e9220621676e3384df3a28477d7a9..6b626a64b5179e9b7d8fc909be203bdf6facfbdd 100644 (file)
 
 int sysctl_tipc_named_timeout __read_mostly = 2000;
 
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
 struct distr_queue_item {
        struct distr_item i;
        u32 dtype;
@@ -229,12 +224,31 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
        kfree_rcu(p, rcu);
 }
 
+/**
+ * tipc_dist_queue_purge - remove deferred updates from a node that went down
+ */
+static void tipc_dist_queue_purge(struct net *net, u32 addr)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct distr_queue_item *e, *tmp;
+
+       spin_lock_bh(&tn->nametbl_lock);
+       list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
+               if (e->node != addr)
+                       continue;
+               list_del(&e->next);
+               kfree(e);
+       }
+       spin_unlock_bh(&tn->nametbl_lock);
+}
+
 void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
 {
        struct publication *publ, *tmp;
 
        list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
                tipc_publ_purge(net, publ, addr);
+       tipc_dist_queue_purge(net, addr);
 }
 
 /**
@@ -279,9 +293,11 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
  * tipc_named_add_backlog - add a failed name table update to the backlog
  *
  */
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+                                  u32 type, u32 node)
 {
        struct distr_queue_item *e;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        unsigned long now = get_jiffies_64();
 
        e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -291,7 +307,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
        e->node = node;
        e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
        memcpy(e, i, sizeof(*i));
-       list_add_tail(&e->next, &tipc_dist_queue);
+       list_add_tail(&e->next, &tn->dist_queue);
 }
 
 /**
@@ -301,10 +317,11 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
 void tipc_named_process_backlog(struct net *net)
 {
        struct distr_queue_item *e, *tmp;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
        char addr[16];
        unsigned long now = get_jiffies_64();
 
-       list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+       list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
                if (time_after(e->expires, now)) {
                        if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
                                continue;
@@ -344,7 +361,7 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
                node = msg_orignode(msg);
                while (count--) {
                        if (!tipc_update_nametbl(net, item, node, mtype))
-                               tipc_named_add_backlog(item, mtype, node);
+                               tipc_named_add_backlog(net, item, mtype, node);
                        item++;
                }
                kfree_skb(skb);
index 662bdd20a7489198fdbbcb00826f8a2782a62a50..56214736fe888090f569739e2e008c72d8785fb0 100644 (file)
@@ -1735,11 +1735,8 @@ static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
        /* Retrieve the head sk_buff from the socket's receive queue. */
        err = 0;
        skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
-       if (err)
-               return err;
-
        if (!skb)
-               return -EAGAIN;
+               return err;
 
        dg = (struct vmci_datagram *)skb->data;
        if (!dg)
@@ -2154,7 +2151,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.3.0-k");
+MODULE_VERSION("1.0.4.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
index 98c924260b3d312055c598255cff8478bccba1e9..056a7307862b78f5f0b2013b41c2686b5de18064 100644 (file)
@@ -13216,7 +13216,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
        struct wireless_dev *wdev;
        struct cfg80211_beacon_registration *reg, *tmp;
 
-       if (state != NETLINK_URELEASE)
+       if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
                return NOTIFY_DONE;
 
        rcu_read_lock();
index d1a4d697333077126f99f4951c00f647d06aaf5e..03c9872c31cfe4a2a723d3e4bb0f4896b313dc32 100644 (file)
@@ -299,13 +299,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
 int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
                                int parm)
 {
-       int val;
+       unsigned int cmd, val;
 
-       if (codec->regmap)
-               regcache_cache_bypass(codec->regmap, true);
-       val = snd_hdac_read_parm(codec, nid, parm);
-       if (codec->regmap)
-               regcache_cache_bypass(codec->regmap, false);
+       cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
+       if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
+               return -1;
        return val;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
index bdbcd6b75ff61cb0495e36072eaefa2ec2478dc2..87041ddd29cbcbf2c63fca9ed90ffdbc0e5f249d 100644 (file)
@@ -453,14 +453,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
 
 static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
-                       unsigned int *val)
+                       unsigned int *val, bool uncached)
 {
-       if (!codec->regmap)
+       if (uncached || !codec->regmap)
                return hda_reg_read(codec, reg, val);
        else
                return regmap_read(codec->regmap, reg, val);
 }
 
+static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val,
+                                     bool uncached)
+{
+       int err;
+
+       err = reg_raw_read(codec, reg, val, uncached);
+       if (err == -EAGAIN) {
+               err = snd_hdac_power_up_pm(codec);
+               if (!err)
+                       err = reg_raw_read(codec, reg, val, uncached);
+               snd_hdac_power_down_pm(codec);
+       }
+       return err;
+}
+
 /**
  * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
  * @codec: the codec object
@@ -472,19 +488,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
 int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
                             unsigned int *val)
 {
-       int err;
-
-       err = reg_raw_read(codec, reg, val);
-       if (err == -EAGAIN) {
-               err = snd_hdac_power_up_pm(codec);
-               if (!err)
-                       err = reg_raw_read(codec, reg, val);
-               snd_hdac_power_down_pm(codec);
-       }
-       return err;
+       return __snd_hdac_regmap_read_raw(codec, reg, val, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
 
+/* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the
+ * cache but always via hda verbs.
+ */
+int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
+                                     unsigned int reg, unsigned int *val)
+{
+       return __snd_hdac_regmap_read_raw(codec, reg, val, true);
+}
+
 /**
  * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
  * @codec: the codec object
index 7ca5b89f088a6922e6acd09c12befae864994320..dfaf1a93fb8a3b8aba4fee3f3090a48b08ed2734 100644 (file)
@@ -826,7 +826,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                                   bool allow_powerdown)
 {
        hda_nid_t nid, changed = 0;
-       int i, state;
+       int i, state, power;
 
        for (i = 0; i < path->depth; i++) {
                nid = path->path[i];
@@ -838,7 +838,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                        state = AC_PWRST_D0;
                else
                        state = AC_PWRST_D3;
-               if (!snd_hda_check_power_state(codec, nid, state)) {
+               power = snd_hda_codec_read(codec, nid, 0,
+                                          AC_VERB_GET_POWER_STATE, 0);
+               if (power != (state | (state << 4))) {
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE, state);
                        changed = nid;
index b680be0e937d9cd7b77b8e21365a73fe9a8c925b..637b8a0e2a91d3ab9b73ddc8a4b9fc4a24303e00 100644 (file)
@@ -2232,6 +2232,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+       /* Broxton-T */
+       { PCI_DEVICE(0x8086, 0x1a98),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
index a47e8ae0eb308355416886d4753c9f950bd0cd56..80bbadc83721447754392238118eee98484616b6 100644 (file)
@@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
 {
        struct cs_spec *spec = codec->spec;
        int err;
+       int i;
 
        err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
        if (err < 0)
@@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
        if (err < 0)
                return err;
 
+       /* keep the ADCs powered up when it's dynamically switchable */
+       if (spec->gen.dyn_adc_switch) {
+               unsigned int done = 0;
+               for (i = 0; i < spec->gen.input_mux.num_items; i++) {
+                       int idx = spec->gen.dyn_adc_idx[i];
+                       if (done & (1 << idx))
+                               continue;
+                       snd_hda_gen_fix_pin_power(codec,
+                                                 spec->gen.adc_nids[idx]);
+                       done |= 1 << idx;
+               }
+       }
+
        return 0;
 }
 
index c83c1a8d9742b8f2da261f416561111cb64bd662..40933aa33afe337821570176e93cc9cbb71a8c28 100644 (file)
@@ -1858,6 +1858,8 @@ static void hdmi_set_chmap(struct hdac_device *hdac, int pcm_idx,
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
 
+       if (!per_pin)
+               return;
        mutex_lock(&per_pin->lock);
        per_pin->chmap_set = true;
        memcpy(per_pin->chmap, chmap, ARRAY_SIZE(per_pin->chmap));
index 1402ba954b3daf70650a8ceca9f0ac20eea2084e..810bceee4fd213bbd0b95acd03fa03cb15fe9c93 100644 (file)
@@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+       SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
index c5194f5b150aadb7d0b08cd5831aab2a03f2392d..d7e71f3092998cdaa3894adefdd9468c236bb24e 100644 (file)
@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
        }
 
        pcxhr_msg_thread(mgr);
+       mutex_unlock(&mgr->lock);
        return IRQ_HANDLED;
 }
index 5a95896105bc62fbd707f2feae9178d0c69f08ca..55a60d331f47400065d7c37cf32dd97d11f83ff7 100644 (file)
@@ -299,18 +299,38 @@ they mean, and suggestions for how to fix them.
 Errors in .c files
 ------------------
 
-If you're getting an objtool error in a compiled .c file, chances are
-the file uses an asm() statement which has a "call" instruction.  An
-asm() statement with a call instruction must declare the use of the
-stack pointer in its output operand.  For example, on x86_64:
+1. c_file.o: warning: objtool: funcA() falls through to next function funcB()
 
-   register void *__sp asm("rsp");
-   asm volatile("call func" : "+r" (__sp));
+   This means that funcA() doesn't end with a return instruction or an
+   unconditional jump, and that objtool has determined that the function
+   can fall through into the next function.  There could be different
+   reasons for this:
 
-Otherwise the stack frame may not get created before the call.
+   1) funcA()'s last instruction is a call to a "noreturn" function like
+      panic().  In this case the noreturn function needs to be added to
+      objtool's hard-coded global_noreturns array.  Feel free to bug the
+      objtool maintainer, or you can submit a patch.
 
-Another possible cause for errors in C code is if the Makefile removes
--fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
+   2) funcA() uses the unreachable() annotation in a section of code
+      that is actually reachable.
+
+   3) If funcA() calls an inline function, the object code for funcA()
+      might be corrupt due to a gcc bug.  For more details, see:
+      https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+
+2. If you're getting any other objtool error in a compiled .c file, it
+   may be because the file uses an asm() statement which has a "call"
+   instruction.  An asm() statement with a call instruction must declare
+   the use of the stack pointer in its output operand.  For example, on
+   x86_64:
+
+     register void *__sp asm("rsp");
+     asm volatile("call func" : "+r" (__sp));
+
+   Otherwise the stack frame may not get created before the call.
+
+3. Another possible cause for errors in C code is if the Makefile removes
+   -fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
 
 Also see the above section for .S file errors for more information what
 the individual error messages mean.
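
An illustrative aside, not part of the patch above: a minimal user-space sketch of the fall-through case described in item 1.1, assuming a hypothetical noreturn helper fatal_error() standing in for panic().  Because the compiler knows the callee never returns, it emits no ret instruction after the call, so a checker that is unaware of the noreturn property sees funcA() run off its end into the next function.

   #include <stdio.h>
   #include <stdlib.h>

   /* Hypothetical noreturn helper (stand-in for panic() and friends). */
   static void __attribute__((noreturn)) fatal_error(const char *msg)
   {
           fprintf(stderr, "%s\n", msg);
           exit(1);
   }

   void funcA(void)
   {
           /* No ret is emitted after this call; a checker that does not
            * know fatal_error() is noreturn would report funcA() as
            * falling through into whatever function follows it. */
           fatal_error("unrecoverable");
   }

   int main(void)
   {
           funcA();
   }
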
index 7515cb2e879a1440414d51da7239aedf00d8ae0b..e8a1e69eb92c5235735d42847e4f34b2e737a972 100644 (file)
@@ -54,6 +54,7 @@ struct instruction {
        struct symbol *call_dest;
        struct instruction *jump_dest;
        struct list_head alts;
+       struct symbol *func;
 };
 
 struct alternative {
@@ -66,6 +67,7 @@ struct objtool_file {
        struct list_head insn_list;
        DECLARE_HASHTABLE(insn_hash, 16);
        struct section *rodata, *whitelist;
+       bool ignore_unreachables, c_file;
 };
 
 const char *objname;
@@ -228,7 +230,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                        }
                }
 
-               if (insn->type == INSN_JUMP_DYNAMIC)
+               if (insn->type == INSN_JUMP_DYNAMIC && list_empty(&insn->alts))
                        /* sibling call */
                        return 0;
        }
@@ -248,6 +250,7 @@ static int dead_end_function(struct objtool_file *file, struct symbol *func)
 static int decode_instructions(struct objtool_file *file)
 {
        struct section *sec;
+       struct symbol *func;
        unsigned long offset;
        struct instruction *insn;
        int ret;
@@ -281,6 +284,21 @@ static int decode_instructions(struct objtool_file *file)
                        hash_add(file->insn_hash, &insn->hash, insn->offset);
                        list_add_tail(&insn->list, &file->insn_list);
                }
+
+               list_for_each_entry(func, &sec->symbol_list, list) {
+                       if (func->type != STT_FUNC)
+                               continue;
+
+                       if (!find_insn(file, sec, func->offset)) {
+                               WARN("%s(): can't find starting instruction",
+                                    func->name);
+                               return -1;
+                       }
+
+                       func_for_each_insn(file, func, insn)
+                               if (!insn->func)
+                                       insn->func = func;
+               }
        }
 
        return 0;
@@ -664,13 +682,40 @@ static int add_func_switch_tables(struct objtool_file *file,
                                                text_rela->addend);
 
                /*
-                * TODO: Document where this is needed, or get rid of it.
-                *
                 * rare case:   jmpq *[addr](%rip)
+                *
+                * This check is for a rare gcc quirk, currently only seen in
+                * three driver functions in the kernel, only with certain
+                * obscure non-distro configs.
+                *
+                * As part of an optimization, gcc makes a copy of an existing
+                * switch jump table, modifies it, and then hard-codes the jump
+                * (albeit with an indirect jump) to use a single entry in the
+                * table.  The rest of the jump table and some of its jump
+                * targets remain as dead code.
+                *
+                * In such a case we can just crudely ignore all unreachable
+                * instruction warnings for the entire object file.  Ideally we
+                * would just ignore them for the function, but that would
+                * require redesigning the code quite a bit.  And honestly
+                * that's just not worth doing: unreachable instruction
+                * warnings are of questionable value anyway, and this is such
+                * a rare issue.
+                *
+                * kbuild reports:
+                * - https://lkml.kernel.org/r/201603231906.LWcVUpxm%25fengguang.wu@intel.com
+                * - https://lkml.kernel.org/r/201603271114.K9i45biy%25fengguang.wu@intel.com
+                * - https://lkml.kernel.org/r/201603291058.zuJ6ben1%25fengguang.wu@intel.com
+                *
+                * gcc bug:
+                * - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70604
                 */
-               if (!rodata_rela)
+               if (!rodata_rela) {
                        rodata_rela = find_rela_by_dest(file->rodata,
                                                        text_rela->addend + 4);
+                       if (rodata_rela)
+                               file->ignore_unreachables = true;
+               }
 
                if (!rodata_rela)
                        continue;
@@ -732,9 +777,6 @@ static int decode_sections(struct objtool_file *file)
 {
        int ret;
 
-       file->whitelist = find_section_by_name(file->elf, "__func_stack_frame_non_standard");
-       file->rodata = find_section_by_name(file->elf, ".rodata");
-
        ret = decode_instructions(file);
        if (ret)
                return ret;
@@ -799,6 +841,7 @@ static int validate_branch(struct objtool_file *file,
        struct alternative *alt;
        struct instruction *insn;
        struct section *sec;
+       struct symbol *func = NULL;
        unsigned char state;
        int ret;
 
@@ -813,6 +856,16 @@ static int validate_branch(struct objtool_file *file,
        }
 
        while (1) {
+               if (file->c_file && insn->func) {
+                       if (func && func != insn->func) {
+                               WARN("%s() falls through to next function %s()",
+                                    func->name, insn->func->name);
+                               return 1;
+                       }
+
+                       func = insn->func;
+               }
+
                if (insn->visited) {
                        if (frame_state(insn->state) != frame_state(state)) {
                                WARN_FUNC("frame pointer state mismatch",
@@ -823,13 +876,6 @@ static int validate_branch(struct objtool_file *file,
                        return 0;
                }
 
-               /*
-                * Catch a rare case where a noreturn function falls through to
-                * the next function.
-                */
-               if (is_fentry_call(insn) && (state & STATE_FENTRY))
-                       return 0;
-
                insn->visited = true;
                insn->state = state;
 
@@ -1035,12 +1081,8 @@ static int validate_functions(struct objtool_file *file)
                                continue;
 
                        insn = find_insn(file, sec, func->offset);
-                       if (!insn) {
-                               WARN("%s(): can't find starting instruction",
-                                    func->name);
-                               warnings++;
+                       if (!insn)
                                continue;
-                       }
 
                        ret = validate_branch(file, insn, 0);
                        warnings += ret;
@@ -1056,13 +1098,14 @@ static int validate_functions(struct objtool_file *file)
                                if (insn->visited)
                                        continue;
 
-                               if (!ignore_unreachable_insn(func, insn) &&
-                                   !warnings) {
-                                       WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
-                                       warnings++;
-                               }
-
                                insn->visited = true;
+
+                               if (file->ignore_unreachables || warnings ||
+                                   ignore_unreachable_insn(func, insn))
+                                       continue;
+
+                               WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
+                               warnings++;
                        }
                }
        }
@@ -1133,6 +1176,10 @@ int cmd_check(int argc, const char **argv)
 
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
+       file.whitelist = find_section_by_name(file.elf, "__func_stack_frame_non_standard");
+       file.rodata = find_section_by_name(file.elf, ".rodata");
+       file.ignore_unreachables = false;
+       file.c_file = find_section_by_name(file.elf, ".comment");
 
        ret = decode_sections(&file);
        if (ret < 0)
index 407f11b97c8dc9dbaaf74409bbd8dab363966e24..6175784409896425154dcfd9c49560e9f2cb7e28 100644 (file)
@@ -1130,7 +1130,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
                pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
                       ret);
 
-       if (pt->synth_opts.callchain)
+       if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);
 
        return ret;
index 69bb3fc38fb2b71ba9f10070d7331bf8d2e158ce..0840684deb7d474578050f9c230fe9196e46a486 100644 (file)
@@ -3,3 +3,4 @@ psock_fanout
 psock_tpacket
 reuseport_bpf
 reuseport_bpf_cpu
+reuseport_dualstack
index c658792d47b495badb3470e5d2f1143bbb94827a..0e5340742620bc332df4ad49737fac072738d338 100644 (file)
@@ -4,7 +4,7 @@ CFLAGS = -Wall -O2 -g
 
 CFLAGS += -I../../../../usr/include/
 
-NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu
+NET_PROGS = socket psock_fanout psock_tpacket reuseport_bpf reuseport_bpf_cpu reuseport_dualstack
 
 all: $(NET_PROGS)
 %: %.c
diff --git a/tools/testing/selftests/net/reuseport_dualstack.c b/tools/testing/selftests/net/reuseport_dualstack.c
new file mode 100644 (file)
index 0000000..90958aa
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * It is possible to use SO_REUSEPORT to open multiple sockets bound to
+ * equivalent local addresses using AF_INET and AF_INET6 at the same time.  If
+ * the AF_INET6 socket has IPV6_V6ONLY set, it's clear which socket should
+ * receive a given incoming packet.  However, when it is not set, incoming v4
+ * packets should prefer the AF_INET socket(s).  This behavior was defined with
+ * the original SO_REUSEPORT implementation, but broke with
+ * e32ea7e74727 ("soreuseport: fast reuseport UDP socket selection").
+ * This test creates these mixed AF_INET/AF_INET6 sockets and asserts the
+ * AF_INET preference for v4 packets.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count)
+{
+       struct sockaddr_storage addr;
+       struct sockaddr_in  *addr4;
+       struct sockaddr_in6 *addr6;
+       int opt, i;
+
+       switch (family) {
+       case AF_INET:
+               addr4 = (struct sockaddr_in *)&addr;
+               addr4->sin_family = AF_INET;
+               addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+               addr4->sin_port = htons(PORT);
+               break;
+       case AF_INET6:
+               addr6 = (struct sockaddr_in6 *)&addr;
+               addr6->sin6_family = AF_INET6;
+               addr6->sin6_addr = in6addr_any;
+               addr6->sin6_port = htons(PORT);
+               break;
+       default:
+               error(1, 0, "Unsupported family %d", family);
+       }
+
+       for (i = 0; i < count; ++i) {
+               rcv_fds[i] = socket(family, proto, 0);
+               if (rcv_fds[i] < 0)
+                       error(1, errno, "failed to create receive socket");
+
+               opt = 1;
+               if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+                              sizeof(opt)))
+                       error(1, errno, "failed to set SO_REUSEPORT");
+
+               if (bind(rcv_fds[i], (struct sockaddr *)&addr, sizeof(addr)))
+                       error(1, errno, "failed to bind receive socket");
+
+               if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+                       error(1, errno, "failed to listen on receive port");
+       }
+}
+
+static void send_from_v4(int proto)
+{
+       struct sockaddr_in  saddr, daddr;
+       int fd;
+
+       saddr.sin_family = AF_INET;
+       saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+       saddr.sin_port = 0;
+
+       daddr.sin_family = AF_INET;
+       daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+       daddr.sin_port = htons(PORT);
+
+       fd = socket(AF_INET, proto, 0);
+       if (fd < 0)
+               error(1, errno, "failed to create send socket");
+
+       if (bind(fd, (struct sockaddr *)&saddr, sizeof(saddr)))
+               error(1, errno, "failed to bind send socket");
+
+       if (connect(fd, (struct sockaddr *)&daddr, sizeof(daddr)))
+               error(1, errno, "failed to connect send socket");
+
+       if (send(fd, "a", 1, 0) < 0)
+               error(1, errno, "failed to send message");
+
+       close(fd);
+}
+
+static int receive_once(int epfd, int proto)
+{
+       struct epoll_event ev;
+       int i, fd;
+       char buf[8];
+
+       i = epoll_wait(epfd, &ev, 1, -1);
+       if (i < 0)
+               error(1, errno, "epoll_wait failed");
+
+       if (proto == SOCK_STREAM) {
+               fd = accept(ev.data.fd, NULL, NULL);
+               if (fd < 0)
+                       error(1, errno, "failed to accept");
+               i = recv(fd, buf, sizeof(buf), 0);
+               close(fd);
+       } else {
+               i = recv(ev.data.fd, buf, sizeof(buf), 0);
+       }
+
+       if (i < 0)
+               error(1, errno, "failed to recv");
+
+       return ev.data.fd;
+}
+
+static void test(int *rcv_fds, int count, int proto)
+{
+       struct epoll_event ev;
+       int epfd, i, test_fd;
+       uint16_t test_family;
+       socklen_t len = sizeof(test_family);
+
+       epfd = epoll_create(1);
+       if (epfd < 0)
+               error(1, errno, "failed to create epoll");
+
+       ev.events = EPOLLIN;
+       for (i = 0; i < count; ++i) {
+               ev.data.fd = rcv_fds[i];
+               if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+                       error(1, errno, "failed to register sock epoll");
+       }
+
+       send_from_v4(proto);
+
+       test_fd = receive_once(epfd, proto);
+       if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
+               error(1, errno, "failed to read socket domain");
+       if (test_family != AF_INET)
+               error(1, 0, "expected to receive on v4 socket but got v6 (%d)",
+                     test_family);
+
+       close(epfd);
+}
+
+int main(void)
+{
+       int rcv_fds[32], i;
+
+       fprintf(stderr, "---- UDP IPv4 created before IPv6 ----\n");
+       build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_DGRAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- UDP IPv6 created before IPv4 ----\n");
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_DGRAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       /* NOTE: UDP socket lookups traverse a different code path when there
+        * are > 10 sockets in a group.
+        */
+       fprintf(stderr, "---- UDP IPv4 created before IPv6 (large) ----\n");
+       build_rcv_fd(AF_INET, SOCK_DGRAM, rcv_fds, 16);
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, &(rcv_fds[16]), 16);
+       test(rcv_fds, 32, SOCK_DGRAM);
+       for (i = 0; i < 32; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- UDP IPv6 created before IPv4 (large) ----\n");
+       build_rcv_fd(AF_INET6, SOCK_DGRAM, rcv_fds, 16);
+       build_rcv_fd(AF_INET, SOCK_DGRAM, &(rcv_fds[16]), 16);
+       test(rcv_fds, 32, SOCK_DGRAM);
+       for (i = 0; i < 32; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- TCP IPv4 created before IPv6 ----\n");
+       build_rcv_fd(AF_INET, SOCK_STREAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET6, SOCK_STREAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_STREAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "---- TCP IPv6 created before IPv4 ----\n");
+       build_rcv_fd(AF_INET6, SOCK_STREAM, rcv_fds, 5);
+       build_rcv_fd(AF_INET, SOCK_STREAM, &(rcv_fds[5]), 5);
+       test(rcv_fds, 10, SOCK_STREAM);
+       for (i = 0; i < 10; ++i)
+               close(rcv_fds[i]);
+
+       fprintf(stderr, "SUCCESS\n");
+       return 0;
+}
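
An illustrative aside, not part of the patch above: the unambiguous case mentioned in the test's header comment (an AF_INET6 socket that never matches incoming IPv4 packets) comes from setting IPV6_V6ONLY before bind().  A minimal sketch under that assumption follows; make_v6only_udp_socket() is a hypothetical helper name, not something the selftest uses.

   #define _GNU_SOURCE
   #include <arpa/inet.h>
   #include <netinet/in.h>
   #include <sys/socket.h>
   #include <unistd.h>

   /* Sketch only: with IPV6_V6ONLY set, the v6 socket opts out of
    * v4-mapped traffic, so no AF_INET vs. AF_INET6 preference question
    * arises for incoming IPv4 packets on the same port. */
   static int make_v6only_udp_socket(unsigned short port)
   {
           struct sockaddr_in6 a6 = { 0 };
           int one = 1;
           int fd = socket(AF_INET6, SOCK_DGRAM, 0);

           if (fd < 0)
                   return -1;
           a6.sin6_family = AF_INET6;
           a6.sin6_addr = in6addr_any;
           a6.sin6_port = htons(port);
           if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)) ||
               setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
               bind(fd, (struct sockaddr *)&a6, sizeof(a6))) {
                   close(fd);
                   return -1;
           }
           return fd;
   }

   int main(void)
   {
           int fd = make_v6only_udp_socket(8888);

           if (fd >= 0)
                   close(fd);
           return fd >= 0 ? 0 : 1;
   }
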