Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux...
author Dave Airlie <airlied@gmail.com>
Thu, 29 Jan 2015 01:45:31 +0000 (11:45 +1000)
committer Dave Airlie <airlied@gmail.com>
Thu, 29 Jan 2015 01:45:31 +0000 (11:45 +1000)
This backmerges drm-fixes into drm-next, mainly for the amdkfd
stuff. I'm not 100% confident, but it builds and the amdkfd
folks can fix anything up.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Conflicts:
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h

234 files changed:
Documentation/ABI/testing/sysfs-platform-dell-laptop [deleted file]
Documentation/devicetree/bindings/arm/arm-boards
Documentation/devicetree/bindings/arm/fw-cfg.txt [new file with mode: 0644]
Documentation/devicetree/bindings/graph.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
MAINTAINERS
Makefile
arch/alpha/kernel/pci.c
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/boot/dts/tegra20-seaboard.dts
arch/arm/kernel/entry-header.S
arch/arm/kernel/perf_event.c
arch/arm/kernel/setup.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod.h
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mach-omap2/prcm-common.h
arch/arm/mach-omap2/prm44xx.c
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-omap2/twl-common.c
arch/arm/mach-shmobile/setup-r8a7778.c
arch/arm/mach-shmobile/setup-r8a7779.c
arch/arm64/Makefile
arch/arm64/boot/dts/Makefile
arch/arm64/boot/dts/arm/juno.dts
arch/arm64/mm/dump.c
arch/avr32/kernel/module.c
arch/cris/arch-v32/drivers/sync_serial.c
arch/cris/kernel/module.c
arch/frv/mb93090-mb00/pci-frv.c
arch/ia64/kernel/module.c
arch/ia64/pci/pci.c
arch/microblaze/pci/pci-common.c
arch/mips/net/bpf_jit.c
arch/mn10300/unit-asb2305/pci-asb2305.c
arch/mn10300/unit-asb2305/pci.c
arch/nios2/kernel/module.c
arch/nios2/kernel/signal.c
arch/parisc/kernel/module.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/xmon/xmon.c
arch/s390/kernel/module.c
arch/s390/net/bpf_jit.S
arch/s390/net/bpf_jit_comp.c
arch/sparc/kernel/pci.c
arch/sparc/net/bpf_jit_comp.c
arch/tile/kernel/module.c
arch/x86/Kconfig
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/mmu_context.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/irq.c
arch/x86/kernel/tls.c
arch/x86/kernel/tsc.c
arch/x86/kvm/emulate.c
arch/x86/mm/init.c
arch/x86/mm/mpx.c
arch/x86/mm/pat.c
arch/x86/pci/i386.c
arch/x86/pci/xen.c
arch/x86/tools/calc_run_size.pl [deleted file]
arch/x86/tools/calc_run_size.sh [new file with mode: 0644]
block/blk-mq-sysfs.c
block/blk-mq.c
drivers/acpi/pci_irq.c
drivers/block/nvme-core.c
drivers/bus/mvebu-mbus.c
drivers/clocksource/bcm_kona_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/sh_tmu.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/i5500_temp.c [new file with mode: 0644]
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-hip04.c
drivers/irqchip/irq-mtk-sysirq.c
drivers/irqchip/irq-omap-intc.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-target.c
drivers/md/dm.c
drivers/media/pci/cx23885/cx23885-cards.c
drivers/media/pci/cx23885/cx23885-core.c
drivers/media/pci/cx23885/cx23885-dvb.c
drivers/media/pci/cx23885/cx23885.h
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/platform/soc_camera/atmel-isi.c
drivers/media/platform/soc_camera/mx2_camera.c
drivers/media/platform/soc_camera/mx3_camera.c
drivers/media/platform/soc_camera/omap1_camera.c
drivers/media/platform/soc_camera/pxa_camera.c
drivers/media/platform/soc_camera/rcar_vin.c
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
drivers/media/usb/dvb-usb/cxusb.c
drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/net/can/c_can/c_can.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/of/overlay.c
drivers/of/platform.c
drivers/of/unittest-data/tests-overlay.dtsi
drivers/of/unittest.c
drivers/parisc/lba_pci.c
drivers/pci/bus.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/platform/x86/dell-laptop.c
drivers/regulator/core.c
drivers/regulator/s2mps11.c
drivers/rtc/rtc-s5m.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_lib.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sh-msiof.c
drivers/staging/media/tlg2300/Kconfig
drivers/watchdog/cadence_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/meson_wdt.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/cifs/ioctl.c
include/dt-bindings/interrupt-controller/arm-gic.h
include/linux/mfd/samsung/s2mps13.h
include/linux/module.h
include/linux/moduleloader.h
include/linux/oom.h
include/linux/pci.h
include/linux/printk.h
include/linux/time.h
include/net/ip.h
include/trace/events/kvm.h
kernel/bpf/core.c
kernel/bpf/syscall.c
kernel/cgroup.c
kernel/debug/kdb/kdb_main.c
kernel/kprobes.c
kernel/module.c
kernel/params.c
kernel/sys.c
kernel/time/ntp.c
kernel/time/time.c
mm/memcontrol.c
mm/page_alloc.c
mm/vmscan.c
net/dsa/slave.c
net/ipv4/ip_forward.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/udp_diag.c
net/ipv6/ip6_fib.c
net/ipv6/route.c
net/ipv6/xfrm6_policy.c
net/llc/sysctl_net_llc.c
net/mac80211/pm.c
net/mac80211/rx.c
net/sched/cls_bpf.c
net/sctp/associola.c
net/socket.c
net/wireless/nl80211.c
net/wireless/util.c
samples/bpf/test_maps.c

diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
deleted file mode 100644 (file)
index 7969443..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-What:          /sys/class/leds/dell::kbd_backlight/als_setting
-Date:          December 2014
-KernelVersion: 3.19
-Contact:       Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-               Pali Rohár <pali.rohar@gmail.com>
-Description:
-               This file allows to control the automatic keyboard
-               illumination mode on some systems that have an ambient
-               light sensor. Write 1 to this file to enable the auto
-               mode, 0 to disable it.
-
-What:          /sys/class/leds/dell::kbd_backlight/start_triggers
-Date:          December 2014
-KernelVersion: 3.19
-Contact:       Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-               Pali Rohár <pali.rohar@gmail.com>
-Description:
-               This file allows to control the input triggers that
-               turn on the keyboard backlight illumination that is
-               disabled because of inactivity.
-               Read the file to see the triggers available. The ones
-               enabled are preceded by '+', those disabled by '-'.
-
-               To enable a trigger, write its name preceded by '+' to
-               this file. To disable a trigger, write its name preceded
-               by '-' instead.
-
-               For example, to enable the keyboard as trigger run:
-                   echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-               To disable it:
-                   echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-
-               Note that not all the available triggers can be configured.
-
-What:          /sys/class/leds/dell::kbd_backlight/stop_timeout
-Date:          December 2014
-KernelVersion: 3.19
-Contact:       Gabriele Mazzotta <gabriele.mzt@gmail.com>,
-               Pali Rohár <pali.rohar@gmail.com>
-Description:
-               This file allows to specify the interval after which the
-               keyboard illumination is disabled because of inactivity.
-               The timeouts are expressed in seconds, minutes, hours and
-               days, for which the symbols are 's', 'm', 'h' and 'd'
-               respectively.
-
-               To configure the timeout, write to this file a value along
-               with any the above units. If no unit is specified, the value
-               is assumed to be expressed in seconds.
-
-               For example, to set the timeout to 10 minutes run:
-                   echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
-
-               Note that when this file is read, the returned value might be
-               expressed in a different unit than the one used when the timeout
-               was set.
-
-               Also note that only some timeouts are supported and that
-               some systems might fall back to a specific timeout in case
-               an invalid timeout is written to this file.
index 556c8665fdbf0aa5b89e4a0818b66e30b702f3e9..b78564b2b2019e06a4fea1863191d2cab6303ee2 100644 (file)
@@ -23,7 +23,7 @@ Required nodes:
     range of 0x200 bytes.
 
 - syscon: the root node of the Integrator platforms must have a
-  system controller node pointong to the control registers,
+  system controller node pointing to the control registers,
   with the compatible string
   "arm,integrator-ap-syscon"
   "arm,integrator-cp-syscon"
diff --git a/Documentation/devicetree/bindings/arm/fw-cfg.txt b/Documentation/devicetree/bindings/arm/fw-cfg.txt
new file mode 100644 (file)
index 0000000..953fb64
--- /dev/null
@@ -0,0 +1,72 @@
+* QEMU Firmware Configuration bindings for ARM
+
+QEMU's arm-softmmu and aarch64-softmmu emulation / virtualization targets
+provide the following Firmware Configuration interface on the "virt" machine
+type:
+
+- A write-only, 16-bit wide selector (or control) register,
+- a read-write, 64-bit wide data register.
+
+QEMU exposes the control and data register to ARM guests as memory mapped
+registers; their location is communicated to the guest's UEFI firmware in the
+DTB that QEMU places at the bottom of the guest's DRAM.
+
+The guest writes a selector value (a key) to the selector register, and then
+can read the corresponding data (produced by QEMU) via the data register. If
+the selected entry is writable, the guest can rewrite it through the data
+register.
+
+The selector register takes keys in big endian byte order.
+
+The data register allows accesses with 8, 16, 32 and 64-bit width (only at
+offset 0 of the register). Accesses larger than a byte are interpreted as
+arrays, bundled together only for better performance. The bytes constituting
+such a word, in increasing address order, correspond to the bytes that would
+have been transferred by byte-wide accesses in chronological order.
+
+The interface allows guest firmware to download various parameters and blobs
+that affect how the firmware works and what tables it installs for the guest
+OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and
+initrd images for direct kernel booting, virtual machine UUID, SMP information,
+virtual NUMA topology, and so on.
+
+The authoritative registry of the valid selector values and their meanings is
+the QEMU source code; the structure of the data blobs corresponding to the
+individual key values is also defined in the QEMU source code.
+
+The presence of the registers can be verified by selecting the "signature" blob
+with key 0x0000, and reading four bytes from the data register. The returned
+signature is "QEMU".
+
+The outermost protocol (involving the write / read sequences of the control and
+data registers) is expected to be versioned, and/or described by feature bits.
+The interface revision / feature bitmap can be retrieved with key 0x0001. The
+blob to be read from the data register has size 4, and it is to be interpreted
+as a uint32_t value in little endian byte order. The current value
+(corresponding to the above outer protocol) is zero.
+
+The guest kernel is not expected to use these registers (although it is
+certainly allowed to); the device tree bindings are documented here because
+this is where device tree bindings reside in general.
+
+Required properties:
+
+- compatible: "qemu,fw-cfg-mmio".
+
+- reg: the MMIO region used by the device.
+  * Bytes 0x0 to 0x7 cover the data register.
+  * Bytes 0x8 to 0x9 cover the selector register.
+  * Further registers may be appended to the region in case of future interface
+    revisions / feature bits.
+
+Example:
+
+/ {
+       #size-cells = <0x2>;
+       #address-cells = <0x2>;
+
+       fw-cfg@9020000 {
+               compatible = "qemu,fw-cfg-mmio";
+               reg = <0x0 0x9020000 0x0 0xa>;
+       };
+};
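
As a rough illustration of the protocol described by the binding above (and not part of this patch), the following C sketch selects the "signature" blob with key 0x0000 and reads four bytes from the data register, expecting "QEMU". The register offsets and the big-endian selector come from the binding text; the function name and the ioremap'd 'base' pointer covering the reg region of the example node are assumed for the sketch.

#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

#define FW_CFG_SIGNATURE	0x0000	/* "signature" selector key per the binding */

/* 'base' is assumed to map the MMIO region from the example node above */
static bool fw_cfg_has_qemu_signature(void __iomem *base)
{
	char sig[4];
	int i;

	/* selector register at offset 0x8; keys are written in big endian */
	iowrite16be(FW_CFG_SIGNATURE, base + 0x8);

	/* data register at offset 0x0; repeated byte reads return the
	 * selected blob's bytes in chronological order */
	for (i = 0; i < 4; i++)
		sig[i] = ioread8(base);

	return memcmp(sig, "QEMU", 4) == 0;
}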
index 1a69c078adf2bbf94f2714f35b43cfec1fc72fc7..fcb1c6a4787b49ba9b76b04126989c9c37a43a9a 100644 (file)
@@ -19,7 +19,7 @@ type of the connections, they just map their existence. Specific properties
 may be described by specialized bindings depending on the type of connection.
 
 To see how this binding applies to video pipelines, for example, see
-Documentation/device-tree/bindings/media/video-interfaces.txt.
+Documentation/devicetree/bindings/media/video-interfaces.txt.
 Here the ports describe data interfaces, and the links between them are
 the connecting data buses. A single port with multiple connections can
 correspond to multiple devices being connected to the same physical bus.
index c0333a97c47acd6f0696db9d995a4bb14eb5fd5f..a78a2a619ed07f6460b3584fabc9a0c891eb022d 100644 (file)
@@ -9,7 +9,6 @@ ad      Avionic Design GmbH
 adapteva       Adapteva, Inc.
 adi    Analog Devices, Inc.
 aeroflexgaisler        Aeroflex Gaisler AB
-ak     Asahi Kasei Corp.
 allwinner      Allwinner Technology Co., Ltd.
 altr   Altera Corp.
 amcc   Applied Micro Circuits Corporation (APM, formally AMCC)
@@ -20,6 +19,7 @@ amstaos       AMS-Taos Inc.
 apm    Applied Micro Circuits Corporation (APM)
 arm    ARM Ltd.
 armadeus       ARMadeus Systems SARL
+asahi-kasei    Asahi Kasei Corp.
 atmel  Atmel Corporation
 auo    AU Optronics Corporation
 avago  Avago Technologies
@@ -130,6 +130,7 @@ pixcir  PIXCIR MICROELECTRONICS Co., Ltd
 powervr        PowerVR (deprecated, use img)
 qca    Qualcomm Atheros, Inc.
 qcom   Qualcomm Technologies, Inc
+qemu   QEMU, a generic and open source machine emulator and virtualizer
 qnap   QNAP Systems, Inc.
 radxa  Radxa
 raidsonic      RaidSonic Technology GmbH
@@ -171,6 +172,7 @@ usi Universal Scientific Industrial Co., Ltd.
 v3     V3 Semiconductor
 variscite      Variscite Ltd.
 via    VIA Technologies, Inc.
+virtio Virtual I/O Device Specification, developed by the OASIS consortium
 voipac Voipac Technologies s.r.o.
 winbond Winbond Electronics corp.
 wlf    Wolfson Microelectronics
index aa97dffe59e1b1e6159d3c042bf1fd1ed0135240..f5c4567b46ba9ae2af3d0516d9c8f9d5a7c37243 100644 (file)
@@ -698,7 +698,7 @@ L:  alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://blackfin.uclinux.org/
 S:     Supported
 F:     sound/soc/blackfin/*
+
 ANALOG DEVICES INC IIO DRIVERS
 M:     Lars-Peter Clausen <lars@metafoo.de>
 M:     Michael Hennerich <Michael.Hennerich@analog.com>
@@ -4752,14 +4752,14 @@ S:      Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
 
 IBM Power Virtual SCSI Device Drivers
-M:     Nathan Fontenot <nfont@linux.vnet.ibm.com>
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/ibmvscsi/ibmvscsi*
 F:     drivers/scsi/ibmvscsi/viosrp.h
 
 IBM Power Virtual FC Device Drivers
-M:     Brian King <brking@linux.vnet.ibm.com>
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/ibmvscsi/ibmvfc*
@@ -4948,7 +4948,6 @@ K:        \b(ABS|SYN)_MT_
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:     Intel SCU Linux support <intel-linux-scu@intel.com>
 M:     Artur Paszkiewicz <artur.paszkiewicz@intel.com>
-M:     Dave Jiang <dave.jiang@intel.com>
 L:     linux-scsi@vger.kernel.org
 T:     git git://git.code.sf.net/p/intel-sas/isci
 S:     Supported
@@ -7026,14 +7025,12 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:     Grant Likely <grant.likely@linaro.org>
 M:     Rob Herring <robh+dt@kernel.org>
 L:     devicetree@vger.kernel.org
-W:     http://fdt.secretlab.ca
-T:     git git://git.secretlab.ca/git/linux-2.6.git
+W:     http://www.devicetree.org/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
 S:     Maintained
 F:     drivers/of/
 F:     include/linux/of*.h
 F:     scripts/dtc/
-K:     of_get_property
-K:     of_match_table
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
@@ -7278,7 +7275,7 @@ S:        Maintained
 F:     drivers/pci/host/*layerscape*
 
 PCI DRIVER FOR IMX6
-M:     Richard Zhu <r65037@freescale.com>
+M:     Richard Zhu <Richard.Zhu@freescale.com>
 M:     Lucas Stach <l.stach@pengutronix.de>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
index fb93350cf6456c89649aff15a99efae74eca5cb8..95a0e827ecd30a40950643be5f2d516aac17b2f8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
index 076c35cd6cde7c2d782a46fe3942b8962369230e..98a1525fa164df0178fd4cb5fc0267129b2844e3 100644 (file)
@@ -285,8 +285,12 @@ pcibios_claim_one_bus(struct pci_bus *b)
                        if (r->parent || !r->start || !r->flags)
                                continue;
                        if (pci_has_flag(PCI_PROBE_ONLY) ||
-                           (r->flags & IORESOURCE_PCI_FIXED))
-                               pci_claim_resource(dev, i);
+                           (r->flags & IORESOURCE_PCI_FIXED)) {
+                               if (pci_claim_resource(dev, i) == 0)
+                                       continue;
+
+                               pci_claim_bridge_resource(dev, i);
+                       }
                }
        }
 
index 22771bc1643afcd7652773f058e6ace90df5d2a9..63f8b007bdc51358d53cdd57b58c8fe1a21f617a 100644 (file)
                                tx-fifo-resize;
                                maximum-speed = "super-speed";
                                dr_mode = "otg";
+                               snps,dis_u3_susphy_quirk;
+                               snps,dis_u2_susphy_quirk;
                        };
                };
 
                                tx-fifo-resize;
                                maximum-speed = "high-speed";
                                dr_mode = "otg";
+                               snps,dis_u3_susphy_quirk;
+                               snps,dis_u2_susphy_quirk;
                        };
                };
 
                                tx-fifo-resize;
                                maximum-speed = "high-speed";
                                dr_mode = "otg";
+                               snps,dis_u3_susphy_quirk;
+                               snps,dis_u2_susphy_quirk;
                        };
                };
 
index d238676a910753a4d7b2ff9e80d4599fe36da057..e4d3aecc4ed2c0fd61b1c68a93f20a24b808bd8c 100644 (file)
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fa0000 0x4000>;
-                               clocks = <&clks 106>, <&clks 36>;
+                               clocks = <&clks 106>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <36>;
                        };
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fa8000 0x4000>;
-                               clocks = <&clks 107>, <&clks 36>;
+                               clocks = <&clks 107>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <41>;
                        };
                        pwm4: pwm@53fc8000 {
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                reg = <0x53fc8000 0x4000>;
-                               clocks = <&clks 108>, <&clks 36>;
+                               clocks = <&clks 108>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <42>;
                        };
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fe0000 0x4000>;
-                               clocks = <&clks 105>, <&clks 36>;
+                               clocks = <&clks 105>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <26>;
                        };
index 8c1febd7e3f2757176d1ba0ab14450133010d177..c108bb451337ee4c5108847192d2991d78a4c68b 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               ethphy1: ethernet-phy@0 {
-                       reg = <0>;
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
                };
 
-               ethphy2: ethernet-phy@1 {
-                       reg = <1>;
+               ethphy2: ethernet-phy@2 {
+                       reg = <2>;
                };
        };
 };
index ea282c7c0ca5645394a28e313fbad1deac339882..e2fed27122497b6f330904f43de95739b6cbe6cb 100644 (file)
                clock-frequency = <400000>;
 
                magnetometer@c {
-                       compatible = "ak,ak8975";
+                       compatible = "asahi-kasei,ak8975";
                        reg = <0xc>;
                        interrupt-parent = <&gpio>;
                        interrupts = <TEGRA_GPIO(N, 5) IRQ_TYPE_LEVEL_HIGH>;
index 4176df721bf09bace95bad96d1c194e5b6b7a038..1a0045abead7562be1e27163e0aee3c6afbe9b40 100644 (file)
        .endm
 
        .macro  restore_user_regs, fast = 0, offset = 0
-       ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
-       ldr     lr, [sp, #\offset + S_PC]!      @ get pc
+       mov     r2, sp
+       ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
+       ldr     lr, [r2, #\offset + S_PC]!      @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
-       strex   r1, r2, [sp]                    @ clear the exclusive monitor
+       strex   r1, r2, [r2]                    @ clear the exclusive monitor
 #endif
        .if     \fast
-       ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
+       ldmdb   r2, {r1 - lr}^                  @ get calling r1 - lr
        .else
-       ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
+       ldmdb   r2, {r0 - lr}^                  @ get calling r0 - lr
        .endif
        mov     r0, r0                          @ ARMv5T and earlier require a nop
                                                @ after ldm {}^
-       add     sp, sp, #S_FRAME_SIZE - S_PC
+       add     sp, sp, #\offset + S_FRAME_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
 
index f7c65adaa428c9eabd2c5080ae097a374c2206f5..557e128e4df08ce711d4bab89952eae0f1d6d7ea 100644 (file)
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
                ret = 1;
        }
 
-       if (left > (s64)armpmu->max_period)
-               left = armpmu->max_period;
+       /*
+        * Limit the maximum period to prevent the counter value
+        * from overtaking the one we are about to program. In
+        * effect we are reducing max_period to account for
+        * interrupt latency (and we are being very conservative).
+        */
+       if (left > (armpmu->max_period >> 1))
+               left = armpmu->max_period >> 1;
 
        local64_set(&hwc->prev_count, (u64)-left);
 
index 715ae19bc7c87302350093b6894251c4519ea957..e55408e965596964ff8c8708dcfec529a559b1bc 100644 (file)
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
 
        /*
         * Ensure that start/size are aligned to a page boundary.
-        * Size is appropriately rounded down, start is rounded up.
+        * Size is rounded down, start is rounded up.
         */
-       size -= start & ~PAGE_MASK;
        aligned_start = PAGE_ALIGN(start);
+       if (aligned_start > start + size)
+               size = 0;
+       else
+               size -= aligned_start - start;
 
 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
index 3585cb394e9b952388e4f26bfb5e950bfd0ad9eb..caa21e9b8cd9819de5ff0c0e505a8a937940dfae 100644 (file)
@@ -246,9 +246,14 @@ static int coherency_type(void)
        return type;
 }
 
+/*
+ * As a precaution, we currently completely disable hardware I/O
+ * coherency, until enough testing is done with automatic I/O
+ * synchronization barriers to validate that it is a proper solution.
+ */
 int coherency_available(void)
 {
-       return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+       return false;
 }
 
 int __init coherency_init(void)
index db57741c9c8ae69175f007ddca876c2bd8afe9cc..64e44d6d07c0c89ba5aa4fc3114111b32b261857 100644 (file)
@@ -211,6 +211,7 @@ extern struct device *omap2_get_iva_device(void);
 extern struct device *omap2_get_l3_device(void);
 extern struct device *omap4_get_dsp_device(void);
 
+unsigned int omap4_xlate_irq(unsigned int hwirq);
 void omap_gic_of_init(void);
 
 #ifdef CONFIG_CACHE_L2X0
index b7cb44abe49b35a7a03c3c9f172eca5daeb7b4a7..cc30e49a4cc278d08a44895fbf28797a94b6cfbd 100644 (file)
@@ -256,6 +256,38 @@ static int __init omap4_sar_ram_init(void)
 }
 omap_early_initcall(omap4_sar_ram_init);
 
+static struct of_device_id gic_match[] = {
+       { .compatible = "arm,cortex-a9-gic", },
+       { .compatible = "arm,cortex-a15-gic", },
+       { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int omap4_xlate_irq(unsigned int hwirq)
+{
+       struct of_phandle_args irq_data;
+       unsigned int irq;
+
+       if (!gic_node)
+               gic_node = of_find_matching_node(NULL, gic_match);
+
+       if (WARN_ON(!gic_node))
+               return hwirq;
+
+       irq_data.np = gic_node;
+       irq_data.args_count = 3;
+       irq_data.args[0] = 0;
+       irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
+       irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+       irq = irq_create_of_mapping(&irq_data);
+       if (WARN_ON(!irq))
+               irq = hwirq;
+
+       return irq;
+}
+
 void __init omap_gic_of_init(void)
 {
        struct device_node *np;
index cbb908dc5cf0e09bec45ce7fbf7814936883d9fc..9025ffffd2dc1d066fcb54a2cf44f2bf9a73525c 100644 (file)
@@ -3534,9 +3534,15 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
 
        mpu_irqs_cnt = _count_mpu_irqs(oh);
        for (i = 0; i < mpu_irqs_cnt; i++) {
+               unsigned int irq;
+
+               if (oh->xlate_irq)
+                       irq = oh->xlate_irq((oh->mpu_irqs + i)->irq);
+               else
+                       irq = (oh->mpu_irqs + i)->irq;
                (res + r)->name = (oh->mpu_irqs + i)->name;
-               (res + r)->start = (oh->mpu_irqs + i)->irq;
-               (res + r)->end = (oh->mpu_irqs + i)->irq;
+               (res + r)->start = irq;
+               (res + r)->end = irq;
                (res + r)->flags = IORESOURCE_IRQ;
                r++;
        }
index 35ca6efbec31eb533ce039761024a7260371b2a1..5b42fafcaf55102fc5631b4095b920353c4329c8 100644 (file)
@@ -676,6 +676,7 @@ struct omap_hwmod {
        spinlock_t                      _lock;
        struct list_head                node;
        struct omap_hwmod_ocp_if        *_mpu_port;
+       unsigned int                    (*xlate_irq)(unsigned int);
        u16                             flags;
        u8                              mpu_rt_idx;
        u8                              response_lat;
index c314b3c31117e8cbee248db6aec7c0feb64f4fa9..f5e68a7820251360dc1aad459e259ee1c6d217ae 100644 (file)
@@ -479,6 +479,7 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
        .class          = &omap44xx_dma_hwmod_class,
        .clkdm_name     = "l3_dma_clkdm",
        .mpu_irqs       = omap44xx_dma_system_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .main_clk       = "l3_div_ck",
        .prcm = {
                .omap4 = {
@@ -640,6 +641,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
        .class          = &omap44xx_dispc_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_dispc_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_dispc_sdma_reqs,
        .main_clk       = "dss_dss_clk",
        .prcm = {
@@ -693,6 +695,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
        .class          = &omap44xx_dsi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_dsi1_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_dsi1_sdma_reqs,
        .main_clk       = "dss_dss_clk",
        .prcm = {
@@ -726,6 +729,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
        .class          = &omap44xx_dsi_hwmod_class,
        .clkdm_name     = "l3_dss_clkdm",
        .mpu_irqs       = omap44xx_dss_dsi2_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_dsi2_sdma_reqs,
        .main_clk       = "dss_dss_clk",
        .prcm = {
@@ -784,6 +788,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
         */
        .flags          = HWMOD_SWSUP_SIDLE,
        .mpu_irqs       = omap44xx_dss_hdmi_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .sdma_reqs      = omap44xx_dss_hdmi_sdma_reqs,
        .main_clk       = "dss_48mhz_clk",
        .prcm = {
index 3e9523084b2ace3005adbd18ceae347eaef3e66a..7c3fac035e936884febd606bcb9d0218428fd91c 100644 (file)
@@ -288,6 +288,7 @@ static struct omap_hwmod omap54xx_dma_system_hwmod = {
        .class          = &omap54xx_dma_hwmod_class,
        .clkdm_name     = "dma_clkdm",
        .mpu_irqs       = omap54xx_dma_system_irqs,
+       .xlate_irq      = omap4_xlate_irq,
        .main_clk       = "l3_iclk_div",
        .prcm = {
                .omap4 = {
index a8e4b582c527476972de36917c144570dd3665b4..6163d66102a3561890240487a592964874cb260c 100644 (file)
@@ -498,6 +498,7 @@ struct omap_prcm_irq_setup {
        u8 nr_irqs;
        const struct omap_prcm_irq *irqs;
        int irq;
+       unsigned int (*xlate_irq)(unsigned int);
        void (*read_pending_irqs)(unsigned long *events);
        void (*ocp_barrier)(void);
        void (*save_and_clear_irqen)(u32 *saved_mask);
index cc170fb81ff76dc018ad6eee2c0e9931ab575f08..408c64efb80700868fa4c8b0138a2763a78bc161 100644 (file)
@@ -49,6 +49,7 @@ static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
        .irqs                   = omap4_prcm_irqs,
        .nr_irqs                = ARRAY_SIZE(omap4_prcm_irqs),
        .irq                    = 11 + OMAP44XX_IRQ_GIC_START,
+       .xlate_irq              = omap4_xlate_irq,
        .read_pending_irqs      = &omap44xx_prm_read_pending_irqs,
        .ocp_barrier            = &omap44xx_prm_ocp_barrier,
        .save_and_clear_irqen   = &omap44xx_prm_save_and_clear_irqen,
@@ -751,8 +752,10 @@ static int omap44xx_prm_late_init(void)
                }
 
                /* Once OMAP4 DT is filled as well */
-               if (irq_num >= 0)
+               if (irq_num >= 0) {
                        omap4_prcm_irq_setup.irq = irq_num;
+                       omap4_prcm_irq_setup.xlate_irq = NULL;
+               }
        }
 
        omap44xx_prm_enable_io_wakeup();
index 779940cb6e5651d4d5c486878b5cbbef060af1dc..dea2833ca627c84ca67db08d77c24f834c3a0340 100644 (file)
@@ -187,6 +187,7 @@ int omap_prcm_event_to_irq(const char *name)
  */
 void omap_prcm_irq_cleanup(void)
 {
+       unsigned int irq;
        int i;
 
        if (!prcm_irq_setup) {
@@ -211,7 +212,11 @@ void omap_prcm_irq_cleanup(void)
        kfree(prcm_irq_setup->priority_mask);
        prcm_irq_setup->priority_mask = NULL;
 
-       irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+       if (prcm_irq_setup->xlate_irq)
+               irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
+       else
+               irq = prcm_irq_setup->irq;
+       irq_set_chained_handler(irq, NULL);
 
        if (prcm_irq_setup->base_irq > 0)
                irq_free_descs(prcm_irq_setup->base_irq,
@@ -259,6 +264,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
        int offset, i;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
+       unsigned int irq;
 
        if (!irq_setup)
                return -EINVAL;
@@ -298,7 +304,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
                                1 << (offset & 0x1f);
        }
 
-       irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+       if (irq_setup->xlate_irq)
+               irq = irq_setup->xlate_irq(irq_setup->irq);
+       else
+               irq = irq_setup->irq;
+       irq_set_chained_handler(irq, omap_prcm_irq_handler);
 
        irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
                0);
index 4457e731f7a4f0029cb3fe4e3811d5396097d2f2..292eca0e78ed07e3f7358c99671f8573b16aaecf 100644 (file)
@@ -66,19 +66,24 @@ void __init omap_pmic_init(int bus, u32 clkrate,
        omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
 }
 
+#ifdef CONFIG_ARCH_OMAP4
 void __init omap4_pmic_init(const char *pmic_type,
                    struct twl4030_platform_data *pmic_data,
                    struct i2c_board_info *devices, int nr_devices)
 {
        /* PMIC part*/
+       unsigned int irq;
+
        omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
        omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
-       omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
+       irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
+       omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
 
        /* Register additional devices on i2c1 bus if needed */
        if (devices)
                i2c_register_board_info(1, devices, nr_devices);
 }
+#endif
 
 void __init omap_pmic_late_init(void)
 {
index 170bd146ba1796b801f27e0ca74e2cfd79f0c7e8..cef8895a9b8271dcd27549b7a5f6209cc4cb9abb 100644 (file)
@@ -576,11 +576,18 @@ void __init r8a7778_init_irq_extpin(int irlm)
 void __init r8a7778_init_irq_dt(void)
 {
        void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       void __iomem *gic_dist_base = ioremap_nocache(0xfe438000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xfe430000, 0x1000);
+#endif
 
        BUG_ON(!base);
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
        irqchip_init();
-
+#endif
        /* route all interrupts to ARM */
        __raw_writel(0x73ffffff, base + INT2NTSR0);
        __raw_writel(0xffffffff, base + INT2NTSR1);
index 6156d172cf3108d79c44a397931037a53f1c5266..27dceaf9e688c174910004fc80598f4beb130075 100644 (file)
@@ -720,10 +720,17 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
 
 void __init r8a7779_init_irq_dt(void)
 {
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
+#endif
        gic_arch_extn.irq_set_wake = r8a7779_set_wake;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
        irqchip_init();
-
+#endif
        /* route all interrupts to ARM */
        __raw_writel(0xffffffff, INT2NTSR0);
        __raw_writel(0x3fffffff, INT2NTSR1);
index 1c43cec971b5cd7196b367d1917baa25a10078bd..0666888639202f4beeb0d0086d969841a96f1700 100644 (file)
@@ -85,6 +85,7 @@ vdso_install:
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
+       $(Q)$(MAKE) $(clean)=$(boot)/dts
 
 define archhelp
   echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
index 3b8d427c398599c85c8d12e3f67f20095ec43bac..c62b0f4d9ef65bf4dfa8503d581c159e5fe9b16b 100644 (file)
@@ -3,6 +3,4 @@ dts-dirs += apm
 dts-dirs += arm
 dts-dirs += cavium
 
-always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
-clean-files    := *.dtb
index cb3073e4e7a83e555992ea544abb658607512ab5..d429129ecb3d03fe3a7460ecd3ed9d02950cb193 100644 (file)
@@ -22,7 +22,7 @@
        };
 
        chosen {
-               stdout-path = &soc_uart0;
+               stdout-path = "serial0:115200n8";
        };
 
        psci {
index cf33f33333ccd230720207a071399863bd4eb0de..d54dc9ac4b70874af52e4c94054c850b576c9f0d 100644 (file)
@@ -15,6 +15,7 @@
  */
 #include <linux/debugfs.h>
 #include <linux/fs.h>
+#include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
index 2c9412908024d4ce88d8945cf953b0d24437dcaf..164efa009e5be776a52ae16b024d71c8e288ab5c 100644 (file)
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
        vfree(mod->arch.syminfo);
        mod->arch.syminfo = NULL;
-
-       vfree(module_region);
 }
 
 static inline int check_rela(Elf32_Rela *rela, struct module *module,
@@ -291,12 +289,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 
        return ret;
 }
-
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-                   struct module *module)
-{
-       vfree(module->arch.syminfo);
-       module->arch.syminfo = NULL;
-
-       return 0;
-}
index 08a313fc22418c326987dc36c9c9203de639e270..f772068d9e797e0c236e512ac80254063de07802 100644 (file)
@@ -604,7 +604,7 @@ static ssize_t __sync_serial_read(struct file *file,
                                  struct timespec *ts)
 {
        unsigned long flags;
-       int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+       int dev = MINOR(file_inode(file)->i_rdev);
        int avail;
        struct sync_port *port;
        unsigned char *start;
index 51123f985eb5862a83bf7afcfc22e4bce2ecbda7..af04cb6b6dc9a3777930bd6974a401988cccafcc 100644 (file)
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
 }
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
        kfree(module_region);
 }
index 67b1d16857593ddc083bf45cc8fd9e19d5430994..0635bd6c2af392fc372b0e02002b1a669a88e33c 100644 (file)
@@ -94,7 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                r = &dev->resource[idx];
                                if (!r->start)
                                        continue;
-                               pci_claim_resource(dev, idx);
+                               pci_claim_bridge_resource(dev, idx);
                        }
                }
                pcibios_allocate_bus_resources(&bus->children);
index 24603be24c14acbbcb29874b45fb55518fed3cc7..29754aae5177a94ec257021ab00c57f688a61de8 100644 (file)
@@ -305,14 +305,12 @@ plt_target (struct plt_entry *plt)
 #endif /* !USE_BRL */
 
 void
-module_free (struct module *mod, void *module_region)
+module_arch_freeing_init (struct module *mod)
 {
-       if (mod && mod->arch.init_unw_table &&
-           module_region == mod->module_init) {
+       if (mod->arch.init_unw_table) {
                unw_remove_unwind_table(mod->arch.init_unw_table);
                mod->arch.init_unw_table = NULL;
        }
-       vfree(module_region);
 }
 
 /* Have we already seen one of these relocations? */
index 291a582777cf3dd0320f4757ea1f9c1fc39dd17c..900cc93e540961903816fd8d69769a3311fdbbf3 100644 (file)
@@ -487,45 +487,39 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
        return 0;
 }
 
-static int is_valid_resource(struct pci_dev *dev, int idx)
+void pcibios_fixup_device_resources(struct pci_dev *dev)
 {
-       unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-       struct resource *devr = &dev->resource[idx], *busr;
+       int idx;
 
        if (!dev->bus)
-               return 0;
-
-       pci_bus_for_each_resource(dev->bus, busr, i) {
-               if (!busr || ((busr->flags ^ devr->flags) & type_mask))
-                       continue;
-               if ((devr->start) && (devr->start >= busr->start) &&
-                               (devr->end <= busr->end))
-                       return 1;
-       }
-       return 0;
-}
+               return;
 
-static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
-{
-       int i;
+       for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
 
-       for (i = start; i < limit; i++) {
-               if (!dev->resource[i].flags)
+               if (!r->flags || r->parent || !r->start)
                        continue;
-               if ((is_valid_resource(dev, i)))
-                       pci_claim_resource(dev, i);
-       }
-}
 
-void pcibios_fixup_device_resources(struct pci_dev *dev)
-{
-       pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+               pci_claim_resource(dev, idx);
+       }
 }
 EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
 
 static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
 {
-       pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+       int idx;
+
+       if (!dev->bus)
+               return;
+
+       for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
+
+               if (!r->flags || r->parent || !r->start)
+                       continue;
+
+               pci_claim_bridge_resource(dev, idx);
+       }
 }
 
 /*
index b30e41c0c0335cf2ab79e716c9c41c0ebced18e8..48528fb81effa07ef5c992c08efba2cad6a75ff0 100644 (file)
@@ -1026,6 +1026,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         pr, (pr && pr->name) ? pr->name : "nil");
 
                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+                       struct pci_dev *dev = bus->self;
+
                        if (request_resource(pr, res) == 0)
                                continue;
                        /*
@@ -1035,6 +1037,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         */
                        if (reparent_resources(pr, res) == 0)
                                continue;
+
+                       if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+                           pci_claim_bridge_resource(dev,
+                                                i + PCI_BRIDGE_RESOURCES) == 0)
+                               continue;
+
                }
                pr_warn("PCI: Cannot allocate resource region ");
                pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1227,7 +1235,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
                                 (unsigned long long)r->end,
                                 (unsigned int)r->flags);
 
-                       pci_claim_resource(dev, i);
+                       if (pci_claim_resource(dev, i) == 0)
+                               continue;
+
+                       pci_claim_bridge_resource(dev, i);
                }
        }
 
index 9fd6834a2172ac3cd77115d604a2f07ac8370bc2..5d6139390bf830adf503d67d004a5322d8eb7ad4 100644 (file)
@@ -1388,7 +1388,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
-               module_free(NULL, fp->bpf_func);
+               module_memfree(fp->bpf_func);
 
        bpf_prog_unlock_free(fp);
 }
index febb9cd83177177e4c0edffa6d4c2d34b4a2b2a0..b5b036f64275b0fe0176132b74f4715f185f7503 100644 (file)
@@ -106,7 +106,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                if (!r->flags)
                                        continue;
                                if (!r->start ||
-                                   pci_claim_resource(dev, idx) < 0) {
+                                   pci_claim_bridge_resource(dev, idx) < 0) {
                                        printk(KERN_ERR "PCI:"
                                               " Cannot allocate resource"
                                               " region %d of bridge %s\n",
index 6b4339f8c9c2e1e757f13fddccb9053c2c292b0c..471ff398090cd88e89089b5387d1a1f4dc0dcc0a 100644 (file)
@@ -281,42 +281,37 @@ static int __init pci_check_direct(void)
        return -ENODEV;
 }
 
-static int is_valid_resource(struct pci_dev *dev, int idx)
+static void pcibios_fixup_device_resources(struct pci_dev *dev)
 {
-       unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-       struct resource *devr = &dev->resource[idx], *busr;
-
-       if (dev->bus) {
-               pci_bus_for_each_resource(dev->bus, busr, i) {
-                       if (!busr || (busr->flags ^ devr->flags) & type_mask)
-                               continue;
-
-                       if (devr->start &&
-                           devr->start >= busr->start &&
-                           devr->end <= busr->end)
-                               return 1;
-               }
-       }
+       int idx;
 
-       return 0;
+       if (!dev->bus)
+               return;
+
+       for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
+
+               if (!r->flags || r->parent || !r->start)
+                       continue;
+
+               pci_claim_resource(dev, idx);
+       }
 }
 
-static void pcibios_fixup_device_resources(struct pci_dev *dev)
+static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
 {
-       int limit, i;
+       int idx;
 
-       if (dev->bus->number != 0)
+       if (!dev->bus)
                return;
 
-       limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
-               PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
+       for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+               struct resource *r = &dev->resource[idx];
 
-       for (i = 0; i < limit; i++) {
-               if (!dev->resource[i].flags)
+               if (!r->flags || r->parent || !r->start)
                        continue;
 
-               if (is_valid_resource(dev, i))
-                       pci_claim_resource(dev, i);
+               pci_claim_bridge_resource(dev, idx);
        }
 }
 
@@ -330,7 +325,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 
        if (bus->self) {
                pci_read_bridge_bases(bus);
-               pcibios_fixup_device_resources(bus->self);
+               pcibios_fixup_bridge_resources(bus->self);
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list)
index cc924a38f22a0fef473a3c6d871789fdacf8f7f9..e2e3f13f98d55a811ef30db3493dbd71a8e86eb0 100644 (file)
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
 }
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
        kfree(module_region);
 }
index f9d27883a7148729545489c14bb678122da74e88..2d0ea25be1717de06d8cd138032dc5c7c5f3970d 100644 (file)
@@ -200,7 +200,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 
        /* Set up to return from userspace; jump to fixed address sigreturn
           trampoline on kuser page.  */
-       regs->ra = (unsigned long) (0x1040);
+       regs->ra = (unsigned long) (0x1044);
 
        /* Set up registers for signal handler */
        regs->sp = (unsigned long) frame;
index 50dfafc3f2c103aa3567f2fb88aeb94865847517..5822e8e200e6be1ab110b15baf94d4581624099c 100644 (file)
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
 }
 #endif
 
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
        kfree(mod->arch.section);
        mod->arch.section = NULL;
-
-       vfree(module_region);
 }
 
 /* Additional bytes needed in front of individual sections */
index 37d512d35943400737d36be40448c063f9ee76f0..2a525c938158e7937445837ef6448354b5147758 100644 (file)
@@ -1184,6 +1184,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         pr, (pr && pr->name) ? pr->name : "nil");
 
                if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+                       struct pci_dev *dev = bus->self;
+
                        if (request_resource(pr, res) == 0)
                                continue;
                        /*
@@ -1193,6 +1195,11 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
                         */
                        if (reparent_resources(pr, res) == 0)
                                continue;
+
+                       if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+                           pci_claim_bridge_resource(dev,
+                                               i + PCI_BRIDGE_RESOURCES) == 0)
+                               continue;
                }
                pr_warning("PCI: Cannot allocate resource region "
                           "%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1401,7 +1408,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
                                 (unsigned long long)r->end,
                                 (unsigned int)r->flags);
 
-                       pci_claim_resource(dev, i);
+                       if (pci_claim_resource(dev, i) == 0)
+                               continue;
+
+                       pci_claim_bridge_resource(dev, i);
                }
        }
 
index 1ca125b9c226070eefca744857b203648055b131..d1916b577f2c9a71c3fb3a5ee419925f070412d0 100644 (file)
@@ -699,7 +699,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
-               module_free(NULL, fp->bpf_func);
+               module_memfree(fp->bpf_func);
 
        bpf_prog_unlock_free(fp);
 }
index b700a329c31d448444d0f48e0fdd0b9176f1a825..d2de7d5d7574ca48fb1f31aa5c6892a510107ade 100644 (file)
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
         * all cpus at boot. Get these reg values of current cpu and use the
         * same accross all cpus.
         */
-       uint64_t lpcr_val = mfspr(SPRN_LPCR);
+       uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
        uint64_t hid0_val = mfspr(SPRN_HID0);
        uint64_t hid1_val = mfspr(SPRN_HID1);
        uint64_t hid4_val = mfspr(SPRN_HID4);
index 5b150f0c5df94a39587ea6b519e12192d28bce3c..13c6e200b24ec5bc2a7927308eaf579f1a904cd5 100644 (file)
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
+       args.token = cpu_to_be32(args.token);
        args.nargs = cpu_to_be32(3);
        args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
index b89b59158b9592317479422b14cfb8e7779fa722..409d152585bea67a6aca845bb0c3e4db130b1505 100644 (file)
@@ -55,14 +55,10 @@ void *module_alloc(unsigned long size)
 }
 #endif
 
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
 {
-       if (mod) {
-               vfree(mod->arch.syminfo);
-               mod->arch.syminfo = NULL;
-       }
-       vfree(module_region);
+       vfree(mod->arch.syminfo);
+       mod->arch.syminfo = NULL;
 }
 
 static void check_rela(Elf_Rela *rela, struct module *me)
index 7e45d13816c183e46962e75c3dc4817d72d5b223..ba44c9f5534633a2da3133714f6b018ba6a69545 100644 (file)
@@ -22,8 +22,8 @@
  * skb_copy_bits takes 4 parameters:
  *   %r2 = skb pointer
  *   %r3 = offset into skb data
- *   %r4 = length to copy
- *   %r5 = pointer to temp buffer
+ *   %r4 = pointer to temp buffer
+ *   %r5 = length to copy
  */
 #define SKBDATA        %r8
 
@@ -44,8 +44,9 @@ ENTRY(sk_load_word)
 
 sk_load_word_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,4                   # 4 bytes
-       la      %r5,160(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,160(%r15)           # pointer to temp buffer
+       lghi    %r5,4                   # 4 bytes
        brasl   %r14,skb_copy_bits      # get data from skb
        l       %r5,160(%r15)           # load result from temp buffer
        ltgr    %r2,%r2                 # set cc to (%r2 != 0)
@@ -69,8 +70,9 @@ ENTRY(sk_load_half)
 
 sk_load_half_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,2                   # 2 bytes
-       la      %r5,162(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,162(%r15)           # pointer to temp buffer
+       lghi    %r5,2                   # 2 bytes
        brasl   %r14,skb_copy_bits      # get data from skb
        xc      160(2,%r15),160(%r15)
        l       %r5,160(%r15)           # load result from temp buffer
@@ -95,8 +97,9 @@ ENTRY(sk_load_byte)
 
 sk_load_byte_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,1                   # 1 bytes
-       la      %r5,163(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,163(%r15)           # pointer to temp buffer
+       lghi    %r5,1                   # 1 byte
        brasl   %r14,skb_copy_bits      # get data from skb
        xc      160(3,%r15),160(%r15)
        l       %r5,160(%r15)           # load result from temp buffer
@@ -104,11 +107,11 @@ sk_load_byte_slow:
        lgr     %r2,%r9                 # restore %r2
        br      %r8
 
-       /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
+       /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
 ENTRY(sk_load_byte_msh)
        llgfr   %r1,%r3                 # extend offset
        clr     %r11,%r3                # hlen < offset ?
-       jle     sk_load_byte_slow
+       jle     sk_load_byte_msh_slow
        lhi     %r12,0
        ic      %r12,0(%r1,%r10)        # get byte from skb
        nill    %r12,0x0f
@@ -118,8 +121,9 @@ ENTRY(sk_load_byte_msh)
 
 sk_load_byte_msh_slow:
        lgr     %r9,%r2                 # save %r2
-       lhi     %r4,2                   # 2 bytes
-       la      %r5,162(%r15)           # pointer to temp buffer
+       lgr     %r3,%r1                 # offset
+       la      %r4,163(%r15)           # pointer to temp buffer
+       lghi    %r5,1                   # 1 byte
        brasl   %r14,skb_copy_bits      # get data from skb
        xc      160(3,%r15),160(%r15)
        l       %r12,160(%r15)          # load result from temp buffer
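
The reordering in the three slow paths above matches the C prototype of the helper they call, int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len), whose arguments land in %r2-%r5 under the s390 calling convention. A minimal C sketch of what the corrected word slow path now does (the helper name is illustrative; the real assembly branches to the JIT's abort label on error instead of returning):

#include <linux/skbuff.h>

/* Illustrative only: %r2 = skb, %r3 = offset, %r4 = buffer, %r5 = length. */
static int load_word_slow(const struct sk_buff *skb, int offset, u32 *val)
{
	return skb_copy_bits(skb, offset, val, sizeof(*val));
}
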
index 524496d47ef506d0ca888356df21fd4bb7e25053..bbd1981cc15007fcdb779ca201553ed28012f20d 100644 (file)
@@ -448,15 +448,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
                mask = 0x800000; /* je */
 kbranch:       /* Emit compare if the branch targets are different */
                if (filter->jt != filter->jf) {
-                       if (K <= 16383)
-                               /* chi %r5,<K> */
-                               EMIT4_IMM(0xa75e0000, K);
-                       else if (test_facility(21))
+                       if (test_facility(21))
                                /* clfi %r5,<K> */
                                EMIT6_IMM(0xc25f0000, K);
                        else
-                               /* c %r5,<d(K)>(%r13) */
-                               EMIT4_DISP(0x5950d000, EMIT_CONST(K));
+                               /* cl %r5,<d(K)>(%r13) */
+                               EMIT4_DISP(0x5550d000, EMIT_CONST(K));
                }
 branch:                if (filter->jt == filter->jf) {
                        if (filter->jt == 0)
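
The switch from chi/c to clfi/cl matters because classic BPF compares A against K as unsigned 32-bit values, and chi additionally takes only a signed 16-bit immediate (which is why the removed path was guarded by K <= 16383). A small, self-contained illustration of the signed-versus-unsigned difference, independent of the JIT itself:

#include <stdbool.h>
#include <stdint.h>

/* cBPF JGE/JGT semantics: unsigned 32-bit comparison (what clfi/cl do). */
static bool jge_unsigned(uint32_t a, uint32_t k)
{
	return a >= k;
}

/* A signed comparison goes wrong once the top bit is set: for example
 * a = 0x80000000 and k = 1 compare as a < k here, but a >= k above. */
static bool jge_signed(uint32_t a, uint32_t k)
{
	return (int32_t)a >= (int32_t)k;
}
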
index b36365f49478c573d715816d53582e2dfb153227..9ce5afe167ff509288b21605a2f9a35f96ff36dc 100644 (file)
@@ -639,7 +639,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
                                       (unsigned long long)r->end,
                                       (unsigned int)r->flags);
 
-                       pci_claim_resource(dev, i);
+                       if (pci_claim_resource(dev, i) == 0)
+                               continue;
+
+                       pci_claim_bridge_resource(dev, i);
                }
        }
 
index f33e7c7a3bf74d48232e0c9708877cb63a6a4aaa..7931eeeb649af45af45aaa49a20fa727a6aecd40 100644 (file)
@@ -776,7 +776,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf];
                                if (unlikely(proglen + ilen > oldproglen)) {
                                        pr_err("bpb_jit_compile fatal error\n");
                                        kfree(addrs);
-                                       module_free(NULL, image);
+                                       module_memfree(image);
                                        return;
                                }
                                memcpy(image + proglen, temp, ilen);
@@ -822,7 +822,7 @@ out:
 void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
-               module_free(NULL, fp->bpf_func);
+               module_memfree(fp->bpf_func);
 
        bpf_prog_unlock_free(fp);
 }
index 96447c9160a0697f5a756f0a2acc945c4cb3efb6..2305084c9b93b72df9f5fba6f6302037d0fb43a7 100644 (file)
@@ -74,7 +74,7 @@ error:
 
 
 /* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
 {
        vfree(module_region);
 
@@ -83,7 +83,7 @@ void module_free(struct module *mod, void *module_region)
                     0, 0, 0, NULL, NULL, 0);
 
        /*
-        * FIXME: If module_region == mod->module_init, trim exception
+        * FIXME: Add module_arch_freeing_init to trim exception
         * table entries.
         */
 }
index ba397bde79482043d46e0e90d0bd6d7e71daa110..0dc9d0144a27957d2bd2cdadf3b141a3195ccab0 100644 (file)
@@ -857,7 +857,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
-       depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
+       depends on X86_32 && !SMP && !X86_32_NON_STANDARD
        ---help---
          A local APIC (Advanced Programmable Interrupt Controller) is an
          integrated interrupt controller in the CPU. If you have a single-CPU
@@ -868,6 +868,10 @@ config X86_UP_APIC
          performance counters), and the NMI watchdog which detects hard
          lockups.
 
+config X86_UP_APIC_MSI
+       def_bool y
+       select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
+
 config X86_UP_IOAPIC
        bool "IO-APIC support on uniprocessors"
        depends on X86_UP_APIC
index d999398928bc81ba0957fa7af17b63016f73e54e..ad754b4411f7e42aeecd8ae41a2f67866f23d24f 100644 (file)
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO)   := lzo
 suffix-$(CONFIG_KERNEL_LZ4)    := lz4
 
 RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
-            perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+            $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
 
index dcc1c536cc212daffda92b903ea9111e36b2c64a..a950864a64dab3d558197c77bef3c56a07961494 100644 (file)
@@ -373,6 +373,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
                                  unsigned long output_len,
                                  unsigned long run_size)
 {
+       unsigned char *output_orig = output;
+
        real_mode = rmode;
 
        sanitize_boot_params(real_mode);
@@ -421,7 +423,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
        debug_putstr("\nDecompressing Linux... ");
        decompress(input_data, input_len, NULL, NULL, output, NULL, error);
        parse_elf(output);
-       handle_relocations(output, output_len);
+       /*
+        * 32-bit always performs relocations. 64-bit relocations are only
+        * needed if kASLR has chosen a different load address.
+        */
+       if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
+               handle_relocations(output, output_len);
        debug_putstr("done.\nBooting the kernel.\n");
        return output;
 }
index 0ab4f9fd268764114e3f252b07895c4bcaf63f90..3a45668f6dc38312bc9f5761214f076a144d00d4 100644 (file)
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
 
 extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
                                  int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
 
 static inline void disable_acpi(void)
 {
index 50d033a8947db64f8ad0270fd55f23faed683c46..a94b82e8f156f3888e0ab90ac879e39dd05ccec1 100644 (file)
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
                gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
 }
 
-#define _LDT_empty(info)                               \
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info)                                        \
        ((info)->base_addr              == 0    &&      \
         (info)->limit                  == 0    &&      \
         (info)->contents               == 0    &&      \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
         (info)->seg_not_present        == 1    &&      \
         (info)->useable                == 0)
 
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+       return (info->base_addr         == 0 &&
+               info->limit             == 0 &&
+               info->contents          == 0 &&
+               info->read_exec_only    == 0 &&
+               info->seg_32bit         == 0 &&
+               info->limit_in_pages    == 0 &&
+               info->seg_not_present   == 0 &&
+               info->useable           == 0);
+}
 
 static inline void clear_LDT(void)
 {
index 40269a2bf6f90f6761a1a27f91d013bd5ee09017..4b75d591eb5ed1e4757ef8b658d08a2ad02a84cd 100644 (file)
@@ -130,7 +130,25 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
 {
-       mpx_notify_unmap(mm, vma, start, end);
+       /*
+        * mpx_notify_unmap() goes and reads a rarely-hot
+        * cacheline in the mm_struct.  That can be expensive
+        * enough to be seen in profiles.
+        *
+        * The mpx_notify_unmap() call and its contents have been
+        * observed to affect munmap() performance on hardware
+        * where MPX is not present.
+        *
+        * The unlikely() optimizes for the fast case: no MPX
+        * in the CPU, or no MPX use in the process.  Even if
+        * we get this wrong (in the unlikely event that MPX
+        * is widely enabled on some system) the overhead of
+        * MPX itself (reading bounds tables) is expected to
+        * overwhelm the overhead of getting this unlikely()
+        * consistently wrong.
+        */
+       if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+               mpx_notify_unmap(mm, vma, start, end);
 }
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
index d1626364a28a16d0a04db86134ef09119e3e9a50..b9e30daa0881b3213bb9b6be8bf2bb1803cfba7b 100644 (file)
@@ -611,20 +611,20 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
-       int irq;
-
-       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
-               *irqp = gsi;
-       } else {
-               mutex_lock(&acpi_ioapic_lock);
-               irq = mp_map_gsi_to_irq(gsi,
-                                       IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
-               mutex_unlock(&acpi_ioapic_lock);
-               if (irq < 0)
-                       return -1;
-               *irqp = irq;
+       int rc, irq, trigger, polarity;
+
+       rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+       if (rc == 0) {
+               trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+               polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+               irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+               if (irq >= 0) {
+                       *irqp = irq;
+                       return 0;
+               }
        }
-       return 0;
+
+       return -1;
 }
 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
 
index a450373e8e91698dd235725e8d8f9f35b3cec7cc..939155ffdecec60628a06b2937604dd2f2f98813 100644 (file)
@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
        .rating         = 400, /* use this when running on Hyperv*/
        .read           = read_hv_clock,
        .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static void __init ms_hyperv_init_platform(void)
index 2142376dc8c6ce0a2ea8c5da5e42016545005d8f..8b7b0a51e742cd26defe12b535f37a865ee3c172 100644 (file)
@@ -674,7 +674,7 @@ static inline void *alloc_tramp(unsigned long size)
 }
 static inline void tramp_free(void *tramp)
 {
-       module_free(NULL, tramp);
+       module_memfree(tramp);
 }
 #else
 /* Trampolines can only be created if modules are supported */
index 6307a0f0cf17abc93aad4490c8bcdaccfdb29541..705ef8d48e2dc464936672fb54eea908f8f03b4e 100644 (file)
@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_puts(p, "  Machine check polls\n");
 #endif
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
-       seq_printf(p, "%*s: ", prec, "THR");
+       seq_printf(p, "%*s: ", prec, "HYP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
        seq_puts(p, "  Hypervisor callback interrupts\n");
index 4e942f31b1a7c9401a65fb37af093caab5ad0c2e..7fc5e843f247b358288b23e459eebfefcf6631f0 100644 (file)
@@ -29,7 +29,28 @@ static int get_free_idx(void)
 
 static bool tls_desc_okay(const struct user_desc *info)
 {
-       if (LDT_empty(info))
+       /*
+        * For historical reasons (i.e. no one ever documented how any
+        * of the segmentation APIs work), user programs can and do
+        * assume that a struct user_desc that's all zeros except for
+        * entry_number means "no segment at all".  This never actually
+        * worked.  In fact, up to Linux 3.19, a struct user_desc like
+        * this would create a 16-bit read-write segment with base and
+        * limit both equal to zero.
+        *
+        * That was close enough to "no segment at all" until we
+        * hardened this function to disallow 16-bit TLS segments.  Fix
+        * it up by interpreting these zeroed segments the way that they
+        * were almost certainly intended to be interpreted.
+        *
+        * The correct way to ask for "no segment at all" is to specify
+        * a user_desc that satisfies LDT_empty.  To keep everything
+        * working, we accept both.
+        *
+        * Note that there's a similar kludge in modify_ldt -- look at
+        * the distinction between modes 1 and 0x11.
+        */
+       if (LDT_empty(info) || LDT_zero(info))
                return true;
 
        /*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
        cpu = get_cpu();
 
        while (n-- > 0) {
-               if (LDT_empty(info))
+               if (LDT_empty(info) || LDT_zero(info))
                        desc->a = desc->b = 0;
                else
                        fill_ldt(desc, info);
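
As the long comment above spells out, the idiom being grandfathered in is a user_desc that is all zeros apart from entry_number. A minimal userspace sketch of that idiom (the helper name is made up; this is roughly the pattern the comment says programs rely on):

#include <asm/ldt.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: mark TLS slot 'idx' as "no segment at all" using
 * the historical all-zero descriptor that LDT_zero() now accepts. */
static int clear_tls_slot(int idx)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = idx;	/* every other field stays zero */

	return syscall(SYS_set_thread_area, &desc);
}
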
index b7e50bba3bbbb98066fac741d826e5f6f4d7e946..505449700e0cf4e66ea6284135482ac172fe756a 100644 (file)
@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
                        goto success;
                }
        }
-       pr_err("Fast TSC calibration failed\n");
+       pr_info("Fast TSC calibration failed\n");
        return 0;
 
 success:
index 169b09d76ddd83d3033d93d2b7eace6fada2331e..de12c1d379f16899645d96a2c3fd75663919c86d 100644 (file)
@@ -2348,7 +2348,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
         * Not recognized on AMD in compat mode (but is recognized in legacy
         * mode).
         */
-       if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
+       if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
            && !vendor_intel(ctxt))
                return emulate_ud(ctxt);
 
@@ -2359,25 +2359,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
        setup_syscalls_segments(ctxt, &cs, &ss);
 
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
-       switch (ctxt->mode) {
-       case X86EMUL_MODE_PROT32:
-               if ((msr_data & 0xfffc) == 0x0)
-                       return emulate_gp(ctxt, 0);
-               break;
-       case X86EMUL_MODE_PROT64:
-               if (msr_data == 0x0)
-                       return emulate_gp(ctxt, 0);
-               break;
-       default:
-               break;
-       }
+       if ((msr_data & 0xfffc) == 0x0)
+               return emulate_gp(ctxt, 0);
 
        ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
-       cs_sel = (u16)msr_data;
-       cs_sel &= ~SELECTOR_RPL_MASK;
+       cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
        ss_sel = cs_sel + 8;
-       ss_sel &= ~SELECTOR_RPL_MASK;
-       if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
+       if (efer & EFER_LMA) {
                cs.d = 0;
                cs.l = 1;
        }
@@ -2386,10 +2374,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
        ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
-       ctxt->_eip = msr_data;
+       ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
 
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
-       *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
+       *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+                                                             (u32)msr_data;
 
        return X86EMUL_CONTINUE;
 }
@@ -3791,8 +3780,8 @@ static const struct opcode group5[] = {
 };
 
 static const struct opcode group6[] = {
-       DI(Prot       sldt),
-       DI(Prot       str),
+       DI(Prot | DstMem,       sldt),
+       DI(Prot | DstMem,       str),
        II(Prot | Priv | SrcMem16, em_lldt, lldt),
        II(Prot | Priv | SrcMem16, em_ltr, ltr),
        N, N, N, N,
index 08a7d313538a72dfc51227b01546bab34a8f7bee..079c3b6a3ff181277a7cb4270895f27d9a1d6f8b 100644 (file)
@@ -43,7 +43,7 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
        [_PAGE_CACHE_MODE_WT]           = _PAGE_PCD,
        [_PAGE_CACHE_MODE_WP]           = _PAGE_PCD,
 };
-EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
+EXPORT_SYMBOL(__cachemode2pte_tbl);
 uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
        [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
@@ -54,7 +54,7 @@ uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
-EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
+EXPORT_SYMBOL(__pte2cachemode_tbl);
 
 static unsigned long __initdata pgt_buf_start;
 static unsigned long __initdata pgt_buf_end;
index 67ebf57512229a4a29bceda04324c177a587232f..c439ec47821601c5b594bc1eec5abc529c5fd012 100644 (file)
@@ -348,6 +348,12 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
        if (!cpu_feature_enabled(X86_FEATURE_MPX))
                return MPX_INVALID_BOUNDS_DIR;
 
+       /*
+        * 32-bit binaries on 64-bit kernels are currently
+        * unsupported.
+        */
+       if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
+               return MPX_INVALID_BOUNDS_DIR;
        /*
         * The bounds directory pointer is stored in a register
         * only accessible if we first do an xsave.
index edf299c8ff6c774dea8116092a41b15102d9cf1c..7ac68698406c3b35e5ce0b0e98c73c5441e869a3 100644 (file)
@@ -234,8 +234,13 @@ void pat_init(void)
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
 
        /* Boot CPU check */
-       if (!boot_pat_state)
+       if (!boot_pat_state) {
                rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+               if (!boot_pat_state) {
+                       pat_disable("PAT read returns always zero, disabled.");
+                       return;
+               }
+       }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
 
index 9b18ef315a559bca66ba227da77ac2d329475f19..349c0d32cc0b140222141cfec5fa29a8c6ddbace 100644 (file)
@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
                        continue;
                if (r->parent)  /* Already allocated */
                        continue;
-               if (!r->start || pci_claim_resource(dev, idx) < 0) {
+               if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
                        /*
                         * Something is wrong with the region.
                         * Invalidate the resource to prevent
index c489ef2c1a3915a0b22952520d89c82958a718c9..9098d880c476cf842598d32805ed25ac37fc9ae1 100644 (file)
@@ -458,6 +458,7 @@ int __init pci_xen_hvm_init(void)
         * just how GSIs get registered.
         */
        __acpi_register_gsi = acpi_register_gsi_xen_hvm;
+       __acpi_unregister_gsi = NULL;
 #endif
 
 #ifdef CONFIG_PCI_MSI
@@ -471,52 +472,6 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static __init void xen_setup_acpi_sci(void)
-{
-       int rc;
-       int trigger, polarity;
-       int gsi = acpi_sci_override_gsi;
-       int irq = -1;
-       int gsi_override = -1;
-
-       if (!gsi)
-               return;
-
-       rc = acpi_get_override_irq(gsi, &trigger, &polarity);
-       if (rc) {
-               printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
-                               " sci, rc=%d\n", rc);
-               return;
-       }
-       trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
-       polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
-       printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
-                       "polarity=%d\n", gsi, trigger, polarity);
-
-       /* Before we bind the GSI to a Linux IRQ, check whether
-        * we need to override it with bus_irq (IRQ) value. Usually for
-        * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
-        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
-        * but there are oddballs where the IRQ != GSI:
-        *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
-        * which ends up being: gsi_to_irq[9] == 20
-        * (which is what acpi_gsi_to_irq ends up calling when starting the
-        * the ACPI interpreter and keels over since IRQ 9 has not been
-        * setup as we had setup IRQ 20 for it).
-        */
-       if (acpi_gsi_to_irq(gsi, &irq) == 0) {
-               /* Use the provided value if it's valid. */
-               if (irq >= 0)
-                       gsi_override = irq;
-       }
-
-       gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
-       printk(KERN_INFO "xen: acpi sci %d\n", gsi);
-
-       return;
-}
-
 int __init pci_xen_initial_domain(void)
 {
        int irq;
@@ -527,8 +482,8 @@ int __init pci_xen_initial_domain(void)
        x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
        pci_msi_ignore_mask = 1;
 #endif
-       xen_setup_acpi_sci();
        __acpi_register_gsi = acpi_register_gsi_xen;
+       __acpi_unregister_gsi = NULL;
        /* Pre-allocate legacy irqs */
        for (irq = 0; irq < nr_legacy_irqs(); irq++) {
                int trigger, polarity;
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644 (file)
index 23210ba..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
-       if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
-               my $size = hex($1);
-               my $offset = hex($2);
-               $mem_size += $size;
-               if ($file_offset == 0) {
-                       $file_offset = $offset;
-               } elsif ($file_offset != $offset) {
-                       # BFD linker shows the same file offset in ELF.
-                       # Gold linker shows them as consecutive.
-                       next if ($file_offset + $mem_size == $offset + $size);
-
-                       printf STDERR "file_offset: 0x%lx\n", $file_offset;
-                       printf STDERR "mem_size: 0x%lx\n", $mem_size;
-                       printf STDERR "offset: 0x%lx\n", $offset;
-                       printf STDERR "size: 0x%lx\n", $size;
-
-                       die ".bss and .brk are non-contiguous\n";
-               }
-       }
-}
-
-if ($file_offset == 0) {
-       die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644 (file)
index 0000000..1a4c17b
--- /dev/null
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+       echo "Never found .bss or .brk file offset" >&2
+       exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+       # Gold linker shows them as consecutive.
+       endB=$(( $offsetB + $sizeB ))
+       if [ "$endB" != "$run_size" ] ; then
+               printf "sizeA: 0x%x\n" $sizeA >&2
+               printf "offsetA: 0x%x\n" $offsetA >&2
+               printf "sizeB: 0x%x\n" $sizeB >&2
+               printf "offsetB: 0x%x\n" $offsetB >&2
+               echo ".bss and .brk are non-contiguous" >&2
+               exit 1
+       fi
+fi
+
+printf "%d\n" $run_size
+exit 0
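
A worked example of the arithmetic, with invented numbers: if objdump -h reported .bss with size 0x100000 at file offset 0x1000000 and .brk with size 0x6000 at the same file offset (the BFD layout), the script would compute 0x1000000 + 0x100000 + 0x6000 = 0x1106000 and print it in decimal (17850368). With the gold layout the .brk offset would instead be 0x1100000, which still passes the consistency check because 0x1100000 + 0x6000 equals the same total.
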
index 1630a20d5dcfa550ebe9c8815927d51b70bd9d56..6774a0e698675927be5c78dc34b0087d873b5ebe 100644 (file)
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
+       struct request_queue *q;
+
+       q = container_of(kobj, struct request_queue, mq_kobj);
+       free_percpu(q->queue_ctx);
+}
+
+static void blk_mq_ctx_release(struct kobject *kobj)
+{
+       struct blk_mq_ctx *ctx;
+
+       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+       kobject_put(&ctx->queue->mq_kobj);
+}
+
+static void blk_mq_hctx_release(struct kobject *kobj)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+       kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
@@ -318,13 +338,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
-       .release        = blk_mq_sysfs_release,
+       .release        = blk_mq_ctx_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
-       .release        = blk_mq_sysfs_release,
+       .release        = blk_mq_hctx_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -355,6 +375,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
                return ret;
 
        hctx_for_each_ctx(hctx, ctx, i) {
+               kobject_get(&q->mq_kobj);
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
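
Together with the blk-mq.c hunk below, this moves the freeing of the per-CPU software contexts and the hardware contexts into kobject release callbacks, so the memory is not freed while sysfs may still reference it; the kobject_get() added in blk_mq_register_hctx() makes each ctx pin the queue's mq_kobj, whose release is now what frees q->queue_ctx. A stripped-down sketch of that general kobject lifetime pattern, with names unrelated to the block layer:

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
	struct kobject kobj;
	int data;
};

/* Runs only when the last reference is dropped via kobject_put(). */
static void foo_release(struct kobject *kobj)
{
	struct foo *f = container_of(kobj, struct foo, kobj);

	kfree(f);
}

static struct kobj_type foo_ktype = {
	.release = foo_release,
};
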
index 2f95747c287eac350b45cc272fcd3d6e9c43ee09..9ee3b87c44984d336dbd4c82572fd3a4c3d35e90 100644 (file)
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
+       queue_for_each_hw_ctx(q, hctx, i)
                free_cpumask_var(hctx->cpumask);
-               kfree(hctx);
-       }
 }
 
 static int blk_mq_init_hctx(struct request_queue *q,
@@ -2002,11 +2000,9 @@ void blk_mq_free_queue(struct request_queue *q)
 
        percpu_ref_exit(&q->mq_usage_counter);
 
-       free_percpu(q->queue_ctx);
        kfree(q->queue_hw_ctx);
        kfree(q->mq_map);
 
-       q->queue_ctx = NULL;
        q->queue_hw_ctx = NULL;
        q->mq_map = NULL;
 
index 5277a0ee57042b26cf78e6186a17899233801273..b1def411c0b89cbf7847b767063c5c2ab528e8a8 100644 (file)
@@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
        if (gsi >= 0) {
                acpi_unregister_gsi(gsi);
-               dev->irq = 0;
                dev->irq_managed = 0;
        }
 }
index cb529e9a82dd685b5b372bea2ed272c59fae5bc5..d826bf3e62c8621e8572ca9eabb7951d42c33eb7 100644 (file)
@@ -106,7 +106,7 @@ struct nvme_queue {
        dma_addr_t cq_dma_addr;
        u32 __iomem *q_db;
        u16 q_depth;
-       u16 cq_vector;
+       s16 cq_vector;
        u16 sq_head;
        u16 sq_tail;
        u16 cq_head;
index eb7682dc123be4ef22d3b0a05e305b5aa7b11045..81bf297f1034abd697135cea8c5c1db3c932cbff 100644 (file)
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
 }
 
 /* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
+ * isn't currently taking into account this special case, which means
+ * that when window 13 is actually used, the remap registers are left
+ * to 0, making the device using this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow up patch
+ * will correctly handle this window.
+*/
 static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
                                     const int win)
 {
        void __iomem *addr = mbus->mbuswins_base +
                mbus->soc->win_cfg_offset(win);
        u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+       if (win == 13)
+               return false;
+
        return !(ctrl & WIN_CTRL_ENABLE);
 }
 
index 0595dc6c453e6ee4a97cfb4dd30865f76c351366..f1e33d08dd834a27269062a0b42d8265fb39960c 100644 (file)
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
 }
 
 static void
-kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
+kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
 {
-       void __iomem *base = IOMEM(timer_base);
        int loop_limit = 4;
 
        /*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
         */
 
        while (--loop_limit) {
-               *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
-               *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
-               if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
+               *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
+               *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
+               if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
                        break;
        }
        if (!loop_limit) {
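
The loop touched by this cleanup is the usual way to read a 64-bit counter exposed as two 32-bit registers: re-read the high word until it is stable, so a carry between the two reads cannot yield a torn value. A generic sketch of the pattern (offsets and the helper name are made up; the driver above additionally bounds the number of retries):

#include <linux/io.h>
#include <linux/types.h>

#define CNT_HI_OFF	0x00	/* illustrative register offsets */
#define CNT_LO_OFF	0x04

static u64 read_split_counter(void __iomem *base)
{
	u32 hi, lo;

	do {
		hi = readl(base + CNT_HI_OFF);
		lo = readl(base + CNT_LO_OFF);
	} while (hi != readl(base + CNT_HI_OFF));

	return ((u64)hi << 32) | lo;
}
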
index 9403061a2acc78397dd686208642c71012b328e7..83564c9cfdbe3b18dfb07a3799b73b991ffe00c7 100644 (file)
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
        writel_relaxed(value, reg_base + offset);
 
        if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
-               stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
-               switch (offset & EXYNOS4_MCT_L_MASK) {
+               stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+               switch (offset & ~EXYNOS4_MCT_L_MASK) {
                case MCT_L_TCON_OFFSET:
                        mask = 1 << 3;          /* L_TCON write status */
                        break;
index 0f665b8f2461f00ee58bc38918c2b684b04e16d3..f150ca82bfaf106a7ef2c5a40dd12a1e098e39f0 100644 (file)
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 200;
-       ced->cpumask = cpumask_of(0);
+       ced->cpumask = cpu_possible_mask;
        ced->set_next_event = sh_tmu_clock_event_next;
        ced->set_mode = sh_tmu_clock_event_mode;
        ced->suspend = sh_tmu_clock_event_suspend;
index 1ba8332419faee78ce45f7179b379bd5a285ffb1..5bc32c26b9890eb1f1bf2cf70b1d7e7822bf64fc 100644 (file)
@@ -183,16 +183,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->shared_resources = *gpu_resources;
 
        /* calculate max size of mqds needed for queues */
-       size = max_num_of_processes *
-               max_num_of_queues_per_process *
-               kfd->device_info->mqd_size_aligned;
+       size = max_num_of_queues_per_device *
+                       kfd->device_info->mqd_size_aligned;
 
        /*
         * calculate max size of runlist packet.
         * There can be only 2 packets at once
         */
-       size += (max_num_of_processes * sizeof(struct pm4_map_process) +
-               max_num_of_processes * max_num_of_queues_per_process *
+       size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
+               max_num_of_queues_per_device *
                sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
 
        /* Add size of HIQ & DIQ */
index b189f9791c90fcad75ac28bacc480fec875fc47d..ecc78ece634c9c2856f617de506be1beb9d68041 100644 (file)
@@ -135,6 +135,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
        mutex_lock(&dqm->lock);
 
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
        if (list_empty(&qpd->queues_list)) {
                retval = allocate_vmid(dqm, qpd, q);
                if (retval != 0) {
@@ -161,8 +168,18 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
        list_add(&q->list, &qpd->queues_list);
        dqm->queue_count++;
+
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                dqm->sdma_queue_count++;
+
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
        return 0;
 }
@@ -297,6 +314,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
        if (list_empty(&qpd->queues_list))
                deallocate_vmid(dqm, qpd, q);
        dqm->queue_count--;
+
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -470,10 +496,14 @@ int init_pipelines(struct device_queue_manager *dqm,
 
        for (i = 0; i < pipes_num; i++) {
                inx = i + first_pipe;
+               /*
+                * HPD buffer on GTT is allocated by amdkfd, no need to waste
+                * space in GTT for pipelines we don't initialize
+                */
                pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
                pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
                /* = log2(bytes/4)-1 */
-               kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+               kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
                                CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
        }
 
@@ -488,8 +518,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
        pr_debug("kfd: In %s\n", __func__);
 
-       retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
-
+       retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
        return retval;
 }
 
@@ -744,6 +773,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
        pr_debug("kfd: In func %s\n", __func__);
 
        mutex_lock(&dqm->lock);
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        list_add(&kq->list, &qpd->priv_queue_list);
        dqm->queue_count++;
        qpd->is_debug = true;
@@ -767,6 +811,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
        dqm->queue_count--;
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, false);
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type.
+        */
+       dqm->total_queue_count--;
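PLACEHOLDER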
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
        mutex_unlock(&dqm->lock);
 }
 
@@ -793,6 +844,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
        mutex_lock(&dqm->lock);
 
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               retval = -EPERM;
+               goto out;
+       }
+
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                select_sdma_engine_id(q);
 
@@ -817,6 +875,14 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                        dqm->sdma_queue_count++;
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
 
 out:
        mutex_unlock(&dqm->lock);
@@ -958,6 +1024,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
 
        return 0;
index e7b17b28330e1496789d421caf6c3a8274346bb1..d64f86cda34f5a155e176526d5f3a3463b96c826 100644 (file)
@@ -144,6 +144,7 @@ struct device_queue_manager {
        unsigned int            processes_count;
        unsigned int            queue_count;
        unsigned int            sdma_queue_count;
+       unsigned int            total_queue_count;
        unsigned int            next_pipe_to_allocate;
        unsigned int            *allocated_queues;
        unsigned int            sdma_bitmap;
index ac5445415667c80e78aa7549df657d89d9735d9b..3c6221905bc4effad8a006cba90c3f0159a69fbc 100644 (file)
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
        "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+       "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
                  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
        }
 
        /* Verify module parameters */
-       if ((max_num_of_processes < 0) ||
-               (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-               pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-               return -1;
-       }
-
-       if ((max_num_of_queues_per_process < 0) ||
-               (max_num_of_queues_per_process >
-                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-               pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+       if ((max_num_of_queues_per_device < 0) ||
+               (max_num_of_queues_per_device >
+                       KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+               pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
                return -1;
        }
 
index 4c25ef504f79dd6be067369a1c216b11db98fc2e..6cfe7f1f18cff0d805a75097a5a9f86e2a4fcdfd 100644 (file)
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-       pasid_limit = max_num_of_processes;
+       pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
        pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
        if (!pasid_bitmap)
index 1b35a9c87437d89cf761c5d2e9e992ffe7813ae3..5a44f2fecf3826b7b90e18cf3d22b7f64c113a26 100644 (file)
 #define kfd_alloc_struct(ptr_to_struct)        \
        ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE               \
+       (KFD_MAX_NUM_OF_PROCESSES *                     \
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
 
index 513eeb6e402a8513f5d1b0ff1875b77d2c50e603..ca93ab0449c808321c19ce1e31c50c7f8d455b4f 100644 (file)
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
        pr_debug("kfd: in %s\n", __func__);
 
        found = find_first_zero_bit(pqm->queue_slot_bitmap,
-                       max_num_of_queues_per_process);
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
        pr_debug("kfd: the new slot id %lu\n", found);
 
-       if (found >= max_num_of_queues_per_process) {
+       if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
                                pqm->process->pasid);
                return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 
        INIT_LIST_HEAD(&pqm->queues);
        pqm->queue_slot_bitmap =
-                       kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+                       kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                        BITS_PER_BYTE), GFP_KERNEL);
        if (pqm->queue_slot_bitmap == NULL)
                return -ENOMEM;
@@ -206,6 +206,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                pqn->kq = NULL;
                retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
                                                &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
                print_queue(q);
                break;
        case KFD_QUEUE_TYPE_DIQ:
@@ -226,7 +227,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        }
 
        if (retval != 0) {
-               pr_err("kfd: error dqm create queue\n");
+               pr_debug("Error dqm create queue\n");
                goto err_create_queue;
        }
 
@@ -245,7 +246,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
        kfree(pqn);
 err_allocate_pqn:
+       /* if the queues list is empty, unregister the process from the device */
        clear_bit(*qid, pqm->queue_slot_bitmap);
+       if (list_empty(&pqm->queues))
+               dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
        return retval;
 }
 
index d4762799351d9d5e67efbce0987e10c0e974939c..a9041d1a8ff002f332705db5d80082761788ad19 100644 (file)
@@ -32,6 +32,8 @@
 struct tda998x_priv {
        struct i2c_client *cec;
        struct i2c_client *hdmi;
+       struct mutex mutex;
+       struct delayed_work dwork;
        uint16_t rev;
        uint8_t current_page;
        int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        uint8_t addr = REG2ADDR(reg);
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return ret;
+               goto out;
 
        ret = i2c_master_send(client, &addr, sizeof(addr));
        if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        if (ret < 0)
                goto fail;
 
-       return ret;
+       goto out;
 
 fail:
        dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
        return ret;
 }
 
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
        buf[0] = REG2ADDR(reg);
        memcpy(&buf[1], p, cnt);
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, cnt + 1);
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
        uint8_t buf[] = {REG2ADDR(reg), val};
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
        uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
        reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct tda998x_priv *priv =
+                       container_of(dwork, struct tda998x_priv, dwork);
+
+       if (priv->encoder && priv->encoder->dev)
+               drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
                priv->wq_edid_wait = 0;
                wake_up(&priv->wq_edid);
        } else if (cec != 0) {                  /* HPD change */
-               if (priv->encoder && priv->encoder->dev)
-                       drm_helper_hpd_irq_event(priv->encoder->dev);
+               schedule_delayed_work(&priv->dwork, HZ/10);
        }
        return IRQ_HANDLED;
 }
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
        reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-       if (priv->hdmi->irq)
+       if (priv->hdmi->irq) {
                free_irq(priv->hdmi->irq, priv);
+               cancel_delayed_work_sync(&priv->dwork);
+       }
 
        i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        struct device_node *np = client->dev.of_node;
        u32 video;
        int rev_lo, rev_hi, ret;
+       unsigned short cec_addr;
 
        priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
        priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
        priv->current_page = 0xff;
        priv->hdmi = client;
-       priv->cec = i2c_new_dummy(client->adapter, 0x34);
+       /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+       cec_addr = 0x34 + (client->addr & 0x03);
+       priv->cec = i2c_new_dummy(client->adapter, cec_addr);
        if (!priv->cec)
                return -ENODEV;
 
        priv->dpms = DRM_MODE_DPMS_OFF;
 
+       mutex_init(&priv->mutex);       /* protect the page access */
+
        /* wake up the device: */
        cec_write(priv, REG_CEC_ENAMODS,
                        CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        if (client->irq) {
                int irqf_trigger;
 
-               /* init read EDID waitqueue */
+               /* init read EDID waitqueue and HPD work */
                init_waitqueue_head(&priv->wq_edid);
+               INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
                /* clear pending interrupts */
                reg_read(priv, REG_INT_FLAGS_0);
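
Two changes run through this file: page-register accesses are now serialized by priv->mutex, and the hotplug notification is pushed out of the IRQ thread into a delayed work item, which also coalesces connect/disconnect glitches within the HZ/10 window. A condensed sketch of the work-deferral half, with illustrative structure and field names:

#include <drm/drm_crtc_helper.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_hdmi {
	struct drm_device *drm;
	struct delayed_work dwork;
};

static void my_hpd_work(struct work_struct *work)
{
	struct my_hdmi *priv = container_of(to_delayed_work(work),
					    struct my_hdmi, dwork);

	drm_kms_helper_hotplug_event(priv->drm);	/* process context */
}

/* probe:       INIT_DELAYED_WORK(&priv->dwork, my_hpd_work);
 * IRQ thread:  schedule_delayed_work(&priv->dwork, HZ / 10);
 * teardown:    cancel_delayed_work_sync(&priv->dwork) after free_irq(). */
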
index 7d2ff31c35a5d0d37ac5302adcb1320cc41f2448..f86eb54e7763d65341006c4ff25f4e8f8076760c 100644 (file)
@@ -845,7 +845,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
index 4be2bb7cbef3058b49864fd7a6505e97e1017d6d..ce787a9f12c01fd1f8179d54ef610c27b9be131b 100644 (file)
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
index 74f06d5405913a7e78c18af9d365c6549b591f45..279801ca5110aff68d80ea452751d7f9b0bf748f 100644 (file)
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
        rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
        WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+       return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags)
+                           uint64_t entry)
 {
        u32 *gtt = rdev->gart.ptr;
-       gtt[i] = cpu_to_le32(lower_32_bits(addr));
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
index 064ad5569ccaac826612aedd8d035db3996106db..08d68f3e13e9887ff7b06f18e899c34fd85ae31a 100644 (file)
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                             uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = rdev->gart.ptr;
-
        addr = (lower_32_bits(addr) >> 8) |
                ((upper_32_bits(addr) & 0xff) << 24);
        if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R300_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                addr |= R300_PTE_UNSNOOPED;
+       return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+                             uint64_t entry)
+{
+       void __iomem *ptr = rdev->gart.ptr;
+
        /* on x86 we want this to be CPU endian, on powerpc
         * on powerpc without HW swappers, it'll get swapped on way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-       writel(addr, ((void __iomem *)ptr) + (i * 4));
+       writel(entry, ((void __iomem *)ptr) + (i * 4));
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
        rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
 }
index 93e407b7e7a7d0235057904fab30e3c35d67f9a3..5587603b4a891c1f2cfcf7873dd8aa9e173907df 100644 (file)
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+       uint64_t        entry;
        struct page     *page;
        dma_addr_t      addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
        struct page                     **pages;
-       dma_addr_t                      *pages_addr;
+       uint64_t                        *pages_entry;
        bool                            ready;
 };
 
@@ -1858,8 +1859,9 @@ struct radeon_asic {
        /* gart */
        struct {
                void (*tlb_flush)(struct radeon_device *rdev);
+               uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
                void (*set_page)(struct radeon_device *rdev, unsigned i,
-                                uint64_t addr, uint32_t flags);
+                                uint64_t entry);
        } gart;
        struct {
                int (*init)(struct radeon_device *rdev);
@@ -2867,7 +2869,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
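
The new ->get_page_entry hook splits a GART update into two steps: compute the page-table entry once from the DMA address and flags, then write the precomputed value, which is also what struct radeon_gart now caches (pages_entry replacing pages_addr). A hedged sketch of how a caller is expected to use the wrappers defined just above (the helper and its arguments are illustrative and assume the surrounding radeon.h definitions):

/* Bind one GPU page; illustrative caller only. */
static void bind_one_page(struct radeon_device *rdev, unsigned gpu_page_idx,
			  dma_addr_t dma_addr, uint32_t flags)
{
	uint64_t entry = radeon_gart_get_page_entry(dma_addr, flags);

	radeon_gart_set_page(rdev, gpu_page_idx, entry);
	/* callers flush once after the last page: radeon_gart_tlb_flush(rdev); */
}
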
index f811ee14a237dba8d942a65ece9e5b9d9f1c8b1c..c0ecd128b14bf584964b0787637740ff76ba845c 100644 (file)
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
                DRM_INFO("Forcing AGP to PCIE mode\n");
                rdev->flags |= RADEON_IS_PCIE;
                rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
                rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        } else {
                DRM_INFO("Forcing AGP to PCI mode\n");
                rdev->flags |= RADEON_IS_PCI;
                rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
                rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
        .mc_wait_for_idle = &rs400_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
        .mc_wait_for_idle = &rs600_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs600_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -689,6 +698,7 @@ static struct radeon_asic rs690_asic = {
        .mc_wait_for_idle = &rs690_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -755,6 +765,7 @@ static struct radeon_asic rv515_asic = {
        .mc_wait_for_idle = &rv515_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -821,6 +832,7 @@ static struct radeon_asic r520_asic = {
        .mc_wait_for_idle = &r520_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -915,6 +927,7 @@ static struct radeon_asic r600_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -998,6 +1011,7 @@ static struct radeon_asic rv6xx_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1087,6 +1101,7 @@ static struct radeon_asic rs780_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1189,6 +1204,7 @@ static struct radeon_asic rv770_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1305,6 +1321,7 @@ static struct radeon_asic evergreen_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1395,6 +1412,7 @@ static struct radeon_asic sumo_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1484,6 +1502,7 @@ static struct radeon_asic btc_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1617,6 +1636,7 @@ static struct radeon_asic cayman_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1718,6 +1738,7 @@ static struct radeon_asic trinity_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1849,6 +1870,7 @@ static struct radeon_asic si_asic = {
        .get_gpu_clock_counter = &si_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &si_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2012,6 +2034,7 @@ static struct radeon_asic ci_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2121,6 +2144,7 @@ static struct radeon_asic kv_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
index 4045a320a42430d42e898b1a1357660139772bda..72bdd3bf0d8e1208e6eace5d4c9cd2735f99e110 100644 (file)
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags);
+                           uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
                                struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                                    uint64_t addr, uint32_t flags);
+                                    uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
index 0ec65168f331c73bcbfff24ee719cd6f98790602..bd7519fdd3f431cbce8c2bc6bd3e588e525be5cd 100644 (file)
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
+       rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+                                                           RADEON_GART_PAGE_DUMMY);
        return 0;
 }
 
index 84146d5901aa5aacad168255c14d72cafce5cf15..5450fa95a47efdcde9aa664c740cbe578e4f5b26 100644 (file)
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
                radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.table_addr = gpu_addr;
+
+       if (!r) {
+               int i;
+
+               /* We might have dropped some GART table updates while it
+                * wasn't mapped; restore all entries.
+                */
+               for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+                       radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
+
        return r;
 }
 
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        unsigned t;
        unsigned p;
        int i, j;
-       u64 page_base;
 
        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
                        rdev->gart.pages[p] = NULL;
-                       rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-                       page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                               rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
                                if (rdev->gart.ptr) {
-                                       radeon_gart_set_page(rdev, t, page_base,
-                                                            RADEON_GART_PAGE_DUMMY);
+                                       radeon_gart_set_page(rdev, t,
+                                                            rdev->dummy_page.entry);
                                }
-                               page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 {
        unsigned t;
        unsigned p;
-       uint64_t page_base;
+       uint64_t page_base, page_entry;
        int i, j;
 
        if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
-               if (rdev->gart.ptr) {
-                       page_base = rdev->gart.pages_addr[p];
-                       for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-                               radeon_gart_set_page(rdev, t, page_base, flags);
-                               page_base += RADEON_GPU_PAGE_SIZE;
+               page_base = dma_addr[i];
+               for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                       page_entry = radeon_gart_get_page_entry(page_base, flags);
+                       rdev->gart.pages_entry[t] = page_entry;
+                       if (rdev->gart.ptr) {
+                               radeon_gart_set_page(rdev, t, page_entry);
                        }
+                       page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
        mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-                                       rdev->gart.num_cpu_pages);
-       if (rdev->gart.pages_addr == NULL) {
+       rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+                                        rdev->gart.num_gpu_pages);
+       if (rdev->gart.pages_entry == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        /* set GART entry to point to the dummy page by default */
-       for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-               rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-       }
+       for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+               rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
        return 0;
 }
 
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-       if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+       if (rdev->gart.ready) {
                /* unbind pages */
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
        vfree(rdev->gart.pages);
-       vfree(rdev->gart.pages_addr);
+       vfree(rdev->gart.pages_entry);
        rdev->gart.pages = NULL;
-       rdev->gart.pages_addr = NULL;
+       rdev->gart.pages_entry = NULL;
 
        radeon_dummy_page_fini(rdev);
 }
index 7b274205eeaf05d0e2001848a4449091d0962db2..061eaa9c19c7c0d9add6d7fbf84145fc12afb5b7 100644 (file)
@@ -392,7 +392,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
-       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
        uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
 
        lock_srbm(kgd, mec, pipe, 0, 0);
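
A worked example of the off-by-one fixed above, assuming CIK_PIPE_PER_MEC is 4 (an assumption made only for illustration): with the old pre-increment, pipe_id 0 became 1 before the divide, so mec was still 1 but pipe came out as 1 instead of 0. Without the increment, pipe_id 0 gives mec = (0 / 4) + 1 = 1 and pipe = 0 % 4 = 0, so pipe_ids 0..3 land on MEC 1, pipes 0..3 as intended.
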
index cde48c42b30ad4b63c27dde6741f8637840670fc..06d2246d07f19a086cddced0d1c9ffd32a2f86fd 100644 (file)
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
        uint64_t result;
 
        /* page table offset */
-       result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-       /* in case cpu page size != gpu page size*/
-       result |= addr & (~PAGE_MASK);
+       result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+       result &= ~RADEON_GPU_PAGE_MASK;
 
        return result;
 }
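
Illustrative arithmetic for the lookup above, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12, RADEON_GPU_PAGE_MASK == 0xfff); the address is made up:

	uint64_t addr   = 0x5234;	/* offset handed to radeon_vm_map_gart() */
	uint64_t entry  = rdev->gart.pages_entry[addr >> 12];	/* pages_entry[5] */
	uint64_t result = entry & ~0xfffULL;	/* strip the PTE flag bits, keep the page-aligned bus address */
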
index c5799f16aa4b2f27157b3946ec4c7b65ac85011f..34e3235f41d2bf63a5333e33f63e2518f3668942 100644 (file)
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)
 
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
        uint32_t entry;
-       u32 *gtt = rdev->gart.ptr;
 
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
                entry |= RS400_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                entry |= RS400_PTE_UNSNOOPED;
-       entry = cpu_to_le32(entry);
-       gtt[i] = entry;
+       return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       u32 *gtt = rdev->gart.ptr;
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
index 68f154a451c014321259591dcc2e2462c5d95344..d81182ad53ec6920b4f7027595882cebf26cdcfc 100644 (file)
@@ -626,11 +626,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
        radeon_gart_table_vram_free(rdev);
 }
 
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = (void *)rdev->gart.ptr;
-
        addr = addr & 0xFFFFFFFFFFFFF000ULL;
        addr |= R600_PTE_SYSTEM;
        if (flags & RADEON_GART_PAGE_VALID)
@@ -641,7 +638,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R600_PTE_WRITEABLE;
        if (flags & RADEON_GART_PAGE_SNOOP)
                addr |= R600_PTE_SNOOPED;
-       writeq(addr, ptr + (i * 8));
+       return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       void __iomem *ptr = (void *)rdev->gart.ptr;
+       writeq(entry, ptr + (i * 8));
 }
 
 int rs600_irq_set(struct radeon_device *rdev)
index aa7b872b2c438d28101f081c920715b651917f1d..83207929fc627f9a813acada5740c9ad14bb6bf1 100644 (file)
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
index 7b5d22110f25e7619c37eac7fd66858fa25b93e8..6c6b655defcf4eac679913e70896810208dfd6ce 100644 (file)
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
-       else if (hide_svga) {
-               mutex_lock(&dev_priv->hw_mutex);
+       else if (hide_svga)
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
-               mutex_unlock(&dev_priv->hw_mutex);
-       }
 
        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
-       mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
+       spin_lock_init(&dev_priv->hw_lock);
+       spin_lock_init(&dev_priv->waiter_lock);
+       spin_lock_init(&dev_priv->cap_lock);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev_priv->enable_fb = enable_fbdev;
 
-       mutex_lock(&dev_priv->hw_mutex);
-
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-               mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }
 
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
        ret = vmw_dma_masks(dev_priv);
-       if (unlikely(ret != 0)) {
-               mutex_unlock(&dev_priv->hw_mutex);
+       if (unlikely(ret != 0))
                goto out_err0;
-       }
 
        /*
         * Limit back buffer size to VRAM size.  Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv->prim_bb_mem > dev_priv->vram_size)
                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
-       mutex_unlock(&dev_priv->hw_mutex);
-
        vmw_print_capabilities(dev_priv->capabilities);
 
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
        if (!dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
        return ret;
 }
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        /**
         * Reclaim 3d reference held by fbdev and potentially
index 4ee799b43d5dfc40a8077e60e9f34b17a8dddcf9..d26a6daa9719a23542cb8c575691f1d63851dba4 100644 (file)
@@ -399,7 +399,8 @@ struct vmw_private {
        uint32_t memory_size;
        bool has_gmr;
        bool has_mob;
-       struct mutex hw_mutex;
+       spinlock_t hw_lock;
+       spinlock_t cap_lock;
 
        /*
         * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
        atomic_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
-       int fence_queue_waiters; /* Protected by hw_mutex */
-       int goal_queue_waiters; /* Protected by hw_mutex */
+       spinlock_t waiter_lock;
+       int fence_queue_waiters; /* Protected by waiter_lock */
+       int goal_queue_waiters; /* Protected by waiter_lock */
        atomic_t fifo_queue_waiters;
        uint32_t last_read_seqno;
        spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
        return (struct vmw_master *) master->driver_priv;
 }
 
+/*
+ * The locking here is fine-grained: it is taken and released once for
+ * every read and write operation. That is of course costly, but we don't
+ * perform much register access in the timing-critical paths anyway.
+ * In exchange we can be sure that we never forget the hw lock around
+ * register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
 {
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+       spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                unsigned int offset)
 {
-       uint32_t val;
+       unsigned long irq_flags;
+       u32 val;
 
+       spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+       spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
        return val;
 }
 
index b7594cb758afc4299493122f4f6e44c68ef4d919..945f1e0dad9278145eed43708cb820dc54a566a3 100644 (file)
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
-       struct work_struct work, ping_work;
+       struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
        return "svga";
 }
 
-static void vmw_fence_ping_func(struct work_struct *work)
-{
-       struct vmw_fence_manager *fman =
-               container_of(work, struct vmw_fence_manager, ping_work);
-
-       vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
 static bool vmw_fence_enable_signaling(struct fence *f)
 {
        struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;
 
-       if (mutex_trylock(&dev_priv->hw_mutex)) {
-               vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
-               mutex_unlock(&dev_priv->hw_mutex);
-       } else
-               schedule_work(&fman->ping_work);
+       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
        return true;
 }
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
-       INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
        bool lists_empty;
 
        (void) cancel_work_sync(&fman->work);
-       (void) cancel_work_sync(&fman->ping_work);
 
        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
index 09e10aefcd8eb94e6a22182b59591cdb30cbcd36..39f2b03888e7e5b7beb107cd0a32aa0345a328be 100644 (file)
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
                if (!dev_priv->has_mob)
                        return false;
 
-               mutex_lock(&dev_priv->hw_mutex);
+               spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
                result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
-               mutex_unlock(&dev_priv->hw_mutex);
+               spin_unlock(&dev_priv->cap_lock);
 
                return (result != 0);
        }
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
-       mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        mb();
 
        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       static DEFINE_SPINLOCK(ping_lock);
+       unsigned long irq_flags;
 
+       /*
+        * The ping_lock is needed because we don't have an atomic
+        * test-and-set of the SVGA_FIFO_BUSY register.
+        */
+       spin_lock_irqsave(&ping_lock, irq_flags);
        if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
                iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        }
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
-       mutex_lock(&dev_priv->hw_mutex);
-
-       vmw_fifo_ping_host_locked(dev_priv, reason);
-
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock_irqrestore(&ping_lock, irq_flags);
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-       mutex_lock(&dev_priv->hw_mutex);
-
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);
 
-       mutex_unlock(&dev_priv->hw_mutex);
        vmw_marker_queue_takedown(&fifo->marker_queue);
 
        if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 
        if (interruptible)
                ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
        else if (likely(ret > 0))
                ret = 0;
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 
        return ret;
 }
index 37881ecf5d7a9f74c49d4e1c018abc1cc2a9dffd..69c8ce23123c96af22c44011ff2b8fcdab837584 100644 (file)
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
                (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
        compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->cap_lock);
        for (i = 0; i < max_size; ++i) {
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                compat_cap->pairs[i][0] = i;
                compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->cap_lock);
 
        return 0;
 }
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                if (num > SVGA3D_DEVCAP_MAX)
                        num = SVGA3D_DEVCAP_MAX;
 
-               mutex_lock(&dev_priv->hw_mutex);
+               spin_lock(&dev_priv->cap_lock);
                for (i = 0; i < num; ++i) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                        *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                }
-               mutex_unlock(&dev_priv->hw_mutex);
+               spin_unlock(&dev_priv->cap_lock);
        } else if (gb_objects) {
                ret = vmw_fill_compat_cap(dev_priv, bounce, size);
                if (unlikely(ret != 0))
index 0c423766c44119ca923825e879e3d05b7058cc90..9fe9827ee499c177e50735d1acbf84d13eb606f0 100644 (file)
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
-       uint32_t busy;
 
-       mutex_lock(&dev_priv->hw_mutex);
-       busy = vmw_read(dev_priv, SVGA_REG_BUSY);
-       mutex_unlock(&dev_priv->hw_mutex);
-
-       return (busy == 0);
+       return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
 void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;
 
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;
 
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;
 
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;
 
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;
 
-       mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
index 3725b521d9319c9b952bf3920bfcd3a27c61dac3..8725b79e7847d68239a25413c482883e44024704 100644 (file)
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
-       mutex_lock(&dev_priv->hw_mutex);
        num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        return ((vmw_connector_to_du(connector)->unit < num_displays &&
                 du->pref_active) ?
index 6529c09c46f0fe99a02ddfbcd5a643954e1d6e22..a7de26d1ac801383e2ecc57acad47f082c016ce4 100644 (file)
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
          for those channels specified in the map.  This map can be provided
          either via platform data or the device tree bindings.
 
+config SENSORS_I5500
+       tristate "Intel 5500/5520/X58 temperature sensor"
+       depends on X86 && PCI
+       help
+         If you say yes here you get support for the temperature
+         sensor inside the Intel 5500, 5520 and X58 chipsets.
+
+         This driver can also be built as a module. If so, the module
+         will be called i5500_temp.
+
 config SENSORS_CORETEMP
        tristate "Intel Core/Core2/Atom temperature sensor"
        depends on X86
index 67280643bcf009e5b4af58bcf61787f963f3c043..6c941472e707a51b2dfb5f6889aea07bad4ff5b7 100644 (file)
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN)        += gpio-fan.o
 obj-$(CONFIG_SENSORS_HIH6130)  += hih6130.o
 obj-$(CONFIG_SENSORS_HTU21)    += htu21.o
 obj-$(CONFIG_SENSORS_ULTRA45)  += ultra45_env.o
+obj-$(CONFIG_SENSORS_I5500)    += i5500_temp.o
 obj-$(CONFIG_SENSORS_I5K_AMB)  += i5k_amb.o
 obj-$(CONFIG_SENSORS_IBMAEM)   += ibmaem.o
 obj-$(CONFIG_SENSORS_IBMPEX)   += ibmpex.o
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644 (file)
index 0000000..3e3ccbf
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
+ *
+ * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Register definitions from datasheet */
+#define REG_TSTHRCATA  0xE2
+#define REG_TSCTRL     0xE8
+#define REG_TSTHRRPEX  0xEB
+#define REG_TSTHRLO    0xEC
+#define REG_TSTHRHI    0xEE
+#define REG_CTHINT     0xF0
+#define REG_TSFSC      0xF3
+#define REG_CTSTS      0xF4
+#define REG_TSTHRRQPI  0xF5
+#define REG_CTCTRL     0xF7
+#define REG_TSTIMER    0xF8
+
+/*
+ * Sysfs stuff
+ */
+
+/* Sensor resolution: 0.5 degree C */
+static ssize_t show_temp(struct device *dev,
+                        struct device_attribute *devattr, char *buf)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       long temp;
+       u16 tsthrhi;
+       s8 tsfsc;
+
+       pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
+       pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+       temp = ((long)tsthrhi - tsfsc) * 500;
+
+       return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_thresh(struct device *dev,
+                          struct device_attribute *devattr, char *buf)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       int reg = to_sensor_dev_attr(devattr)->index;
+       long temp;
+       u16 tsthr;
+
+       pci_read_config_word(pdev, reg, &tsthr);
+       temp = tsthr * 500;
+
+       return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_alarm(struct device *dev,
+                         struct device_attribute *devattr, char *buf)
+{
+       struct pci_dev *pdev = to_pci_dev(dev->parent);
+       int nr = to_sensor_dev_attr(devattr)->index;
+       u8 ctsts;
+
+       pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+       return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+
+static struct attribute *i5500_temp_attrs[] = {
+       &dev_attr_temp1_input.attr,
+       &sensor_dev_attr_temp1_crit.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+       &sensor_dev_attr_temp1_max.dev_attr.attr,
+       &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(i5500_temp);
+
+static const struct pci_device_id i5500_temp_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
+       { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
+
+static int i5500_temp_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *id)
+{
+       int err;
+       struct device *hwmon_dev;
+       u32 tstimer;
+       s8 tsfsc;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to enable device\n");
+               return err;
+       }
+
+       pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+       pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
+       if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
+               dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
+               return -ENODEV;
+       }
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+                                                          "intel5500", NULL,
+                                                          i5500_temp_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct pci_driver i5500_temp_driver = {
+       .name = "i5500_temp",
+       .id_table = i5500_temp_ids,
+       .probe = i5500_temp_probe,
+};
+
+module_pci_driver(i5500_temp_driver);
+
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
+MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
+MODULE_LICENSE("GPL");
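
A worked example of the temp1_input computation in show_temp() above, using made-up register values: if REG_TSTHRHI reads 0x009A (154) and REG_TSFSC reads 6, the reported value is (154 - 6) * 500 = 74000 millidegrees, i.e. 74.0 degrees C, which matches the 0.5 degree resolution noted in the comment.
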
index d111ac779c4058e94c6051536cdb7fb5d2d34863..63cd031b2c28d40c9c1296ae466005a5d48843d3 100644 (file)
@@ -28,7 +28,7 @@
 #define AT91_AIC_IRQ_MIN_PRIORITY      0
 #define AT91_AIC_IRQ_MAX_PRIORITY      7
 
-#define AT91_AIC_SRCTYPE               GENMASK(7, 6)
+#define AT91_AIC_SRCTYPE               GENMASK(6, 5)
 #define AT91_AIC_SRCTYPE_LOW           (0 << 5)
 #define AT91_AIC_SRCTYPE_FALLING       (1 << 5)
 #define AT91_AIC_SRCTYPE_HIGH          (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
                return -EINVAL;
        }
 
-       *val &= AT91_AIC_SRCTYPE;
+       *val &= ~AT91_AIC_SRCTYPE;
        *val |= aic_type;
 
        return 0;
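
Two fixes are folded into the hunk above, shown here with the bit values spelled out (illustrative only): GENMASK(6, 5) is 0x60 and covers the two-bit source-type field whose encodings (0..3) sit at bits 6:5, whereas the old GENMASK(7, 6) (0xc0) masked the wrong bits; and the field must be cleared with the inverted mask before the new type is OR-ed in:

	val &= ~AT91_AIC_SRCTYPE;		/* clear bits 6:5, i.e. AND with ~0x60 */
	val |= AT91_AIC_SRCTYPE_FALLING;	/* set the field to 1 << 5 */
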
index 86e4684adeb12d8f8f493dfaa686310c3c0bc4de..d8996bdf0f61e95e45ee670e44e565d045fb9535 100644 (file)
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
         * of two entries. No, the architecture doesn't let you
         * express an ITT with a single entry.
         */
-       nr_ites = max(2, roundup_pow_of_two(nvecs));
+       nr_ites = max(2UL, roundup_pow_of_two(nvecs));
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kmalloc(sz, GFP_KERNEL);
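
The 2UL above matters because roundup_pow_of_two() returns an unsigned long, and the kernel's type-checked max() warns when its two arguments have different types. As a worked example, nvecs = 1 gives roundup_pow_of_two(1) = 1 and max(2UL, 1UL) = 2, so the ITT is still sized for the architectural minimum of two entries.
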
index 29b8f21b74d0a5868b7ee4034bee285c07bb332b..6bc2deb73d533b3a226f66c2ef17ed2aa3eaf3bb 100644 (file)
@@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
         * It will be refined as each CPU probes its ID.
         */
        for (i = 0; i < NR_HIP04_CPU_IF; i++)
-               hip04_cpu_map[i] = 0xff;
+               hip04_cpu_map[i] = 0xffff;
 
        /*
         * Find out how many interrupts are supported.
index 7e342df6a62f58be6e5fcc6a60b71cc81ed15b9c..0b0d2c00a2df8dfda1000cbd92e7d5b1bad824ae 100644 (file)
@@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
                return -ENOMEM;
 
        chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
-       if (!chip_data->intpol_base) {
+       if (IS_ERR(chip_data->intpol_base)) {
                pr_err("mtk_sysirq: unable to map sysirq register\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(chip_data->intpol_base);
                goto out_free;
        }
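
The change above follows the ERR_PTR convention: of_io_request_and_map() reports failure as an ERR_PTR-encoded pointer rather than NULL, so the old NULL test could never fire. A minimal sketch of the pattern, mirroring the hunk:

	void __iomem *base = of_io_request_and_map(node, 0, "intpol");

	if (IS_ERR(base))
		return PTR_ERR(base);	/* propagate the encoded errno, e.g. -ENOMEM */
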
 
index 28718d3e8281032d422d2c9985d7ca450b7763a1..c03f140acbaebf9b29c55548833ffe60a7163907 100644 (file)
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
        return ret;
 }
 
-static int __init omap_init_irq_legacy(u32 base)
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
 {
        int j, irq_base;
 
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
                irq_base = 0;
        }
 
-       domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
+       domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
                        &irq_domain_simple_ops, NULL);
 
        omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
 {
        int ret;
 
-       if (node)
+       /*
+        * FIXME: the legacy OMAP DMA driver sitting under
+        * arch/arm/plat-omap/dma.c that this depends on is still not ready
+        * for linear IRQ domains; because of that we need to temporarily
+        * "blacklist" OMAP2 and OMAP3 devices from using a linear IRQ
+        * domain until that driver is finally fixed.
+        */
+       if (of_device_is_compatible(node, "ti,omap2-intc") ||
+                       of_device_is_compatible(node, "ti,omap3-intc")) {
+               struct resource res;
+
+               if (of_address_to_resource(node, 0, &res))
+                       return -ENOMEM;
+
+               base = res.start;
+               ret = omap_init_irq_legacy(base, node);
+       } else if (node) {
                ret = omap_init_irq_of(node);
-       else
-               ret = omap_init_irq_legacy(base);
+       } else {
+               ret = omap_init_irq_legacy(base, NULL);
+       }
 
        if (ret == 0)
                omap_irq_enable_protection();
index 9fc616c2755ed752a50930e1095df8a7a34303f2..21b156242e42668b5f8a1b43910374fe2c7a77f6 100644 (file)
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
 } __packed;
 
 struct dm_cache_metadata {
+       atomic_t ref_count;
+       struct list_head list;
+
        struct block_device *bdev;
        struct dm_block_manager *bm;
        struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
 
 /*----------------------------------------------------------------*/
 
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
-                                                sector_t data_block_size,
-                                                bool may_format_device,
-                                                size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+                                              sector_t data_block_size,
+                                              bool may_format_device,
+                                              size_t policy_hint_size)
 {
        int r;
        struct dm_cache_metadata *cmd;
@@ -683,6 +686,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
                return NULL;
        }
 
+       atomic_set(&cmd->ref_count, 1);
        init_rwsem(&cmd->root_lock);
        cmd->bdev = bdev;
        cmd->data_block_size = data_block_size;
@@ -705,10 +709,95 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
        return cmd;
 }
 
+/*
+ * We keep a little list of ref-counted metadata objects to prevent two
+ * different target instances from creating separate bufio instances.  This
+ * is an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+       struct dm_cache_metadata *cmd;
+
+       list_for_each_entry(cmd, &table, list)
+               if (cmd->bdev == bdev) {
+                       atomic_inc(&cmd->ref_count);
+                       return cmd;
+               }
+
+       return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+                                               sector_t data_block_size,
+                                               bool may_format_device,
+                                               size_t policy_hint_size)
+{
+       struct dm_cache_metadata *cmd, *cmd2;
+
+       mutex_lock(&table_lock);
+       cmd = lookup(bdev);
+       mutex_unlock(&table_lock);
+
+       if (cmd)
+               return cmd;
+
+       cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+       if (cmd) {
+               mutex_lock(&table_lock);
+               cmd2 = lookup(bdev);
+               if (cmd2) {
+                       mutex_unlock(&table_lock);
+                       __destroy_persistent_data_objects(cmd);
+                       kfree(cmd);
+                       return cmd2;
+               }
+               list_add(&cmd->list, &table);
+               mutex_unlock(&table_lock);
+       }
+
+       return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+       if (cmd->data_block_size != data_block_size) {
+               DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+                     (unsigned long long) data_block_size,
+                     (unsigned long long) cmd->data_block_size);
+               return false;
+       }
+
+       return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+                                                sector_t data_block_size,
+                                                bool may_format_device,
+                                                size_t policy_hint_size)
+{
+       struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+                                                      may_format_device, policy_hint_size);
+       if (cmd && !same_params(cmd, data_block_size)) {
+               dm_cache_metadata_close(cmd);
+               return NULL;
+       }
+
+       return cmd;
+}
+
 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
 {
-       __destroy_persistent_data_objects(cmd);
-       kfree(cmd);
+       if (atomic_dec_and_test(&cmd->ref_count)) {
+               mutex_lock(&table_lock);
+               list_del(&cmd->list);
+               mutex_unlock(&table_lock);
+
+               __destroy_persistent_data_objects(cmd);
+               kfree(cmd);
+       }
 }
 
 /*
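
A hypothetical call sequence showing the ref-counting behaviour introduced above (the block-size and hint-size arguments are made-up values): a second open of the same bdev returns the existing object with its ref_count bumped, and the object is only torn down on the last close.

	struct dm_cache_metadata *cmd1, *cmd2;

	cmd1 = dm_cache_metadata_open(bdev, 512, true, 4);	/* creates the object and adds it to the table */
	cmd2 = dm_cache_metadata_open(bdev, 512, false, 4);	/* finds it again: returns cmd1 with ref_count == 2 */

	dm_cache_metadata_close(cmd2);	/* ref_count drops to 1, object stays alive */
	dm_cache_metadata_close(cmd1);	/* last reference: removed from the table and freed */
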
index 1e96d7889f51eaa08b7d65b04c1a43e063931708..e1650539cc2f826d9efe7f878352570bcc31e101 100644 (file)
@@ -221,7 +221,13 @@ struct cache {
        struct list_head need_commit_migrations;
        sector_t migration_threshold;
        wait_queue_head_t migration_wait;
-       atomic_t nr_migrations;
+       atomic_t nr_allocated_migrations;
+
+       /*
+        * The number of in-flight migrations that are performing
+        * background I/O, e.g. promotion or writeback.
+        */
+       atomic_t nr_io_migrations;
 
        wait_queue_head_t quiescing_wait;
        atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
        struct dm_deferred_set *all_io_ds;
 
        mempool_t *migration_pool;
-       struct dm_cache_migration *next_migration;
 
        struct dm_cache_policy *policy;
        unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
        dm_bio_prison_free_cell(cache->prison, cell);
 }
 
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
+{
+       struct dm_cache_migration *mg;
+
+       mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+       if (mg) {
+               mg->cache = cache;
+               atomic_inc(&mg->cache->nr_allocated_migrations);
+       }
+
+       return mg;
+}
+
+static void free_migration(struct dm_cache_migration *mg)
+{
+       if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
+               wake_up(&mg->cache->migration_wait);
+
+       mempool_free(mg, mg->cache->migration_pool);
+}
+
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
 {
        if (!p->mg) {
-               p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+               p->mg = alloc_migration(cache);
                if (!p->mg)
                        return -ENOMEM;
        }
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
                free_prison_cell(cache, p->cell1);
 
        if (p->mg)
-               mempool_free(p->mg, cache->migration_pool);
+               free_migration(p->mg);
 }
 
 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
  * Migration covers moving data from the origin device to the cache, or
  * vice versa.
  *--------------------------------------------------------------*/
-static void free_migration(struct dm_cache_migration *mg)
-{
-       mempool_free(mg, mg->cache->migration_pool);
-}
-
-static void inc_nr_migrations(struct cache *cache)
+static void inc_io_migrations(struct cache *cache)
 {
-       atomic_inc(&cache->nr_migrations);
+       atomic_inc(&cache->nr_io_migrations);
 }
 
-static void dec_nr_migrations(struct cache *cache)
+static void dec_io_migrations(struct cache *cache)
 {
-       atomic_dec(&cache->nr_migrations);
-
-       /*
-        * Wake the worker in case we're suspending the target.
-        */
-       wake_up(&cache->migration_wait);
+       atomic_dec(&cache->nr_io_migrations);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
        wake_worker(cache);
 }
 
-static void cleanup_migration(struct dm_cache_migration *mg)
+static void free_io_migration(struct dm_cache_migration *mg)
 {
-       struct cache *cache = mg->cache;
+       dec_io_migrations(mg->cache);
        free_migration(mg);
-       dec_nr_migrations(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
                cell_defer(cache, mg->new_ocell, true);
        }
 
-       cleanup_migration(mg);
+       free_io_migration(mg);
 }
 
 static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
        if (mg->writeback) {
                clear_dirty(cache, mg->old_oblock, mg->cblock);
                cell_defer(cache, mg->old_ocell, false);
-               cleanup_migration(mg);
+               free_io_migration(mg);
                return;
 
        } else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
                                             mg->old_oblock);
                        if (mg->promote)
                                cell_defer(cache, mg->new_ocell, true);
-                       cleanup_migration(mg);
+                       free_io_migration(mg);
                        return;
                }
        } else {
                if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
                        DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
                        policy_remove_mapping(cache->policy, mg->new_oblock);
-                       cleanup_migration(mg);
+                       free_io_migration(mg);
                        return;
                }
        }
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                } else {
                        if (mg->invalidate)
                                policy_remove_mapping(cache->policy, mg->old_oblock);
-                       cleanup_migration(mg);
+                       free_io_migration(mg);
                }
 
        } else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
                        bio_endio(mg->new_ocell->holder, 0);
                        cell_defer(cache, mg->new_ocell, false);
                }
-               cleanup_migration(mg);
+               free_io_migration(mg);
        }
 }
 
@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = cell;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = NULL;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = new_ocell;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
        mg->new_ocell = NULL;
        mg->start_jiffies = jiffies;
 
-       inc_nr_migrations(cache);
+       inc_io_migrations(cache);
        quiesce_migration(mg);
 }
 
@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
 
 static bool spare_migration_bandwidth(struct cache *cache)
 {
-       sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+       sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
                cache->sectors_per_block;
        return current_volume < cache->migration_threshold;
 }
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)
 
 static void wait_for_migrations(struct cache *cache)
 {
-       wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+       wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
 }
 
 static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
 {
        unsigned i;
 
-       if (cache->next_migration)
-               mempool_free(cache->next_migration, cache->migration_pool);
-
        if (cache->migration_pool)
                mempool_destroy(cache->migration_pool);
 
@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        INIT_LIST_HEAD(&cache->quiesced_migrations);
        INIT_LIST_HEAD(&cache->completed_migrations);
        INIT_LIST_HEAD(&cache->need_commit_migrations);
-       atomic_set(&cache->nr_migrations, 0);
+       atomic_set(&cache->nr_allocated_migrations, 0);
+       atomic_set(&cache->nr_io_migrations, 0);
        init_waitqueue_head(&cache->migration_wait);
 
        init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
                goto bad;
        }
 
-       cache->next_migration = NULL;
-
        cache->need_tick_bio = true;
        cache->sized = false;
        cache->invalidate = false;
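
The dm-cache-target hunks above split the old nr_migrations counter in two: nr_allocated_migrations counts every live migration object and is what suspend waits on, while nr_io_migrations counts only migrations doing background I/O and is what the bandwidth check looks at. A compressed sketch of that accounting follows, using hypothetical names and C11 atomics in place of the kernel's atomic_t and wait_event() machinery.

/* Sketch of the two-counter migration accounting above; hypothetical names. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_allocated_migrations; /* every live migration object */
static atomic_int nr_io_migrations;        /* only those doing background I/O */

static void account_migration_alloc(void)
{
        atomic_fetch_add(&nr_allocated_migrations, 1);
}

static void account_migration_free(void)
{
        if (atomic_fetch_sub(&nr_allocated_migrations, 1) == 1) {
                /* last one gone: wake_up(&cache->migration_wait) in the kernel */
        }
}

static void inc_io_migrations(void) { atomic_fetch_add(&nr_io_migrations, 1); }
static void dec_io_migrations(void) { atomic_fetch_sub(&nr_io_migrations, 1); }

/* Throttling only cares about migrations that generate background I/O. */
static bool spare_migration_bandwidth(long sectors_per_block,
                                      long migration_threshold)
{
        long current_volume =
                (atomic_load(&nr_io_migrations) + 1) * sectors_per_block;

        return current_volume < migration_threshold;
}

/* Suspend must wait for *all* allocations, including preallocated,
 * not-yet-issued migrations, to be freed. */
static bool migrations_quiesced(void)
{
        return atomic_load(&nr_allocated_migrations) == 0;
}

Waiting on the allocated count rather than the I/O count appears to be the point of the change: preallocated but not yet issued migrations must also be gone before the target is torn down.
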
index b98cd9d84435fe15ea1cb202850508f83b83204b..2caf5b374649afaecff37a5ab5153d7ccc5ec437 100644 (file)
@@ -206,6 +206,9 @@ struct mapped_device {
        /* zero-length flush that will be cloned and submitted to targets */
        struct bio flush_bio;
 
+       /* the number of internal suspends */
+       unsigned internal_suspend_count;
+
        struct dm_stats stats;
 };
 
@@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
 {
        struct dm_table *map = NULL;
 
-       if (dm_suspended_internally_md(md))
+       if (md->internal_suspend_count++)
                return; /* nested internal suspend */
 
        if (dm_suspended_md(md)) {
@@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
 
 static void __dm_internal_resume(struct mapped_device *md)
 {
-       if (!dm_suspended_internally_md(md))
+       BUG_ON(!md->internal_suspend_count);
+
+       if (--md->internal_suspend_count)
                return; /* resume from nested internal suspend */
 
        if (dm_suspended_md(md))
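
The internal_suspend_count hunks above turn internal suspend/resume from a boolean check into a nesting counter, so only the outermost suspend does the real work and only the matching final resume undoes it. A small sketch of that idiom with hypothetical names (no locking shown; the kernel code serialises these calls with its own lock):

/* Sketch of counted (nestable) internal suspend/resume; hypothetical names. */
#include <assert.h>

struct dev {
        unsigned int internal_suspend_count;
        int suspended;                  /* stands in for the real suspend state */
};

static void do_suspend(struct dev *d) { d->suspended = 1; }
static void do_resume(struct dev *d)  { d->suspended = 0; }

void internal_suspend(struct dev *d)
{
        if (d->internal_suspend_count++)
                return;                 /* already suspended by an outer caller */
        do_suspend(d);
}

void internal_resume(struct dev *d)
{
        assert(d->internal_suspend_count);      /* BUG_ON() in the kernel */
        if (--d->internal_suspend_count)
                return;                 /* still nested, stay suspended */
        do_resume(d);
}

The motivation, as far as the diff shows, is that the old dm_suspended_internally_md() check could not tell an outer suspend from a nested one, so an inner resume could fully resume while an outer caller still expected the device to stay suspended; the counter makes the pairing explicit.
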
index db99ca2613ba422d20c60afcbe34a4750a9876c0..06931f6fa26cc0003db9789440ebf9c5277759df 100644 (file)
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
                .portb          = CX23885_MPEG_DVB,
        },
        [CX23885_BOARD_HAUPPAUGE_HVR4400] = {
-               .name           = "Hauppauge WinTV-HVR4400",
+               .name           = "Hauppauge WinTV-HVR4400/HVR5500",
                .porta          = CX23885_ANALOG_VIDEO,
                .portb          = CX23885_MPEG_DVB,
                .portc          = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
                .tuner_addr     = 0x60, /* 0xc0 >> 1 */
                .tuner_bus      = 1,
        },
+       [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
+               .name           = "Hauppauge WinTV Starburst",
+               .portb          = CX23885_MPEG_DVB,
+       },
        [CX23885_BOARD_AVERMEDIA_HC81R] = {
                .name           = "AVerTV Hybrid Express Slim HC81R",
                .tuner_type     = TUNER_XC2028,
@@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = {
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc108,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc138,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc12a,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
        }, {
                .subvendor = 0x0070,
                .subdevice = 0xc1f8,
-               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400,
+               .card      = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
        }, {
                .subvendor = 0x1461,
                .subdevice = 0xd939,
@@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
                cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
                break;
        case CX23885_BOARD_HAUPPAUGE_HVR4400:
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
                /* GPIO-8 tda10071 demod reset */
-               /* GPIO-9 si2165 demod reset */
+               /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500) */
 
                /* Put the parts into reset and back */
                cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
        case CX23885_BOARD_HAUPPAUGE_HVR1850:
        case CX23885_BOARD_HAUPPAUGE_HVR1290:
        case CX23885_BOARD_HAUPPAUGE_HVR4400:
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
        case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
                if (dev->i2c_bus[0].i2c_rc == 0)
                        hauppauge_eeprom(dev, eeprom+0xc0);
@@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
                ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
                ts2->src_sel_val   = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
                break;
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
+               ts1->gen_ctrl_val  = 0xc; /* Serial bus + punctured clock */
+               ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+               ts1->src_sel_val   = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+               break;
        case CX23885_BOARD_DVBSKY_T9580:
        case CX23885_BOARD_DVBSKY_T982:
                ts1->gen_ctrl_val  = 0x5; /* Parallel */
index 1d9d0f86ca8cbe3effb833fe7e43d26185aefb41..1ad49946d7fa9c1ef8f6cb71d8e91c85d566e4a6 100644 (file)
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
 
        cx23885_shutdown(dev);
 
-       pci_disable_device(pci_dev);
-
        /* unregister stuff */
        free_irq(pci_dev->irq, dev);
 
+       pci_disable_device(pci_dev);
+
        cx23885_dev_unregister(dev);
        vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
        v4l2_ctrl_handler_free(&dev->ctrl_handler);
index c47d18270cfc8899f262be3f338329e869bb4cb2..a9c450d4b54e4a7d52440b6af017d9068b27435c 100644 (file)
@@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port)
                        break;
                }
                break;
+       case CX23885_BOARD_HAUPPAUGE_STARBURST:
+               i2c_bus = &dev->i2c_bus[0];
+               fe0->dvb.frontend = dvb_attach(tda10071_attach,
+                                               &hauppauge_tda10071_config,
+                                               &i2c_bus->i2c_adap);
+               if (fe0->dvb.frontend != NULL) {
+                       dvb_attach(a8293_attach, fe0->dvb.frontend,
+                                  &i2c_bus->i2c_adap,
+                                  &hauppauge_a8293_config);
+               }
+               break;
        case CX23885_BOARD_DVBSKY_T9580:
        case CX23885_BOARD_DVBSKY_S950:
                i2c_bus = &dev->i2c_bus[0];
index f55cd12da0fde35b55ae5c3c75d56253f64fd9ac..36f2f96c40e4362713f7e2f06987af911d6472c2 100644 (file)
@@ -99,6 +99,7 @@
 #define CX23885_BOARD_DVBSKY_S950              49
 #define CX23885_BOARD_DVBSKY_S952              50
 #define CX23885_BOARD_DVBSKY_T982              51
+#define CX23885_BOARD_HAUPPAUGE_STARBURST      52
 
 #define GPIO_0 0x00000001
 #define GPIO_1 0x00000002
index b463fe172d164a0f14400c42f13f33aa9dbc82dd..3fe9047ef466faddcb822453e08282bf381c366b 100644 (file)
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
        strlcpy(cap->card, video->video.name, sizeof(cap->card));
        strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
 
+       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+               | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
-               cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+               cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        else
-               cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+               cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
 
        return 0;
 }
index 8efe40337608951db5b72137506dd61e13e20b06..6d885239b16abf0b01b721de4f27cf924f6b7f4a 100644 (file)
@@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
 {
        strcpy(cap->driver, "atmel-isi");
        strcpy(cap->card, "Atmel Image Sensor Interface");
-       cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
-                               V4L2_CAP_STREAMING);
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
        return 0;
 }
 
index ce72bd26a6acaef29a740863eaae99232401ec6c..192377f55840b540e82ab26658830457b858f98f 100644 (file)
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the friendly caller:-> */
        strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index a60c3bb0e4ccdf952307142c2196333ee6842ab5..0b3299dee05d453c87a492cd610d66e507f8a7ba 100644 (file)
@@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the friendly caller:-> */
        strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index e6b93281f246c394eee1006deadb1bfac2a12be9..16f65ecb70a3e1fdf13d49cf002d2801559b83d5 100644 (file)
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the friendly caller:-> */
        strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index 951226af0ebacca484127cc7792802c9eeafa577..8d6e343fec0f28bf1dcc67d0ce96ff48dc6f7250 100644 (file)
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
 {
        /* cap->name is set by the friendly caller:-> */
        strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
 
        return 0;
 }
index 0c1f5564810627ec88242ac373600f03a854387d..9f1473c0a0cfa493227f50ac1920ae34818e670b 100644 (file)
@@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
                             struct v4l2_capability *cap)
 {
        strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
        return 0;
 }
 
index 8b27b3eb2b2538d7e75451ffff0c06d27d2cafd6..71787702d4a26cce7ba7a1b02f2ab367ed388db5 100644 (file)
@@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
                                  struct v4l2_capability *cap)
 {
        strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
-       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
        return 0;
 }
 
index 0f345b1f90145dd856c4830cecf82c710eeeaeeb..f327c49d7e0936baf1e529746e82166129423916 100644 (file)
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
                {
                        "Mygica T230 DVB-T/T2/C",
                        { NULL },
-                       { &cxusb_table[22], NULL },
+                       { &cxusb_table[20], NULL },
                },
        }
 };
index 1b158f1167ed0722793a8b58b990a1fc378e4a34..536210b39428c98ce1f8385b48ab1a13ba2cf7d2 100644 (file)
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
 module_param_array(vbi_nr, int, NULL, 0444);
 MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
 
-static struct v4l2_capability pvr_capability ={
-       .driver         = "pvrusb2",
-       .card           = "Hauppauge WinTV pvr-usb2",
-       .bus_info       = "usb",
-       .version        = LINUX_VERSION_CODE,
-       .capabilities   = (V4L2_CAP_VIDEO_CAPTURE |
-                          V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
-                          V4L2_CAP_READWRITE),
-};
-
 static struct v4l2_fmtdesc pvr_fmtdesc [] = {
        {
                .index          = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
        struct pvr2_v4l2_fh *fh = file->private_data;
        struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
 
-       memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+       strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
        strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
                        sizeof(cap->bus_info));
        strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+       cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+                           V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+                           V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+       switch (fh->pdi->devbase.vfl_type) {
+       case VFL_TYPE_GRABBER:
+               cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+               break;
+       case VFL_TYPE_RADIO:
+               cap->device_caps = V4L2_CAP_RADIO;
+               break;
+       }
+       cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
        return 0;
 }
 
index d09a8916e94005180f0f6beaf0ff53d7b2e932a4..bc08a829bc132068c0b51661f9459293ef30c142 100644 (file)
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
                        prequeue--;
                } else {
                        call_void_qop(q, wait_finish, q);
-                       ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+                       if (!threadio->stop)
+                               ret = vb2_internal_dqbuf(q, &fileio->b, 0);
                        call_void_qop(q, wait_prepare, q);
                        dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
                }
-               if (threadio->stop)
-                       break;
-               if (ret)
+               if (ret || threadio->stop)
                        break;
                try_to_freeze();
 
                vb = q->bufs[fileio->b.index];
                if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
-                       ret = threadio->fnc(vb, threadio->priv);
-               if (ret)
-                       break;
+                       if (threadio->fnc(vb, threadio->priv))
+                               break;
                call_void_qop(q, wait_finish, q);
                if (set_timestamp)
                        v4l2_get_timestamp(&fileio->b.timestamp);
-               ret = vb2_internal_qbuf(q, &fileio->b);
+               if (!threadio->stop)
+                       ret = vb2_internal_qbuf(q, &fileio->b);
                call_void_qop(q, wait_prepare, q);
-               if (ret)
+               if (ret || threadio->stop)
                        break;
        }
 
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
        threadio->stop = true;
        vb2_internal_streamoff(q, q->type);
        call_void_qop(q, wait_prepare, q);
+       err = kthread_stop(threadio->thread);
        q->fileio = NULL;
        fileio->req.count = 0;
        vb2_reqbufs(q, &fileio->req);
        kfree(fileio);
-       err = kthread_stop(threadio->thread);
        threadio->thread = NULL;
        kfree(threadio);
        q->fileio = NULL;
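
The videobuf2 hunks above tighten the worker's shutdown path: the stop flag is re-checked before each blocking dqbuf/qbuf call, a set flag is treated like an error for the purpose of leaving the loop, and vb2_thread_stop() now joins the kthread before freeing the file-io state it uses. A condensed pthread sketch of that shape with hypothetical names (thread creation omitted); this is not the videobuf2 API.

/* Sketch of the stop-flag discipline used in the vb2 worker above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct worker {
        pthread_t thread;
        atomic_bool stop;
};

/* Stand-ins for the blocking queue operations (dqbuf / process / qbuf). */
static int dequeue_buf(void) { return 0; }
static int process_buf(void) { return 0; }
static int requeue_buf(void) { return 0; }

static void *worker_fn(void *arg)
{
        struct worker *w = arg;
        int ret = 0;

        for (;;) {
                if (!atomic_load(&w->stop))        /* re-check before blocking */
                        ret = dequeue_buf();
                if (ret || atomic_load(&w->stop))  /* error and stop exit alike */
                        break;

                if (process_buf())
                        break;

                if (!atomic_load(&w->stop))
                        ret = requeue_buf();
                if (ret || atomic_load(&w->stop))
                        break;
        }
        return NULL;
}

void worker_stop(struct worker *w)
{
        atomic_store(&w->stop, true);
        /* Join first; only then free anything worker_fn may still touch. */
        pthread_join(w->thread, NULL);
        /* ...tear down queues/buffers here... */
}
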
index f94a9fa60488ed8e23523f5f8c3665133f039dbb..c672c4dcffac14349d20d362dd5b199dd9bcd95c 100644 (file)
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
 
        c_can_irq_control(priv, false);
 
+       /* put the controller into init mode on stop to end ongoing transmission */
+       priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
        /* deactivate pins */
        pinctrl_pm_select_sleep_state(dev->dev.parent);
        priv->can.state = CAN_STATE_STOPPED;
index c32cd61073bcc71048899f0ccb04cbcef2fe293c..7af379ca861b11a1631764071bb31c8a8d4b8916 100644 (file)
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
                          buf, msg->len,
-                         kvaser_usb_simple_msg_callback, priv);
+                         kvaser_usb_simple_msg_callback, netdev);
        usb_anchor_urb(urb, &priv->tx_submitted);
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if (status & M16C_STATE_BUS_RESET) {
-               kvaser_usb_unlink_tx_urbs(priv);
-               return;
-       }
-
        skb = alloc_can_err_skb(priv->netdev, &cf);
        if (!skb) {
                stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
        netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-       if (status & M16C_STATE_BUS_OFF) {
+       if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
                cf->can_id |= CAN_ERR_BUSOFF;
 
                priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
                }
 
                new_state = CAN_STATE_ERROR_PASSIVE;
-       }
-
-       if (status == M16C_STATE_BUS_ERROR) {
+       } else if (status & M16C_STATE_BUS_ERROR) {
                if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
                    ((txerr >= 96) || (rxerr >= 96))) {
                        cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
                        priv->can.can_stats.error_warning++;
                        new_state = CAN_STATE_ERROR_WARNING;
-               } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+               } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+                          ((txerr < 96) && (rxerr < 96))) {
                        cf->can_id |= CAN_ERR_PROT;
                        cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
        struct kvaser_usb *dev;
        int err = -ENOMEM;
-       int i;
+       int i, retry = 3;
 
        dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
-       err = kvaser_usb_get_software_info(dev);
+       /* On some x86 laptops, plugging a Kvaser device again after
+        * an unplug makes the firmware always ignore the very first
+        * command. For such a case, provide some room for retries
+        * instead of completely exiting the driver.
+        */
+       do {
+               err = kvaser_usb_get_software_info(dev);
+       } while (--retry && err == -ETIMEDOUT);
+
        if (err) {
                dev_err(&intf->dev,
                        "Cannot get software infos, error %d\n", err);
index 75b08c63d39f4a469a389c0327e1f3071932596f..29a09271b64a39b71a46ac1d5beb5a6472160509 100644 (file)
 #define MTL_Q_RQOMR                    0x40
 #define MTL_Q_RQMPOCR                  0x44
 #define MTL_Q_RQDR                     0x4c
+#define MTL_Q_RQFCR                    0x50
 #define MTL_Q_IER                      0x70
 #define MTL_Q_ISR                      0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX          1
+#define MTL_Q_RQFCR_RFA_WIDTH          6
+#define MTL_Q_RQFCR_RFD_INDEX          17
+#define MTL_Q_RQFCR_RFD_WIDTH          6
 #define MTL_Q_RQOMR_EHFC_INDEX         7
 #define MTL_Q_RQOMR_EHFC_WIDTH         1
-#define MTL_Q_RQOMR_RFA_INDEX          8
-#define MTL_Q_RQOMR_RFA_WIDTH          3
-#define MTL_Q_RQOMR_RFD_INDEX          13
-#define MTL_Q_RQOMR_RFD_WIDTH          3
 #define MTL_Q_RQOMR_RQS_INDEX          16
 #define MTL_Q_RQOMR_RQS_WIDTH          9
 #define MTL_Q_RQOMR_RSF_INDEX          5
index 53f5f66ec2ee43fe5533697860f70d49a458b919..4c66cd1d1e604f19a36e65fc88b6392ecfa66d3b 100644 (file)
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
        for (i = 0; i < pdata->rx_q_count; i++) {
                /* Activate flow control when less than 4k left in fifo */
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
 
                /* De-activate flow control when more than 6k left in fifo */
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
        }
 }
 
index 1d1147c93d5972147a9aa17650eeaadb0dda7066..e468ed3f210f8e1a121b3a47d6d7da1168063592 100644 (file)
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
                }
 #endif
                if (!bnx2x_fp_lock_napi(fp))
-                       return work_done;
+                       return budget;
 
                for_each_cos_in_tx_queue(fp, cos)
                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
index b29e027c476e538b93a7a10b9302deee1253c0b5..e356afa44e7d8400fdaa59c47dfa8294e1c0bb8c 100644 (file)
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        int err;
 
        if (!enic_poll_lock_napi(&enic->rq[rq]))
-               return work_done;
+               return budget;
        /* Service RQ
         */
 
index a62fc38f045e1b802730c95484c0b60a340ec1c6..1c75829eb1668fe094af3a9049fb53bb0c8b4bb5 100644 (file)
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_dma) && \
         (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
        dma_addr_t tso_hdrs_dma;
 
        struct tx_desc *tx_desc_area;
+       char *tx_desc_mapping; /* array to track the type of the dma mapping */
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
 
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];
+       txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
        desc->l4i_chk = 0;
        desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;
-               void *addr;
 
                this_frag = &skb_shinfo(skb)->frags[frag];
-               addr = page_address(this_frag->page.p) + this_frag->page_offset;
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
                desc = &txq->tx_desc_area[tx_index];
+               txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 
                /*
                 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
-               desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-                                              desc->byte_cnt, DMA_TO_DEVICE);
+               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+                                                this_frag, 0, desc->byte_cnt,
+                                                DMA_TO_DEVICE);
        }
 }
 
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];
+       txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
        if (nr_frags) {
                txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
+               char desc_dma_map;
 
                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
+               desc_dma_map = txq->tx_desc_mapping[tx_index];
+
                cmd_sts = desc->cmd_sts;
 
                if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                reclaimed++;
                txq->tx_desc_count--;
 
-               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
+               if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+                       if (desc_dma_map == DESC_DMA_MAP_PAGE)
+                               dma_unmap_page(mp->dev->dev.parent,
+                                              desc->buf_ptr,
+                                              desc->byte_cnt,
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(mp->dev->dev.parent,
+                                                desc->buf_ptr,
+                                                desc->byte_cnt,
+                                                DMA_TO_DEVICE);
+               }
 
                if (cmd_sts & TX_ENABLE_INTERRUPT) {
                        struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
        struct tx_queue *txq = mp->txq + index;
        struct tx_desc *tx_desc;
        int size;
+       int ret;
        int i;
 
        txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
                                        nexti * sizeof(struct tx_desc);
        }
 
+       txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+                                      GFP_KERNEL);
+       if (!txq->tx_desc_mapping) {
+               ret = -ENOMEM;
+               goto err_free_desc_area;
+       }
+
        /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
        txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
                                           txq->tx_ring_size * TSO_HEADER_SIZE,
                                           &txq->tso_hdrs_dma, GFP_KERNEL);
        if (txq->tso_hdrs == NULL) {
-               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-                                 txq->tx_desc_area, txq->tx_desc_dma);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_free_desc_mapping;
        }
        skb_queue_head_init(&txq->tx_skb);
 
        return 0;
+
+err_free_desc_mapping:
+       kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+       if (index == 0 && size <= mp->tx_desc_sram_size)
+               iounmap(txq->tx_desc_area);
+       else
+               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+                                 txq->tx_desc_area, txq->tx_desc_dma);
+       return ret;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
+       kfree(txq->tx_desc_mapping);
+
        if (txq->tso_hdrs)
                dma_free_coherent(mp->dev->dev.parent,
                                  txq->tx_ring_size * TSO_HEADER_SIZE,
index 613037584d08e785ef2700ca1d2221b50b256e9c..c531c8ae1be4e2ad3d509b9656369c3560f8ad78 100644 (file)
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
        work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-       if ((work_done < budget) && tx_complete) {
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__NX_DEV_UP, &adapter->state))
                        netxen_nic_enable_int(sds_ring);
index 6576243222af74f593419e6e338cdd9387b949af..04283fe0e6a7248877e5dffc1de75e4dedf6748b 100644 (file)
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_ADRL31]    = 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
        return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       dma_addr_t dma_addr;
 
        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, skbuff_size);
-               mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
                rxdesc = &mdp->rx_ring[i];
                /* The size of the buffer is a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-               dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-                              DMA_FROM_DEVICE);
-               rxdesc->addr = virt_to_phys(skb->data);
+               dma_addr = dma_map_single(&ndev->dev, skb->data,
+                                         rxdesc->buffer_length,
+                                         DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                       kfree_skb(skb);
+                       break;
+               }
+               mdp->rx_skbuff[i] = skb;
+               rxdesc->addr = dma_addr;
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
                /* Rx descriptor address set */
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
                     RFLR);
 
        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-       if (start)
+       if (start) {
+               mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+       }
 
        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int i;
+
+       /* Deactivate all TX descriptors, so DMA should stop at next
+        * packet boundary if it's currently running
+        */
+       for (i = 0; i < mdp->num_tx_ring; i++)
+               mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+       /* Disable TX FIFO egress to MAC */
+       sh_eth_rcv_snd_disable(ndev);
+
+       /* Stop RX DMA at next packet boundary */
+       sh_eth_write(ndev, 0, EDRRR);
+
+       /* Aside from TX DMA, we can't tell when the hardware is
+        * really stopped, so we need to reset to make sure.
+        * Before doing that, wait for long enough to *probably*
+        * finish transmitting the last packet and poll stats.
+        */
+       msleep(2); /* max frame time at 10 Mbps < 1250 us */
+       sh_eth_get_stats(ndev);
+       sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        u16 pkt_len = 0;
        u32 desc_status;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       dma_addr_t dma_addr;
 
        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
-                       dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-                                               ALIGN(mdp->rx_buf_sz, 16),
-                                               DMA_FROM_DEVICE);
+                       dma_unmap_single(&ndev->dev, rxdesc->addr,
+                                        ALIGN(mdp->rx_buf_sz, 16),
+                                        DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(ndev, skbuff_size);
-                       mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
-                       dma_map_single(&ndev->dev, skb->data,
-                                      rxdesc->buffer_length, DMA_FROM_DEVICE);
+                       dma_addr = dma_map_single(&ndev->dev, skb->data,
+                                                 rxdesc->buffer_length,
+                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                               kfree_skb(skb);
+                               break;
+                       }
+                       mdp->rx_skbuff[entry] = skb;
 
                        skb_checksum_none_assert(skb);
-                       rxdesc->addr = virt_to_phys(skb->data);
+                       rxdesc->addr = dma_addr;
                }
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        ndev->stats.rx_frame_errors++;
-                       netif_err(mdp, rx_err, ndev, "Receive Abort\n");
                }
        }
 
@@ -1592,13 +1635,11 @@ ignore_link:
        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
-               netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
        }
 
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                ndev->stats.rx_fifo_errors++;
-               netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
        }
 
        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
                ret = IRQ_HANDLED;
        else
-               goto other_irq;
+               goto out;
+
+       if (!likely(mdp->irq_enabled)) {
+               sh_eth_write(ndev, 0, EESIPR);
+               goto out;
+       }
 
        if (intr_status & EESR_RX_CHECK) {
                if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                sh_eth_error(ndev, intr_status);
        }
 
-other_irq:
+out:
        spin_unlock(&mdp->lock);
 
        return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
        napi_complete(napi);
 
        /* Reenable Rx interrupts */
-       sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+       if (mdp->irq_enabled)
+               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
        return budget - quota;
 }
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
                return -EINVAL;
 
        if (netif_running(ndev)) {
+               netif_device_detach(ndev);
                netif_tx_disable(ndev);
-               /* Disable interrupts by clearing the interrupt mask. */
-               sh_eth_write(ndev, 0x0000, EESIPR);
-               /* Stop the chip's Tx and Rx processes. */
-               sh_eth_write(ndev, 0, EDTRR);
-               sh_eth_write(ndev, 0, EDRRR);
+
+               /* Serialise with the interrupt handler and NAPI, then
+                * disable interrupts.  We have to clear the
+                * irq_enabled flag first to ensure that interrupts
+                * won't be re-enabled.
+                */
+               mdp->irq_enabled = false;
                synchronize_irq(ndev->irq);
-       }
+               napi_synchronize(&mdp->napi);
+               sh_eth_write(ndev, 0x0000, EESIPR);
 
-       /* Free all the skbuffs in the Rx queue. */
-       sh_eth_ring_free(ndev);
-       /* Free DMA buffer */
-       sh_eth_free_dma_buffer(mdp);
+               sh_eth_dev_exit(ndev);
+
+               /* Free all the skbuffs in the Rx queue. */
+               sh_eth_ring_free(ndev);
+               /* Free DMA buffer */
+               sh_eth_free_dma_buffer(mdp);
+       }
 
        /* Set new parameters */
        mdp->num_rx_ring = ring->rx_pending;
        mdp->num_tx_ring = ring->tx_pending;
 
-       ret = sh_eth_ring_init(ndev);
-       if (ret < 0) {
-               netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-               return ret;
-       }
-       ret = sh_eth_dev_init(ndev, false);
-       if (ret < 0) {
-               netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-               return ret;
-       }
-
        if (netif_running(ndev)) {
+               ret = sh_eth_ring_init(ndev);
+               if (ret < 0) {
+                       netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+                                  __func__);
+                       return ret;
+               }
+               ret = sh_eth_dev_init(ndev, false);
+               if (ret < 0) {
+                       netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+                                  __func__);
+                       return ret;
+               }
+
+               mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);
-               netif_wake_queue(ndev);
+               netif_device_attach(ndev);
        }
 
        return 0;
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        spin_unlock_irqrestore(&mdp->lock, flags);
 
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
        entry = mdp->cur_tx % mdp->num_tx_ring;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                 skb->len + 2);
        txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
-       if (skb->len < ETH_ZLEN)
-               txdesc->buffer_length = ETH_ZLEN;
-       else
-               txdesc->buffer_length = skb->len;
+       if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+       txdesc->buffer_length = skb->len;
 
        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)
 
        netif_stop_queue(ndev);
 
-       /* Disable interrupts by clearing the interrupt mask. */
+       /* Serialise with the interrupt handler and NAPI, then disable
+        * interrupts.  We have to clear the irq_enabled flag first to
+        * ensure that interrupts won't be re-enabled.
+        */
+       mdp->irq_enabled = false;
+       synchronize_irq(ndev->irq);
+       napi_disable(&mdp->napi);
        sh_eth_write(ndev, 0x0000, EESIPR);
 
-       /* Stop the chip's Tx and Rx processes. */
-       sh_eth_write(ndev, 0, EDTRR);
-       sh_eth_write(ndev, 0, EDRRR);
+       sh_eth_dev_exit(ndev);
 
-       sh_eth_get_stats(ndev);
        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)
 
        free_irq(ndev->irq, ndev);
 
-       napi_disable(&mdp->napi);
-
        /* Free all the skbuffs in the Rx queue. */
        sh_eth_ring_free(ndev);
 
index 71f5de1171bd93d004beacf880c387a4168eaada..332d3c16d48388ec9ae9281ec07f8310dcbce671 100644 (file)
@@ -513,6 +513,7 @@ struct sh_eth_private {
        u32 rx_buf_sz;                  /* Based on MTU+slack. */
        int edmac_endian;
        struct napi_struct napi;
+       bool irq_enabled;
        /* MII transceiver section. */
        u32 phy_id;                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
index 8c6b7c1651e5f82329882a179fcca12e0a622982..cf62ff4c8c56c6d3f4d8e8421cbd1b71e86e192b 100644 (file)
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
  * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * on success the new private structure is returned, otherwise an error
+ * pointer (ERR_PTR).
  */
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
                                     struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 
        ndev = alloc_etherdev(sizeof(struct stmmac_priv));
        if (!ndev)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        SET_NETDEV_DEV(ndev, device);
 
index e068d48b0f21f713e3537fdf293741c2c7526baa..a39131f494ec1f87cf5c4cd794945f68cd8e180d 100644 (file)
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               /* In dual EMAC, reserved VLAN id should not be used for
+                * creating VLAN interfaces as this can break the dual
+                * EMAC port separation
+                */
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
        if (ret != 0)
index a14d87783245a94986232fbbe20b64a57194359d..2e195289ddf4cf4ae4bab1c6fd0068789a60f8ed 100644 (file)
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
        };
 
        dst = ip6_route_output(dev_net(dev), NULL, &fl6);
-       if (IS_ERR(dst))
+       if (dst->error) {
+               ret = dst->error;
+               dst_release(dst);
                goto err;
-
+       }
        skb_dst_drop(skb);
        skb_dst_set(skb, dst);
        err = ip6_local_out(skb);
index 9a72640237cb7678500468343e6f09101c169e81..62b0bf4fdf6b0144f4696703113f344b2993546c 100644 (file)
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 
        __ath_cancel_work(sc);
 
+       disable_irq(sc->irq);
        tasklet_disable(&sc->intr_tq);
        tasklet_disable(&sc->bcon_tasklet);
        spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
                r = -EIO;
 
 out:
+       enable_irq(sc->irq);
        spin_unlock_bh(&sc->sc_pcu_lock);
        tasklet_enable(&sc->bcon_tasklet);
        tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
        if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
                return IRQ_NONE;
 
-       if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
-               return IRQ_NONE;
-
        /* shared irq, not for us */
        if (!ath9k_hw_intrpend(ah))
                return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
        ath9k_debug_sync_cause(sc, sync_cause);
        status &= ah->imask;    /* discard unasked-for bits */
 
-       if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+       if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                return IRQ_HANDLED;
 
        /*
index 1bbe4fc47b97bcbcd4c828ce094f487c81edd357..660ddb1b7d8a4280e2ed62f679b4f32867a053ec 100644 (file)
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *     regardless of the band or the number of the probes. FW will calculate
  *     the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
        IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
+       IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
 };
 
 /**
index 201846de94e7d949819afacd8f5bac3d9379a6ec..cfc0e65b34a5e14494d83af1b560d0a33493aa66 100644 (file)
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
 };
 
 /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of scan iterations in which EBS is
+ *     used:
+ *     1 - EBS is disabled.
+ *     2 - every second scan will be a full scan (and so on).
  */
 struct iwl_scan_channel_opt {
        __le16 flags;
index e880f9d4717ba4de642916e252bfaf7279ade237..20915587c8207a46bf7525db41df3af98c46404a 100644 (file)
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
                msk |= mvmsta->tfd_queue_msk;
        }
 
-       if (drop) {
-               if (iwl_mvm_flush_tx_path(mvm, msk, true))
-                       IWL_ERR(mvm, "flush request fail\n");
-               mutex_unlock(&mvm->mutex);
-       } else {
-               mutex_unlock(&mvm->mutex);
+       msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
 
-               /* this can take a while, and we may need/want other operations
-                * to succeed while doing this, so do it without the mutex held
-                */
-               iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
-       }
+       if (iwl_mvm_flush_tx_path(mvm, msk, true))
+               IWL_ERR(mvm, "flush request fail\n");
+       mutex_unlock(&mvm->mutex);
+
+       /* this can take a while, and we may need/want other operations
+        * to succeed while doing this, so do it without the mutex held
+        */
+       iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
 }
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
index ec9a8e7bae1de2934d9fddcd26e7d4b481cdafb9..844bf7c4c8def639dbc65329fe9f4e157715cbdf 100644 (file)
@@ -72,6 +72,8 @@
 
 #define IWL_PLCP_QUIET_THRESH 1
 #define IWL_ACTIVE_QUIET_TIME 10
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
 
 struct iwl_mvm_scan_params {
        u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
                return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
                                          notify);
 
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               return 0;
+
+       if (iwl_mvm_is_radio_killed(mvm))
+               goto out;
+
        if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
            (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
             mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
        if (mvm->scan_status == IWL_MVM_SCAN_OS)
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
+out:
        mvm->scan_status = IWL_MVM_SCAN_NONE;
 
        if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
        cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
        cmd->iter_num = cpu_to_le32(1);
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-           mvm->last_ebs_successful) {
-               cmd->channel_opt[0].flags =
-                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-               cmd->channel_opt[1].flags =
-                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-       }
-
        if (iwl_mvm_rrm_scan_needed(mvm))
                cmd->scan_flags |=
                        cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
        cmd->schedule[1].iterations = 0;
        cmd->schedule[1].full_scan_mul = 0;
 
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+           mvm->last_ebs_successful) {
+               cmd->channel_opt[0].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[0].non_ebs_ratio =
+                       cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+               cmd->channel_opt[1].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[1].non_ebs_ratio =
+                       cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+       }
+
        for (i = 1; i <= req->req.n_ssids; i++)
                ssid_bitmap |= BIT(i);
 
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
        cmd->schedule[1].iterations = 0xff;
        cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+           mvm->last_ebs_successful) {
+               cmd->channel_opt[0].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[0].non_ebs_ratio =
+                       cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+               cmd->channel_opt[1].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[1].non_ebs_ratio =
+                       cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+       }
+
        iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
                                       ssid_bitmap, cmd);
 
index 4333306ccdee75a02952288196897d63a349b6b0..c59d07567d9041688401c0345376e9dc52de3157 100644 (file)
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        if (ieee80211_is_probe_resp(fc))
                tx_flags |= TX_CMD_FLG_TSF;
-       else if (ieee80211_is_back_req(fc))
-               tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+       } else if (ieee80211_is_back_req(fc)) {
+               struct ieee80211_bar *bar = (void *)skb->data;
+               u16 control = le16_to_cpu(bar->control);
+
+               tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+               tx_cmd->tid_tspec = (control &
+                                    IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+                       IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+               WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
        } else {
                tx_cmd->tid_tspec = IWL_TID_NON_QOS;
                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
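[Editor's note: the BAR branch added above derives the TID from the block-ack-request control field. The standalone sketch below shows only that bit manipulation; the 0xf000 mask and shift of 12 are the conventional 802.11 BAR TID_INFO layout and are stated here as an assumption rather than taken from this diff.]

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the BAR control word: TID in bits 15:12. */
#define BAR_CTRL_TID_INFO_MASK  0xf000
#define BAR_CTRL_TID_INFO_SHIFT 12

static uint8_t bar_tid(uint16_t control)
{
        return (control & BAR_CTRL_TID_INFO_MASK) >> BAR_CTRL_TID_INFO_SHIFT;
}

int main(void)
{
        uint16_t control = 0x5004;      /* example value: TID 5 plus some policy bits */

        printf("TID %u\n", (unsigned)bar_tid(control));
        return 0;
}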
index ea63fbd228ed684234722c6f35db36060559accf..352b4f28f82cd729fb842a210e7460ff9f123833 100644 (file)
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
                ret = of_overlay_apply_one(ov, tchild, child);
                if (ret)
                        return ret;
-
-               /* The properties are already copied, now do the child nodes */
-               for_each_child_of_node(child, grandchild) {
-                       ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
-                       if (ret) {
-                               pr_err("%s: Failed to apply single node @%s/%s\n",
-                                       __func__, tchild->full_name,
-                                       grandchild->name);
-                               return ret;
-                       }
-               }
        }
 
        return ret;
index 5b33c6a2180752888a36e7a15f81b81f29c8477a..b0d50d70a8a1d4772cae2075223959406059ea10 100644 (file)
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
                size = dev->coherent_dma_mask;
        } else {
                offset = PFN_DOWN(paddr - dma_addr);
-               dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+               dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
        }
        dev->dma_pfn_offset = offset;
 
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
                if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
                        return NOTIFY_OK;       /* not for us */
 
+               /* already populated? (driver using of_platform_populate manually) */
+               if (of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
                /* pdev_parent may be NULL when no bus platform device */
                pdev_parent = of_find_device_by_node(rd->dn->parent);
                pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
                break;
 
        case OF_RECONFIG_CHANGE_REMOVE:
+
+               /* already depopulated? */
+               if (!of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
                /* find our device by node */
                pdev = of_find_device_by_node(rd->dn);
                if (pdev == NULL)
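[Editor's note: the two OF_POPULATED checks added above make the notifier idempotent: a node already populated by its driver (as the selftest now does via of_platform_populate()) is skipped on insert, and a node that was never populated is skipped on remove. A toy standalone sketch of that guard follows; the node structure and helper names are illustrative, not the of_* API.]

#include <stdbool.h>
#include <stdio.h>

/* Toy node with a "populated" flag, mirroring the OF_POPULATED guard above. */
struct node { const char *name; bool populated; };

static void notify_add(struct node *n)
{
        if (n->populated)               /* already populated by a driver: skip */
                return;
        n->populated = true;
        printf("created platform device for %s\n", n->name);
}

static void notify_remove(struct node *n)
{
        if (!n->populated)              /* never populated: nothing to tear down */
                return;
        n->populated = false;
        printf("removed platform device for %s\n", n->name);
}

int main(void)
{
        struct node n = { "test-selftest10", false };

        notify_add(&n);
        notify_add(&n);                 /* second add is a no-op */
        notify_remove(&n);
        notify_remove(&n);              /* second remove is a no-op */
        return 0;
}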
index 75976da22b2e2c997fe70d7ea1b6c9a1c622face..a2b687d5f324700a0adff54e991905053445a167 100644 (file)
                        };
                };
 
+               overlay10 {
+                       fragment@0 {
+                               target-path = "/testcase-data/overlay-node/test-bus";
+                               __overlay__ {
+
+                                       /* suppress DTC warning */
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+
+                                       test-selftest10 {
+                                               compatible = "selftest";
+                                               status = "okay";
+                                               reg = <10>;
+
+                                               #address-cells = <1>;
+                                               #size-cells = <0>;
+
+                                               test-selftest101 {
+                                                       compatible = "selftest";
+                                                       status = "okay";
+                                                       reg = <1>;
+                                               };
+
+                                       };
+                               };
+                       };
+               };
+
+               overlay11 {
+                       fragment@0 {
+                               target-path = "/testcase-data/overlay-node/test-bus";
+                               __overlay__ {
+
+                                       /* suppress DTC warning */
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+
+                                       test-selftest11 {
+                                               compatible = "selftest";
+                                               status = "okay";
+                                               reg = <11>;
+
+                                               #address-cells = <1>;
+                                               #size-cells = <0>;
+
+                                               test-selftest111 {
+                                                       compatible = "selftest";
+                                                       status = "okay";
+                                                       reg = <1>;
+                                               };
+
+                                       };
+                               };
+                       };
+               };
        };
 };
index 844838e11ef1a1f8f4d6ac9b52721a1b668dc6aa..41a4a138f53b26c547296805eaf425ebb359268f 100644 (file)
@@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev)
        }
 
        dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+       of_platform_populate(np, NULL, NULL, &pdev->dev);
+
        return 0;
 }
 
@@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void)
        selftest(1, "overlay test %d passed\n", 8);
 }
 
+/* test insertion of a bus with child devices */
+static void of_selftest_overlay_10(void)
+{
+       int ret;
+       char *child_path;
+
+       /* device should enable */
+       ret = of_selftest_apply_overlay_check(10, 10, 0, 1);
+       if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10))
+               return;
+
+       child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
+                       selftest_path(10));
+       if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+               return;
+
+       ret = of_path_platform_device_exists(child_path);
+       kfree(child_path);
+       if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+               return;
+}
+
+/* test insertion of a bus with child devices (and revert) */
+static void of_selftest_overlay_11(void)
+{
+       int ret;
+
+       /* device should enable, then be removed again on revert */
+       ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1);
+       if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11))
+               return;
+}
+
 static void __init of_selftest_overlay(void)
 {
        struct device_node *bus_np = NULL;
@@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void)
        of_selftest_overlay_6();
        of_selftest_overlay_8();
 
+       of_selftest_overlay_10();
+       of_selftest_overlay_11();
+
 out:
        of_node_put(bus_np);
 }
index 37e71ff6408dca41ff5790417f549eead9ddea97..dceb9ddfd99af6d754b0190fe2eb99431f64f4a6 100644 (file)
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
                int i;
                /* PCI-PCI Bridge */
                pci_read_bridge_bases(bus);
-               for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
-                       pci_claim_resource(bus->self, i);
-               }
+               for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
+                       pci_claim_bridge_resource(bus->self, i);
        } else {
                /* Host-PCI Bridge */
                int err;
index 73aef51a28f0760fefa6b4344235e7f341bebb3d..8fb16188cd82aaff9d346a70f46e0257e468fe29 100644 (file)
@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 }
 EXPORT_SYMBOL(pci_bus_alloc_resource);
 
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window.  If this
+ * resource fits inside a window of an upstream bridge, do nothing.  If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+       struct pci_bus *bus = dev->bus;
+       struct resource *res = &dev->resource[idx];
+       struct resource orig_res = *res;
+       struct resource *r;
+       int i;
+
+       pci_bus_for_each_resource(bus, r, i) {
+               resource_size_t start, end;
+
+               if (!r)
+                       continue;
+
+               if (resource_type(res) != resource_type(r))
+                       continue;
+
+               start = max(r->start, res->start);
+               end = min(r->end, res->end);
+
+               if (start > end)
+                       continue;       /* no overlap */
+
+               if (res->start == start && res->end == end)
+                       return false;   /* no change */
+
+               res->start = start;
+               res->end = end;
+               dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+                                &orig_res, res);
+
+               return true;
+       }
+
+       return false;
+}
+
 void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
 
 /**
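[Editor's note: pci_bus_clip_resource() above intersects a bridge window with each upstream bus resource of the same type and shrinks it in place when it spills outside. A minimal userspace sketch of the same interval intersection follows, using a plain range struct instead of struct resource; all names and the example values are illustrative.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };  /* inclusive bounds, like struct resource */

/* Clip *res so it fits inside *win; return true if *res was changed. */
static bool clip_to_window(struct range *res, const struct range *win)
{
        uint64_t start = win->start > res->start ? win->start : res->start;
        uint64_t end   = win->end   < res->end   ? win->end   : res->end;

        if (start > end)                        /* no overlap: nothing we can do */
                return false;
        if (start == res->start && end == res->end)
                return false;                   /* already inside the window */

        res->start = start;
        res->end = end;
        return true;
}

int main(void)
{
        struct range win = { 0x1000, 0x1fff };
        struct range res = { 0x1800, 0x2fff };  /* overlaps but extends past win */

        if (clip_to_window(&res, &win))
                printf("clipped to [%#llx-%#llx]\n",
                       (unsigned long long)res.start, (unsigned long long)res.end);
        return 0;
}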
index cab05f31223f08166cc0a8e179759fc99812a286..e9d4fd861ba1c84a82f9561841d4e1da26715a2a 100644 (file)
@@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 {
        struct pci_dev *pdev;
 
-       if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+       if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+           !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
                return -ENOTTY;
 
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
 {
        struct pci_dev *pdev;
 
-       if (dev->subordinate || !dev->slot)
+       if (dev->subordinate || !dev->slot ||
+           dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
                return -ENOTTY;
 
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_try_reset_function);
 
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &bus->devices, bus_list) {
+               if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+                   (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Lock devices from the top of the tree down */
 static void pci_bus_lock(struct pci_bus *bus)
 {
@@ -3607,6 +3623,22 @@ unlock:
        return 0;
 }
 
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+       struct pci_dev *dev;
+
+       list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+               if (!dev->slot || dev->slot != slot)
+                       continue;
+               if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+                   (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+                       return false;
+       }
+
+       return true;
+}
+
 /* Lock devices from the top of the tree down */
 static void pci_slot_lock(struct pci_slot *slot)
 {
@@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
 {
        int rc;
 
-       if (!slot)
+       if (!slot || !pci_slot_resetable(slot))
                return -ENOTTY;
 
        if (!probe)
@@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
 
 static int pci_bus_reset(struct pci_bus *bus, int probe)
 {
-       if (!bus->self)
+       if (!bus->self || !pci_bus_resetable(bus))
                return -ENOTTY;
 
        if (probe)
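[Editor's note: pci_bus_resetable() and pci_slot_resetable() above walk every device on or below a bus/slot and veto the reset if any of them carries PCI_DEV_FLAGS_NO_BUS_RESET. The standalone sketch below models only that recursive walk over a toy device tree; the struct layout and names are illustrative, not the kernel's.]

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bus;

/* Toy model of the PCI tree used only to illustrate the recursive check. */
struct dev {
        bool no_bus_reset;              /* stands in for PCI_DEV_FLAGS_NO_BUS_RESET */
        struct bus *subordinate;        /* non-NULL for bridges */
};

struct bus {
        struct dev *devices[8];         /* NULL-terminated list of children */
};

static bool bus_resetable(const struct bus *bus)
{
        for (size_t i = 0; i < 8 && bus->devices[i]; i++) {
                const struct dev *d = bus->devices[i];

                if (d->no_bus_reset ||
                    (d->subordinate && !bus_resetable(d->subordinate)))
                        return false;
        }
        return true;
}

int main(void)
{
        struct dev quirky = { .no_bus_reset = true };
        struct bus leaf = { .devices = { &quirky } };
        struct dev bridge = { .subordinate = &leaf };
        struct bus root = { .devices = { &bridge } };

        printf("root bus resetable: %s\n", bus_resetable(&root) ? "yes" : "no");
        return 0;
}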
index 8aff29a804ffa6e9ddaa30e2277d5d654f1df508..d54632a1db43cbed30d89fb91b84fef489cbc4a4 100644 (file)
@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
 void __pci_bus_assign_resources(const struct pci_bus *bus,
                                struct list_head *realloc_head,
                                struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
 
 /**
  * pci_ari_enabled - query ARI forwarding status
index ed6f89b6efe5c2cab127dd15613cab59be736fd7..e52356aa09b87adc778a29bbc3ca40c1ffdf638d 100644 (file)
@@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
                         quirk_broken_intx_masking);
 
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+       dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave well after a bus reset.  On AER-capable
+ * systems the device throws a Link Down error and, with or without AER, its
+ * config space is never accessible again; attempting to access it typically
+ * hangs or resets the system.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
 #ifdef CONFIG_ACPI
 /*
  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
index 0482235eee9262f79ecd69f907c8c41c2364d914..e3e17f3c0f0f2929da94d7411c0740a391dc56c6 100644 (file)
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
    config space writes, so it's quite possible that an I/O window of
    the bridge will have some undesirable address (e.g. 0) after the
    first write. Ditto 64-bit prefetchable MMIO.  */
-static void pci_setup_bridge_io(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_dev *bridge)
 {
-       struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
                io_mask = PCI_IO_1K_RANGE_MASK;
 
        /* Set up the top and bottom of the PCI I/O segment for this bus. */
-       res = bus->resource[0];
+       res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_IO) {
                pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
 }
 
-static void pci_setup_bridge_mmio(struct pci_bus *bus)
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
 {
-       struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l;
 
        /* Set up the top and bottom of the PCI Memory segment for this bus. */
-       res = bus->resource[1];
+       res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_MEM) {
                l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
        pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
 }
 
-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
 {
-       struct pci_dev *bridge = bus->self;
        struct resource *res;
        struct pci_bus_region region;
        u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
 
        /* Set up PREF base/limit. */
        bu = lu = 0;
-       res = bus->resource[2];
+       res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
        pcibios_resource_to_bus(bridge->bus, &region, res);
        if (res->flags & IORESOURCE_PREFETCH) {
                l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
                 &bus->busn_res);
 
        if (type & IORESOURCE_IO)
-               pci_setup_bridge_io(bus);
+               pci_setup_bridge_io(bridge);
 
        if (type & IORESOURCE_MEM)
-               pci_setup_bridge_mmio(bus);
+               pci_setup_bridge_mmio(bridge);
 
        if (type & IORESOURCE_PREFETCH)
-               pci_setup_bridge_mmio_pref(bus);
+               pci_setup_bridge_mmio_pref(bridge);
 
        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 }
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
        __pci_setup_bridge(bus, type);
 }
 
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+       if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+               return 0;
+
+       if (pci_claim_resource(bridge, i) == 0)
+               return 0;       /* claimed the window */
+
+       if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+               return 0;
+
+       if (!pci_bus_clip_resource(bridge, i))
+               return -EINVAL; /* clipping didn't change anything */
+
+       switch (i - PCI_BRIDGE_RESOURCES) {
+       case 0:
+               pci_setup_bridge_io(bridge);
+               break;
+       case 1:
+               pci_setup_bridge_mmio(bridge);
+               break;
+       case 2:
+               pci_setup_bridge_mmio_pref(bridge);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (pci_claim_resource(bridge, i) == 0)
+               return 0;       /* claimed a smaller window */
+
+       return -EINVAL;
+}
+
 /* Check whether the bridge supports optional I/O and
    prefetchable memory ranges. If not, the respective
    base/limit registers must be read-only and read as 0. */
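[Editor's note: pci_claim_bridge_resource() above first tries to claim the window as-is; only if that fails, and the device really is a PCI-PCI bridge, does it clip the window to the upstream range, reprogram the corresponding base/limit registers, and retry the claim. The compact sketch below models that retry flow with locally defined stubs; the stubs only imitate pci_claim_resource(), pci_bus_clip_resource() and the pci_setup_bridge_*() helpers and are not kernel API.]

#include <stdbool.h>
#include <stdio.h>

/* Stubs modelling the control flow only: the first claim fails, clipping
 * shrinks the window, and the retry after reprogramming succeeds. */
static bool claim(int attempt)      { return attempt > 1; }
static bool clip(void)              { return true; }
static void reprogram_window(void)  { puts("reprogramming bridge window"); }

static int claim_bridge_window(void)
{
        if (claim(1))
                return 0;               /* claimed the window as-is */

        if (!clip())
                return -1;              /* clipping changed nothing: give up */

        reprogram_window();             /* write the clipped range to the bridge */

        return claim(2) ? 0 : -1;       /* retry with the smaller window */
}

int main(void)
{
        printf("claim_bridge_window() = %d\n", claim_bridge_window());
        return 0;
}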
index 9411eae39a4ec5fc32f993d1583a3c329e2365b1..3d21efe11d7b77c511ff651201ca9a14c467e058 100644 (file)
@@ -2,11 +2,9 @@
  *  Driver for Dell laptop extras
  *
  *  Copyright (c) Red Hat <mjg@redhat.com>
- *  Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- *  Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
  *
- *  Based on documentation in the libsmbios package:
- *  Copyright (C) 2005-2014 Dell Inc.
+ *  Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ *  Inc.
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License version 2 as
 #include "../../firmware/dcdbas.h"
 
 #define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
 
 /* This structure will be modified by the firmware when we enter
  * system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
 
 struct quirk_entry {
        u8 touchpad_led;
-
-       int needs_kbd_timeouts;
-       /*
-        * Ordered list of timeouts expressed in seconds.
-        * The list must end with -1
-        */
-       int kbd_timeouts[];
 };
 
 static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
        return 1;
 }
 
-/*
- * These values come from Windows utility provided by Dell. If any other value
- * is used then BIOS silently set timeout to 0 without any error message.
- */
-static struct quirk_entry quirk_dell_xps13_9333 = {
-       .needs_kbd_timeouts = 1,
-       .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
-};
-
 static int da_command_address;
 static int da_command_code;
 static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
                },
                .driver_data = &quirk_dell_vostro_v130,
        },
-       {
-               .callback = dmi_matched,
-               .ident = "Dell XPS13 9333",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
-               },
-               .driver_data = &quirk_dell_xps13_9333,
-       },
        { }
 };
 
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
        }
 }
 
-static int find_token_id(int tokenid)
+static int find_token_location(int tokenid)
 {
        int i;
-
        for (i = 0; i < da_num_tokens; i++) {
                if (da_tokens[i].tokenID == tokenid)
-                       return i;
+                       return da_tokens[i].location;
        }
 
        return -1;
 }
 
-static int find_token_location(int tokenid)
-{
-       int id;
-
-       id = find_token_id(tokenid);
-       if (id == -1)
-               return -1;
-
-       return da_tokens[id].location;
-}
-
 static struct calling_interface_buffer *
 dell_send_request(struct calling_interface_buffer *buffer, int class,
                  int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
        return buffer;
 }
 
-static inline int dell_smi_error(int value)
-{
-       switch (value) {
-       case 0: /* Completed successfully */
-               return 0;
-       case -1: /* Completed with error */
-               return -EIO;
-       case -2: /* Function not supported */
-               return -ENXIO;
-       default: /* Unknown error */
-               return -EINVAL;
-       }
-}
-
 /* Derived from information in DellWirelessCtl.cpp:
    Class 17, select 11 is radio control. It returns an array of 32-bit values.
 
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
        else
                dell_send_request(buffer, 1, 1);
 
- out:
+out:
        release_buffer();
        return ret;
 }
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
 
        ret = buffer->output[1];
 
- out:
+out:
        release_buffer();
        return ret;
 }
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
        led_classdev_unregister(&touchpad_led);
 }
 
-/*
- * Derived from information in smbios-keyboard-ctl:
- *
- * cbClass 4
- * cbSelect 11
- * Keyboard illumination
- * cbArg1 determines the function to be performed
- *
- * cbArg1 0x0 = Get Feature Information
- *  cbRES1         Standard return codes (0, -1, -2)
- *  cbRES2, word0  Bitmap of user-selectable modes
- *     bit 0     Always off (All systems)
- *     bit 1     Always on (Travis ATG, Siberia)
- *     bit 2     Auto: ALS-based On; ALS-based Off (Travis ATG)
- *     bit 3     Auto: ALS- and input-activity-based On; input-activity based Off
- *     bit 4     Auto: Input-activity-based On; input-activity based Off
- *     bit 5     Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- *     bit 6     Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- *     bit 7     Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- *     bit 8     Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- *     bits 9-15 Reserved for future use
- *  cbRES2, byte2  Reserved for future use
- *  cbRES2, byte3  Keyboard illumination type
- *     0         Reserved
- *     1         Tasklight
- *     2         Backlight
- *     3-255     Reserved for future use
- *  cbRES3, byte0  Supported auto keyboard illumination trigger bitmap.
- *     bit 0     Any keystroke
- *     bit 1     Touchpad activity
- *     bit 2     Pointing stick
- *     bit 3     Any mouse
- *     bits 4-7  Reserved for future use
- *  cbRES3, byte1  Supported timeout unit bitmap
- *     bit 0     Seconds
- *     bit 1     Minutes
- *     bit 2     Hours
- *     bit 3     Days
- *     bits 4-7  Reserved for future use
- *  cbRES3, byte2  Number of keyboard light brightness levels
- *  cbRES4, byte0  Maximum acceptable seconds value (0 if seconds not supported).
- *  cbRES4, byte1  Maximum acceptable minutes value (0 if minutes not supported).
- *  cbRES4, byte2  Maximum acceptable hours value (0 if hours not supported).
- *  cbRES4, byte3  Maximum acceptable days value (0 if days not supported)
- *
- * cbArg1 0x1 = Get Current State
- *  cbRES1         Standard return codes (0, -1, -2)
- *  cbRES2, word0  Bitmap of current mode state
- *     bit 0     Always off (All systems)
- *     bit 1     Always on (Travis ATG, Siberia)
- *     bit 2     Auto: ALS-based On; ALS-based Off (Travis ATG)
- *     bit 3     Auto: ALS- and input-activity-based On; input-activity based Off
- *     bit 4     Auto: Input-activity-based On; input-activity based Off
- *     bit 5     Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- *     bit 6     Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- *     bit 7     Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- *     bit 8     Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- *     bits 9-15 Reserved for future use
- *     Note: Only One bit can be set
- *  cbRES2, byte2  Currently active auto keyboard illumination triggers.
- *     bit 0     Any keystroke
- *     bit 1     Touchpad activity
- *     bit 2     Pointing stick
- *     bit 3     Any mouse
- *     bits 4-7  Reserved for future use
- *  cbRES2, byte3  Current Timeout
- *     bits 7:6  Timeout units indicator:
- *     00b       Seconds
- *     01b       Minutes
- *     10b       Hours
- *     11b       Days
- *     bits 5:0  Timeout value (0-63) in sec/min/hr/day
- *     NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
- *     are set upon return from the [Get feature information] call.
- *  cbRES3, byte0  Current setting of ALS value that turns the light on or off.
- *  cbRES3, byte1  Current ALS reading
- *  cbRES3, byte2  Current keyboard light level.
- *
- * cbArg1 0x2 = Set New State
- *  cbRES1         Standard return codes (0, -1, -2)
- *  cbArg2, word0  Bitmap of current mode state
- *     bit 0     Always off (All systems)
- *     bit 1     Always on (Travis ATG, Siberia)
- *     bit 2     Auto: ALS-based On; ALS-based Off (Travis ATG)
- *     bit 3     Auto: ALS- and input-activity-based On; input-activity based Off
- *     bit 4     Auto: Input-activity-based On; input-activity based Off
- *     bit 5     Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- *     bit 6     Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- *     bit 7     Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- *     bit 8     Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- *     bits 9-15 Reserved for future use
- *     Note: Only One bit can be set
- *  cbArg2, byte2  Desired auto keyboard illumination triggers. Must remain inactive to allow
- *                 keyboard to turn off automatically.
- *     bit 0     Any keystroke
- *     bit 1     Touchpad activity
- *     bit 2     Pointing stick
- *     bit 3     Any mouse
- *     bits 4-7  Reserved for future use
- *  cbArg2, byte3  Desired Timeout
- *     bits 7:6  Timeout units indicator:
- *     00b       Seconds
- *     01b       Minutes
- *     10b       Hours
- *     11b       Days
- *     bits 5:0  Timeout value (0-63) in sec/min/hr/day
- *  cbArg3, byte0  Desired setting of ALS value that turns the light on or off.
- *  cbArg3, byte2  Desired keyboard light level.
- */
-
-
-enum kbd_timeout_unit {
-       KBD_TIMEOUT_SECONDS = 0,
-       KBD_TIMEOUT_MINUTES,
-       KBD_TIMEOUT_HOURS,
-       KBD_TIMEOUT_DAYS,
-};
-
-enum kbd_mode_bit {
-       KBD_MODE_BIT_OFF = 0,
-       KBD_MODE_BIT_ON,
-       KBD_MODE_BIT_ALS,
-       KBD_MODE_BIT_TRIGGER_ALS,
-       KBD_MODE_BIT_TRIGGER,
-       KBD_MODE_BIT_TRIGGER_25,
-       KBD_MODE_BIT_TRIGGER_50,
-       KBD_MODE_BIT_TRIGGER_75,
-       KBD_MODE_BIT_TRIGGER_100,
-};
-
-#define kbd_is_als_mode_bit(bit) \
-       ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
-#define kbd_is_trigger_mode_bit(bit) \
-       ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-#define kbd_is_level_mode_bit(bit) \
-       ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-
-struct kbd_info {
-       u16 modes;
-       u8 type;
-       u8 triggers;
-       u8 levels;
-       u8 seconds;
-       u8 minutes;
-       u8 hours;
-       u8 days;
-};
-
-struct kbd_state {
-       u8 mode_bit;
-       u8 triggers;
-       u8 timeout_value;
-       u8 timeout_unit;
-       u8 als_setting;
-       u8 als_value;
-       u8 level;
-};
-
-static const int kbd_tokens[] = {
-       KBD_LED_OFF_TOKEN,
-       KBD_LED_AUTO_25_TOKEN,
-       KBD_LED_AUTO_50_TOKEN,
-       KBD_LED_AUTO_75_TOKEN,
-       KBD_LED_AUTO_100_TOKEN,
-       KBD_LED_ON_TOKEN,
-};
-
-static u16 kbd_token_bits;
-
-static struct kbd_info kbd_info;
-static bool kbd_als_supported;
-static bool kbd_triggers_supported;
-
-static u8 kbd_mode_levels[16];
-static int kbd_mode_levels_count;
-
-static u8 kbd_previous_level;
-static u8 kbd_previous_mode_bit;
-
-static bool kbd_led_present;
-
-/*
- * NOTE: there are three ways to set the keyboard backlight level.
- * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
- * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
- * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
- *
- * There are laptops which support only one of these methods. If we want to
- * support as many machines as possible we need to implement all three methods.
- * The first two methods use the kbd_state structure. The third uses SMBIOS
- * tokens. If kbd_info.levels == 0, the machine does not support setting the
- * keyboard backlight level via kbd_state.level.
- */
-
-static int kbd_get_info(struct kbd_info *info)
-{
-       u8 units;
-       int ret;
-
-       get_buffer();
-
-       buffer->input[0] = 0x0;
-       dell_send_request(buffer, 4, 11);
-       ret = buffer->output[0];
-
-       if (ret) {
-               ret = dell_smi_error(ret);
-               goto out;
-       }
-
-       info->modes = buffer->output[1] & 0xFFFF;
-       info->type = (buffer->output[1] >> 24) & 0xFF;
-       info->triggers = buffer->output[2] & 0xFF;
-       units = (buffer->output[2] >> 8) & 0xFF;
-       info->levels = (buffer->output[2] >> 16) & 0xFF;
-
-       if (units & BIT(0))
-               info->seconds = (buffer->output[3] >> 0) & 0xFF;
-       if (units & BIT(1))
-               info->minutes = (buffer->output[3] >> 8) & 0xFF;
-       if (units & BIT(2))
-               info->hours = (buffer->output[3] >> 16) & 0xFF;
-       if (units & BIT(3))
-               info->days = (buffer->output[3] >> 24) & 0xFF;
-
- out:
-       release_buffer();
-       return ret;
-}
-
-static unsigned int kbd_get_max_level(void)
-{
-       if (kbd_info.levels != 0)
-               return kbd_info.levels;
-       if (kbd_mode_levels_count > 0)
-               return kbd_mode_levels_count - 1;
-       return 0;
-}
-
-static int kbd_get_level(struct kbd_state *state)
-{
-       int i;
-
-       if (kbd_info.levels != 0)
-               return state->level;
-
-       if (kbd_mode_levels_count > 0) {
-               for (i = 0; i < kbd_mode_levels_count; ++i)
-                       if (kbd_mode_levels[i] == state->mode_bit)
-                               return i;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int kbd_set_level(struct kbd_state *state, u8 level)
-{
-       if (kbd_info.levels != 0) {
-               if (level != 0)
-                       kbd_previous_level = level;
-               if (state->level == level)
-                       return 0;
-               state->level = level;
-               if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
-                       state->mode_bit = kbd_previous_mode_bit;
-               else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
-                       kbd_previous_mode_bit = state->mode_bit;
-                       state->mode_bit = KBD_MODE_BIT_OFF;
-               }
-               return 0;
-       }
-
-       if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
-               if (level != 0)
-                       kbd_previous_level = level;
-               state->mode_bit = kbd_mode_levels[level];
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int kbd_get_state(struct kbd_state *state)
-{
-       int ret;
-
-       get_buffer();
-
-       buffer->input[0] = 0x1;
-       dell_send_request(buffer, 4, 11);
-       ret = buffer->output[0];
-
-       if (ret) {
-               ret = dell_smi_error(ret);
-               goto out;
-       }
-
-       state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
-       if (state->mode_bit != 0)
-               state->mode_bit--;
-
-       state->triggers = (buffer->output[1] >> 16) & 0xFF;
-       state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
-       state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
-       state->als_setting = buffer->output[2] & 0xFF;
-       state->als_value = (buffer->output[2] >> 8) & 0xFF;
-       state->level = (buffer->output[2] >> 16) & 0xFF;
-
- out:
-       release_buffer();
-       return ret;
-}
-
-static int kbd_set_state(struct kbd_state *state)
-{
-       int ret;
-
-       get_buffer();
-       buffer->input[0] = 0x2;
-       buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
-       buffer->input[1] |= (state->triggers & 0xFF) << 16;
-       buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
-       buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
-       buffer->input[2] = state->als_setting & 0xFF;
-       buffer->input[2] |= (state->level & 0xFF) << 16;
-       dell_send_request(buffer, 4, 11);
-       ret = buffer->output[0];
-       release_buffer();
-
-       return dell_smi_error(ret);
-}
-
-static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
-{
-       int ret;
-
-       ret = kbd_set_state(state);
-       if (ret == 0)
-               return 0;
-
-       /*
-        * When setting the new state fails,try to restore the previous one.
-        * This is needed on some machines where BIOS sets a default state when
-        * setting a new state fails. This default state could be all off.
-        */
-
-       if (kbd_set_state(old))
-               pr_err("Setting old previous keyboard state failed\n");
-
-       return ret;
-}
-
-static int kbd_set_token_bit(u8 bit)
-{
-       int id;
-       int ret;
-
-       if (bit >= ARRAY_SIZE(kbd_tokens))
-               return -EINVAL;
-
-       id = find_token_id(kbd_tokens[bit]);
-       if (id == -1)
-               return -EINVAL;
-
-       get_buffer();
-       buffer->input[0] = da_tokens[id].location;
-       buffer->input[1] = da_tokens[id].value;
-       dell_send_request(buffer, 1, 0);
-       ret = buffer->output[0];
-       release_buffer();
-
-       return dell_smi_error(ret);
-}
-
-static int kbd_get_token_bit(u8 bit)
-{
-       int id;
-       int ret;
-       int val;
-
-       if (bit >= ARRAY_SIZE(kbd_tokens))
-               return -EINVAL;
-
-       id = find_token_id(kbd_tokens[bit]);
-       if (id == -1)
-               return -EINVAL;
-
-       get_buffer();
-       buffer->input[0] = da_tokens[id].location;
-       dell_send_request(buffer, 0, 0);
-       ret = buffer->output[0];
-       val = buffer->output[1];
-       release_buffer();
-
-       if (ret)
-               return dell_smi_error(ret);
-
-       return (val == da_tokens[id].value);
-}
-
-static int kbd_get_first_active_token_bit(void)
-{
-       int i;
-       int ret;
-
-       for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
-               ret = kbd_get_token_bit(i);
-               if (ret == 1)
-                       return i;
-       }
-
-       return ret;
-}
-
-static int kbd_get_valid_token_counts(void)
-{
-       return hweight16(kbd_token_bits);
-}
-
-static inline int kbd_init_info(void)
-{
-       struct kbd_state state;
-       int ret;
-       int i;
-
-       ret = kbd_get_info(&kbd_info);
-       if (ret)
-               return ret;
-
-       kbd_get_state(&state);
-
-       /* NOTE: timeout value is stored in 6 bits so max value is 63 */
-       if (kbd_info.seconds > 63)
-               kbd_info.seconds = 63;
-       if (kbd_info.minutes > 63)
-               kbd_info.minutes = 63;
-       if (kbd_info.hours > 63)
-               kbd_info.hours = 63;
-       if (kbd_info.days > 63)
-               kbd_info.days = 63;
-
-       /* NOTE: On tested machines ON mode did not work and caused
-        *       problems (turned backlight off) so do not use it
-        */
-       kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
-
-       kbd_previous_level = kbd_get_level(&state);
-       kbd_previous_mode_bit = state.mode_bit;
-
-       if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
-               kbd_previous_level = 1;
-
-       if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
-               kbd_previous_mode_bit =
-                       ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
-               if (kbd_previous_mode_bit != 0)
-                       kbd_previous_mode_bit--;
-       }
-
-       if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
-                             BIT(KBD_MODE_BIT_TRIGGER_ALS)))
-               kbd_als_supported = true;
-
-       if (kbd_info.modes & (
-           BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
-           BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
-           BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
-          ))
-               kbd_triggers_supported = true;
-
-       /* kbd_mode_levels[0] is reserved, see below */
-       for (i = 0; i < 16; ++i)
-               if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
-                       kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
-
-       /*
-        * Find the first supported mode and assign to kbd_mode_levels[0].
-        * This should be 0 (off), but we cannot depend on the BIOS to
-        * support 0.
-        */
-       if (kbd_mode_levels_count > 0) {
-               for (i = 0; i < 16; ++i) {
-                       if (BIT(i) & kbd_info.modes) {
-                               kbd_mode_levels[0] = i;
-                               break;
-                       }
-               }
-               kbd_mode_levels_count++;
-       }
-
-       return 0;
-
-}
-
-static inline void kbd_init_tokens(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
-               if (find_token_id(kbd_tokens[i]) != -1)
-                       kbd_token_bits |= BIT(i);
-}
-
-static void kbd_init(void)
-{
-       int ret;
-
-       ret = kbd_init_info();
-       kbd_init_tokens();
-
-       if (kbd_token_bits != 0 || ret == 0)
-               kbd_led_present = true;
-}
-
-static ssize_t kbd_led_timeout_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct kbd_state new_state;
-       struct kbd_state state;
-       bool convert;
-       int value;
-       int ret;
-       char ch;
-       u8 unit;
-       int i;
-
-       ret = sscanf(buf, "%d %c", &value, &ch);
-       if (ret < 1)
-               return -EINVAL;
-       else if (ret == 1)
-               ch = 's';
-
-       if (value < 0)
-               return -EINVAL;
-
-       convert = false;
-
-       switch (ch) {
-       case 's':
-               if (value > kbd_info.seconds)
-                       convert = true;
-               unit = KBD_TIMEOUT_SECONDS;
-               break;
-       case 'm':
-               if (value > kbd_info.minutes)
-                       convert = true;
-               unit = KBD_TIMEOUT_MINUTES;
-               break;
-       case 'h':
-               if (value > kbd_info.hours)
-                       convert = true;
-               unit = KBD_TIMEOUT_HOURS;
-               break;
-       case 'd':
-               if (value > kbd_info.days)
-                       convert = true;
-               unit = KBD_TIMEOUT_DAYS;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (quirks && quirks->needs_kbd_timeouts)
-               convert = true;
-
-       if (convert) {
-               /* Convert value from current units to seconds */
-               switch (unit) {
-               case KBD_TIMEOUT_DAYS:
-                       value *= 24;
-               case KBD_TIMEOUT_HOURS:
-                       value *= 60;
-               case KBD_TIMEOUT_MINUTES:
-                       value *= 60;
-                       unit = KBD_TIMEOUT_SECONDS;
-               }
-
-               if (quirks && quirks->needs_kbd_timeouts) {
-                       for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
-                               if (value <= quirks->kbd_timeouts[i]) {
-                                       value = quirks->kbd_timeouts[i];
-                                       break;
-                               }
-                       }
-               }
-
-               if (value <= kbd_info.seconds && kbd_info.seconds) {
-                       unit = KBD_TIMEOUT_SECONDS;
-               } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
-                       value /= 60;
-                       unit = KBD_TIMEOUT_MINUTES;
-               } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
-                       value /= (60 * 60);
-                       unit = KBD_TIMEOUT_HOURS;
-               } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
-                       value /= (60 * 60 * 24);
-                       unit = KBD_TIMEOUT_DAYS;
-               } else {
-                       return -EINVAL;
-               }
-       }
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       new_state = state;
-       new_state.timeout_value = value;
-       new_state.timeout_unit = unit;
-
-       ret = kbd_set_state_safe(&new_state, &state);
-       if (ret)
-               return ret;
-
-       return count;
-}
-
-static ssize_t kbd_led_timeout_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct kbd_state state;
-       int ret;
-       int len;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       len = sprintf(buf, "%d", state.timeout_value);
-
-       switch (state.timeout_unit) {
-       case KBD_TIMEOUT_SECONDS:
-               return len + sprintf(buf+len, "s\n");
-       case KBD_TIMEOUT_MINUTES:
-               return len + sprintf(buf+len, "m\n");
-       case KBD_TIMEOUT_HOURS:
-               return len + sprintf(buf+len, "h\n");
-       case KBD_TIMEOUT_DAYS:
-               return len + sprintf(buf+len, "d\n");
-       default:
-               return -EINVAL;
-       }
-
-       return len;
-}
-
-static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
-                  kbd_led_timeout_show, kbd_led_timeout_store);
-
-static const char * const kbd_led_triggers[] = {
-       "keyboard",
-       "touchpad",
-       /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
-       "mouse",
-};
-
-static ssize_t kbd_led_triggers_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count)
-{
-       struct kbd_state new_state;
-       struct kbd_state state;
-       bool triggers_enabled = false;
-       bool als_enabled = false;
-       bool disable_als = false;
-       bool enable_als = false;
-       int trigger_bit = -1;
-       char trigger[21];
-       int i, ret;
-
-       ret = sscanf(buf, "%20s", trigger);
-       if (ret != 1)
-               return -EINVAL;
-
-       if (trigger[0] != '+' && trigger[0] != '-')
-               return -EINVAL;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       if (kbd_als_supported)
-               als_enabled = kbd_is_als_mode_bit(state.mode_bit);
-
-       if (kbd_triggers_supported)
-               triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-
-       if (kbd_als_supported) {
-               if (strcmp(trigger, "+als") == 0) {
-                       if (als_enabled)
-                               return count;
-                       enable_als = true;
-               } else if (strcmp(trigger, "-als") == 0) {
-                       if (!als_enabled)
-                               return count;
-                       disable_als = true;
-               }
-       }
-
-       if (enable_als || disable_als) {
-               new_state = state;
-               if (enable_als) {
-                       if (triggers_enabled)
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
-                       else
-                               new_state.mode_bit = KBD_MODE_BIT_ALS;
-               } else {
-                       if (triggers_enabled) {
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
-                               kbd_set_level(&new_state, kbd_previous_level);
-                       } else {
-                               new_state.mode_bit = KBD_MODE_BIT_ON;
-                       }
-               }
-               if (!(kbd_info.modes & BIT(new_state.mode_bit)))
-                       return -EINVAL;
-               ret = kbd_set_state_safe(&new_state, &state);
-               if (ret)
-                       return ret;
-               kbd_previous_mode_bit = new_state.mode_bit;
-               return count;
-       }
-
-       if (kbd_triggers_supported) {
-               for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
-                       if (!(kbd_info.triggers & BIT(i)))
-                               continue;
-                       if (!kbd_led_triggers[i])
-                               continue;
-                       if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
-                               continue;
-                       if (trigger[0] == '+' &&
-                           triggers_enabled && (state.triggers & BIT(i)))
-                               return count;
-                       if (trigger[0] == '-' &&
-                           (!triggers_enabled || !(state.triggers & BIT(i))))
-                               return count;
-                       trigger_bit = i;
-                       break;
-               }
-       }
-
-       if (trigger_bit != -1) {
-               new_state = state;
-               if (trigger[0] == '+')
-                       new_state.triggers |= BIT(trigger_bit);
-               else {
-                       new_state.triggers &= ~BIT(trigger_bit);
-                       /* NOTE: trackstick bit (2) must be disabled when
-                        *       disabling touchpad bit (1), otherwise touchpad
-                        *       bit (1) will not be disabled */
-                       if (trigger_bit == 1)
-                               new_state.triggers &= ~BIT(2);
-               }
-               if ((kbd_info.triggers & new_state.triggers) !=
-                   new_state.triggers)
-                       return -EINVAL;
-               if (new_state.triggers && !triggers_enabled) {
-                       if (als_enabled)
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
-                       else {
-                               new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
-                               kbd_set_level(&new_state, kbd_previous_level);
-                       }
-               } else if (new_state.triggers == 0) {
-                       if (als_enabled)
-                               new_state.mode_bit = KBD_MODE_BIT_ALS;
-                       else
-                               kbd_set_level(&new_state, 0);
-               }
-               if (!(kbd_info.modes & BIT(new_state.mode_bit)))
-                       return -EINVAL;
-               ret = kbd_set_state_safe(&new_state, &state);
-               if (ret)
-                       return ret;
-               if (new_state.mode_bit != KBD_MODE_BIT_OFF)
-                       kbd_previous_mode_bit = new_state.mode_bit;
-               return count;
-       }
-
-       return -EINVAL;
-}
-
-static ssize_t kbd_led_triggers_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
-{
-       struct kbd_state state;
-       bool triggers_enabled;
-       int level, i, ret;
-       int len = 0;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       len = 0;
-
-       if (kbd_triggers_supported) {
-               triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-               level = kbd_get_level(&state);
-               for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
-                       if (!(kbd_info.triggers & BIT(i)))
-                               continue;
-                       if (!kbd_led_triggers[i])
-                               continue;
-                       if ((triggers_enabled || level <= 0) &&
-                           (state.triggers & BIT(i)))
-                               buf[len++] = '+';
-                       else
-                               buf[len++] = '-';
-                       len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
-               }
-       }
-
-       if (kbd_als_supported) {
-               if (kbd_is_als_mode_bit(state.mode_bit))
-                       len += sprintf(buf+len, "+als ");
-               else
-                       len += sprintf(buf+len, "-als ");
-       }
-
-       if (len)
-               buf[len - 1] = '\n';
-
-       return len;
-}
-
-static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
-                  kbd_led_triggers_show, kbd_led_triggers_store);
-
-static ssize_t kbd_led_als_store(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct kbd_state state;
-       struct kbd_state new_state;
-       u8 setting;
-       int ret;
-
-       ret = kstrtou8(buf, 10, &setting);
-       if (ret)
-               return ret;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       new_state = state;
-       new_state.als_setting = setting;
-
-       ret = kbd_set_state_safe(&new_state, &state);
-       if (ret)
-               return ret;
-
-       return count;
-}
-
-static ssize_t kbd_led_als_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct kbd_state state;
-       int ret;
-
-       ret = kbd_get_state(&state);
-       if (ret)
-               return ret;
-
-       return sprintf(buf, "%d\n", state.als_setting);
-}
-
-static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
-                  kbd_led_als_show, kbd_led_als_store);
-
-static struct attribute *kbd_led_attrs[] = {
-       &dev_attr_stop_timeout.attr,
-       &dev_attr_start_triggers.attr,
-       &dev_attr_als_setting.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(kbd_led);
-
-static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
-{
-       int ret;
-       u16 num;
-       struct kbd_state state;
-
-       if (kbd_get_max_level()) {
-               ret = kbd_get_state(&state);
-               if (ret)
-                       return 0;
-               ret = kbd_get_level(&state);
-               if (ret < 0)
-                       return 0;
-               return ret;
-       }
-
-       if (kbd_get_valid_token_counts()) {
-               ret = kbd_get_first_active_token_bit();
-               if (ret < 0)
-                       return 0;
-               for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
-                       num &= num - 1; /* clear the first bit set */
-               if (num == 0)
-                       return 0;
-               return ffs(num) - 1;
-       }
-
-       pr_warn("Keyboard brightness level control not supported\n");
-       return 0;
-}
-
-static void kbd_led_level_set(struct led_classdev *led_cdev,
-                             enum led_brightness value)
-{
-       struct kbd_state state;
-       struct kbd_state new_state;
-       u16 num;
-
-       if (kbd_get_max_level()) {
-               if (kbd_get_state(&state))
-                       return;
-               new_state = state;
-               if (kbd_set_level(&new_state, value))
-                       return;
-               kbd_set_state_safe(&new_state, &state);
-               return;
-       }
-
-       if (kbd_get_valid_token_counts()) {
-               for (num = kbd_token_bits; num != 0 && value > 0; --value)
-                       num &= num - 1; /* clear the first bit set */
-               if (num == 0)
-                       return;
-               kbd_set_token_bit(ffs(num) - 1);
-               return;
-       }
-
-       pr_warn("Keyboard brightness level control not supported\n");
-}
-
-static struct led_classdev kbd_led = {
-       .name           = "dell::kbd_backlight",
-       .brightness_set = kbd_led_level_set,
-       .brightness_get = kbd_led_level_get,
-       .groups         = kbd_led_groups,
-};
-
-static int __init kbd_led_init(struct device *dev)
-{
-       kbd_init();
-       if (!kbd_led_present)
-               return -ENODEV;
-       kbd_led.max_brightness = kbd_get_max_level();
-       if (!kbd_led.max_brightness) {
-               kbd_led.max_brightness = kbd_get_valid_token_counts();
-               if (kbd_led.max_brightness)
-                       kbd_led.max_brightness--;
-       }
-       return led_classdev_register(dev, &kbd_led);
-}
-
-static void brightness_set_exit(struct led_classdev *led_cdev,
-                               enum led_brightness value)
-{
-       /* Don't change backlight level on exit */
-};
-
-static void kbd_led_exit(void)
-{
-       if (!kbd_led_present)
-               return;
-       kbd_led.brightness_set = brightness_set_exit;
-       led_classdev_unregister(&kbd_led);
-}
-
 static int __init dell_init(void)
 {
        int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
        if (quirks && quirks->touchpad_led)
                touchpad_led_init(&platform_device->dev);
 
-       kbd_led_init(&platform_device->dev);
-
        dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
        if (dell_laptop_dir != NULL)
                debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
        debugfs_remove_recursive(dell_laptop_dir);
        if (quirks && quirks->touchpad_led)
                touchpad_led_exit();
-       kbd_led_exit();
        i8042_remove_filter(dell_laptop_i8042_filter);
        cancel_delayed_work_sync(&dell_rfkill_work);
        backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
 module_exit(dell_exit);
 
 MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
 MODULE_DESCRIPTION("Dell laptop driver");
 MODULE_LICENSE("GPL");
index e225711bb8bc0009114d6ec0d95ab8d2bb2d68f3..9c48fb32f6601bf4065db65cd2a099bc57f3812d 100644 (file)
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
 }
 EXPORT_SYMBOL_GPL(regulator_get_optional);
 
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
 static void _regulator_put(struct regulator *regulator)
 {
        struct regulator_dev *rdev;
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
        /* remove any sysfs entries */
        if (regulator->dev)
                sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+       mutex_lock(&rdev->mutex);
        kfree(regulator->supply_name);
        list_del(&regulator->list);
        kfree(regulator);
 
        rdev->open_count--;
        rdev->exclusive = 0;
+       mutex_unlock(&rdev->mutex);
 
        module_put(rdev->owner);
 }
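
The hunk above widens the scope of rdev->mutex so that unlinking the consumer and updating open_count/exclusive happen atomically with respect to other consumers of the same regulator. A minimal user-space sketch of that pattern, using pthreads and hypothetical names rather than the regulator API:

/* Sketch only: unlink and bookkeeping are done while the owner's own
 * mutex is held, so concurrent users never see a half-torn-down state. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct owner {
	pthread_mutex_t lock;
	int open_count;
	struct consumer *consumers;	/* singly linked list head */
};

struct consumer {
	struct consumer *next;
	struct owner *owner;
};

static void consumer_put(struct consumer *c)
{
	struct owner *o = c->owner;
	struct consumer **p;

	pthread_mutex_lock(&o->lock);
	for (p = &o->consumers; *p; p = &(*p)->next) {
		if (*p == c) {
			*p = c->next;		/* unlink under the lock */
			break;
		}
	}
	o->open_count--;			/* bookkeeping under the same lock */
	pthread_mutex_unlock(&o->lock);

	free(c);
}

int main(void)
{
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct consumer *c = calloc(1, sizeof(*c));

	if (!c)
		return 1;
	c->owner = &o;
	c->next = o.consumers;
	o.consumers = c;
	o.open_count = 1;

	consumer_put(c);
	printf("open_count=%d\n", o.open_count);
	return 0;
}
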
index 2809ae0d6bcd9848bd15cbc8d4d45e69df1601a3..ff828117798fd3f4775cd5cc6c8e86d8fbe33a00 100644 (file)
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
        .enable_mask    = S2MPS14_ENABLE_MASK                   \
 }
 
+#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) {        \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPS13_BUCK##num,                    \
+       .ops            = &s2mps14_reg_ops,                     \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = min,                                  \
+       .uV_step        = step,                                 \
+       .linear_min_sel = min_sel,                              \
+       .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPS13_BUCK_RAMP_DELAY,              \
+       .vsel_reg       = S2MPS13_REG_B1OUT + (num) * 2 - 1,    \
+       .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPS13_REG_B1CTRL + (num - 1) * 2,   \
+       .enable_mask    = S2MPS14_ENABLE_MASK                   \
+}
+
+#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) {     \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPS13_BUCK##num,                    \
+       .ops            = &s2mps14_reg_ops,                     \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = min,                                  \
+       .uV_step        = step,                                 \
+       .linear_min_sel = min_sel,                              \
+       .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPS13_BUCK_RAMP_DELAY,              \
+       .vsel_reg       = S2MPS13_REG_B1OUT + (num) * 2 - 1,    \
+       .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPS13_REG_B1CTRL + (num) * 2 - 1,   \
+       .enable_mask    = S2MPS14_ENABLE_MASK                   \
+}
+
 static const struct regulator_desc s2mps13_regulators[] = {
        regulator_desc_s2mps13_ldo(1,  MIN_800_MV,  STEP_12_5_MV, 0x00),
        regulator_desc_s2mps13_ldo(2,  MIN_1400_MV, STEP_50_MV,   0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
        regulator_desc_s2mps13_buck(4,  MIN_500_MV,  STEP_6_25_MV, 0x10),
        regulator_desc_s2mps13_buck(5,  MIN_500_MV,  STEP_6_25_MV, 0x10),
        regulator_desc_s2mps13_buck(6,  MIN_500_MV,  STEP_6_25_MV, 0x10),
-       regulator_desc_s2mps13_buck(7,  MIN_500_MV,  STEP_6_25_MV, 0x10),
-       regulator_desc_s2mps13_buck(8,  MIN_1000_MV, STEP_12_5_MV, 0x20),
-       regulator_desc_s2mps13_buck(9,  MIN_1000_MV, STEP_12_5_MV, 0x20),
-       regulator_desc_s2mps13_buck(10, MIN_500_MV,  STEP_6_25_MV, 0x10),
+       regulator_desc_s2mps13_buck7(7,  MIN_500_MV,  STEP_6_25_MV, 0x10),
+       regulator_desc_s2mps13_buck8_10(8,  MIN_1000_MV, STEP_12_5_MV, 0x20),
+       regulator_desc_s2mps13_buck8_10(9,  MIN_1000_MV, STEP_12_5_MV, 0x20),
+       regulator_desc_s2mps13_buck8_10(10, MIN_500_MV,  STEP_6_25_MV, 0x10),
 };
 
 static int s2mps14_regulator_enable(struct regulator_dev *rdev)
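
Bucks 7 and 8-10 get dedicated descriptor macros because their enable registers no longer follow the same stride as the voltage-select registers; the only difference between the two new macros is how (num) is turned into an offset. A stand-alone sketch of that arithmetic, with placeholder base addresses instead of the real S2MPS13 register map:

#include <stdio.h>

/* Placeholder bases, not the real S2MPS13 register map. */
#define REG_B1CTRL	0x20
#define REG_B1OUT	0x21

/* Offsets exactly as in the two new macros above. */
static unsigned vsel_reg(int num)            { return REG_B1OUT  + num * 2 - 1; }
static unsigned enable_reg_buck7(int num)    { return REG_B1CTRL + (num - 1) * 2; }
static unsigned enable_reg_buck8_10(int num) { return REG_B1CTRL + num * 2 - 1; }

int main(void)
{
	printf("BUCK7  vsel=0x%02x enable=0x%02x\n",
	       vsel_reg(7), enable_reg_buck7(7));
	for (int n = 8; n <= 10; n++)
		printf("BUCK%-2d vsel=0x%02x enable=0x%02x\n",
		       n, vsel_reg(n), enable_reg_buck8_10(n));
	return 0;
}
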
index b5e7c4670205ba15e82cba6f2325d5cd5b994bcc..89ac1d5083c66093b63caa973e37ff4897a1bb6b 100644 (file)
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
 static const struct platform_device_id s5m_rtc_id[] = {
        { "s5m-rtc",            S5M8767X },
        { "s2mps14-rtc",        S2MPS14X },
+       { },
 };
 
 static struct platform_driver s5m_rtc_driver = {
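
The added { }, entry terminates the id table; the bus code walks such tables until it reaches an entry with an empty name, so an unterminated table is walked past its end. A small user-space sketch of that convention, with an invented table rather than the platform-bus implementation:

#include <stdio.h>
#include <string.h>

struct dev_id {
	const char *name;
	unsigned long driver_data;
};

static const struct dev_id ids[] = {
	{ "s5m-rtc",     1 },
	{ "s2mps14-rtc", 2 },
	{ },	/* terminator: plays the role of the "{ }," added above */
};

static const struct dev_id *match_id(const struct dev_id *id, const char *name)
{
	for (; id->name && id->name[0]; id++)	/* stop at the empty entry */
		if (!strcmp(id->name, name))
			return id;
	return NULL;
}

int main(void)
{
	const struct dev_id *id = match_id(ids, "s2mps14-rtc");

	printf("%s -> %lu\n", id ? id->name : "none", id ? id->driver_data : 0);
	return 0;
}
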
index f407e3763432648f3271a80e2d432b7da8736c65..642c77c76b8432d07534e393335353cfbc27233a 100644 (file)
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
        QETH_DBF_TEXT(SETUP, 2, "idxanswr");
        card = CARD_FROM_CDEV(channel->ccwdev);
        iob = qeth_get_buffer(channel);
+       if (!iob)
+               return -ENOMEM;
        iob->callback = idx_reply_cb;
        memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
        channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
        QETH_DBF_TEXT(SETUP, 2, "idxactch");
 
        iob = qeth_get_buffer(channel);
+       if (!iob)
+               return -ENOMEM;
        iob->callback = idx_reply_cb;
        memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
        channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
 
+/**
+ * qeth_send_control_data() -  send control command to the card
+ * @card:                      qeth_card structure pointer
+ * @len:                       size of the command buffer
+ * @iob:                       qeth_cmd_buffer pointer
+ * @reply_cb:                  callback function pointer
+ * @cb_card:                   pointer to the qeth_card structure
+ * @cb_reply:                  pointer to the qeth_reply structure
+ * @cb_cmd:                    pointer to the original iob for non-IPA
+ *                             commands, or to the qeth_ipa_cmd structure
+ *                             for the IPA commands.
+ * @reply_param:               private pointer passed to the callback
+ *
+ * Returns the value of the 'return_code' field of the response
+ * block returned by the hardware, or another error indication.
+ * A value of zero means the command completed successfully.
+ *
+ * The callback function is called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. It must
+ * return non-zero if more reply blocks are expected, and zero
+ * once the last or only reply block has been received. The
+ * callback can read the reply_param pointer from the 'param'
+ * field of struct qeth_reply.
+ */
+
 int qeth_send_control_data(struct qeth_card *card, int len,
                struct qeth_cmd_buffer *iob,
-               int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
-                       unsigned long),
+               int (*reply_cb)(struct qeth_card *cb_card,
+                               struct qeth_reply *cb_reply,
+                               unsigned long cb_cmd),
                void *reply_param)
 {
        int rc;
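
The new kernel-doc above pins down the reply-callback contract: the callback is invoked once per reply block and its return value tells the core whether to keep waiting. A stand-alone sketch of that contract with made-up types and data:

#include <stdio.h>

struct reply_block {
	int seq;
	int is_last;
};

/* Mirrors the documented rule: non-zero means "more blocks expected". */
static int reply_cb(void *card, void *reply_param, struct reply_block *blk)
{
	int *count = reply_param;

	(void)card;
	(*count)++;
	printf("got reply block %d%s\n", blk->seq, blk->is_last ? " (last)" : "");
	return blk->is_last ? 0 : 1;
}

int main(void)
{
	struct reply_block blocks[] = { {1, 0}, {2, 0}, {3, 1} };
	int count = 0;

	for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		if (!reply_cb(NULL, &count, &blocks[i]))
			break;

	printf("callback saw %d block(s)\n", count);
	return 0;
}
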
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
 
-       iob = qeth_wait_for_buffer(&card->write);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+       iob = qeth_get_buffer(&card->write);
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+       } else {
+               dev_warn(&card->gdev->dev,
+                        "The qeth driver ran out of channel command buffers\n");
+               QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+                                dev_name(&card->gdev->dev));
+       }
 
        return iob;
 }
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
 
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
                int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
                        unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
        QETH_DBF_TEXT(SETUP, 2, "strtlan");
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
        return rc;
 }
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
                                     QETH_PROT_IPV4);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
-       cmd->data.setadapterparms.hdr.command_code = command;
-       cmd->data.setadapterparms.hdr.used_total = 1;
-       cmd->data.setadapterparms.hdr.seq_no = 1;
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+               cmd->data.setadapterparms.hdr.command_code = command;
+               cmd->data.setadapterparms.hdr.used_total = 1;
+               cmd->data.setadapterparms.hdr.seq_no = 1;
+       }
 
        return iob;
 }
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
        QETH_CARD_TEXT(card, 3, "queryadp");
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
                                   sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
        return rc;
 }
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
 
        QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
        return rc;
 }
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
                return -ENOMEDIUM;
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
                                sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       if (!iob)
+               return -ENOMEM;
        return qeth_send_ipa_cmd(card, iob,
                                qeth_query_switch_attributes_cb, sw_info);
 }
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
 
        QETH_DBF_TEXT(SETUP, 2, "qdiagass");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 16;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
 
        QETH_DBF_TEXT(SETUP, 2, "diagtrap");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 80;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
                        sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return;
        cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.mode = mode;
        qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
                                   sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
        cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
                                   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
                                   sizeof(struct qeth_set_access_ctrl));
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
                                   QETH_SNMP_SETADP_CMDLENGTH + req_len);
+       if (!iob) {
+               rc = -ENOMEM;
+               goto out;
+       }
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
        rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
                        rc = -EFAULT;
        }
-
+out:
        kfree(ureq);
        kfree(qinfo.udata);
        return rc;
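
The -ENOMEM path added above reuses the function's existing cleanup via goto out, so the earlier allocations (ureq, qinfo.udata) are still freed when no command buffer is available. A user-space sketch of the same single-exit pattern, with illustrative names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_command(int fail_buffer)
{
	int rc = 0;
	char *ureq = malloc(64);
	char *udata = malloc(256);
	char *iob = NULL;

	if (!ureq || !udata) {
		rc = -ENOMEM;
		goto out;
	}

	iob = fail_buffer ? NULL : malloc(32);	/* stand-in for qeth_get_adapter_cmd() */
	if (!iob) {
		rc = -ENOMEM;
		goto out;	/* jump to the shared cleanup instead of leaking */
	}

	/* ... send the command using iob ... */
	free(iob);
out:
	free(ureq);		/* the pre-existing cleanup path, now shared */
	free(udata);
	return rc;
}

int main(void)
{
	printf("ok path:   %d\n", do_command(0));
	printf("fail path: %d\n", do_command(1));
	return 0;
}
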
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
                                   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
                                   sizeof(struct qeth_query_oat));
+       if (!iob) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        oat_req = &cmd->data.setadapterparms.data.query_oat;
        oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
                return -EOPNOTSUPP;
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
                sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       if (!iob)
+               return -ENOMEM;
        return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
                                        (void *)carrier_info);
 }
@@ -5060,11 +5133,23 @@ retriable:
        card->options.adp.supported_funcs = 0;
        card->options.sbp.supported_funcs = 0;
        card->info.diagass_support = 0;
-       qeth_query_ipassists(card, QETH_PROT_IPV4);
-       if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
-               qeth_query_setadapterparms(card);
-       if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
-               qeth_query_setdiagass(card);
+       rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+       if (rc == -ENOMEM)
+               goto out;
+       if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+               rc = qeth_query_setadapterparms(card);
+               if (rc < 0) {
+                       QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+                       goto out;
+               }
+       }
+       if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+               rc = qeth_query_setdiagass(card);
+               if (rc < 0) {
+                       QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+                       goto out;
+               }
+       }
        return 0;
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
index d02cd1a679432fc7ef485295d308bac046f96df9..ce87ae72edbd62caae02d88035c991b6909d0bfd 100644 (file)
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
 static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
-                          enum qeth_ipa_cmds,
-                          int (*reply_cb) (struct qeth_card *,
-                                           struct qeth_reply*,
-                                           unsigned long));
+                          enum qeth_ipa_cmds);
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
        return ndev;
 }
 
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
-                               struct qeth_reply *reply,
-                               unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
 {
-       struct qeth_ipa_cmd *cmd;
-       __u8 *mac;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Sgmacb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       mac = &cmd->data.setdelmac.mac[0];
-       /* MAC already registered, needed in couple/uncouple case */
-       if (cmd->hdr.return_code ==  IPA_RC_L2_DUP_MAC) {
-               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
-                         mac, QETH_CARD_IFNAME(card));
-               cmd->hdr.return_code = 0;
+       if (retcode)
+               QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+       switch (retcode) {
+       case IPA_RC_SUCCESS:
+               rc = 0;
+               break;
+       case IPA_RC_L2_UNSUPPORTED_CMD:
+               rc = -ENOSYS;
+               break;
+       case IPA_RC_L2_ADDR_TABLE_FULL:
+               rc = -ENOSPC;
+               break;
+       case IPA_RC_L2_DUP_MAC:
+       case IPA_RC_L2_DUP_LAYER3_MAC:
+               rc = -EEXIST;
+               break;
+       case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+       case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+               rc = -EPERM;
+               break;
+       case IPA_RC_L2_MAC_NOT_FOUND:
+               rc = -ENOENT;
+               break;
+       case -ENOMEM:
+               rc = -ENOMEM;
+               break;
+       default:
+               rc = -EIO;
+               break;
        }
-       if (cmd->hdr.return_code)
-               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
-                         mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
 {
-       QETH_CARD_TEXT(card, 2, "L2Sgmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
-                                         qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
-                               struct qeth_reply *reply,
-                               unsigned long data)
-{
-       struct qeth_ipa_cmd *cmd;
-       __u8 *mac;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Dgmacb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       mac = &cmd->data.setdelmac.mac[0];
-       if (cmd->hdr.return_code)
-               QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
-                         mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-       return 0;
+       QETH_CARD_TEXT(card, 2, "L2Sgmac");
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_SETGMAC));
+       if (rc == -EEXIST)
+               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+                       mac, QETH_CARD_IFNAME(card));
+       else if (rc)
+               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+                       mac, QETH_CARD_IFNAME(card), rc);
+       return rc;
 }
 
 static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
 {
+       int rc;
+
        QETH_CARD_TEXT(card, 2, "L2Dgmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
-                                         qeth_l2_send_delgroupmac_cb);
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_DELGMAC));
+       if (rc)
+               QETH_DBF_MESSAGE(2,
+                       "Could not delete group MAC %pM on %s: %d\n",
+                       mac, QETH_CARD_IFNAME(card), rc);
+       return rc;
 }
 
 static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
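
qeth_setdel_makerc() centralizes the translation of adapter return codes into negative errno values, which is what lets the callers above simply test for -EEXIST or print a plain error number. A stand-alone sketch of the idea, with invented code numbers:

#include <errno.h>
#include <stdio.h>

enum {
	RC_SUCCESS    = 0x0000,
	RC_DUP_MAC    = 0x2005,		/* invented code numbers */
	RC_TABLE_FULL = 0x2006,
	RC_NOT_FOUND  = 0x2007,
};

static int makerc(int retcode)
{
	switch (retcode) {
	case RC_SUCCESS:    return 0;
	case RC_DUP_MAC:    return -EEXIST;
	case RC_TABLE_FULL: return -ENOSPC;
	case RC_NOT_FOUND:  return -ENOENT;
	case -ENOMEM:       return -ENOMEM;	/* pass through local failures */
	default:            return -EIO;
	}
}

int main(void)
{
	int codes[] = { RC_SUCCESS, RC_DUP_MAC, RC_NOT_FOUND, 0x9999, -ENOMEM };

	for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("%d -> %d\n", codes[i], makerc(codes[i]));
	return 0;
}
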
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
        mc->is_vmac = vmac;
 
        if (vmac) {
-               rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-                                       NULL);
+               rc = qeth_setdel_makerc(card,
+                       qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
        } else {
-               rc = qeth_l2_send_setgroupmac(card, mac);
+               rc = qeth_setdel_makerc(card,
+                       qeth_l2_send_setgroupmac(card, mac));
        }
 
        if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
                if (del) {
                        if (mc->is_vmac)
                                qeth_l2_send_setdelmac(card, mc->mc_addr,
-                                       IPA_CMD_DELVMAC, NULL);
+                                       IPA_CMD_DELVMAC);
                        else
                                qeth_l2_send_delgroupmac(card, mc->mc_addr);
                }
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
 
        QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
        iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setdelvlan.vlan_id = i;
        return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
+       int rc;
 
        QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
        if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
        id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
        if (id) {
                id->vid = vid;
-               qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+               rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+               if (rc) {
+                       kfree(id);
+                       return rc;
+               }
                spin_lock_bh(&card->vlanlock);
                list_add_tail(&id->list, &card->vid_list);
                spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
+       int rc = 0;
 
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
        }
        spin_unlock_bh(&card->vlanlock);
        if (tmpid) {
-               qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+               rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
                kfree(tmpid);
        }
        qeth_l2_set_multicast_list(card->dev);
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
 }
 
 static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
-                          enum qeth_ipa_cmds ipacmd,
-                          int (*reply_cb) (struct qeth_card *,
-                                           struct qeth_reply*,
-                                           unsigned long))
+                          enum qeth_ipa_cmds ipacmd)
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_cmd_buffer *iob;
 
        QETH_CARD_TEXT(card, 2, "L2sdmac");
        iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
        memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
-       return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+       return qeth_send_ipa_cmd(card, iob, NULL, NULL);
 }
 
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
-                          struct qeth_reply *reply,
-                          unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 {
-       struct qeth_ipa_cmd *cmd;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Smaccb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code) {
-               QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+       QETH_CARD_TEXT(card, 2, "L2Setmac");
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_SETVMAC));
+       if (rc == 0) {
+               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+               dev_info(&card->gdev->dev,
+                       "MAC address %pM successfully registered on device %s\n",
+                       card->dev->dev_addr, card->dev->name);
+       } else {
                card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-               switch (cmd->hdr.return_code) {
-               case IPA_RC_L2_DUP_MAC:
-               case IPA_RC_L2_DUP_LAYER3_MAC:
+               switch (rc) {
+               case -EEXIST:
                        dev_warn(&card->gdev->dev,
-                               "MAC address %pM already exists\n",
-                               cmd->data.setdelmac.mac);
+                               "MAC address %pM already exists\n", mac);
                        break;
-               case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
-               case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+               case -EPERM:
                        dev_warn(&card->gdev->dev,
-                               "MAC address %pM is not authorized\n",
-                               cmd->data.setdelmac.mac);
-                       break;
-               default:
+                               "MAC address %pM is not authorized\n", mac);
                        break;
                }
-       } else {
-               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
-                      OSA_ADDR_LEN);
-               dev_info(&card->gdev->dev,
-                       "MAC address %pM successfully registered on device %s\n",
-                       card->dev->dev_addr, card->dev->name);
-       }
-       return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
-       QETH_CARD_TEXT(card, 2, "L2Setmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-                                         qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
-                          struct qeth_reply *reply,
-                          unsigned long data)
-{
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 2, "L2Dmaccb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code) {
-               QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
-               return 0;
        }
-       card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 {
+       int rc;
+
        QETH_CARD_TEXT(card, 2, "L2Delmac");
        if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
                return 0;
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
-                                         qeth_l2_send_delmac_cb);
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_DELVMAC));
+       if (rc == 0)
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+       return rc;
 }
 
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                if (rc) {
                        QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
                                "device %s: x%x\n", CARD_BUS_ID(card), rc);
-                       QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
                        return rc;
                }
                QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
        rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
-       if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+       if (!rc || (rc == -ENOENT))
                rc = qeth_l2_send_setmac(card, addr->sa_data);
        return rc ? -EINVAL : 0;
 }
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        recover_flag = card->state;
        rc = qeth_core_hardsetup_card(card);
        if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "brqsuppo");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength =
                sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
                return -EOPNOTSUPP;
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength =
                sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        if (rc)
                return rc;
        rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
-       if (rc)
-               return rc;
-       return 0;
+       return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
 
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
        if (!(card->options.sbp.supported_funcs & setcmd))
                return -EOPNOTSUPP;
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength = cmdlength;
        cmd->data.sbp.hdr.command_code = setcmd;
index 625227ad16ee91cd2b4ca1aaa8a9541408e2a838..e2a0ee845399d64d48a5dfdb813161feeb9e58e2 100644 (file)
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
        QETH_CARD_TEXT(card, 4, "setdelmc");
 
        iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
        if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
        QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
 
        iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        if (addr->proto == QETH_PROT_IPV6) {
                memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "setroutg");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setrtg.type = (type);
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
        QETH_CARD_TEXT(card, 4, "getasscm");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
 
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setassparms.hdr.assist_no = ipa_func;
-       cmd->data.setassparms.hdr.length = 8 + len;
-       cmd->data.setassparms.hdr.command_code = cmd_code;
-       cmd->data.setassparms.hdr.return_code = 0;
-       cmd->data.setassparms.hdr.seq_no = 0;
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               cmd->data.setassparms.hdr.assist_no = ipa_func;
+               cmd->data.setassparms.hdr.length = 8 + len;
+               cmd->data.setassparms.hdr.command_code = cmd_code;
+               cmd->data.setassparms.hdr.return_code = 0;
+               cmd->data.setassparms.hdr.seq_no = 0;
+       }
 
        return iob;
 }
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
        QETH_CARD_TEXT(card, 4, "simassp6");
        iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       0, QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob, 0, 0,
                                   qeth_l3_default_setassparms_cb, NULL);
        return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
                length = sizeof(__u32);
        iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       length, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob, length, data,
                                   qeth_l3_default_setassparms_cb, NULL);
        return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
                                     QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
                        card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
                                     QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
                        card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
        QETH_DBF_TEXT(SETUP, 2, "diagtrac");
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 16;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
                        IPA_CMD_ASS_ARP_QUERY_INFO,
                        sizeof(struct qeth_arp_query_data) - sizeof(char),
                        prot);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
        cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
                                       IPA_CMD_ASS_ARP_ADD_ENTRY,
                                       sizeof(struct qeth_arp_cache_entry),
                                       QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob,
                                   sizeof(struct qeth_arp_cache_entry),
                                   (unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
                                       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
                                       12,
                                       QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob,
                                   12, (unsigned long)buf,
                                   qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 
 static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
+       int rc;
+
        if (card->info.type == QETH_CARD_TYPE_OSD ||
            card->info.type == QETH_CARD_TYPE_OSX) {
                if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        return -ENODEV;
                card->dev->flags |= IFF_NOARP;
                card->dev->netdev_ops = &qeth_l3_netdev_ops;
-               qeth_l3_iqd_read_initial_mac(card);
+               rc = qeth_l3_iqd_read_initial_mac(card);
+               if (rc)
+                       return rc;
                if (card->options.hsuid[0])
                        memcpy(card->dev->perm_addr, card->options.hsuid, 9);
        } else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        recover_flag = card->state;
        rc = qeth_core_hardsetup_card(card);
        if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 contin:
        rc = qeth_l3_setadapter_parms(card);
        if (rc)
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
        if (!card->options.sniffer) {
                rc = qeth_l3_start_ipassists(card);
                if (rc) {
@@ -3410,10 +3438,10 @@ contin:
                }
                rc = qeth_l3_setrouting_v4(card);
                if (rc)
-                       QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
                rc = qeth_l3_setrouting_v6(card);
                if (rc)
-                       QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
        }
        netif_tx_disable(card->dev);
 
index df4e27cd996a3c68cf095e40d7a6ccf7ebf6a065..9219953ee949a9dfaf1ff0f044e41a3e5c13adc7 100644 (file)
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
+       ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
 }
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
+       if (ipr_cmd->eh_comp)
+               complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
        return rc;
 }
 
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd:   ipr command struct
+ * @device:            device to match (sdev)
+ *
+ * Returns:
+ *     1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+       if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+               return 1;
+       return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ioa_cfg:   ioa config struct
+ * @device:            device to match (sdev)
+ * @match:             match function to use
+ *
+ * Returns:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+                           int (*match)(struct ipr_cmnd *, void *))
+{
+       struct ipr_cmnd *ipr_cmd;
+       int wait;
+       unsigned long flags;
+       struct ipr_hrr_queue *hrrq;
+       signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+       DECLARE_COMPLETION_ONSTACK(comp);
+
+       ENTER;
+       do {
+               wait = 0;
+
+               for_each_hrrq(hrrq, ioa_cfg) {
+                       spin_lock_irqsave(hrrq->lock, flags);
+                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                               if (match(ipr_cmd, device)) {
+                                       ipr_cmd->eh_comp = &comp;
+                                       wait++;
+                               }
+                       }
+                       spin_unlock_irqrestore(hrrq->lock, flags);
+               }
+
+               if (wait) {
+                       timeout = wait_for_completion_timeout(&comp, timeout);
+
+                       if (!timeout) {
+                               wait = 0;
+
+                               for_each_hrrq(hrrq, ioa_cfg) {
+                                       spin_lock_irqsave(hrrq->lock, flags);
+                                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                                               if (match(ipr_cmd, device)) {
+                                                       ipr_cmd->eh_comp = NULL;
+                                                       wait++;
+                                               }
+                                       }
+                                       spin_unlock_irqrestore(hrrq->lock, flags);
+                               }
+
+                               if (wait)
+                                       dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+                               LEAVE;
+                               return wait ? FAILED : SUCCESS;
+                       }
+               }
+       } while (wait);
+
+       LEAVE;
+       return SUCCESS;
+}
+
 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
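
ipr_wait_for_ops() tags every matching outstanding command with an on-stack completion and then waits, with a timeout, for the completion side to fire before declaring the error-handling step finished. A user-space sketch of that wait-with-timeout pattern, using pthreads in place of the kernel's completion API:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Returns 0 on timeout, non-zero if completed in time. */
static int wait_for_completion_timeout(struct completion *c, int seconds)
{
	struct timespec ts;
	int rc = 1;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += seconds;

	pthread_mutex_lock(&c->lock);
	while (!c->done && rc)
		if (pthread_cond_timedwait(&c->cond, &c->lock, &ts))
			rc = 0;		/* timed out */
	pthread_mutex_unlock(&c->lock);
	return rc;
}

static struct completion comp = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *worker(void *arg)
{
	(void)arg;
	sleep(1);		/* the outstanding command finishing */
	complete(&comp);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	puts(wait_for_completion_timeout(&comp, 5) ?
	     "commands drained" : "timed out waiting");
	pthread_join(&t, NULL);
	return 0;
}
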
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
 {
        int rc;
+       struct ipr_ioa_cfg *ioa_cfg;
+
+       ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
 
        spin_lock_irq(cmd->device->host->host_lock);
        rc = __ipr_eh_dev_reset(cmd);
        spin_unlock_irq(cmd->device->host->host_lock);
 
+       if (rc == SUCCESS)
+               rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
        return rc;
 }
 
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
 {
        unsigned long flags;
        int rc;
+       struct ipr_ioa_cfg *ioa_cfg;
 
        ENTER;
 
+       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
        rc = ipr_cancel_op(scsi_cmd);
        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
 
+       if (rc == SUCCESS)
+               rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
        LEAVE;
        return rc;
 }
index b4f3eec51bc9b19783f931116f63aa70bc2d235a..ec03b42fa2b9fe0f388bc266fd75e8190460f682 100644 (file)
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
                struct scsi_device *sdev;
        } u;
 
+       struct completion *eh_comp;
        struct ipr_hrr_queue *hrrq;
        struct ipr_ioa_cfg *ioa_cfg;
 };
index e02885451425dbd4af6272ce53d1b2b54f809a43..9b3829931f40d95c2cdd4e673c33a56b510edc9b 100644 (file)
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
-       /* We can fail this if we're doing SCSI operations
+       /* We can fail try_module_get if we're doing SCSI operations
         * from module exit (like cache flush) */
-       try_module_get(sdev->host->hostt->module);
+       __module_get(sdev->host->hostt->module);
 
        return 0;
 }
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-#ifdef CONFIG_MODULE_UNLOAD
-       struct module *module = sdev->host->hostt->module;
-
-       /* The module refcount will be zero if scsi_device_get()
-        * was called from a module removal routine */
-       if (module && module_refcount(module) != 0)
-               module_put(module);
-#endif
+       module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_device_put);
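
The pair of hunks above makes the reference counting symmetric: scsi_device_get() now always takes a module reference (__module_get), so scsi_device_put() can always drop exactly one without inspecting the current count. A tiny sketch of that symmetry with a plain counter standing in for module refcounts:

#include <assert.h>
#include <stdio.h>

struct object {
	int refcount;
};

static void object_get(struct object *o) { o->refcount++; }	/* like __module_get() */
static void object_put(struct object *o)
{
	assert(o->refcount > 0);
	o->refcount--;						/* like module_put() */
}

int main(void)
{
	struct object o = { .refcount = 1 };	/* owner's initial reference */

	object_get(&o);		/* get */
	/* ... use the device ... */
	object_put(&o);		/* put: always balanced, no special cases */

	printf("refcount back to %d\n", o.refcount);
	return 0;
}
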
index 7b8b51bc29b4353debc9bd9911fcbf939753c782..4aca1b0378c2458212945a3877b303062ea529ca 100644 (file)
@@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        req_opcode = cmd[3];
        req_sa = get_unaligned_be16(cmd + 4);
        alloc_len = get_unaligned_be32(cmd + 6);
-       if (alloc_len < 4 && alloc_len > 0xffff) {
+       if (alloc_len < 4 || alloc_len > 0xffff) {
                mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
                return check_condition_result;
        }
@@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
                a_len = 8192;
        else
                a_len = alloc_len;
-       arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
+       arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
        if (NULL == arr) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
                                INSUFF_RES_ASCQ);
index 6d5c0b8cb0bb47a040d7aec986dbcbae68ffa2b5..17bb541f7cc259a8a3f52c65ec1cae5636e87f65 100644 (file)
@@ -1143,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;
 
-               BUG_ON(prot_sdb == NULL);
+               if (prot_sdb == NULL) {
+                       /*
+                        * This can happen if someone (e.g. multipath)
+                        * queues a command to a device on an adapter
+                        * that does not support DIX.
+                        */
+                       WARN_ON_ONCE(1);
+                       error = BLKPREP_KILL;
+                       goto err_exit;
+               }
+
                ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
 
                if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
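
Instead of BUG_ON(), a missing protection descriptor now triggers a one-time warning and fails just that request with BLKPREP_KILL. A user-space sketch of the same "warn and fail the request" shape, with a hypothetical request type and error value:

#include <errno.h>
#include <stdio.h>

struct request {
	void *prot_buffer;	/* may legitimately be missing */
};

static int prep_request(struct request *rq)
{
	if (!rq->prot_buffer) {
		/* the kernel code uses WARN_ON_ONCE() here */
		fprintf(stderr, "warning: request without protection buffer\n");
		return -EIO;	/* kill this request only, not the machine */
	}
	/* ... map the protection buffer ... */
	return 0;
}

int main(void)
{
	int buf;
	struct request bad = { .prot_buffer = NULL };
	struct request good = { .prot_buffer = &buf };

	printf("bad:  %d\n", prep_request(&bad));
	printf("good: %d\n", prep_request(&good));
	return 0;
}
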
index 7281316a5ecba79203dc7351b280ab25cd5f2d11..a67d37c7e3c00f9518694f267aa75efd5c73398a 100644 (file)
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
        iounmap(clk_reg);
 
        dws->num_cs = 16;
-       dws->fifo_len = 40;     /* FIFO has 40 words buffer */
 
 #ifdef CONFIG_SPI_DW_MID_DMA
        dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
index d0d5542efc06db7a74b46a6a7230a4ce65ba53d5..8edcd1b84562109799281fb48867df8ce73ac7b2 100644 (file)
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
        if (!dws->fifo_len) {
                u32 fifo;
 
-               for (fifo = 2; fifo <= 257; fifo++) {
+               for (fifo = 2; fifo <= 256; fifo++) {
                        dw_writew(dws, DW_SPI_TXFLTR, fifo);
                        if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                                break;
                }
 
-               dws->fifo_len = (fifo == 257) ? 0 : fifo;
+               dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
                dw_writew(dws, DW_SPI_TXFLTR, 0);
        }
 }
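
The probe loop raises the TX FIFO threshold until a written value no longer reads back, and the adjusted bounds plus the fifo - 1 result fix the earlier off-by-one. A stand-alone sketch of the probe against a fake register; how many threshold values the fake part accepts is an assumption for the demo, not documented DW behaviour:

#include <stdio.h>

#define HW_FIFO_DEPTH 40	/* pretend depth of the fake controller */

static unsigned int txfltr;	/* fake DW_SPI_TXFLTR register */

static void fake_writew(unsigned int val)
{
	if (val <= HW_FIFO_DEPTH)	/* assumption: larger thresholds are ignored */
		txfltr = val;
}

static unsigned int fake_readw(void)
{
	return txfltr;
}

int main(void)
{
	unsigned int fifo, fifo_len;

	for (fifo = 2; fifo <= 256; fifo++) {
		fake_writew(fifo);
		if (fifo != fake_readw())
			break;		/* first value that did not stick */
	}
	fifo_len = (fifo == 2) ? 0 : fifo - 1;
	fake_writew(0);

	printf("detected fifo_len = %u\n", fifo_len);
	return 0;
}
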
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dws);
                if (ret) {
-                       dev_warn(&master->dev, "DMA init failed\n");
+                       dev_warn(dev, "DMA init failed\n");
                        dws->dma_inited = 0;
                }
        }
index 05c623cfb078d6503bd6d501cc1e821f07784d9e..23822e7df6c1c6e1e2caa18ea19cfcb069c3796d 100644 (file)
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)
                        cs_deassert(drv_data);
        }
 
-       spi_finalize_current_message(drv_data->master);
        drv_data->cur_chip = NULL;
+       spi_finalize_current_message(drv_data->master);
 }
 
 static void reset_sccr1(struct driver_data *drv_data)
index 96a5fc0878d86d4fc217b30d466621176b1bc24f..3ab7a21445fc253406eaf92abb87ce99974ce828 100644 (file)
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {
 #define MDR1_SYNCMD_LR  0x30000000 /*   L/R mode */
 #define MDR1_SYNCAC_SHIFT       25 /* Sync Polarity (1 = Active-low) */
 #define MDR1_BITLSB_SHIFT       24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_FLD_MASK   0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_MASK   0x0000000c /* Frame Sync Signal Interval (0-3) */
 #define MDR1_FLD_SHIFT           2
 #define MDR1_XXSTP      0x00000001 /* Transmission/Reception Stop on FIFO */
 /* TMDR1 */
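
MDR1_FLD_SHIFT is 2 and the field holds values 0-3, i.e. it is two bits wide, so the matching mask is 0x0000000c; the old 0x000000c0 selected bits 7:6 instead. A small sketch of keeping a field's mask and shift consistent, using generic helpers rather than the MSIOF layout:

#include <assert.h>
#include <stdio.h>

#define FLD_SHIFT 2
#define FLD_WIDTH 2
#define FLD_MASK  (((1u << FLD_WIDTH) - 1) << FLD_SHIFT)	/* == 0x0000000c */

static unsigned int field_get(unsigned int reg)
{
	return (reg & FLD_MASK) >> FLD_SHIFT;
}

static unsigned int field_set(unsigned int reg, unsigned int val)
{
	return (reg & ~FLD_MASK) | ((val << FLD_SHIFT) & FLD_MASK);
}

int main(void)
{
	unsigned int reg = 0xffffffff;

	assert(FLD_MASK == 0x0000000c);
	reg = field_set(reg, 1);
	printf("reg=0x%08x field=%u\n", reg, field_get(reg));
	return 0;
}
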
index 81784c6f7b8872ed7a0b45aaea1ec0124633db17..77d8753f6ba40df0099dee88b05116f1684e6e93 100644 (file)
@@ -1,6 +1,7 @@
 config VIDEO_TLG2300
        tristate "Telegent TLG2300 USB video capture support (Deprecated)"
        depends on VIDEO_DEV && I2C && SND && DVB_CORE
+       depends on MEDIA_USB_SUPPORT
        select VIDEO_TUNER
        select VIDEO_TVEEPROM
        depends on RC_CORE
index 5927c0a98a74b29cddac4d7894a71580590b7ff2..bcfd2a22208f34b621faf6922625e9e9fec83832 100644 (file)
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
        .shutdown       = cdns_wdt_shutdown,
        .driver         = {
                .name   = "cdns-wdt",
-               .owner  = THIS_MODULE,
                .of_match_table = cdns_wdt_of_match,
                .pm     = &cdns_wdt_pm_ops,
        },
index d6add516a7a7635662e5c1cb12a10206b93c5544..5142bbabe0279f0b36c92c854f7daa407e2e32a5 100644 (file)
@@ -52,6 +52,8 @@
 #define IMX2_WDT_WRSR          0x04            /* Reset Status Register */
 #define IMX2_WDT_WRSR_TOUT     (1 << 1)        /* -> Reset due to Timeout */
 
+#define IMX2_WDT_WMCR          0x08            /* Misc Register */
+
 #define IMX2_WDT_MAX_TIME      128
 #define IMX2_WDT_DEFAULT_TIME  60              /* in seconds */
 
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
 
        imx2_wdt_ping_if_active(wdog);
 
+       /*
+        * Disable the watchdog power down counter at boot. Otherwise the power
+        * down counter will pull down the #WDOG interrupt line for one clock
+        * cycle.
+        */
+       regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
+
        ret = watchdog_register_device(wdog);
        if (ret) {
                dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM_SLEEP
-/* Disable watchdog if it is active during suspend */
+/* Disable watchdog if it is active or non-active but still running */
 static int imx2_wdt_suspend(struct device *dev)
 {
        struct watchdog_device *wdog = dev_get_drvdata(dev);
        struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
 
-       imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
-       imx2_wdt_ping(wdog);
+       /* The watchdog IP block is running */
+       if (imx2_wdt_is_running(wdev)) {
+               imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+               imx2_wdt_ping(wdog);
 
-       /* Watchdog has been stopped but IP block is still running */
-       if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
-               del_timer_sync(&wdev->timer);
+               /* The watchdog is not active */
+               if (!watchdog_active(wdog))
+                       del_timer_sync(&wdev->timer);
+       }
 
        clk_disable_unprepare(wdev->clk);
 
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
        clk_prepare_enable(wdev->clk);
 
        if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
-               /* Resumes from deep sleep we need restart
-                * the watchdog again.
+               /*
+                * If the watchdog is still active and resumes
+                * from deep sleep state, need to restart the
+                * watchdog again.
                 */
                imx2_wdt_setup(wdog);
                imx2_wdt_set_timeout(wdog, wdog->timeout);
                imx2_wdt_ping(wdog);
        } else if (imx2_wdt_is_running(wdev)) {
+               /* Resuming from non-deep sleep state. */
+               imx2_wdt_set_timeout(wdog, wdog->timeout);
                imx2_wdt_ping(wdog);
-               mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+               /*
+                * But the watchdog is not active, then start
+                * the timer again.
+                */
+               if (!watchdog_active(wdog))
+                       mod_timer(&wdev->timer,
+                                 jiffies + wdog->timeout * HZ / 2);
        }
 
        return 0;
index ef6a298e8c45833843097d51499333ff9a4ecfef..1f4155ee3404de97e4eee3988dc7823c9874ecbd 100644 (file)
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
        .remove         = meson_wdt_remove,
        .shutdown       = meson_wdt_shutdown,
        .driver         = {
-               .owner          = THIS_MODULE,
                .name           = DRV_NAME,
                .of_match_table = meson_wdt_dt_ids,
        },
index 7e607416755a880fef1a06d3a8a3482417c0b364..0b180708bf79d87a36c9dcc78bbd6d72772101df 100644 (file)
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
        struct percpu_counter total_bytes_pinned;
 
        struct list_head list;
+       /* Protected by the spinlock 'lock'. */
        struct list_head ro_bgs;
 
        struct rw_semaphore groups_sem;
index 15116585e7142d3865d822828011ee1ac38f0519..a684086c3c8123702cc41caa4d4dfe085aa7db3b 100644 (file)
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
-       list_del_init(&block_group->ro_list);
        if (list_empty(&block_group->space_info->block_groups[index])) {
                kobj = block_group->space_info->block_group_kobjs[index];
                block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        btrfs_remove_free_space_cache(block_group);
 
        spin_lock(&block_group->space_info->lock);
+       list_del_init(&block_group->ro_list);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;
index 4ebabd2371533788c496070122def464004328ac..790dbae3343c4f965eaa58e317c08d702ad0a7ff 100644 (file)
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
 
                next = next_state(state);
 
-               failrec = (struct io_failure_record *)state->private;
+               failrec = (struct io_failure_record *)(unsigned long)state->private;
                free_extent_state(state);
                kfree(failrec);
 
index 9e1569ffbf6ea66f1324022db9f9aba559339c73..2f0fbc374e876f90ee1e07ecfe5704bd4021c570 100644 (file)
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 
        ppath = btrfs_alloc_path();
        if (!ppath) {
-               btrfs_free_path(ppath);
+               btrfs_free_path(path);
                return -ENOMEM;
        }
 
index 60f7cbe815e9c88362a1680f8e4259f51ca6f019..6f49b2872a6454330bac0ef912be3d0152e2ef4f 100644 (file)
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
                         */
                        if (fs_info->pending_changes == 0)
                                return 0;
+                       /*
+                        * A non-blocking test if the fs is frozen. We must not
+                        * start a new transaction here otherwise a deadlock
+                        * happens. The pending operations are delayed to the
+                        * next commit after thawing.
+                        */
+                       if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
+                               __sb_end_write(sb, SB_FREEZE_WRITE);
+                       else
+                               return 0;
                        trans = btrfs_start_transaction(root, 0);
-               } else {
-                       return PTR_ERR(trans);
                }
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
        }
        return btrfs_commit_transaction(trans, root);
 }
index a605d4e2f2bca98e14c430c31514588c1b7982f1..e88b59d13439690f15810359ee7be343ad86b7a9 100644 (file)
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
        unsigned long prev;
        unsigned long bit;
 
-       prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+       prev = xchg(&fs_info->pending_changes, 0);
        if (!prev)
                return;
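
The hunk above matters because cmpxchg(&x, 0, 0) only stores when the value is already zero, so a non-zero pending_changes word was read but never cleared and the same bits could be applied again on a later commit; xchg() reads and clears in one atomic step. A small stand-alone sketch of the difference, using the GCC/Clang __atomic builtins in place of the kernel's cmpxchg()/xchg():

	#include <stdio.h>

	int main(void)
	{
		unsigned long pending = 0x5;	/* two pending change bits set */
		unsigned long expected = 0;

		/* cmpxchg(&pending, 0, 0): stores 0 only if pending was already 0,
		 * so here it merely observes the value and leaves the bits set. */
		__atomic_compare_exchange_n(&pending, &expected, 0, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		printf("after cmpxchg-style read: pending=%#lx (still set)\n", pending);

		/* xchg(&pending, 0): returns the old value and clears it atomically,
		 * so the same bits cannot be handled twice. */
		unsigned long prev = __atomic_exchange_n(&pending, 0, __ATOMIC_SEQ_CST);
		printf("xchg returned %#lx, pending now %#lx\n", prev, pending);
		return 0;
	}
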
 
index 45cb59bcc79188df595447ab93b56f9cf1a6f080..8b7898b7670f88c3ea9ec596129ef569eed183bd 100644 (file)
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
        }
 
        src_inode = file_inode(src_file.file);
+       rc = -EINVAL;
+       if (S_ISDIR(src_inode->i_mode))
+               goto out_fput;
 
        /*
         * Note: cifs case is easier than btrfs since server responsible for
         * checks for proper open modes and file type and if it wants
         * server could even support copy of range where source = target
         */
-
-       /* so we do not deadlock racing two ioctls on same files */
-       if (target_inode < src_inode) {
-               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
-               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
-       } else {
-               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
-               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
-       }
+       lock_two_nondirectories(target_inode, src_inode);
 
        /* determine range to clone */
        rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
 out_unlock:
        /* although unlocking in the reverse order from locking is not
           strictly necessary here it is a little cleaner to be consistent */
-       if (target_inode < src_inode) {
-               mutex_unlock(&src_inode->i_mutex);
-               mutex_unlock(&target_inode->i_mutex);
-       } else {
-               mutex_unlock(&target_inode->i_mutex);
-               mutex_unlock(&src_inode->i_mutex);
-       }
+       unlock_two_nondirectories(src_inode, target_inode);
 out_fput:
        fdput(src_file);
 out_drop_write:
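
The cifs clone ioctl previously avoided an AB/BA deadlock by taking the two inode mutexes in address order by hand; the patch replaces that with the VFS helpers lock_two_nondirectories()/unlock_two_nondirectories(), which centralize the same idea. A stand-alone pthread sketch of the address-ordering pattern being removed (names here are illustrative, not kernel APIs):

	#include <pthread.h>
	#include <stdio.h>

	struct obj {
		pthread_mutex_t lock;
	};

	static void lock_pair(struct obj *a, struct obj *b)
	{
		/* Always take the lower-addressed lock first so two callers
		 * passing (a, b) and (b, a) agree on the order. */
		if (a > b) {
			struct obj *t = a;
			a = b;
			b = t;
		}
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	}

	static void unlock_pair(struct obj *a, struct obj *b)
	{
		pthread_mutex_unlock(&a->lock);
		pthread_mutex_unlock(&b->lock);
	}

	int main(void)
	{
		struct obj x = { PTHREAD_MUTEX_INITIALIZER };
		struct obj y = { PTHREAD_MUTEX_INITIALIZER };

		lock_pair(&x, &y);
		puts("both locks held, in a consistent order");
		unlock_pair(&x, &y);
		return 0;
	}
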
index 1ea1b702fec2c49f5ca8d7443df6933ec88da175..d4110d5caa3efde4e0b44402ed8d4d8a00aed6cc 100644 (file)
@@ -7,14 +7,14 @@
 
 #include <dt-bindings/interrupt-controller/irq.h>
 
-/* interrupt specific cell 0 */
+/* interrupt specifier cell 0 */
 
 #define GIC_SPI 0
 #define GIC_PPI 1
 
 /*
  * Interrupt specifier cell 2.
- * The flaggs in irq.h are valid, plus those below.
+ * The flags in irq.h are valid, plus those below.
  */
 #define GIC_CPU_MASK_RAW(x) ((x) << 8)
 #define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
index ce5dda8958fe83af981a19669662fe808581c261..b1fd675fa36f36a7ec5b68d2ba9d76684ad3bcfd 100644 (file)
@@ -59,6 +59,7 @@ enum s2mps13_reg {
        S2MPS13_REG_B6CTRL,
        S2MPS13_REG_B6OUT,
        S2MPS13_REG_B7CTRL,
+       S2MPS13_REG_B7SW,
        S2MPS13_REG_B7OUT,
        S2MPS13_REG_B8CTRL,
        S2MPS13_REG_B8OUT,
@@ -102,6 +103,7 @@ enum s2mps13_reg {
        S2MPS13_REG_L26CTRL,
        S2MPS13_REG_L27CTRL,
        S2MPS13_REG_L28CTRL,
+       S2MPS13_REG_L29CTRL,
        S2MPS13_REG_L30CTRL,
        S2MPS13_REG_L31CTRL,
        S2MPS13_REG_L32CTRL,
index ebfb0e153c6a78de5cc26162b2f7e904a9890fe1..b653d7c0a05a0abbaf5e1b4759a3f79b398c8b25 100644 (file)
@@ -444,7 +444,7 @@ extern void __module_put_and_exit(struct module *mod, long code)
 #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
 
 #ifdef CONFIG_MODULE_UNLOAD
-unsigned long module_refcount(struct module *mod);
+int module_refcount(struct module *mod);
 void __symbol_put(const char *symbol);
 #define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
 void symbol_put_addr(void *addr);
index 7eeb9bbfb816f3afc11662f05e5dde3d297db489..f7556261fe3c54adb52b28789b7cb7b19b280b13 100644 (file)
@@ -26,7 +26,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
 void *module_alloc(unsigned long size);
 
 /* Free memory returned from module_alloc. */
-void module_free(struct module *mod, void *module_region);
+void module_memfree(void *module_region);
 
 /*
  * Apply the given relocation to the (simplified) ELF.  Return -error
@@ -82,4 +82,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 /* Any cleanup needed when module leaves. */
 void module_arch_cleanup(struct module *mod);
 
+/* Any cleanup before freeing mod->module_init */
+void module_arch_freeing_init(struct module *mod);
 #endif
index 853698c721f7d1547df181a4fbfc904a67758f72..76200984d1e22081954234d49f32653e18478717 100644 (file)
@@ -85,11 +85,6 @@ static inline void oom_killer_enable(void)
        oom_killer_disabled = false;
 }
 
-static inline bool oom_gfp_allowed(gfp_t gfp_mask)
-{
-       return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
-}
-
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 static inline bool task_will_free_mem(struct task_struct *task)
index 360a966a97a5807f7ff13441748c0c93850ff7e1..9603094ed59b2adb5defa9eb93a955c5ce93e543 100644 (file)
@@ -175,6 +175,8 @@ enum pci_dev_flags {
        PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
        /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
        PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
+       /* Do not use bus resets for device */
+       PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
 };
 
 enum pci_irq_reroute_variant {
@@ -1065,6 +1067,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
 void pci_bus_assign_resources(const struct pci_bus *bus);
 void pci_bus_size_bridges(struct pci_bus *bus);
 int pci_claim_resource(struct pci_dev *, int);
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
 void pci_assign_unassigned_resources(void);
 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
index c8f170324e643ccb895dacfd9b06a66a8afa7193..4d5bf5726578c58b739a79a5f093e5f7c4a009a3 100644 (file)
@@ -10,9 +10,6 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-extern char *log_buf_addr_get(void);
-extern u32 log_buf_len_get(void);
-
 static inline int printk_get_level(const char *buffer)
 {
        if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
 
 extern void wake_up_klogd(void);
 
+char *log_buf_addr_get(void);
+u32 log_buf_len_get(void);
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
 void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
 {
 }
 
+static inline char *log_buf_addr_get(void)
+{
+       return NULL;
+}
+
+static inline u32 log_buf_len_get(void)
+{
+       return 0;
+}
+
 static inline void log_buf_kexec_setup(void)
 {
 }
index 203c2ad40d7184726b585b7c644b0a7f11ad6894..beebe3a02d43f5c527633cbd12af690c02e8288d 100644 (file)
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
        return true;
 }
 
+static inline bool timeval_valid(const struct timeval *tv)
+{
+       /* Dates before 1970 are bogus */
+       if (tv->tv_sec < 0)
+               return false;
+
+       /* Can't have more microseconds then a second */
+       if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+               return false;
+
+       return true;
+}
+
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
 #define CURRENT_TIME           (current_kernel_time())
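
The new timeval_valid() helper rejects negative seconds and out-of-range microseconds before the value is converted to a timespec (settimeofday() picks it up later in this series). A stand-alone copy of the check for illustration; USEC_PER_SEC is written out since kernel headers are not used here:

	#include <stdio.h>
	#include <stdbool.h>
	#include <sys/time.h>

	#define USEC_PER_SEC 1000000L

	static bool timeval_valid(const struct timeval *tv)
	{
		if (tv->tv_sec < 0)	/* dates before 1970 are bogus */
			return false;
		if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
			return false;	/* more microseconds than a second */
		return true;
	}

	int main(void)
	{
		struct timeval ok = { .tv_sec = 100, .tv_usec = 999999 };
		struct timeval bad = { .tv_sec = 100, .tv_usec = USEC_PER_SEC };

		printf("ok:  %d\n", timeval_valid(&ok));	/* 1 */
		printf("bad: %d\n", timeval_valid(&bad));	/* 0 */
		return 0;
	}
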
index 0bb620702929e7ad3b48f7aa40e5c73df3638141..f7cbd703d15d24edca61cf9e159cb1ce3857cb5b 100644 (file)
@@ -39,11 +39,12 @@ struct inet_skb_parm {
        struct ip_options       opt;            /* Compiled IP options          */
        unsigned char           flags;
 
-#define IPSKB_FORWARDED                1
-#define IPSKB_XFRM_TUNNEL_SIZE 2
-#define IPSKB_XFRM_TRANSFORMED 4
-#define IPSKB_FRAG_COMPLETE    8
-#define IPSKB_REROUTED         16
+#define IPSKB_FORWARDED                BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE    BIT(3)
+#define IPSKB_REROUTED         BIT(4)
+#define IPSKB_DOREDIRECT       BIT(5)
 
        u16                     frag_max_size;
 };
index 6edf1f2028cdb0e0801af85183e02d0416fa73ed..86b399c66c3d6d3c5f12d1cd8749777b9d5c89f9 100644 (file)
@@ -146,6 +146,14 @@ TRACE_EVENT(kvm_msi_set_irq,
 
 #if defined(CONFIG_HAVE_KVM_IRQFD)
 
+#ifdef kvm_irqchips
+#define kvm_ack_irq_string "irqchip %s pin %u"
+#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
+#else
+#define kvm_ack_irq_string "irqchip %d pin %u"
+#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
+#endif
+
 TRACE_EVENT(kvm_ack_irq,
        TP_PROTO(unsigned int irqchip, unsigned int pin),
        TP_ARGS(irqchip, pin),
@@ -160,13 +168,7 @@ TRACE_EVENT(kvm_ack_irq,
                __entry->pin            = pin;
        ),
 
-#ifdef kvm_irqchips
-       TP_printk("irqchip %s pin %u",
-                 __print_symbolic(__entry->irqchip, kvm_irqchips),
-                __entry->pin)
-#else
-       TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
-#endif
+       TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
 );
 
 #endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
index d6594e457a25af32e41a4b46a00a43ddf0a2c46b..a64e7a207d2b5cd123b65f7143c6d659c0aed726 100644 (file)
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
-       module_free(NULL, hdr);
+       module_memfree(hdr);
 }
 #endif /* CONFIG_BPF_JIT */
 
index 088ac0b1b106ff772e9122529866eb5406effd14..536edc2be3072e91ab132555fc4f9bc3ce656604 100644 (file)
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
-       void *key, *value;
+       void *key, *value, *ptr;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
-       err = -ENOENT;
-       rcu_read_lock();
-       value = map->ops->map_lookup_elem(map, key);
+       err = -ENOMEM;
+       value = kmalloc(map->value_size, GFP_USER);
        if (!value)
-               goto err_unlock;
+               goto free_key;
+
+       rcu_read_lock();
+       ptr = map->ops->map_lookup_elem(map, key);
+       if (ptr)
+               memcpy(value, ptr, map->value_size);
+       rcu_read_unlock();
+
+       err = -ENOENT;
+       if (!ptr)
+               goto free_value;
 
        err = -EFAULT;
        if (copy_to_user(uvalue, value, map->value_size) != 0)
-               goto err_unlock;
+               goto free_value;
 
        err = 0;
 
-err_unlock:
-       rcu_read_unlock();
+free_value:
+       kfree(value);
 free_key:
        kfree(key);
 err_put:
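
The point of the rewrite above is that copy_to_user() may fault and sleep, which is not allowed under rcu_read_lock(); the fix snapshots the looked-up value into a kmalloc'd buffer inside the short read-side section and does the user copy afterwards. A user-space analogue of that shape, with a rwlock standing in for RCU (an analogy only, not the bpf code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define VALUE_SIZE 64

	static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
	static char shared_value[VALUE_SIZE] = "hello";

	static int lookup_copy(char *out)
	{
		char *snapshot = malloc(VALUE_SIZE);

		if (!snapshot)
			return -1;

		pthread_rwlock_rdlock(&map_lock);
		memcpy(snapshot, shared_value, VALUE_SIZE);	/* short critical section */
		pthread_rwlock_unlock(&map_lock);

		/* Slow part runs with no lock held (copy_to_user() in the kernel). */
		memcpy(out, snapshot, VALUE_SIZE);
		free(snapshot);
		return 0;
	}

	int main(void)
	{
		char buf[VALUE_SIZE];

		if (lookup_copy(buf) == 0)
			printf("copied: %s\n", buf);
		return 0;
	}
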
index bb263d0caab323810f2c30fc3799be467859a7ac..04cfe8ace52088a4c5ed092c389d08fd99117b19 100644 (file)
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
         *
         * And don't kill the default root.
         */
-       if (css_has_online_children(&root->cgrp.self) ||
+       if (!list_empty(&root->cgrp.self.children) ||
            root == &cgrp_dfl_root)
                cgroup_put(&root->cgrp);
        else
index f191bddf64b8ebdd0227556e119b1a804a30db88..7b40c5f07dce8d09e1ebaba547e401b5655befbb 100644 (file)
@@ -2023,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
                kdb_printf("%-20s%8u  0x%p ", mod->name,
                           mod->core_size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
-               kdb_printf("%4ld ", module_refcount(mod));
+               kdb_printf("%4d ", module_refcount(mod));
 #endif
                if (mod->state == MODULE_STATE_GOING)
                        kdb_printf(" (Unloading)");
index 06f58309fed2d082b7a859f3effa5b2562e4d447..ee619929cf9091059406e8f82df32c50d609c0d9 100644 (file)
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
 
 static void free_insn_page(void *page)
 {
-       module_free(NULL, page);
+       module_memfree(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
index 3965511ae1333d5bcf0d50ac43bd031be76a823b..d856e96a3cce440f4c9bb0bc5e7fbf2eee4b1afe 100644 (file)
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
        return 0;
 }
 
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod:       the module we're checking
+ *
+ * Returns:
+ *     -1 if the module is in the process of unloading
+ *     otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
 {
-       return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+       return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
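
module_refcount() now returns a signed int because the count can legitimately sit one below the base while the module is unloading; with the old unsigned long return that transient -1 printed as a huge positive number in /proc/modules, sysfs and kdb. A tiny sketch of the wraparound (the base value 1 is illustrative, standing in for the kernel's MODULE_REF_BASE):

	#include <stdio.h>

	#define REF_BASE 1	/* illustrative stand-in for MODULE_REF_BASE */

	int main(void)
	{
		int refcnt = 0;	/* module in the middle of unloading */

		unsigned long as_unsigned = (unsigned long)refcnt - REF_BASE;
		int as_signed = refcnt - REF_BASE;

		printf("unsigned: %lu\n", as_unsigned);	/* huge value on 64-bit */
		printf("signed:   %d\n", as_signed);	/* -1, i.e. "unloading" */
		return 0;
	}
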
 
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
        struct module_use *use;
        int printed_something = 0;
 
-       seq_printf(m, " %lu ", module_refcount(mod));
+       seq_printf(m, " %i ", module_refcount(mod));
 
        /*
         * Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
 static ssize_t show_refcnt(struct module_attribute *mattr,
                           struct module_kobject *mk, char *buffer)
 {
-       return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+       return sprintf(buffer, "%i\n", module_refcount(mk->mod));
 }
 
 static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
 static void unset_module_init_ro_nx(struct module *mod) { }
 #endif
 
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
 {
        vfree(module_region);
 }
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
 {
 }
 
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
 
        /* This may be NULL, but that's OK */
        unset_module_init_ro_nx(mod);
-       module_free(mod, mod->module_init);
+       module_arch_freeing_init(mod);
+       module_memfree(mod->module_init);
        kfree(mod->args);
        percpu_modfree(mod);
 
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
 
        /* Finally, free the core (containing the module structure) */
        unset_module_core_ro_nx(mod);
-       module_free(mod, mod->module_core);
+       module_memfree(mod->module_core);
 
 #ifdef CONFIG_MPU
        update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
                 */
                kmemleak_ignore(ptr);
                if (!ptr) {
-                       module_free(mod, mod->module_core);
+                       module_memfree(mod->module_core);
                        return -ENOMEM;
                }
                memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
        percpu_modfree(mod);
-       module_free(mod, mod->module_init);
-       module_free(mod, mod->module_core);
+       module_arch_freeing_init(mod);
+       module_memfree(mod->module_init);
+       module_memfree(mod->module_core);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
 #endif
 }
 
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+       struct rcu_head rcu;
+       void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+       struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+       module_memfree(m->module_init);
+       kfree(m);
+}
+
 /* This is where the real work happens */
 static int do_init_module(struct module *mod)
 {
        int ret = 0;
+       struct mod_initfree *freeinit;
+
+       freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+       if (!freeinit) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       freeinit->module_init = mod->module_init;
 
        /*
         * We want to find out whether @mod uses async during init.  Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
        if (mod->init != NULL)
                ret = do_one_initcall(mod->init);
        if (ret < 0) {
-               /*
-                * Init routine failed: abort.  Try to protect us from
-                * buggy refcounters.
-                */
-               mod->state = MODULE_STATE_GOING;
-               synchronize_sched();
-               module_put(mod);
-               blocking_notifier_call_chain(&module_notify_list,
-                                            MODULE_STATE_GOING, mod);
-               free_module(mod);
-               wake_up_all(&module_wq);
-               return ret;
+               goto fail_free_freeinit;
        }
        if (ret > 0) {
                pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
        mod->strtab = mod->core_strtab;
 #endif
        unset_module_init_ro_nx(mod);
-       module_free(mod, mod->module_init);
+       module_arch_freeing_init(mod);
        mod->module_init = NULL;
        mod->init_size = 0;
        mod->init_ro_size = 0;
        mod->init_text_size = 0;
+       /*
+        * We want to free module_init, but be aware that kallsyms may be
+        * walking this with preempt disabled.  In all the failure paths,
+        * we call synchronize_rcu/synchronize_sched, but we don't want
+        * to slow down the success path, so use actual RCU here.
+        */
+       call_rcu(&freeinit->rcu, do_free_init);
        mutex_unlock(&module_mutex);
        wake_up_all(&module_wq);
 
        return 0;
+
+fail_free_freeinit:
+       kfree(freeinit);
+fail:
+       /* Try to protect us from buggy refcounters. */
+       mod->state = MODULE_STATE_GOING;
+       synchronize_sched();
+       module_put(mod);
+       blocking_notifier_call_chain(&module_notify_list,
+                                    MODULE_STATE_GOING, mod);
+       free_module(mod);
+       wake_up_all(&module_wq);
+       return ret;
 }
 
 static int may_init_module(void)
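
do_free_init() above recovers the surrounding struct mod_initfree from the embedded rcu_head via container_of(), which is what lets the init region be freed from an RCU callback after any kallsyms walkers have finished. A stand-alone illustration of that recovery pattern (container_of itself, not RCU; the callback_head type here is a stand-in for struct rcu_head):

	#include <stdio.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct callback_head {		/* stand-in for struct rcu_head */
		struct callback_head *next;
	};

	struct mod_initfree {
		struct callback_head rcu;
		void *module_init;
	};

	static void do_free_init(struct callback_head *head)
	{
		struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);

		printf("freeing init region at %p\n", m->module_init);
		/* module_memfree(m->module_init); kfree(m); in the kernel */
	}

	int main(void)
	{
		struct mod_initfree m = { .module_init = (void *)0x1000 };

		/* The callback only receives &m.rcu, yet finds m again. */
		do_free_init(&m.rcu);
		return 0;
	}
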
index 0af9b2c4e56c6cf604699096d0ac4d44c874b36e..728e05b167de984afe2e815f63207952051c3915 100644 (file)
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
        mk->mp->grp.attrs = new_attrs;
 
        /* Tack new one on the end. */
+       memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
        sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
        mk->mp->attrs[mk->mp->num].param = kp;
        mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
        /* Do not allow runtime DAC changes to make param writable. */
        if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
                mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+       else
+               mk->mp->attrs[mk->mp->num].mattr.store = NULL;
        mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
        mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
        mk->mp->num++;
index a8c9f5a7dda68f9afd60449a3f9fe0b7d048a6f1..ea9c881098941ecd9bbb42fa69a3ddb958d2ec41 100644 (file)
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                up_write(&me->mm->mmap_sem);
                break;
        case PR_MPX_ENABLE_MANAGEMENT:
+               if (arg2 || arg3 || arg4 || arg5)
+                       return -EINVAL;
                error = MPX_ENABLE_MANAGEMENT(me);
                break;
        case PR_MPX_DISABLE_MANAGEMENT:
+               if (arg2 || arg3 || arg4 || arg5)
+                       return -EINVAL;
                error = MPX_DISABLE_MANAGEMENT(me);
                break;
        default:
index 87a346fd6d61ff1c5c1045c9db063ee3247d2361..28bf91c60a0b412d1c97911659c30d735ba40d78 100644 (file)
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
        if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                return -EPERM;
 
+       if (txc->modes & ADJ_FREQUENCY) {
+               if (LONG_MIN / PPM_SCALE > txc->freq)
+                       return -EINVAL;
+               if (LONG_MAX / PPM_SCALE < txc->freq)
+                       return -EINVAL;
+       }
+
        return 0;
 }
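
The ADJ_FREQUENCY check rejects freq values whose later multiplication by PPM_SCALE would overflow a long, and it does so by dividing the limits rather than multiplying the untrusted input. The same shape in a stand-alone sketch; SCALE here is illustrative, not the kernel's PPM_SCALE:

	#include <stdio.h>
	#include <stdbool.h>
	#include <limits.h>

	#define SCALE 65536L

	static bool freq_in_range(long freq)
	{
		/* Compare against LONG_MIN/SCALE and LONG_MAX/SCALE so the
		 * check itself can never overflow. */
		if (LONG_MIN / SCALE > freq)
			return false;
		if (LONG_MAX / SCALE < freq)
			return false;
		return true;
	}

	int main(void)
	{
		printf("%d\n", freq_in_range(1000));		/* 1: fine */
		printf("%d\n", freq_in_range(LONG_MAX / 2));	/* 0: would overflow */
		return 0;
	}
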
 
index 6390517e77d48abb83c0d0ffec27864b4695c4c4..2c85b7724af4b0081a112e1b12cbcce4ef831117 100644 (file)
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;
+
+               if (!timeval_valid(&user_tv))
+                       return -EINVAL;
+
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
index 851924fa5170e177e94080da70c64a1a64bcf896..683b4782019b2c32626645155bb8175f3bf3a4a5 100644 (file)
@@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 
        pr_info("Task in ");
        pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-       pr_info(" killed as a result of limit of ");
+       pr_cont(" killed as a result of limit of ");
        pr_cont_cgroup_path(memcg->css.cgroup);
-       pr_info("\n");
+       pr_cont("\n");
 
        rcu_read_unlock();
 
index 7633c503a116c221e7447614c6d10ebaa38a0b1c..8e20f9c2fa5ab7a89fb29c5dbc3987ccd8690047 100644 (file)
@@ -2332,12 +2332,21 @@ static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
        nodemask_t *nodemask, struct zone *preferred_zone,
-       int classzone_idx, int migratetype)
+       int classzone_idx, int migratetype, unsigned long *did_some_progress)
 {
        struct page *page;
 
-       /* Acquire the per-zone oom lock for each zone */
+       *did_some_progress = 0;
+
+       if (oom_killer_disabled)
+               return NULL;
+
+       /*
+        * Acquire the per-zone oom lock for each zone.  If that
+        * fails, somebody else is making progress for us.
+        */
        if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
+               *did_some_progress = 1;
                schedule_timeout_uninterruptible(1);
                return NULL;
        }
@@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                goto out;
 
        if (!(gfp_mask & __GFP_NOFAIL)) {
+               /* Coredumps can quickly deplete all memory reserves */
+               if (current->flags & PF_DUMPCORE)
+                       goto out;
                /* The OOM killer will not help higher order allocs */
                if (order > PAGE_ALLOC_COSTLY_ORDER)
                        goto out;
                /* The OOM killer does not needlessly kill tasks for lowmem */
                if (high_zoneidx < ZONE_NORMAL)
                        goto out;
+               /* The OOM killer does not compensate for light reclaim */
+               if (!(gfp_mask & __GFP_FS))
+                       goto out;
                /*
                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        }
        /* Exhausted what can be done so it's blamo time */
        out_of_memory(zonelist, gfp_mask, order, nodemask, false);
-
+       *did_some_progress = 1;
 out:
        oom_zonelist_unlock(zonelist, gfp_mask);
        return page;
@@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
            (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
-restart:
+retry:
        if (!(gfp_mask & __GFP_NO_KSWAPD))
                wake_all_kswapds(order, zonelist, high_zoneidx,
                                preferred_zone, nodemask);
@@ -2681,7 +2696,6 @@ restart:
                classzone_idx = zonelist_zone_idx(preferred_zoneref);
        }
 
-rebalance:
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2788,54 +2802,28 @@ rebalance:
        if (page)
                goto got_pg;
 
-       /*
-        * If we failed to make any progress reclaiming, then we are
-        * running out of options and have to consider going OOM
-        */
-       if (!did_some_progress) {
-               if (oom_gfp_allowed(gfp_mask)) {
-                       if (oom_killer_disabled)
-                               goto nopage;
-                       /* Coredumps can quickly deplete all memory reserves */
-                       if ((current->flags & PF_DUMPCORE) &&
-                           !(gfp_mask & __GFP_NOFAIL))
-                               goto nopage;
-                       page = __alloc_pages_may_oom(gfp_mask, order,
-                                       zonelist, high_zoneidx,
-                                       nodemask, preferred_zone,
-                                       classzone_idx, migratetype);
-                       if (page)
-                               goto got_pg;
-
-                       if (!(gfp_mask & __GFP_NOFAIL)) {
-                               /*
-                                * The oom killer is not called for high-order
-                                * allocations that may fail, so if no progress
-                                * is being made, there are no other options and
-                                * retrying is unlikely to help.
-                                */
-                               if (order > PAGE_ALLOC_COSTLY_ORDER)
-                                       goto nopage;
-                               /*
-                                * The oom killer is not called for lowmem
-                                * allocations to prevent needlessly killing
-                                * innocent tasks.
-                                */
-                               if (high_zoneidx < ZONE_NORMAL)
-                                       goto nopage;
-                       }
-
-                       goto restart;
-               }
-       }
-
        /* Check if we should retry the allocation */
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, did_some_progress,
                                                pages_reclaimed)) {
+               /*
+                * If we fail to make progress by freeing individual
+                * pages, but the allocation wants us to keep going,
+                * start OOM killing tasks.
+                */
+               if (!did_some_progress) {
+                       page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
+                                               high_zoneidx, nodemask,
+                                               preferred_zone, classzone_idx,
+                                               migratetype,&did_some_progress);
+                       if (page)
+                               goto got_pg;
+                       if (!did_some_progress)
+                               goto nopage;
+               }
                /* Wait for some write requests to complete then retry */
                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
-               goto rebalance;
+               goto retry;
        } else {
                /*
                 * High-order allocations do not necessarily loop after
index ab2505c3ef5460e23facf80725633e977882ec8b..dcd90c891d8e53895d117f219001329e8cdeab46 100644 (file)
@@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
         * should make reasonable progress.
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                                       gfp_mask, nodemask) {
+                                       gfp_zone(gfp_mask), nodemask) {
                if (zone_idx(zone) > ZONE_NORMAL)
                        continue;
 
index 515569ffde8a16af9eea82cc85ded4f8d5ce59cc..589aafd01fc5256a0fac138bac3240ab191b507e 100644 (file)
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
        snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
                        ds->index, ds->pd->sw_addr);
        ds->slave_mii_bus->parent = ds->master_dev;
+       ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
 
 
index 3a83ce5efa80e3fc2c062ec08465840018159b14..787b3c294ce672244ce08c5426c03bbd1f71c0f3 100644 (file)
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
         *      We now generate an ICMP HOST REDIRECT giving the route
         *      we calculated.
         */
-       if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+       if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+           !skb_sec_path(skb))
                ip_rt_send_redirect(skb);
 
        skb->priority = rt_tos2priority(iph->tos);
index c0d82f78d364fe5561f819f953e3eb48d2f134da..2a3720fb5a5ff5401c5efbef49427fb18dbbfa5e 100644 (file)
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk != NULL) {
+               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
                pr_debug("rcv on socket %p\n", sk);
-               ping_queue_rcv_skb(sk, skb_get(skb));
+               if (skb2)
+                       ping_queue_rcv_skb(sk, skb2);
                sock_put(sk);
                return true;
        }
index 6a2155b02602b100c7ce3bbfda28a38090add7a4..d58dd0ec3e5302c2862c8fe53bfd43ca05a3e669 100644 (file)
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,
 
        do_cache = res->fi && !itag;
        if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+           skb->protocol == htons(ETH_P_IP) &&
            (IN_DEV_SHARED_MEDIA(out_dev) ||
-            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
-               flags |= RTCF_DOREDIRECT;
-               do_cache = false;
-       }
+            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+               IPCB(skb)->flags |= IPSKB_DOREDIRECT;
 
        if (skb->protocol != htons(ETH_P_IP)) {
                /* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
        if (rt->rt_flags & RTCF_NOTIFY)
                r->rtm_flags |= RTM_F_NOTIFY;
+       if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+               r->rtm_flags |= RTCF_DOREDIRECT;
 
        if (nla_put_be32(skb, RTA_DST, dst))
                goto nla_put_failure;
index 7927db0a927951a20b4502d38fca0bba9e94862c..4a000f1dd75753833b792f6979bf697337f4dd7a 100644 (file)
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
        s_slot = cb->args[0];
        num = s_num = cb->args[1];
 
-       for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+       for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                struct udp_hslot *hslot = &table->hash[slot];
 
+               num = 0;
+
                if (hlist_nulls_empty(&hslot->head))
                        continue;
 
index b2d1838897c933f6f1ceb2a6f64a2f67495ce3c9..f1c6d5e9832253f15683fca6417255cee78618af 100644 (file)
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
        return 0;
 }
 
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+                         struct net *net)
+{
+       if (atomic_read(&rt->rt6i_ref) != 1) {
+               /* This route is used as dummy address holder in some split
+                * nodes. It is not leaked, but it still holds other resources,
+                * which must be released in time. So, scan ascendant nodes
+                * and replace dummy references to this route with references
+                * to still alive ones.
+                */
+               while (fn) {
+                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+                               fn->leaf = fib6_find_prefix(net, fn);
+                               atomic_inc(&fn->leaf->rt6i_ref);
+                               rt6_release(rt);
+                       }
+                       fn = fn->parent;
+               }
+               /* No more references are possible at this point. */
+               BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+       }
+}
+
 /*
  *     Insert routing information in a node.
  */
@@ -807,11 +830,12 @@ add:
                rt->dst.rt6_next = iter->dst.rt6_next;
                atomic_inc(&rt->rt6i_ref);
                inet6_rt_notify(RTM_NEWROUTE, rt, info);
-               rt6_release(iter);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               fib6_purge_rt(iter, fn, info->nl_net);
+               rt6_release(iter);
        }
 
        return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                fn = fib6_repair_tree(net, fn);
        }
 
-       if (atomic_read(&rt->rt6i_ref) != 1) {
-               /* This route is used as dummy address holder in some split
-                * nodes. It is not leaked, but it still holds other resources,
-                * which must be released in time. So, scan ascendant nodes
-                * and replace dummy references to this route with references
-                * to still alive ones.
-                */
-               while (fn) {
-                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
-                               fn->leaf = fib6_find_prefix(net, fn);
-                               atomic_inc(&fn->leaf->rt6i_ref);
-                               rt6_release(rt);
-                       }
-                       fn = fn->parent;
-               }
-               /* No more references are possible at this point. */
-               BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
-       }
+       fib6_purge_rt(rt, fn, net);
 
        inet6_rt_notify(RTM_DELROUTE, rt, info);
        rt6_release(rt);
index 166e33bed222d94eaf5b72de183544bfc7f762de..495965358d22d88849d251aa890aaee6caaf4079 100644 (file)
@@ -1242,12 +1242,16 @@ restart:
                rt = net->ipv6.ip6_null_entry;
        else if (rt->dst.error) {
                rt = net->ipv6.ip6_null_entry;
-       } else if (rt == net->ipv6.ip6_null_entry) {
+               goto out;
+       }
+
+       if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }
 
+out:
        dst_hold(&rt->dst);
 
        read_unlock_bh(&table->tb6_lock);
index 5f983644373a230890b25189865af73f5e2b3b44..48bf5a06847bd59db7834758b22aa9208d727940 100644 (file)
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
        struct flowi6 *fl6 = &fl->u.ip6;
        int onlyproto = 0;
-       u16 offset = skb_network_header_len(skb);
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
+       u16 offset = sizeof(*hdr);
        struct ipv6_opt_hdr *exthdr;
        const unsigned char *nh = skb_network_header(skb);
-       u8 nexthdr = nh[IP6CB(skb)->nhoff];
+       u16 nhoff = IP6CB(skb)->nhoff;
        int oif = 0;
+       u8 nexthdr;
+
+       if (!nhoff)
+               nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+       nexthdr = nh[nhoff];
 
        if (skb_dst(skb))
                oif = skb_dst(skb)->dev->ifindex;
index 612a5ddaf93b1ab1b5a524c5efef8d6b1f769038..799bafc2af39ea191e3753c88c3f921e44a77162 100644 (file)
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
        {
                .procname       = "ack",
                .data           = &sysctl_llc2_ack_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_ack_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "busy",
                .data           = &sysctl_llc2_busy_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_busy_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "p",
                .data           = &sysctl_llc2_p_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_p_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "rej",
                .data           = &sysctl_llc2_rej_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_rej_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
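
The llc2 timeouts are plain ints handled by proc_dointvec_jiffies, but maxlen claimed sizeof(long); the handler derives the number of integers from maxlen, so on 64-bit it believed each entry held two ints and touched memory past the variable. Using sizeof(the variable) keeps maxlen in sync with the data. A tiny demonstration of the mismatch:

	#include <stdio.h>

	static int sysctl_llc2_ack_timeout = 1;

	int main(void)
	{
		printf("sizeof(long)=%zu sizeof(var)=%zu -> entries: %zu vs %zu\n",
		       sizeof(long), sizeof(sysctl_llc2_ack_timeout),
		       sizeof(long) / sizeof(int),		/* 2 on LP64 */
		       sizeof(sysctl_llc2_ack_timeout) / sizeof(int));	/* 1 */
		return 0;
	}
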
index 4c5192e0d66c7d5ae99913c2e9f2e27ff1626d71..4a95fe3cffbc9bd9e6d2ff6853b36ebe904b94a4 100644 (file)
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                }
        }
 
-       /* tear down aggregation sessions and remove STAs */
-       mutex_lock(&local->sta_mtx);
-       list_for_each_entry(sta, &local->sta_list, list) {
-               if (sta->uploaded) {
-                       enum ieee80211_sta_state state;
-
-                       state = sta->sta_state;
-                       for (; state > IEEE80211_STA_NOTEXIST; state--)
-                               WARN_ON(drv_sta_state(local, sta->sdata, sta,
-                                                     state, state - 1));
-               }
-       }
-       mutex_unlock(&local->sta_mtx);
-
        /* remove all interfaces that were created in the driver */
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                case NL80211_IFTYPE_STATION:
                        ieee80211_mgd_quiesce(sdata);
                        break;
+               case NL80211_IFTYPE_WDS:
+                       /* tear down aggregation sessions and remove STAs */
+                       mutex_lock(&local->sta_mtx);
+                       sta = sdata->u.wds.sta;
+                       if (sta && sta->uploaded) {
+                               enum ieee80211_sta_state state;
+
+                               state = sta->sta_state;
+                               for (; state > IEEE80211_STA_NOTEXIST; state--)
+                                       WARN_ON(drv_sta_state(local, sta->sdata,
+                                                             sta, state,
+                                                             state - 1));
+                       }
+                       mutex_unlock(&local->sta_mtx);
+                       break;
                default:
                        break;
                }
index 683b10f4650577c7d0172733e935f7c169f20292..d69ca513848e7eda2227a18d0f98b164591532a1 100644 (file)
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
                channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
        else if (rate)
-               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+               channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
        else
                channel_flags |= IEEE80211_CHAN_2GHZ;
        put_unaligned_le16(channel_flags, pos);
index 84c8219c3e1ce18867466786ae061d13178d7519..f59adf8a4cd780738187d5a50622f1ff9d50a20b 100644 (file)
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        }
 
        bpf_size = bpf_len * sizeof(*bpf_ops);
+       if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
+               ret = -EINVAL;
+               goto errout;
+       }
+
        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL) {
                ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
 {
        unsigned int i = 0x80000000;
+       u32 handle;
 
        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));
-       if (i == 0)
+
+       if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
+               handle = 0;
+       } else {
+               handle = head->hgen;
+       }
 
-       return i;
+       return handle;
 }
 
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
index f791edd64d6c0c76dea92888c44530cd1af21a23..26d06dbcc1c8e137798be5071848990fe00a7fe6 100644 (file)
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->peer.peer_hmacs = new->peer.peer_hmacs;
        new->peer.peer_hmacs = NULL;
 
-       sctp_auth_key_put(asoc->asoc_shared_key);
        sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
 }
 
index a2c33a4dc7bab45e520556557924b62d24951d85..418795caa8979fd0eb5d19e4d1fe3376fc0e9b20 100644 (file)
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
                                         struct sock_iocb *siocb)
 {
-       if (!is_sync_kiocb(iocb))
-               BUG();
-
        siocb->kiocb = iocb;
        iocb->private = siocb;
        return siocb;
index 7ca4b5133123f4464e289976d152ac1d2fc1769c..8887c6e5fca85c3e417fc21052d117ac72adccd4 100644 (file)
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->get_key)
                return -EOPNOTSUPP;
 
+       if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+               return -ENOENT;
+
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
                goto nla_put_failure;
 
-       if (pairwise && mac_addr &&
-           !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-               return -ENOENT;
-
        err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
                           get_key_callback);
 
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
        wdev_lock(dev->ieee80211_ptr);
        err = nl80211_key_allowed(dev->ieee80211_ptr);
 
-       if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+       if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
            !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
                err = -ENOENT;
 
index d0ac795445b7e40dc92ebb1e0bd9095fc4bab048..5488c3662f7d7671c184215ae1d74faf0190b126 100644 (file)
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
                goto out;
        }
 
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_has_order(fc))
+                       hdrlen += IEEE80211_HT_CTL_LEN;
+               goto out;
+       }
+
        if (ieee80211_is_ctl(fc)) {
                /*
                 * ACK and CTS are 10 bytes, all others 16. To see how
index e286b42307f30dfe840267be16c53b0580150905..6299ee95cd11b63112ae5b7875872cb72ca91208 100644 (file)
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
 
        /* iterate over two elements */
        assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
-              next_key == 2);
+              (next_key == 1 || next_key == 2));
        assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
-              next_key == 1);
+              (next_key == 1 || next_key == 2));
        assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
               errno == ENOENT);
 